From 2f529f9b558ca1c1bd74be7437a84e4711743404 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:11:33 +0000
Subject: [PATCH] add xenomai

---
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c | 40
 kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h | 54
 kernel/xenomai-v3.2.4/lib/analogy/descriptor.c | 503
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h | 93
 kernel/arch/arm/kernel/entry-header.S | 23
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h | 31
 kernel/arch/x86/kernel/cpu/mce/threshold.c | 2
 kernel/arch/arm/include/asm/outercache.h | 7
 kernel/xenomai-v3.2.4/lib/analogy/calibration.h | 68
 kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h | 2
 kernel/xenomai-v3.2.4/kernel/drivers/testing/Kconfig | 29
 kernel/include/xenomai/cobalt/uapi/sched.h | 1
 kernel/kernel/xenomai/posix/monitor.h | 1
 kernel/xenomai-v3.2.4/include/rtdm/analogy.h | 264
 kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h | 212
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c | 4446
 kernel/arch/arm64/kernel/irq_pipeline.c | 23
 kernel/xenomai-v3.2.4/testsuite/Makefile.am | 21
 kernel/drivers/dma/Kconfig | 18
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c | 608
 kernel/drivers/xenomai/net/drivers/tulip/eeprom.c | 1
 kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c | 5
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-8.c | 66
 kernel/kernel/trace/trace_output.c | 9
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/Makefile.am | 33
 kernel/drivers/xenomai/net/stack/include/ipv4/af_inet.h | 1
 kernel/drivers/pinctrl/intel/pinctrl-cherryview.c | 5
 kernel/arch/arm64/kernel/vdso.c | 28
 kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-2.c | 123
 kernel/lib/dump_stack.c | 36
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_sock.c | 194
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile | 8
 kernel/xenomai-v3.2.4/lib/copperplate/cluster.c | 601
 kernel/kernel/xenomai/posix/monitor.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h | 94
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/thread.c | 174
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Makefile | 14
 kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-common.c | 290
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h | 24
 kernel/include/xenomai/cobalt/kernel/intr.h | 1
 kernel/xenomai-v3.2.4/utils/analogy/insn_read.c | 462
 kernel/drivers/clocksource/timer-imx-gpt.c | 8
 kernel/drivers/cpuidle/cpuidle.c | 18
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno.adoc | 37
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall.h | 91
 kernel/xenomai-v3.2.4/kernel/drivers/testing/Makefile | 13
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/Makefile.am | 8
 kernel/kernel/irq_work.c | 9
 kernel/xenomai-v3.2.4/include/boilerplate/shavl.h | 30
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Makefile | 15
 kernel/include/linux/smp.h | 15
 kernel/xenomai-v3.2.4/lib/copperplate/regd/Makefile.am | 23
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net.h | 55
 kernel/kernel/printk/printk.c | 73
 kernel/security/selinux/include/classmap.h | 4
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/features.h | 30
 kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h | 248
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_proto.h | 81
 kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.h | 1
 kernel/kernel/sched/core.c | 317
 kernel/kernel/stop_machine.c | 4
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/server.c | 178
 kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/tick.c | 286
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h | 1
 kernel/drivers/xenomai/net/stack/iovec.c | 1
 kernel/drivers/xenomai/net/drivers/igb/igb.h | 1
 kernel/drivers/xenomai/net/stack/ipv4/ip_output.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h | 55
 kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig | 92
 kernel/drivers/xenomai/net/stack/rtwlan.c | 1
 kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h | 559
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_proc.c | 347
 kernel/xenomai-v3.2.4/include/cobalt/trace.h | 52
 kernel/arch/x86/include/asm/thread_info.h | 18
 kernel/drivers/misc/eeprom/at24.c | 53
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-2.c | 98
 kernel/drivers/tty/serial/8250/8250_core.c | 45
 kernel/arch/x86/kernel/smp.c | 15
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/lock.h | 21
 kernel/xenomai-v3.2.4/include/rtdm/udd.h | 26
 kernel/arch/x86/include/asm/pgtable.h | 5
 kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h | 1
 kernel/xenomai-v3.2.4/lib/analogy/calibration.c | 473
 kernel/mm/ioremap.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/synch.c | 1185
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/Makefile.am | 9
 kernel/arch/x86/kernel/nmi.c | 4
 kernel/kernel/fork.c | 6
 kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/Makefile.am | 10
 kernel/drivers/xenomai/gpio/gpio-xilinx.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/icmp.h | 56
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_client_event.c | 1175
 kernel/include/xenomai/cobalt/uapi/syscall.h | 1
 kernel/xenomai-v3.2.4/lib/cobalt/current.c | 156
 kernel/arch/x86/kernel/apic/x2apic_phys.c | 4
 kernel/xenomai-v3.2.4/lib/cobalt/current.h | 98
 kernel/xenomai-v3.2.4/lib/analogy/Makefile.am | 23
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h | 1
 kernel/xenomai-v3.2.4/include/boilerplate/hash.h | 224
 kernel/xenomai-v3.2.4/lib/boilerplate/ancillaries.c | 542
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h | 28
 kernel/xenomai-v3.2.4/include/copperplate/heapobj.h | 529
 kernel/arch/arm/kernel/signal.c | 34
 kernel/xenomai-v3.2.4/testsuite/smokey/setsched/setsched.c | 149
 kernel/kernel/xenomai/posix/timerfd.h | 1
 kernel/xenomai-v3.2.4/lib/trank/posix.c | 175
 kernel/kernel/xenomai/posix/timerfd.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/tsc.h | 25
 kernel/drivers/irqchip/irq-ti-sci-inta.c | 2
 kernel/xenomai-v3.2.4/kernel/cobalt/registry.c | 954
 kernel/drivers/xenomai/net/stack/ipv4/udp/udp.c | 1
 kernel/include/linux/spinlock.h | 96
 kernel/arch/x86/kernel/kvm.c | 17
 kernel/drivers/xenomai/net/stack/ipv4/route.c | 1
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/Makefile.am | 2
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h | 66
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/README | 67
 kernel/xenomai-v3.2.4/kernel/cobalt/tree.c | 57
 kernel/xenomai-v3.2.4/lib/alchemy/heap.c | 679
 kernel/xenomai-v3.2.4/lib/alchemy/heap.h | 49
 kernel/drivers/xenomai/analogy/national_instruments/ni_stc.h | 1
 kernel/xenomai-v3.2.4/lib/cobalt/select.c | 48
 kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig | 9
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c | 53
 kernel/include/linux/console.h | 1
 kernel/xenomai-v3.2.4/include/trank/native/pipe.h | 35
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanrecv.adoc | 72
 kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.c | 214
 kernel/xenomai-v3.2.4/include/cobalt/sys/ioctl.h | 36
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h | 391
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.h | 31
 kernel/include/asm-generic/xenomai/wrappers.h | 1
 kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test.in | 116
 kernel/kernel/xenomai/posix/extension.h | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/cpu-affinity.c | 252
 kernel/xenomai-v3.2.4/utils/net/rtroute.c | 393
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/kevents.c | 541
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/Makefile.am | 12
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h | 674
 kernel/drivers/xenomai/analogy/national_instruments/tio_common.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/init.c | 325
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c | 1854
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-bcm2835.c | 722
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall32.h | 1
 kernel/arch/arm64/include/asm/irq_pipeline.h | 141
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan_io.h | 104
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-9.c | 85
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-3.c | 68
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c | 902
 kernel/kernel/xenomai/sched-rt.c | 1
 kernel/xenomai-v3.2.4/lib/boilerplate/time.c | 85
 kernel/drivers/xenomai/can/mscan/rtcan_mscan.h | 1
 kernel/kernel/xenomai/posix/thread.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h | 142
 kernel/arch/arm/kernel/ptrace.c | 2
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/arith.h | 1
 kernel/arch/x86/kernel/irq.c | 20
 kernel/kernel/xenomai/posix/thread.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/serial/rt_imx_uart.c | 1651
 kernel/xenomai-v3.2.4/include/boilerplate/scope.h | 78
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c | 1093
 kernel/drivers/xenomai/net/stack/include/ethernet/eth.h | 1
 kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c | 3
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h | 95
 kernel/xenomai-v3.2.4/include/alchemy/Makefile.am | 15
 kernel/drivers/xenomai/analogy/testing/Makefile | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h | 27
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.drvporting | 251
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c | 1401
 kernel/drivers/xenomai/serial/rt_imx_uart.c | 1
 kernel/xenomai-v3.2.4/lib/cobalt/malloc.c | 32
 kernel/arch/arm64/xenomai/dovetail/machine.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/21142.c | 51
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Makefile | 5
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtwlan.c | 219
 kernel/drivers/xenomai/can/mscan/rtcan_mscan.c | 1
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h | 1
 kernel/drivers/xenomai/net/stack/rtmac/Makefile | 1
 kernel/drivers/xenomai/net/addons/Kconfig | 1
 kernel/include/dovetail/mm_info.h | 12
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c | 1
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c | 1
 kernel/include/asm-generic/xenomai/syscall.h | 1
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/thread.h | 92
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Makefile | 26
 kernel/xenomai-v3.2.4/utils/slackspot/Makefile.am | 7
 kernel/kernel/xenomai/posix/nsem.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_ioctl.c | 663
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/machine.h | 85
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h | 92
 kernel/arch/x86/kernel/apic/io_apic.c | 85
 kernel/include/linux/irqflags.h | 37
 kernel/drivers/xenomai/net/stack/include/nomac_chrdev.h | 1
 kernel/xenomai-v3.2.4/include/vxworks/rngLib.h | 61
 kernel/arch/arm/mm/context.c | 18
 kernel/drivers/xenomai/net/stack/stack_mgr.c | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/pshared.c | 121
 kernel/xenomai-v3.2.4/lib/cobalt/Makefile.am | 68
 kernel/xenomai-v3.2.4/utils/hdb/Makefile.am | 15
 kernel/arch/x86/include/asm/mmu_context.h | 7
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/syscall.h | 1
 kernel/xenomai-v3.2.4/CONTRIBUTING.md | 118
 kernel/include/linux/mm_types.h | 5
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_rtpc.h | 71
 kernel/arch/arm64/kernel/vdso32/vdso.lds.S | 3
 kernel/include/xenomai/pipeline/vdso_fallback.h | 1
 kernel/include/xenomai/cobalt/kernel/thread.h | 1
 kernel/drivers/xenomai/gpio/gpio-mxc.c | 1
 kernel/xenomai-v3.2.4/include/trank/posix/pthread.h | 93
 kernel/xenomai-v3.2.4/lib/cobalt/sigshadow.c | 133
 kernel/drivers/xenomai/net/drivers/e1000/e1000_param.c | 1
 kernel/xenomai-v3.2.4/lib/alchemy/task.h | 78
 kernel/drivers/xenomai/net/drivers/e1000e/defines.h | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/sched-quota.c | 335
 kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/udp.c | 75
 kernel/xenomai-v3.2.4/kernel/cobalt/lock.c | 65
 kernel/xenomai-v3.2.4/lib/alchemy/task.c | 2181
 kernel/include/linux/vmalloc.h | 1
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/trace.h | 67
 kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/packet_raw.c | 122
 kernel/arch/arm64/kernel/process.c | 36
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/features.h | 27
 kernel/xenomai-v3.2.4/include/cobalt/syslog.h | 51
 kernel/fs/fcntl.c | 2
 kernel/xenomai-v3.2.4/lib/copperplate/syncobj.c | 626
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig | 17
 kernel/include/linux/thread_info.h | 66
 kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig | 489
 kernel/drivers/xenomai/net/stack/rtnet_rtpc.c | 1
 kernel/drivers/tty/serial/st-asc.c | 26
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c | 906
 kernel/drivers/xenomai/gpio/Makefile | 1
 kernel/kernel/irq/settings.h | 34
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile | 5
 kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h | 407
 kernel/xenomai-v3.2.4/lib/psos/pt.c | 311
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proc.c | 132
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/slackspot.adoc | 138
 kernel/init/main.c | 8
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_socket.h | 108
 kernel/drivers/xenomai/net/drivers/e1000e/netdev.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/Makefile | 5
 kernel/xenomai-v3.2.4/lib/psos/pt.h | 46
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h | 143
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-6.c | 44
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h | 667
 kernel/xenomai-v3.2.4/lib/psos/Makefile.am | 38
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_module.c | 161
 kernel/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c | 1
 kernel/xenomai-v3.2.4/include/trank/native/misc.h | 57
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_iovec.h | 38
 kernel/xenomai-v3.2.4/kernel/cobalt/timer.c | 719
 kernel/drivers/xenomai/net/stack/include/rtnet_iovec.h | 1
 kernel/xenomai-v3.2.4/include/vxworks/taskHookLib.h | 46
 kernel/drivers/xenomai/net/drivers/rt_macb.h | 1
 kernel/arch/arm64/configs/rockchip_linux_defconfig | 319
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/README | 3
 kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig | 44
 kernel/drivers/xenomai/net/stack/include/ipv4/protocol.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile | 1
 kernel/include/xenomai/cobalt/kernel/vdso.h | 1
 kernel/include/xenomai/cobalt/uapi/thread.h | 1
 kernel/drivers/xenomai/net/stack/ipv4/ip_input.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic2.c | 158
 kernel/kernel/locking/spinlock_debug.c | 3
 kernel/xenomai-v3.2.4/utils/net/rtnet.conf.in | 79
 kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h | 74
 kernel/xenomai-v3.2.4/testsuite/xeno-test/Makefile.am | 16
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/lst-1.c | 144
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_syms.c | 1
 kernel/drivers/xenomai/spi/spi-device.h | 1
 kernel/drivers/xenomai/spi/spi-device.c | 1
 kernel/arch/arm64/kernel/irq.c | 10
 kernel/arch/x86/kernel/time.c | 2
 kernel/xenomai-v3.2.4/lib/cobalt/mutex.c | 1006
 kernel/drivers/base/regmap/internal.h | 5
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_client_event.h | 45
 kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h | 203
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/machine.h | 35
 kernel/arch/arm/kernel/irq_pipeline.c | 20
 kernel/xenomai-v3.2.4/lib/cobalt/init.c | 370
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Makefile | 10
 kernel/drivers/xenomai/gpio/gpio-cherryview.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/interrupt.c | 393
 kernel/include/xenomai/pipeline/sirq.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip_core.c | 1406
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/features.h | 1
 kernel/xenomai-v3.2.4/include/boilerplate/tunables.h | 126
 kernel/xenomai-v3.2.4/lib/cobalt/COPYING | 458
 kernel/xenomai-v3.2.4/lib/alchemy/event.c | 622
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/dohell.adoc | 55
 kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h | 44
 kernel/include/linux/socket.h | 4
 kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/Makefile.am | 10
 kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/packet_dgram.c | 81
 kernel/xenomai-v3.2.4/lib/boilerplate/init/Makefile.am | 32
 kernel/xenomai-v3.2.4/lib/alchemy/event.h | 39
 kernel/init/Makefile | 2
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-5.c | 103
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/fptest.h | 1
 kernel/drivers/xenomai/net/drivers/tulip/interrupt.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/include/ipipe/thread_info.h | 38
 kernel/drivers/xenomai/can/mscan/Kconfig | 1
 kernel/include/xenomai/rtdm/ipc.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h | 48
 kernel/kernel/printk/printk_safe.c | 3
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/c1e.c | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/Makefile.am | 9
 kernel/include/xenomai/cobalt/kernel/bufd.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/machine.h | 55
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_vnic.c | 333
 kernel/xenomai-v3.2.4/lib/copperplate/timerobj.c | 288
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_fragment.h | 37
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_file.c | 81
 kernel/drivers/xenomai/analogy/driver.c | 1
 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c | 1481
 kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h | 1186
 kernel/xenomai-v3.2.4/scripts/prepare-kernel.sh | 472
 kernel/drivers/xenomai/analogy/instruction.c | 1
 kernel/drivers/xenomai/net/stack/ipv4/Makefile | 1
 kernel/xenomai-v3.2.4/include/smokey/smokey.h | 274
 kernel/xenomai-v3.2.4/lib/copperplate/COPYING | 458
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c | 1538
 kernel/kernel/xenomai/time.c | 1
 kernel/xenomai-v3.2.4/include/boilerplate/heapmem.h | 155
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c | 427
 kernel/kernel/xenomai/tree.c | 1
 kernel/xenomai-v3.2.4/include/boilerplate/avl-inner.h | 522
 kernel/xenomai-v3.2.4/include/rtdm/can.h | 239
 kernel/kernel/Makefile | 3
 kernel/kernel/time/tick-broadcast.c | 17
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c | 593
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/syscall.h | 216
 kernel/drivers/xenomai/net/drivers/at91_ether.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c | 1373
 kernel/kernel/notifier.c | 3
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/fptest.h | 56
 kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h | 141
 kernel/lib/vdso/gettimeofday.c | 286
 kernel/xenomai-v3.2.4/utils/Makefile.am | 5
 kernel/drivers/irqchip/irq-gic-v3-mbi.c | 2
 kernel/drivers/xenomai/can/sja1000/rtcan_mem.c | 1
 kernel/xenomai-v3.2.4/include/trank/native/queue.h | 26
 kernel/arch/arm64/include/asm/thread_info.h | 21
 kernel/include/xenomai/pipeline/tick.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile | 10
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_ioctl.h | 31
 kernel/xenomai-v3.2.4/lib/vxworks/wdLib.h | 35
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/posix-clock.c | 458
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c | 464
 kernel/xenomai-v3.2.4/include/boilerplate/namegen.h | 47
 kernel/drivers/xenomai/net/stack/Makefile | 1
 kernel/xenomai-v3.2.4/include/copperplate/traceobj.h | 97
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h | 1
 kernel/xenomai-v3.2.4/utils/analogy/Makefile.am | 100
 kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c | 224
 kernel/xenomai-v3.2.4/lib/vxworks/wdLib.c | 169
 kernel/xenomai-v3.2.4/include/trank/Makefile.am | 10
 kernel/drivers/xenomai/udd/Kconfig | 1
 kernel/net/packet/af_packet.c | 1
 kernel/xenomai-v3.2.4/lib/boilerplate/Makefile.am | 121
 kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.c | 1011
 kernel/drivers/xenomai/ipc/bufp.c | 1
 kernel/include/xenomai/cobalt/uapi/sem.h | 1
 kernel/drivers/xenomai/analogy/rtdm_interface.c | 1
 kernel/xenomai-v3.2.4/include/alchemy/buffer.h | 148
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/via-rhine.c | 1823
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/route.c | 1057
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Kconfig | 16
 kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.h | 43
 kernel/drivers/xenomai/spi/Kconfig | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/syscall.h | 23
 kernel/drivers/xenomai/net/stack/rtmac/tdma/Makefile | 1
 kernel/xenomai-v3.2.4/include/trank/native/buffer.h | 23
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/features.c | 106
 kernel/kernel/xenomai/posix/process.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c | 2027
 kernel/xenomai-v3.2.4/kernel/drivers/Kconfig | 35
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/wrappers.h | 27
 kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c | 1
 kernel/kernel/xenomai/posix/process.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c | 424
 kernel/arch/arm64/include/asm/efi.h | 6
 kernel/drivers/irqchip/irq-bcm2835.c | 3
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig | 147
 kernel/xenomai-v3.2.4/lib/smokey/helpers.c | 397
 kernel/drivers/xenomai/can/rtcan_version.h | 1
 kernel/drivers/xenomai/ipc/xddp.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/intr.c | 1230
 kernel/xenomai-v3.2.4/utils/net/rtnet.in | 371
 kernel/kernel/sched/sched.h | 2
 kernel/arch/arm/include/asm/atomic.h | 16
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-2.c | 65
 kernel/arch/x86/xen/Kconfig | 2
 kernel/drivers/clocksource/mmio.c | 503
 kernel/kernel/xenomai/vfile.c | 1
 kernel/xenomai-v3.2.4/utils/analogy/cmd_write.c | 551
 kernel/drivers/xenomai/net/drivers/tulip/media.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c | 460
 kernel/drivers/clocksource/arm_arch_timer.c | 11
 kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h | 56
 kernel/drivers/clocksource/clksrc_st_lpc.c | 2
 kernel/xenomai-v3.2.4/include/trank/rtdm/rtserial.h | 23
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_ioctl.c | 99
 kernel/arch/arm64/include/asm/syscall.h | 5
 kernel/arch/arm/mach-imx/gpc.c | 21
 kernel/include/xenomai/rtdm/cobalt.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c | 842
 kernel/xenomai-v3.2.4/lib/psos/testsuite/Makefile | 49
 kernel/xenomai-v3.2.4/utils/hdb/hdb.c | 148
 kernel/xenomai-v3.2.4/include/boilerplate/Makefile.am | 23
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h | 48
 kernel/drivers/xenomai/autotune/Makefile | 1
 kernel/drivers/gpio/gpio-davinci.c | 2
 kernel/xenomai-v3.2.4/doc/asciidoc/Makefile.am | 136
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c | 756
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_chrdev.h | 116
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/wd-1.c | 92
 kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h | 40
 kernel/drivers/xenomai/ipc/rtipc.c | 1
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/chkkconf.adoc | 117
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h | 109
 kernel/include/xenomai/rtdm/driver.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h | 35
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c | 853
 kernel/arch/arm/include/asm/bitops.h | 24
 kernel/net/sched/sch_oob.c | 294
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h | 852
 kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h | 75
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c | 200
 kernel/arch/x86/kernel/alternative.c | 14
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h | 37
 kernel/xenomai-v3.2.4/demo/Makefile.am | 2
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/Makefile | 70
 kernel/kernel/trace/trace_preemptirq.c | 52
 kernel/xenomai-v3.2.4/utils/analogy/cmd_read.c | 435
 kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h | 1
 kernel/drivers/xenomai/ipc/Makefile | 1
 kernel/drivers/xenomai/spi/spi-sun6i.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h | 365
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-1.c | 38
 kernel/xenomai-v3.2.4/scripts/Kconfig.frag | 49
 kernel/drivers/xenomai/net/stack/include/rtmac.h | 1
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/syscall.h | 82
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/Makefile.am | 9
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h | 30
 kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h | 33
 kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-sendrecv.c | 194
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/Makefile.am | 9
 kernel/kernel/xenomai/posix/corectl.c | 1
 kernel/drivers/xenomai/net/stack/rtdev_mgr.c | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libalchemy-test.c | 65
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c | 2493
 kernel/xenomai-v3.2.4/include/copperplate/syncobj.h | 233
 kernel/arch/arm64/kernel/vdso/vdso.lds.S | 3
 kernel/drivers/xenomai/net/drivers/igb/e1000_phy.h | 1
 kernel/drivers/xenomai/can/mscan/Makefile | 1
 kernel/include/xenomai/cobalt/kernel/ancillaries.h | 1
 kernel/include/xenomai/rtdm/uapi/can.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c | 40
 kernel/drivers/xenomai/net/drivers/e1000e/80003es2lan.c | 1
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-4.c | 103
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig | 1
 kernel/xenomai-v3.2.4/testsuite/spitest/Makefile.am | 19
 kernel/include/linux/dmaengine.h | 41
 kernel/xenomai-v3.2.4/lib/copperplate/clockobj.c | 396
 kernel/drivers/xenomai/analogy/national_instruments/ni_mio.h | 1
 kernel/include/linux/interrupt.h | 29
 kernel/drivers/xenomai/net/stack/rtcfg/Makefile | 1
 kernel/drivers/xenomai/net/stack/include/stack_mgr.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h | 37
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h | 36
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_internal.h | 75
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c | 67
 kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h | 152
 kernel/xenomai-v3.2.4/kernel/cobalt/COPYING | 281
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/thread.h | 1
 kernel/arch/arm/include/asm/irq_pipeline.h | 135
 kernel/xenomai-v3.2.4/kernel/drivers/spi/Makefile | 14
 kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_io.h | 210
 kernel/drivers/xenomai/net/drivers/e1000e/hw.h | 1
 kernel/kernel/rcu/update.c | 31
 kernel/kernel/xenomai/posix/corectl.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/features.h | 40
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c | 361
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtnetproxy | 74
 kernel/kernel/irq/Makefile | 2
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic.c | 53
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-4.c | 74
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_module.c | 83
 kernel/arch/x86/kernel/tsc.c | 19
 kernel/xenomai-v3.2.4/lib/cobalt/clock.c | 489
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/fptest.h | 74
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_disc.h | 95
 kernel/kernel/xenomai/clock.c | 1
 kernel/xenomai-v3.2.4/include/vxworks/types.h | 40
 kernel/xenomai-v3.2.4/testsuite/latency/Makefile.am | 18
 kernel/xenomai-v3.2.4/README | 74
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/stack_mgr.c | 256
 kernel/xenomai-v3.2.4/lib/cobalt/internal.h | 135
 kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h | 30
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/fpu.S | 87
 kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c | 220
 kernel/xenomai-v3.2.4/include/copperplate/debug.h | 42
 kernel/xenomai-v3.2.4/lib/cobalt/internal.c | 600
 kernel/xenomai-v3.2.4/include/vxworks/sysLib.h | 41
 kernel/drivers/xenomai/net/addons/cap.c | 1
 kernel/drivers/xenomai/net/stack/ipv4/udp/Kconfig | 1
 kernel/arch/x86/Kconfig | 4
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Kconfig | 18
 kernel/include/xenomai/rtdm/uapi/udd.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/calibration.h | 40
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/setup.c | 518
 kernel/drivers/xenomai/net/stack/include/rtnet_port.h | 1
 kernel/kernel/xenomai/pipeline/Makefile | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg_chrdev.h | 176
 kernel/drivers/xenomai/udd/Makefile | 1
 kernel/xenomai-v3.2.4/kernel/drivers/serial/mpc52xx_uart.c | 1438
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_sock.h | 31
 kernel/xenomai-v3.2.4/demo/alchemy/cobalt/Makefile.am | 22
 kernel/drivers/xenomai/analogy/national_instruments/mio_common.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h | 71
 kernel/include/dovetail/spinlock.h | 21
 kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-2.c | 68
 kernel/include/xenomai/cobalt/kernel/list.h | 1
 kernel/drivers/xenomai/net/drivers/igb/e1000_phy.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h | 65
 kernel/include/xenomai/rtdm/can.h | 1
 kernel/kernel/xenomai/sched.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile | 63
 kernel/xenomai-v3.2.4/kernel/drivers/testing/timerbench.c | 529
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_macb.h | 624
 kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c | 1
 kernel/xenomai-v3.2.4/lib/psos/COPYING | 458
 kernel/drivers/xenomai/serial/16550A_io.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan.h | 263
 kernel/arch/arm/vfp/entry.S | 2
 kernel/xenomai-v3.2.4/lib/alchemy/queue.h | 59
 kernel/xenomai-v3.2.4/lib/cobalt/thread.c | 819
 kernel/drivers/xenomai/net/drivers/experimental/e1000/Makefile | 1
 kernel/drivers/xenomai/analogy/testing/Kconfig | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h | 45
 kernel/xenomai-v3.2.4/testsuite/smokey/xddp/Makefile.am | 10
 kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc | 25
 kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h | 1
 kernel/xenomai-v3.2.4/utils/can/rtcansend.c | 306
 kernel/kernel/xenomai/arith.c | 1
 kernel/net/core/dev.c | 98
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c | 5590
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c | 1654
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.h | 62
 kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.c | 220
 kernel/xenomai-v3.2.4/lib/cobalt/timer.c | 255
 kernel/xenomai-v3.2.4/lib/psos/rn.c | 343
 kernel/xenomai-v3.2.4/lib/alchemy/queue.c | 1198
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h | 73
 kernel/xenomai-v3.2.4/testsuite/smokey/Makefile.am | 119
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/machine.c | 70
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile | 5
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c | 2341
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/udp.c | 831
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c | 443
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h | 270
 kernel/drivers/xenomai/net/stack/rtmac/nomac/Makefile | 1
 kernel/arch/x86/include/asm/apic.h | 7
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/Makefile.am | 2
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/alarm-1.c | 88
 kernel/xenomai-v3.2.4/lib/boilerplate/setup.c | 737
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c | 42
 kernel/include/xenomai/cobalt/kernel/registry.h | 1
 kernel/drivers/xenomai/net/stack/ipv4/protocol.c | 1
 kernel/xenomai-v3.2.4/include/boilerplate/setup.h | 120
 kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.h | 37
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c | 106
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h | 44
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.pools | 117
 kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.c | 107
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/Kconfig | 16
 kernel/arch/arm64/kernel/fpsimd.c | 194
 kernel/arch/x86/kernel/cpu/mtrr/generic.c | 12
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h | 1
 kernel/arch/x86/lib/usercopy.c | 2
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c | 1
 kernel/arch/arm64/kernel/smp.c | 107
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h | 39
 kernel/kernel/xenomai/pipeline/sched.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h | 97
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h | 31
 kernel/include/xenomai/cobalt/uapi/time.h | 1
 kernel/kernel/irq/debug.h | 2
 kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h | 136
 kernel/drivers/xenomai/net/stack/ipv4/tcp/Makefile | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/time.c | 38
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/time.h | 16
 kernel/arch/arm64/kernel/signal.c | 38
 kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pnp.h | 387
 kernel/kernel/debug/debug_core.c | 19
 kernel/include/linux/rcupdate.h | 14
 kernel/arch/arm64/include/asm/uaccess.h | 8
 kernel/xenomai-v3.2.4/include/boilerplate/debug.h | 142
 kernel/include/xenomai/cobalt/kernel/sched-tp.h | 1
 kernel/xenomai-v3.2.4/include/trank/trank.h | 57
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h | 61
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_disc.c | 271
 kernel/xenomai-v3.2.4/lib/trank/internal.c | 99
 kernel/drivers/xenomai/net/drivers/igb/e1000_hw.h | 1
 kernel/drivers/xenomai/analogy/intel/Makefile | 1
 kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h | 141
 kernel/xenomai-v3.2.4/utils/can/README | 150
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h | 174
 kernel/xenomai-v3.2.4/include/smokey/Makefile.am | 3
 kernel/drivers/xenomai/net/drivers/tulip/21142.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/machine.h | 34
 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h | 1
 kernel/xenomai-v3.2.4/include/alchemy/mutex.h | 102
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c | 497
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_dev.c | 84
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-1.c | 113
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c | 37
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c | 354
 kernel/xenomai-v3.2.4/lib/cobalt/cobalt.wrappers | 120
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/event-1.c | 97
 kernel/xenomai-v3.2.4/lib/trank/internal.h | 54
 kernel/drivers/xenomai/net/stack/packet/af_packet.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c | 544
 kernel/drivers/xenomai/net/addons/Makefile | 1
 kernel/include/xenomai/cobalt/kernel/select.h | 1
 kernel/xenomai-v3.2.4/utils/autotune/Makefile.am | 17
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall.h | 101
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c | 351
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/clock.h | 45
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-6.c | 107
 kernel/xenomai-v3.2.4/include/vxworks/intLib.h | 39
 kernel/xenomai-v3.2.4/include/vxworks/semLib.h | 63
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/inband_work.h | 32
 kernel/drivers/gpio/gpio-zynq.c | 4
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c | 1
 kernel/xenomai-v3.2.4/scripts/make-release.sh | 35
 kernel/kernel/time/clockevents.c | 77
 kernel/drivers/xenomai/gpio/gpio-omap.c | 1
 kernel/drivers/xenomai/net/stack/ipv4/tcp/tcp.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/sys/timerfd.h | 42
 kernel/kernel/xenomai/posix/gen-syscall-entries.sh | 1
 kernel/xenomai-v3.2.4/lib/psos/rn.h | 49
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h | 99
 kernel/xenomai-v3.2.4/config/Makefile.am | 7
 kernel/kernel/xenomai/posix/syscall_entries.h | 232
 kernel/drivers/xenomai/net/stack/eth.c | 1
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h | 1
 kernel/drivers/xenomai/net/drivers/pcnet32.c | 1
 kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-3.c | 115
 kernel/drivers/xenomai/can/rtcan_list.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c | 680
 kernel/xenomai-v3.2.4/include/alchemy/alarm.h | 86
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c | 994
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h | 59
 kernel/arch/arm64/include/asm/irqflags.h | 46
 kernel/include/asm-generic/cmpxchg-local.h | 8
 kernel/include/xenomai/cobalt/uapi/corectl.h | 1
 kernel/kernel/irq/irqptorture.c | 254
 kernel/include/uapi/asm-generic/dovetail.h | 7
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c | 344
 kernel/drivers/xenomai/net/stack/rtskb.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c | 139
 kernel/xenomai-v3.2.4/lib/psos/task.h | 76
 kernel/xenomai-v3.2.4/lib/psos/task.c | 763
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.h | 19
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.c | 96
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c | 1657
 kernel/drivers/xenomai/net/drivers/tulip/Makefile | 1
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c | 256
 kernel/drivers/xenomai/net/drivers/igb/e1000_i210.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h | 28
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.routing | 117
 kernel/fs/udf/truncate.c | 48
 kernel/xenomai-v3.2.4/doc/doxygen/Makefile.am | 43
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c | 1607
 kernel/include/xenomai/rtdm/gpiopwm.h | 1
 kernel/drivers/xenomai/net/drivers/igb/e1000_i210.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h | 88
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac.h | 51
 kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.h | 18
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma.h | 161
 kernel/xenomai-v3.2.4/include/boilerplate/atomic.h | 89
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/fcntl.h | 44
 kernel/drivers/xenomai/net/drivers/e1000e/82571.c | 1
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/buffer-1.c | 108
 kernel/drivers/xenomai/gpio/gpio-bcm2835.c | 1
 kernel/xenomai-v3.2.4/include/copperplate/reference.h | 101
 kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.c | 35
 kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.xsl | 5
 kernel/kernel/xenomai/select.c | 1
 kernel/kernel/irq/msi.c | 3
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c | 648
 kernel/fs/eventfd.c | 10
 kernel/include/xenomai/rtdm/rtdm.h | 1
 kernel/xenomai-v3.2.4/lib/vxworks/COPYING | 458
 kernel/xenomai-v3.2.4/lib/vxworks/taskInfo.c | 175
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/fptest.h | 56
 kernel/drivers/xenomai/net/drivers/e1000e/param.c | 1
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/Makefile.am | 5
 kernel/include/xenomai/rtdm/uapi/serial.h | 1
 kernel/arch/arm/vfp/vfpmodule.c | 54
 kernel/xenomai-v3.2.4/utils/net/nomaccfg.c | 109
 kernel/xenomai-v3.2.4/testsuite/switchtest/Makefile.am | 18
 kernel/kernel/irq/chip.c | 269
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/cyclictest.adoc | 80
 kernel/arch/arm64/kernel/debug-monitors.c | 2
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h | 93
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/tsc.h | 38
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h | 99
 kernel/xenomai-v3.2.4/lib/alchemy/init.c | 135
 kernel/xenomai-v3.2.4/lib/alchemy/alarm.h | 43
 kernel/xenomai-v3.2.4/include/cobalt/semaphore.h | 65
 kernel/xenomai-v3.2.4/include/rtdm/net.h | 38
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/README.r8169 | 42
 kernel/xenomai-v3.2.4/lib/analogy/internal.h | 58
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/clock.c | 158
 kernel/drivers/xenomai/testing/rtdmtest.c | 1
 kernel/arch/x86/kernel/cpu/acrn.c | 3
 kernel/xenomai-v3.2.4/lib/alchemy/alarm.c | 411
 kernel/mm/vmalloc.c | 6
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig | 56
 kernel/arch/arm64/include/asm/vdso.h | 5
 kernel/kernel/trace/trace_functions_graph.c | 8
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c | 249
 kernel/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c | 1
 kernel/xenomai-v3.2.4/lib/analogy/async.c | 471
 kernel/arch/x86/xen/enlighten_hvm.c | 3
 kernel/xenomai-v3.2.4/utils/chkkconf/kconf-checklist | 51
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h | 293
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c | 963
 kernel/kernel/kthread.c | 4
 kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/Makefile.am | 10
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h | 30
 kernel/drivers/xenomai/serial/16550A_pnp.h | 1
 kernel/include/xenomai/cobalt/kernel/sched-idle.h | 1
 kernel/include/xenomai/rtdm/uapi/rtdm.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/fptest.h | 66
 kernel/kernel/irq/resend.c | 8
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/smi.c | 168
 kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h | 908
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/features.c | 24
 kernel/xenomai-v3.2.4/testsuite/clocktest/clocktest.c | 395
 kernel/drivers/xenomai/net/stack/packet/Kconfig | 1
 kernel/xenomai-v3.2.4/lib/smokey/Makefile.am | 13
 kernel/xenomai-v3.2.4/include/trank/rtdm/rttesting.h | 23
 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h | 1
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/gpiobench.adoc | 70
 kernel/arch/x86/entry/entry_64.S | 5
 kernel/drivers/xenomai/net/stack/include/rtskb_fifo.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile | 5
 kernel/include/linux/hardirq.h | 18
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/syscall.h | 85
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/init.c | 81
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-1.c | 103
 kernel/drivers/xenomai/analogy/national_instruments/ni_tio.h | 1
 kernel/drivers/xenomai/net/drivers/e1000e/e1000.h | 1
 kernel/arch/arm64/kernel/traps.c | 36
 kernel/drivers/xenomai/can/rtcan_virt.c | 1
 kernel/include/xenomai/rtdm/analogy/context.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/irq.h | 14
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-2.c | 104
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h | 148
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev_mgr.c | 127
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c | 393
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/features.h | 27
 kernel/xenomai-v3.2.4/lib/copperplate/internal.c | 298
 kernel/include/asm-generic/percpu.h | 24
 kernel/xenomai-v3.2.4/lib/copperplate/heapobj-heapmem.c | 118
 kernel/drivers/soc/qcom/smp2p.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/wrappers.h | 55
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Makefile | 5
 kernel/include/linux/dw_apb_timer.h | 2
 kernel/include/linux/poll.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c | 1430
 kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.h | 56
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h | 40
 kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.c | 177
 kernel/xenomai-v3.2.4/utils/net/rtping.c | 183
 kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c | 442
 kernel/drivers/xenomai/analogy/national_instruments/Makefile | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/vdso-access.c | 31
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h | 81
 kernel/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h | 68
 kernel/xenomai-v3.2.4/include/cobalt/time.h | 77
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c | 1603
 kernel/drivers/xenomai/analogy/national_instruments/ni_660x.c | 1
 kernel/include/xenomai/cobalt/uapi/kernel/synch.h | 1
 kernel/xenomai-v3.2.4/include/trank/posix/Makefile.am | 3
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c | 384
 kernel/xenomai-v3.2.4/lib/analogy/sync.c | 426
 kernel/include/xenomai/cobalt/uapi/cond.h | 1
 kernel/kernel/xenomai/bufd.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_worker.h | 34
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_at91_ether.h | 109
 kernel/drivers/xenomai/net/stack/rtmac/tdma/Kconfig | 1
 kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h | 1
 kernel/include/linux/tracepoint.h | 6
 kernel/xenomai-v3.2.4/include/cobalt/unistd.h | 44
 kernel/xenomai-v3.2.4/testsuite/smokey/bufp/bufp.c | 174
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h | 1
 kernel/include/xenomai/rtdm/analogy/instruction.h | 1
 kernel/kernel/time/tick-proxy.c | 466
 kernel/xenomai-v3.2.4/include/cobalt/sys/select.h | 38
 kernel/modules-only.symvers | 8
 kernel/include/xenomai/cobalt/kernel/vfile.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/Makefile | 27
 kernel/drivers/dma/dmaengine.c | 10
 kernel/drivers/xenomai/udd/udd.c | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/Makefile.am | 9
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/timerfd.c | 389
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c | 1
 kernel/xenomai-v3.2.4/include/boilerplate/ancillaries.h | 108
 kernel/drivers/xenomai/autotune/Kconfig | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/Makefile | 5
 kernel/xenomai-v3.2.4/testsuite/spitest/spitest.c | 466
 kernel/arch/arm64/include/asm/daifflags.h | 14
 kernel/drivers/xenomai/gpiopwm/Makefile | 1
 kernel/drivers/xenomai/analogy/testing/fake.c | 1
 kernel/arch/arm64/include/asm/vdso/gettimeofday.h | 65
 kernel/arch/arm/include/asm/irqflags.h | 52
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_output.h | 42
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h | 435
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile | 8
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/media.c | 567
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile | 6
 kernel/xenomai-v3.2.4/scripts/Makefile.am | 30
 kernel/drivers/base/regmap/regmap.c | 41
 kernel/drivers/xenomai/net/stack/include/tdma_chrdev.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h | 581
 kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile | 9
 kernel/arch/x86/kernel/asm-offsets.c | 3
 kernel/drivers/xenomai/net/drivers/tulip/pnic.c | 1
 kernel/include/xenomai/rtdm/analogy/rtdm_helpers.h | 1
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/features.h | 30
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c | 839
 kernel/drivers/xenomai/spi/spi-master.h | 1
 kernel/include/xenomai/rtdm/autotune.h | 1
 kernel/kernel/trace/trace.h | 7
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/features.h | 1
 kernel/kernel/trace/trace.c | 16
 kernel/drivers/tty/serial/imx.c | 48
 kernel/drivers/xenomai/spi/spi-master.c | 1
 kernel/include/dovetail/irq.h | 11
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/signal.h | 55
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-10.c | 72
 kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h | 1
 kernel/include/xenomai/cobalt/uapi/kernel/thread.h | 1
 kernel/xenomai-v3.2.4/include/vxworks/taskInfo.h | 64
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/time.h | 16
 kernel/kernel/xenomai/map.c | 1
 kernel/include/xenomai/pipeline/inband_work.h | 1
 kernel/xenomai-v3.2.4/lib/alchemy/COPYING | 458
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h | 133
 kernel/drivers/xenomai/net/drivers/e1000/e1000_main.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c | 667
 kernel/xenomai-v3.2.4/include/boilerplate/libc.h | 296
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile | 8
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/fptest.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Kconfig | 14
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c | 389
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/Makefile.am | 13
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/thread.h | 27
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-5.c | 69
 kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/Makefile.am | 10
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/features.h | 27
 kernel/include/xenomai/cobalt/uapi/signal.h | 1
 kernel/net/Kconfig | 3
 kernel/xenomai-v3.2.4/include/trank/rtdm/rtipc.h | 23
 kernel/arch/arm64/kernel/entry.S | 62
 kernel/include/linux/mm.h | 1
 kernel/xenomai-v3.2.4/lib/alchemy/mutex.c | 521
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/Makefile | 43
 kernel/xenomai-v3.2.4/lib/vxworks/lstLib.c | 121
 kernel/include/net/netoob.h | 17
 kernel/xenomai-v3.2.4/include/copperplate/eventobj.h | 108
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/limits.h | 23
 kernel/xenomai-v3.2.4/lib/alchemy/mutex.h | 42
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/Makefile.am | 9
 kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h | 55
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h | 105
 kernel/include/linux/stop_machine.h | 3
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c | 42
 kernel/xenomai-v3.2.4/lib/Makefile.am | 32
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h | 15
 kernel/include/xenomai/cobalt/uapi/kernel/types.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c | 99
 kernel/xenomai-v3.2.4/include/vxworks/taskLib.h | 119
 kernel/include/xenomai/rtdm/uapi/testing.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/af_inet.c | 340
 kernel/xenomai-v3.2.4/include/trank/rtdm/rtdm.h | 177
 kernel/xenomai-v3.2.4/lib/vxworks/reference.h | 22
 kernel/xenomai-v3.2.4/lib/trank/Makefile.am | 21
 kernel/kernel/xenomai/init.c | 1
 kernel/xenomai-v3.2.4/lib/cobalt/ticks.c | 134
 kernel/kernel/xenomai/sched-tp.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c | 1201
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h | 62
 kernel/xenomai-v3.2.4/testsuite/gpiotest/Makefile.am | 19
 kernel/drivers/xenomai/net/drivers/mpc8xx_enet.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c | 259
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_checksum.h | 19
 kernel/drivers/xenomai/net/stack/packet/Makefile | 1
 kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/target.h | 30
 kernel/include/linux/spinlock_types.h | 154
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/COPYING | 281
 kernel/drivers/pinctrl/qcom/pinctrl-msm.c | 5
 kernel/drivers/xenomai/spi/spi-bcm2835.c | 1
 kernel/arch/x86/kernel/cpu/mce/amd.c | 7
 kernel/xenomai-v3.2.4/testsuite/smokey/iddp/iddp.c | 178
 kernel/xenomai-v3.2.4/doc/asciidoc/README.APPLICATIONS.adoc | 78
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h | 79
 kernel/kernel/xenomai/Makefile | 1
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_module.c | 1
 kernel/include/dovetail/netdevice.h | 13
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c | 394
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h | 122
 kernel/drivers/xenomai/Kconfig | 1
 kernel/include/vdso/datapage.h | 25
 kernel/kernel/panic.c | 26
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sched.h | 58
 kernel/xenomai-v3.2.4/lib/analogy/range.c | 638
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTcfg.spec | 469
 kernel/drivers/xenomai/net/drivers/mpc8xx_fec.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ethernet/eth.h | 32
 kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/Makefile.am | 8
 kernel/include/net/sock.h | 3
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c | 457
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c | 439
 kernel/drivers/xenomai/analogy/sensoray/Makefile | 1
 kernel/arch/arm64/include/dovetail/irq.h | 1
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c | 686
 kernel/xenomai-v3.2.4/include/cobalt/sys/Makefile.am | 10
 kernel/include/xenomai/rtdm/compat.h | 1
 kernel/lib/Kconfig.debug | 53
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h | 86
 kernel/drivers/xenomai/can/Makefile | 1
 kernel/drivers/xenomai/net/stack/include/rtnet_socket.h | 1
 kernel/arch/arm/mm/alignment.c | 21
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c | 2039
 kernel/arch/arm64/Kconfig | 4
 kernel/drivers/xenomai/net/stack/include/ipv4/arp.h | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/posix-mutex.c | 1130
 kernel/init/Kconfig | 63
 kernel/arch/x86/kvm/x86.c | 106
 kernel/kernel/irq/dummychip.c | 4
 kernel/drivers/xenomai/net/stack/include/rtcfg_chrdev.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile | 38
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h | 28
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c | 392
 kernel/xenomai-v3.2.4/lib/alchemy/Makefile.am | 53
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/machine.h | 39
 kernel/drivers/xenomai/net/drivers/experimental/3c59x.c | 1
 kernel/arch/arm/kernel/smp.c | 121
 kernel/xenomai-v3.2.4/include/rtdm/autotune.h | 26
 kernel/drivers/irqchip/irq-sun4i.c | 2
 kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h | 77
 kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c | 976
 kernel/xenomai-v3.2.4/demo/posix/cobalt/Makefile.am | 76
 kernel/drivers/xenomai/net/drivers/macb.c | 1
 kernel/xenomai-v3.2.4/demo/posix/cobalt/eth_p_all.c | 108
 kernel/xenomai-v3.2.4/include/trank/native/mutex.h | 23
 kernel/include/xenomai/cobalt/uapi/kernel/pipe.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h | 94
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c | 43
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h | 427
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/wrappers.h | 27
 kernel/include/uapi/asm-generic/fcntl.h | 9
 kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c | 25
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h | 205
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c | 305
 kernel/drivers/xenomai/can/rtcan_flexcan.c | 1
 kernel/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h | 1
 kernel/include/asm-generic/cmpxchg.h | 16
 kernel/xenomai-v3.2.4/scripts/maint/test-xeno-test.rb | 301
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h | 61
 kernel/drivers/xenomai/can/mscan/rtcan_mscan_regs.h | 1
 kernel/scripts/mkcompile_h | 6
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c | 1274
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h | 1498
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c | 1999
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/calibration.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proto.c | 68
 kernel/xenomai-v3.2.4/lib/boilerplate/avl.c | 778
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h | 1397
 kernel/xenomai-v3.2.4/utils/net/Makefile.am | 16
 kernel/include/xenomai/cobalt/kernel/schedparam.h | 1
 kernel/xenomai-v3.2.4/lib/copperplate/internal.h | 179
 kernel/include/xenomai/cobalt/kernel/synch.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/tunables.h | 80
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h | 625
 kernel/xenomai-v3.2.4/include/boilerplate/obstack.h | 515
 kernel/xenomai-v3.2.4/utils/net/rtifconfig.c | 440
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c | 484
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/route.h | 60
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-3.c | 48
 kernel/kernel/xenomai/rtdm/device.c | 1
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/autotune.adoc | 155
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-1.c | 34
 kernel/kernel/xenomai/heap.c | 1
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-3.c | 111
 kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.h | 1
 kernel/kernel/xenomai/thread.c | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/setsched/Makefile.am | 9
 kernel/xenomai-v3.2.4/utils/ps/Makefile.am | 7
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/time.h | 16
 kernel/arch/arm64/configs/rockchip_linux_defconfig.rej | 32
 kernel/kernel/irq/generic-chip.c | 2
 kernel/drivers/misc/Makefile | 1
 kernel/xenomai-v3.2.4/include/trank/native/event.h | 81
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig | 10
 kernel/xenomai-v3.2.4/demo/posix/cobalt/can-rtt.c | 408
 kernel/xenomai-v3.2.4/kernel/drivers/serial/Makefile | 8
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall.h | 1
 kernel/include/trace/events/cobalt-rtdm.h | 1
 kernel/arch/x86/kernel/irq_work.c | 3
 kernel/drivers/clocksource/Kconfig | 9
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c | 146
 kernel/xenomai-v3.2.4/lib/psos/reference.h | 22
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c | 1192
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c | 87
 kernel/include/linux/net.h | 1
 kernel/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts | 12
 kernel/drivers/xenomai/net/stack/ipv4/icmp.c | 1
 kernel/drivers/xenomai/can/rtcan_raw_filter.c | 1
 kernel/kernel/xenomai/pipeline/init.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h | 113
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c | 199
 kernel/xenomai-v3.2.4/testsuite/xeno-test/dohell | 96
 kernel/kernel/exit.c | 2
 kernel/xenomai-v3.2.4/scripts/histo.gp | 22
 kernel/xenomai-v3.2.4/scripts/xeno.in | 17
 kernel/lib/atomic64.c | 24
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/rng-1.c | 213
 kernel/arch/x86/kernel/tsc_sync.c | 4
 kernel/include/xenomai/rtdm/net.h | 1
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c | 1
 kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c | 1073
 kernel/xenomai-v3.2.4/configure.ac | 1062
 kernel/xenomai-v3.2.4/lib/cobalt/rtdm.c | 578
 kernel/xenomai-v3.2.4/doc/asciidoc/README.INSTALL.adoc | 879
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/corectl.c | 77
 kernel/xenomai-v3.2.4/include/copperplate/registry.h | 154
 kernel/drivers/dma/imx-sdma.c | 195
 kernel/kernel/trace/ring_buffer.c | 18
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c | 835
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h | 61
 kernel/xenomai-v3.2.4/demo/posix/Makefile.am | 8
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/tcp.c | 2453
 kernel/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c | 1
 kernel/include/xenomai/pipeline/trace.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h | 764
 kernel/xenomai-v3.2.4/include/vxworks/memPartLib.h | 66
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtskb.c | 535
 kernel/drivers/xenomai/net/drivers/eepro100.c | 1
 kernel/arch/x86/kernel/cpu/mce/core.c | 2
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/syscall.h | 23
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c | 1001
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h | 73
 kernel/xenomai-v3.2.4/lib/smokey/COPYING | 458
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/trace.h | 116
 kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/sched-tp.c | 244
 kernel/drivers/spi/spi-bcm2835.c | 99
 kernel/include/xenomai/cobalt/kernel/timer.h | 1
 kernel/arch/arm64/configs/rockchip_defconfig | 5
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c | 9094 +
 kernel/include/xenomai/cobalt/kernel/pipe.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile | 8
 kernel/drivers/irqchip/irq-bcm2836.c | 3
 kernel/include/linux/spinlock_api_up.h | 18
 kernel/xenomai-v3.2.4/lib/alchemy/internal.c | 51
 kernel/include/linux/printk.h | 17
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h | 3454
 kernel/arch/x86/kernel/apic/x2apic_cluster.c | 4
 kernel/kernel/locking/Makefile | 1
 kernel/xenomai-v3.2.4/lib/alchemy/internal.h | 93
 kernel/include/linux/entry-common.h | 22
 kernel/include/trace/events/cobalt-core.h | 1
 kernel/drivers/pinctrl/samsung/pinctrl-exynos.c | 23
 kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h | 98
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/README | 3
 kernel/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile | 1
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/Makefile.am | 5
 kernel/drivers/xenomai/net/stack/rtcfg/Kconfig | 1
 kernel/xenomai-v3.2.4/include/cobalt/stdio.h | 129
 kernel/xenomai-v3.2.4/utils/net/tdma.conf | 39
 kernel/xenomai-v3.2.4/include/trank/native/task.h | 58
 kernel/xenomai-v3.2.4/include/alchemy/compat.h | 23
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h | 64
 kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.h | 260
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/calibration.h | 1
 kernel/xenomai-v3.2.4/lib/boilerplate/debug.c | 185
 kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.c | 1282
 kernel/arch/Kconfig | 3
 kernel/drivers/xenomai/net/stack/include/ipv4/tcp.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall32.h | 24
 kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h | 74
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall32.h | 24
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h | 51
 kernel/xenomai-v3.2.4/lib/copperplate/Makefile.am | 70
 kernel/arch/arm/include/asm/syscall.h | 5
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/switchtest.adoc | 106
 kernel/include/xenomai/rtdm/uapi/gpio.h | 1
 kernel/xenomai-v3.2.4/config/version-label | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Kconfig | 9
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/syscall.h | 104
 kernel/arch/x86/kernel/cpu/mce/therm_throt.c | 6
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev.c | 940
 kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h | 43
 kernel/drivers/xenomai/net/drivers/e1000/Makefile | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_ioctl.c | 421
 kernel/xenomai-v3.2.4/utils/chkkconf/Makefile.am | 11
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h | 431
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h | 120
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/features.c | 102
 kernel/drivers/xenomai/net/drivers/igb/igb_main.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/c1e.c | 72
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Kconfig | 41
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.c | 451
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c | 638
 kernel/arch/x86/include/asm/uaccess.h | 2
 kernel/kernel/xenomai/procfs.c | 1
 kernel/xenomai-v3.2.4/lib/cobalt/wrappers.c | 574
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h | 12
 kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig | 3
 kernel/kernel/sched/idle.c | 22
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_fragment.c | 327
 kernel/kernel/xenomai/procfs.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.h | 82
 kernel/include/xenomai/cobalt/kernel/sched-weak.h | 1
 kernel/drivers/xenomai/net/stack/ipv4/ip_sock.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/Makefile | 8
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c | 1
 kernel/drivers/xenomai/net/drivers/igb/e1000_defines.h | 1
 kernel/kernel/xenomai/posix/event.h | 1
 kernel/kernel/xenomai/posix/sem.h | 1
 kernel/xenomai-v3.2.4/include/copperplate/semobj.h | 88
 kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h | 65
 kernel/xenomai-v3.2.4/kernel/cobalt/udev/00-rtnet.rules | 2
 kernel/drivers/xenomai/net/stack/rtdev.c | 1
 kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h | 905
 kernel/xenomai-v3.2.4/lib/cobalt/semaphore.c | 654
 kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-latex.conf.in | 186
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-7.c | 116
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_dev.h | 37
 kernel/drivers/xenomai/net/stack/include/rtnet_chrdev.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h | 52
 kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/README | 4
 kernel/kernel/time/tick-internal.h | 15
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/latency.adoc | 85
 kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h | 56
 kernel/xenomai-v3.2.4/utils/corectl/Makefile.am | 17
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/Kconfig | 8
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/fptest.h | 1
 kernel/arch/x86/kernel/apic/msi.c | 12
 kernel/kernel/xenomai/posix/cond.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h | 147
 kernel/drivers/clocksource/bcm2835_timer.c | 27
 kernel/xenomai-v3.2.4/include/rtdm/testing.h | 59
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h | 1018
 kernel/kernel/xenomai/posix/mutex.h | 1
 kernel/xenomai-v3.2.4/demo/posix/cobalt/gpiopwm.c | 504
 kernel/drivers/xenomai/net/stack/Kconfig | 1
 kernel/kernel/xenomai/posix/event.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h | 33
 kernel/kernel/xenomai/posix/sem.c | 1
 kernel/kernel/xenomai/posix/mutex.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile | 5
 kernel/include/xenomai/pipeline/clock.h | 1
 kernel/kernel/xenomai/posix/cond.h | 1
 kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.conf | 12
 kernel/xenomai-v3.2.4/include/trank/native/alarm.h | 38
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_rtpc.c | 258
 kernel/arch/x86/kvm/emulate.c | 65
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h | 24
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/nomac_chrdev.h | 39
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_dev.c | 186
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/machine.c | 78
 kernel/xenomai-v3.2.4/testsuite/smokey/leaks/Makefile.am | 10
 kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c | 4
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c | 257
 kernel/drivers/dma/virt-dma.c | 122
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtmac | 341
 kernel/xenomai-v3.2.4/testsuite/smokey/bufp/Makefile.am | 10
 kernel/drivers/dma/virt-dma.h | 127
 kernel/xenomai-v3.2.4/kernel/cobalt/debug.h | 72
 kernel/include/xenomai/pipeline/sched.h | 1
 kernel/kernel/xenomai/pipeline/kevents.c | 1
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-1.c | 49
 kernel/xenomai-v3.2.4/kernel/cobalt/debug.c | 657
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c | 105
 kernel/xenomai-v3.2.4/include/psos/Makefile.am | 5
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h | 27
 kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/sigdebug.c | 302
 kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pci.h | 299
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/README | 3
 kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h | 55
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/fptest.h | 75
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c | 1328
 kernel/arch/x86/kernel/smpboot.c | 9
 kernel/arch/arm/kernel/process.c | 26
 kernel/drivers/xenomai/analogy/proc.h | 1
 kernel/xenomai-v3.2.4/utils/analogy/cmd_bits.c | 279
 kernel/kernel/rcu/tree.c | 31
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c | 560
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h | 207
 kernel/kernel/irq/internals.h | 2
 kernel/xenomai-v3.2.4/include/trank/rtdm/Makefile.am | 8
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h | 68
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb.h | 809
 kernel/arch/x86/kernel/fpu/core.c | 111
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h | 45
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h | 78
 kernel/xenomai-v3.2.4/config/INSTALL | 229
 kernel/xenomai-v3.2.4/include/cobalt/ticks.h | 83
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c | 215
 kernel/include/xenomai/cobalt/kernel/map.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/features.h | 40
 kernel/arch/x86/kernel/apic/ipi.c | 32
 kernel/include/xenomai/pipeline/thread.h | 1
 kernel/include/xenomai/pipeline/wrappers.h | 1
 kernel/arch/arm/vdso/datapage.S | 11
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopen.c | 44
 kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h | 72
 kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c | 298
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h | 38
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h | 84
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/Makefile | 5
 kernel/xenomai-v3.2.4/include/cobalt/Makefile.am | 27
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig | 8
 kernel/xenomai-v3.2.4/lib/copperplate/heapobj-malloc.c | 192
 kernel/xenomai-v3.2.4/include/psos/psos.h | 376
 kernel/drivers/xenomai/can/sja1000/rtcan_ems_pci.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_proto.h | 38
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c | 2235
 kernel/include/xenomai/rtdm/uapi/ipc.h | 1
 kernel/kernel/rcu/tree_plugin.h | 2
 kernel/drivers/xenomai/gpiopwm/Kconfig | 1
 kernel/xenomai-v3.2.4/doc/install.rules | 56
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/signal.h | 42
 kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/Makefile.am | 9
 kernel/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h | 1
 kernel/xenomai-v3.2.4/include/rtdm/spi.h | 24
 kernel/xenomai-v3.2.4/kernel/drivers/spi/Kconfig | 39
 kernel/drivers/xenomai/net/stack/include/rtnet_rtpc.h | 1
 kernel/drivers/irqchip/exynos-combiner.c | 7
 kernel/xenomai-v3.2.4/lib/copperplate/eventobj.c | 337
 kernel/xenomai-v3.2.4/lib/vxworks/rngLib.c | 198
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-1.c | 161
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h | 206
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-7.c | 64
 kernel/xenomai-v3.2.4/lib/vxworks/rngLib.h | 32
 kernel/xenomai-v3.2.4/kernel/drivers/testing/heapcheck.c | 550
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig | 4
 kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.h | 85
 kernel/xenomai-v3.2.4/lib/copperplate/regd/regd.c | 560
 kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.c | 886
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/kevents.h | 36
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h | 59
 kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/Makefile.am | 10
 kernel/arch/arm64/kernel/asm-offsets.c | 1
 kernel/include/xenomai/cobalt/kernel/tree.h | 1
 kernel/kernel/trace/trace_irqsoff.c | 11
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/thread.h | 119
 kernel/arch/x86/kernel/irq_pipeline.c | 387
 kernel/include/xenomai/cobalt/kernel/lock.h | 1
 kernel/include/xenomai/cobalt/uapi/event.h | 1
 kernel/include/xenomai/cobalt/kernel/init.h | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/posix-select.c | 147
 kernel/kernel/xenomai/posix/signal.c | 1
 kernel/drivers/xenomai/serial/16550A_pci.h | 1
 kernel/kernel/xenomai/posix/signal.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h | 49
 kernel/drivers/xenomai/analogy/intel/8255.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/fptest.h | 127
 kernel/xenomai-v3.2.4/include/copperplate/registry-obstack.h | 128
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h | 63
 kernel/xenomai-v3.2.4/lib/psos/tm.c | 352
 kernel/mm/memory.c | 18
 kernel/include/xenomai/cobalt/kernel/trace.h | 1
 kernel/xenomai-v3.2.4/lib/vxworks/Makefile.am | 47
 kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.c | 395
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile | 10
 kernel/xenomai-v3.2.4/include/cobalt/arith.h | 45
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h | 60
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/Makefile.am | 5
 kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.h | 45
 kernel/drivers/xenomai/analogy/intel/8255.h | 1
 kernel/arch/x86/include/asm/fpu/api.h | 25
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c | 331
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile | 18
 kernel/kernel/xenomai/pipeline/tick.c | 1
 kernel/arch/x86/kernel/apic/apic.c | 70
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_proto.c | 127
 kernel/kernel/xenomai/posix/sched.c | 1
 kernel/kernel/xenomai/posix/sched.h | 1
 kernel/drivers/xenomai/net/drivers/e1000/kcompat.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/thread.c | 522
 kernel/xenomai-v3.2.4/include/cobalt/sys/cobalt.h | 145
 kernel/drivers/gpio/gpio-pl061.c | 3
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proc.h | 34
 kernel/xenomai-v3.2.4/utils/analogy/wf_generate.c | 251
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh | 32
 kernel/drivers/xenomai/net/stack/rtmac/Kconfig | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_eth1394.h | 240
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h | 45
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h | 75
 kernel/include/uapi/linux/clocksource.h | 33
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c | 691
 kernel/drivers/xenomai/can/rtcan_internal.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c | 360
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Makefile | 19
 kernel/drivers/xenomai/can/rtcan_socket.h | 1
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/README | 5
 kernel/drivers/xenomai/can/rtcan_socket.c | 1
 kernel/vmlinux.symvers | 464
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/TDMA.spec | 597
 kernel/arch/arm/kernel/entry-armv.S | 52
 kernel/xenomai-v3.2.4/lib/copperplate/reference.c | 57
 kernel/drivers/xenomai/analogy/testing/loop.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h | 23
 kernel/drivers/iio/industrialio-trigger.c | 1
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/Makefile.am | 2
 kernel/include/xenomai/rtdm/analogy/buffer.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h | 280
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/tsc.h | 54
 kernel/include/xenomai/rtdm/gpio.h | 1
 kernel/drivers/irqchip/irq-gic-v3.c | 6
 kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proto.h | 78
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/wrappers.h | 24
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_event.h | 121
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c | 2891
 kernel/drivers/xenomai/net/drivers/tulip/tulip_core.c | 1
 kernel/drivers/xenomai/testing/heapcheck.c | 1
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c | 1
 kernel/drivers/xenomai/analogy/sensoray/s526.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c | 299
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/posix-cond.c | 788
 kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h | 743
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h | 42
 kernel/arch/x86/kernel/Makefile | 1
 kernel/drivers/xenomai/net/stack/rtnet_module.c | 1
 kernel/net/sched/Makefile | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev.h | 275
 kernel/kernel/xenomai/posix/internal.h | 1
 kernel/tools/perf/trace/beauty/include/linux/socket.h | 3
 kernel/xenomai-v3.2.4/lib/cobalt/modechk.wrappers | 2
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h | 62
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_frame.h | 139
 kernel/xenomai-v3.2.4/include/trank/native/cond.h | 23
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c | 1
 kernel/drivers/xenomai/net/stack/ipv4/af_inet.c | 1
 kernel/drivers/xenomai/can/sja1000/rtcan_plx_pci.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/Kconfig | 8
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h | 178
 kernel/kernel/irq/Kconfig | 13
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/pipeline.h | 96
 kernel/kernel/xenomai/sched-idle.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/af_inet.h | 35
 kernel/xenomai-v3.2.4/lib/psos/testsuite/pt-1.c | 50
 kernel/xenomai-v3.2.4/include/alchemy/event.h | 130
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h | 428
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/README | 3
 kernel/drivers/xenomai/testing/Kconfig | 1
 kernel/include/xenomai/rtdm/uapi/autotune.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c | 588
 kernel/xenomai-v3.2.4/lib/psos/tm.h | 39
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c | 1145
 kernel/xenomai-v3.2.4/testsuite/smokey/gdb/Makefile.am | 12
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/Makefile.am | 11
 kernel/kernel/irq/handle.c | 9
 kernel/drivers/cpuidle/poll_state.c | 2
 kernel/xenomai-v3.2.4/lib/trank/native.c | 668
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/protocol.h | 54
 kernel/xenomai-v3.2.4/include/mercury/Makefile.am | 4
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h | 86
 kernel/xenomai-v3.2.4/include/copperplate/Makefile.am | 19
 kernel/xenomai-v3.2.4/include/rtdm/ipc.h | 26
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig | 12
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-2.c | 108
 kernel/xenomai-v3.2.4/testsuite/smokey/leaks/leaks.c | 286
 kernel/include/asm-generic/xenomai/ipipe/thread.h | 1
 kernel/xenomai-v3.2.4/include/boilerplate/private-list.h | 217
 kernel/include/linux/dovetail.h | 325
 kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile | 4
 kernel/drivers/xenomai/net/drivers/e1000e/Makefile | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Kconfig | 6
 kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 1
 kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c | 1
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-3.c | 42
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libposix-test.c | 32
 kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h | 551
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopentest.c | 79
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h | 84
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_frame.c | 571
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/limits.h | 23
 kernel/kernel/xenomai/posix/clock.h | 1
 kernel/kernel/Kconfig.dovetail | 23
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/tdma_chrdev.h | 81
 kernel/drivers/Makefile | 2
 kernel/drivers/xenomai/can/sja1000/Makefile | 1
 kernel/include/linux/irqstage.h | 398
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile | 16
 kernel/drivers/xenomai/net/drivers/igb/Makefile | 1
 kernel/arch/x86/include/asm/special_insns.h | 4
 kernel/xenomai-v3.2.4/include/cobalt/pthread.h | 180
 kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h | 167
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.c | 836
 kernel/kernel/xenomai/posix/clock.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c | 5676
 kernel/xenomai-v3.2.4/lib/analogy/root_leaf.h | 54
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile | 24
 kernel/xenomai-v3.2.4/include/vxworks/msgQLib.h | 56
 kernel/xenomai-v3.2.4/include/trank/native/sem.h | 23
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h | 307
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h | 51
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.h | 358
 kernel/arch/x86/kernel/process_64.c | 32
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c | 2095
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-sun6i.c | 674
 kernel/include/linux/tick.h | 8
 kernel/include/xenomai/cobalt/uapi/kernel/vdso.h | 1
 kernel/drivers/xenomai/gpio/gpio-sun8i-h3.c | 1
 kernel/xenomai-v3.2.4/lib/psos/queue.c | 507
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/Makefile | 5
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c | 214
 kernel/xenomai-v3.2.4/lib/analogy/info.c | 84
 kernel/xenomai-v3.2.4/lib/psos/queue.h | 51
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h | 51
 kernel/drivers/gpio/gpio-xilinx.c | 26
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c | 1203
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_event.c | 745
 kernel/xenomai-v3.2.4/include/boilerplate/avl.h | 28
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/tsc.h | 48
 kernel/xenomai-v3.2.4/lib/psos/init.c | 141
 kernel/arch/x86/include/asm/fpu/types.h | 12
 kernel/drivers/xenomai/net/drivers/e1000/e1000_osdep.h | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/posix-fork.c | 36
 kernel/include/xenomai/cobalt/uapi/kernel/limits.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c | 1164
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c | 43
 kernel/drivers/xenomai/net/Makefile | 1
 kernel/xenomai-v3.2.4/testsuite/gpiotest/gpiotest.c | 267
 kernel/arch/arm64/include/asm/ptrace.h | 6
 kernel/xenomai-v3.2.4/include/alchemy/heap.h | 137
 kernel/drivers/xenomai/analogy/subdevice.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h | 124
 kernel/xenomai-v3.2.4/testsuite/smokey/gdb/gdb.c | 317
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c | 444
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-2.c | 66
 kernel/xenomai-v3.2.4/testsuite/gpiobench/Makefile.am | 18
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/sched.c | 198
 kernel/kernel/locking/pipeline.c | 231
 kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-label.c | 238
 kernel/xenomai-v3.2.4/doc/Makefile.am | 6
 kernel/arch/arm64/xenomai/Kconfig | 1
 kernel/kernel/xenomai/posix/syscall.c | 1
 kernel/include/linux/spi/spi.h | 99
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h | 71
 kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.h | 150
 kernel/Makefile | 2
 kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.c | 124
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/Makefile.am | 2
 kernel/drivers/xenomai/net/drivers/rt_eth1394.h | 1
 kernel/xenomai-v3.2.4/lib/psos/testsuite/rn-1.c | 63
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h | 1417
 kernel/kernel/xenomai/posix/syscall.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h | 166
 kernel/include/xenomai/cobalt/uapi/monitor.h | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/Makefile.am | 8
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c | 1
 kernel/drivers/xenomai/analogy/national_instruments/pcimio.c | 1
 kernel/arch/x86/mm/fault.c | 158
 kernel/xenomai-v3.2.4/lib/analogy/math.c | 457
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h | 1
 kernel/xenomai-v3.2.4/testsuite/latency/latency.c | 822
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/tsc.h | 1
 kernel/xenomai-v3.2.4/include/xenomai/init.h | 48
 kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi | 108
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/thread.h | 32
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/Makefile.am | 12
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h | 997
 kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/Makefile.am | 9
 kernel/xenomai-v3.2.4/include/copperplate/tunables.h | 100
 kernel/xenomai-v3.2.4/include/alchemy/cond.h | 98
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-4.c | 71
 kernel/xenomai-v3.2.4/lib/cobalt/mq.c | 589
 kernel/kernel/irq/proc.c | 3
 kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/Makefile.am | 8
 kernel/xenomai-v3.2.4/kernel/drivers/udd/udd.c | 665
 kernel/drivers/misc/atemsys-main/Makefile | 34
 kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h | 138
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h | 570
 kernel/drivers/xenomai/net/drivers/freescale/fec_ptp.c | 1
 kernel/xenomai-v3.2.4/include/vxworks/wdLib.h | 50
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/coreheap.c | 108
 kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h | 179
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/protocol.c | 88
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall32.h | 24
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_module.c | 411
 kernel/include/xenomai/rtdm/analogy/driver.h | 1
 kernel/arch/arm/include/asm/efi.h | 4
 kernel/net/core/net-sysfs.c | 52
 kernel/xenomai-v3.2.4/lib/copperplate/init.c | 385
 kernel/kernel/xenomai/sched-quota.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c | 1104
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/sched.h | 37
 kernel/xenomai-v3.2.4/testsuite/smokey/arith/Makefile.am | 8
 kernel/kernel/xenomai/posix/syscall32.h | 1
 kernel/kernel/power/Makefile | 2
 kernel/include/dovetail/thread_info.h | 13
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/LICENSE | 21
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_ioctl.h | 35
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_module.c | 317
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h | 30
 kernel/kernel/xenomai/posix/syscall32.c | 1
 kernel/xenomai-v3.2.4/include/boilerplate/lock.h | 224
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/README | 1
 kernel/drivers/xenomai/can/sja1000/rtcan_peak_dng.c | 1
 kernel/xenomai-v3.2.4/config/version-code | 1
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c | 1
 kernel/xenomai-v3.2.4/include/copperplate/threadobj.h | 589
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h | 68
 kernel/drivers/xenomai/analogy/sensoray/Kconfig | 1
 kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c | 820
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/Makefile.am | 58
 kernel/arch/arm/include/asm/ptrace.h | 3
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/eeprom.c | 321
 kernel/drivers/memory/omap-gpmc.c | 3
 kernel/kernel/xenomai/posix/io.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/syscall.c | 29
 kernel/arch/arm64/kernel/entry-common.c | 152
 kernel/kernel/xenomai/posix/io.h | 1
 kernel/kernel/trace/ftrace.c | 18
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h | 113
 kernel/xenomai-v3.2.4/lib/cobalt/trace.c | 97
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/COPYING | 281
 kernel/arch/arm/kernel/raw_printk.c | 30
 kernel/xenomai-v3.2.4/lib/copperplate/heapobj-tlsf.c | 121
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c | 1
 kernel/drivers/xenomai/net/stack/corectl.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h | 74
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h | 25
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/Makefile | 12
 kernel/drivers/xenomai/can/sja1000/rtcan_peak_pci.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/Makefile.am | 8
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c | 443
 kernel/xenomai-v3.2.4/lib/psos/internal.h | 36
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-2.c | 102
 kernel/include/xenomai/cobalt/uapi/asm-generic/arith.h | 1
 kernel/drivers/xenomai/net/drivers/loopback.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h | 135
 kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.h | 32
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-2.c | 110
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/Makefile.am | 2
 kernel/arch/arm64/mm/fault.c | 40
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h | 34
 kernel/drivers/xenomai/autotune/autotune.c | 1
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c | 1
 kernel/arch/arm/kernel/vdso.c | 51
 kernel/arch/arm64/mm/context.c | 11
 kernel/xenomai-v3.2.4/lib/psos/README | 51
 kernel/arch/arm/kernel/irq.c | 9
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/thread.c | 343
 kernel/xenomai-v3.2.4/testsuite/smokey/xddp/xddp.c | 264
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h | 67
 kernel/xenomai-v3.2.4/include/copperplate/clockobj.h | 263
 kernel/xenomai-v3.2.4/include/xenomai/version.h | 34
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/machine.c | 63
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/sched.h | 23
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/c1e.h | 23
 kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith.c | 124
 kernel/arch/arm64/include/dovetail/thread_info.h | 1
 kernel/kernel/trace/trace_stack.c | 5
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-3.c | 118
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h | 1
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/Makefile.am | 5
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.h | 173
 kernel/include/xenomai/rtdm/analogy/command.h | 1
 kernel/drivers/xenomai/net/drivers/tulip/pnic2.c | 1
 kernel/drivers/xenomai/gpio/gpio-core.c | 1
 kernel/arch/x86/hyperv/hv_init.c | 3
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c | 1
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.c | 380
 kernel/drivers/xenomai/analogy/command.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_dev.h | 37
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig | 13
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile | 9
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/icmp.c | 510
 kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h | 39
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c | 2112
 kernel/arch/arm64/kernel/syscall.c | 20
 kernel/include/linux/wakeup_reason.h | 2
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/corectl.adoc | 106
 kernel/drivers/xenomai/net/stack/ipv4/arp.c | 1
 kernel/drivers/xenomai/analogy/driver_facilities.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile | 10
 kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.c | 242
 kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h | 114
 kernel/arch/arm/kernel/asm-offsets.c | 4
 kernel/xenomai-v3.2.4/doc/asciidoc/plaintext_postproc.awk | 64
 kernel/xenomai-v3.2.4/utils/analogy/insn_write.c | 279
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h | 33
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Kconfig | 21
 kernel/xenomai-v3.2.4/lib/vxworks/taskLib.c | 908
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig | 100
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/udp.h | 33
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/smi.h | 32
 kernel/xenomai-v3.2.4/lib/vxworks/intLib.c | 25
 kernel/arch/arm/kernel/Makefile | 5
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/wrappers.h | 1
 kernel/xenomai-v3.2.4/lib/vxworks/taskLib.h | 106
 kernel/xenomai-v3.2.4/lib/analogy/COPYING | 458
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c | 4423
 kernel/xenomai-v3.2.4/kernel/cobalt/thread.c | 2531
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/calibration.h | 106
 kernel/xenomai-v3.2.4/lib/cobalt/sched.c | 649
 kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h | 202
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c | 2207
 kernel/arch/arm64/xenomai/ipipe/syscall.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile | 6
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h | 228
 kernel/arch/x86/include/asm/irq_pipeline.h | 135
 kernel/xenomai-v3.2.4/lib/vxworks/init.c | 105
 kernel/drivers/xenomai/analogy/buffer.c | 1
 kernel/drivers/xenomai/analogy/intel/Kconfig | 1
 kernel/include/linux/regmap.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/sched.h | 63
 kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h | 1
 kernel/include/asm-generic/xenomai/syscall32.h | 1
 kernel/include/linux/clocksource.h | 52
 kernel/drivers/xenomai/net/stack/socket.c | 1
 kernel/drivers/clocksource/dw_apb_timer.c | 40
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_conn_event.c | 364
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile | 19
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/Makefile | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c | 954
 kernel/xenomai-v3.2.4/lib/boilerplate/version.c | 55
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h | 83
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c | 651
 kernel/arch/x86/include/asm/dovetail.h | 45
 kernel/arch/x86/include/asm/idtentry.h | 93
 kernel/kernel/xenomai/posix/mqueue.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A.c | 1188
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c | 415
 kernel/kernel/irq/irqdesc.c | 9
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c | 285
 kernel/xenomai-v3.2.4/testsuite/smokey/y2038/Makefile.am | 10
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h | 24
 kernel/include/asm-generic/atomic.h | 12
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c | 1733
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h | 77
 kernel/drivers/xenomai/net/drivers/via-rhine.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.ipfragmentation | 49
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Makefile | 9
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-sched.h | 61
 kernel/drivers/xenomai/can/mscan/rtcan_mscan_proc.c | 1
 kernel/kernel/xenomai/posix/mqueue.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c | 458
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev_mgr.h | 39
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h | 272
 kernel/include/linux/fcntl.h | 2
 kernel/arch/x86/kernel/i8259.c | 3
 kernel/xenomai-v3.2.4/include/boilerplate/compiler.h | 91
 kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h | 554
 kernel/include/xenomai/cobalt/kernel/sched-sporadic.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h | 461
 kernel/drivers/xenomai/ipc/Kconfig | 1
 kernel/include/xenomai/pipeline/machine.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTnet.oxy | 1150
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h | 24
 kernel/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c | 1
 kernel/drivers/xenomai/ipc/internal.h | 1
 kernel/xenomai-v3.2.4/scripts/dynlist.ld | 3
 kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c | 3
 kernel/drivers/xenomai/net/stack/include/ipv4/udp.h | 1
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_proto.c | 1
 kernel/include/xenomai/cobalt/kernel/time.h | 1
 kernel/xenomai-v3.2.4/lib/cobalt/umm.h | 31
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_timer.h | 34
 kernel/xenomai-v3.2.4/lib/cobalt/umm.c | 136
 kernel/xenomai-v3.2.4/include/cobalt/sys/mman.h | 43
 kernel/include/xenomai/rtdm/uapi/spi.h | 1
 kernel/arch/arm64/boot/dts/rockchip/rk3588s.dtsi | 0
 kernel/kernel/irq/pipeline.c | 1764
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_chrdev.c | 241
 kernel/arch/arm64/include/asm/fpsimd.h | 1
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/features.c | 70
 kernel/xenomai-v3.2.4/lib/psos/sem.c | 231
 kernel/drivers/gpio/gpio-mxc.c | 3
 kernel/xenomai-v3.2.4/kernel/cobalt/map.c | 265
 kernel/xenomai-v3.2.4/lib/psos/sem.h | 35
 kernel/xenomai-v3.2.4/include/alchemy/task.h | 213
 kernel/arch/x86/include/asm/i8259.h | 2
 kernel/Documentation/dovetail.rst | 30
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_timer.c | 110
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h | 83
 kernel/arch/arm/mm/fault.c | 109
 kernel/include/linux/xenomai/wrappers.h | 1
 kernel/drivers/xenomai/net/drivers/igb/e1000_82575.c | 1
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c | 1
 kernel/drivers/dma/bcm2835-dma.c | 148
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c | 1
 kernel/drivers/xenomai/net/drivers/igb/e1000_mac.h | 1
 kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-readwrite.c | 186
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/fptest.h | 132
 kernel/drivers/spi/spi.c | 295
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-6.c | 77
 kernel/drivers/xenomai/net/drivers/igb/e1000_mac.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c | 446
 kernel/xenomai-v3.2.4/lib/boilerplate/hash.c | 507
 kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.COBALT.adoc | 573
 kernel/xenomai-v3.2.4/utils/slackspot/slackspot.c | 684
 kernel/xenomai-v3.2.4/include/alchemy/queue.h | 172
 kernel/arch/arm/kernel/patch.c | 2
 kernel/xenomai-v3.2.4/testsuite/clocktest/Makefile.am | 18
 kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run-wrapper | 22
 kernel/drivers/xenomai/net/drivers/igb/e1000_82575.h | 1
 kernel/drivers/xenomai/net/stack/include/ipv4/ip_input.h | 1
 kernel/kernel/locking/lockdep_internals.h | 4
 kernel/drivers/pci/controller/dwc/pcie-designware-host.c | 1
 kernel/include/xenomai/rtdm/fd.h | 1
 kernel/xenomai-v3.2.4/lib/cobalt/printf.c | 919
 kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c | 1
 kernel/drivers/xenomai/can/sja1000/rtcan_isa.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c | 449
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h | 74
 kernel/xenomai-v3.2.4/utils/can/rtcanconfig.c | 258
 kernel/drivers/xenomai/net/drivers/natsemi.c | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/tsc/tsc.c | 186
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h | 28
 kernel/xenomai-v3.2.4/scripts/wrap-link.sh | 213
 kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h | 157
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/features.h | 1
 kernel/xenomai-v3.2.4/include/vxworks/lstLib.h | 148
 kernel/arch/x86/kernel/traps.c | 159
 kernel/kernel/trace/trace_functions.c | 4
 kernel/drivers/mfd/tps65217.c | 1
 kernel/drivers/xenomai/analogy/Makefile | 1
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c | 1
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/features.h | 63
 kernel/include/linux/context_tracking_state.h | 2
 kernel/include/xenomai/rtdm/analogy/device.h | 1
 kernel/drivers/xenomai/can/rtcan_raw.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_port.h | 113
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h | 1
 kernel/drivers/xenomai/can/rtcan_raw.c | 1
 kernel/include/xenomai/rtdm/udd.h | 1
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-2.c | 119
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h | 80
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c | 466
 kernel/arch/x86/kernel/hpet.c | 10
 kernel/arch/arm64/xenomai/ipipe/Makefile | 1
 kernel/fs/udf/inode.c | 76
 kernel/kernel/xenomai/sched-sporadic.c | 1
 kernel/arch/x86/include/asm/tlbflush.h | 8
 kernel/xenomai-v3.2.4/testsuite/smokey/iddp/Makefile.am | 10
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h | 75
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/fptest.h | 62
 kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/Makefile.am | 10
 kernel/xenomai-v3.2.4/include/trank/rtdk.h | 38
 kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/iovec.c | 103
 kernel/drivers/xenomai/net/drivers/e1000/e1000.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/syscall.h | 30
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_conn_event.h | 69
 kernel/kernel/time/tick-common.c | 19
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c | 2752
 kernel/drivers/xenomai/can/rtcan_raw_dev.c | 1
 kernel/drivers/xenomai/analogy/rtdm_helpers.c | 1
 kernel/kernel/xenomai/timer.c | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/vdso_fallback.h | 16
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/socket.c | 395
 kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h | 10
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c | 2513
 kernel/drivers/xenomai/analogy/national_instruments/mite.h | 1
 kernel/xenomai-v3.2.4/lib/alchemy/cond.h | 40
 kernel/kernel/xenomai/posix/memory.h | 1
 kernel/fs/exec.c | 14
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h | 24
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c | 104
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/Makefile.am | 13
 kernel/kernel/xenomai/posix/memory.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h | 175
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb_fifo.h | 144
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_proc.c | 1
 kernel/drivers/xenomai/analogy/national_instruments/mite.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c | 299
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32-table.h | 70
 kernel/include/xenomai/cobalt/uapi/kernel/heap.h | 1
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcansend.adoc | 86
 kernel/kernel/xenomai/synch.c | 1
 kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-cobalt.c | 153
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h | 16
 kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h | 172
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h | 110
 kernel/xenomai-v3.2.4/lib/cobalt/attr.c | 148
 kernel/kernel/xenomai/rtdm/core.c | 1
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mutex-1.c | 148
 kernel/kernel/xenomai/posix/compat.c | 1
 kernel/drivers/xenomai/net/stack/ipv4/udp/Makefile | 1
 kernel/kernel/xenomai/debug.h | 1
 kernel/drivers/xenomai/net/stack/include/rtdev.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c | 2582
 kernel/kernel/time/Makefile | 1
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-omap2-mcspi-rt.c | 1025
 kernel/kernel/xenomai/debug.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig | 25
 kernel/xenomai-v3.2.4/demo/alchemy/cobalt/cross-link.c | 328
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c | 1
 kernel/include/linux/preempt.h | 57
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/features.h | 43
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h | 340
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall.h | 83
 kernel/arch/x86/kernel/fpu/signal.c | 29
 kernel/include/trace/events/irq.h | 42
 kernel/drivers/xenomai/net/stack/include/rtwlan_io.h | 1
 kernel/xenomai-v3.2.4/include/trank/native/timer.h | 47
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h | 1
 kernel/drivers/xenomai/testing/switchtest.c | 1
 kernel/drivers/spi/Kconfig | 11
 kernel/include/linux/netdevice.h | 94
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/INCLUDE.policy | 15
 kernel/drivers/xenomai/net/drivers/freescale/Makefile | 1
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-9.c | 70
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig | 6
 kernel/kernel/ptrace.c | 2
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg.h | 47
 kernel/arch/arm/vfp/vfphw.S | 2
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h | 218
 kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h | 881
 kernel/xenomai-v3.2.4/config/ac_prog_cc_for_build.m4 | 108
 kernel/xenomai-v3.2.4/include/Makefile.am | 31
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/machine.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h | 649
 kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-html.conf.in | 196
 kernel/xenomai-v3.2.4/kernel/drivers/testing/rtdmtest.c | 293
 kernel/kernel/entry/common.c | 196
 kernel/xenomai-v3.2.4/kernel/drivers/Makefile | 1
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sirq.h | 59
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h | 102
 kernel/xenomai-v3.2.4/lib/vxworks/tickLib.h | 26
 kernel/arch/x86/include/asm/irq_vectors.h | 11
 kernel/xenomai-v3.2.4/lib/alchemy/cond.c | 497
 kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/udd/Kconfig | 10
 kernel/xenomai-v3.2.4/lib/cobalt/parse_vdso.c | 281
 kernel/xenomai-v3.2.4/include/copperplate/timerobj.h | 71
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/machine.c | 67
 kernel/xenomai-v3.2.4/lib/vxworks/tickLib.c | 45
 kernel/include/xenomai/cobalt/kernel/compat.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/testing/switchtest.c | 764
 kernel/drivers/xenomai/net/stack/include/ipv4/ip_output.h | 1
 kernel/drivers/xenomai/net/stack/rtnet_chrdev.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile | 6
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/README | 58
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c | 215
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/thread.h | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/tlsf.c | 123
 kernel/drivers/misc/atemsys-main/atemsys.h | 428
 kernel/xenomai-v3.2.4/include/COPYING | 305
 kernel/drivers/irqchip/irq-imx-irqsteer.c | 3
 kernel/drivers/misc/atemsys-main/atemsys.c | 4885
 kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-label.c | 221
 kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h | 46
 kernel/xenomai-v3.2.4/include/cobalt/kernel/Makefile.am | 37
 kernel/drivers/xenomai/gpio/Kconfig | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c | 3385
 kernel/kernel/signal.c | 12
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c | 798
 kernel/xenomai-v3.2.4/lib/vxworks/kernLib.c | 61
 kernel/include/xenomai/cobalt/uapi/asm-generic/features.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile | 12
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h | 40
 kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h | 35
 kernel/arch/arm/include/asm/trace/exceptions.h | 62
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile | 5
 kernel/fs/ioctl.c | 16
 kernel/drivers/irqchip/irq-gic.c | 5
 kernel/drivers/xenomai/net/drivers/e1000e/ich8lan.c | 1
 kernel/arch/x86/kernel/apic/apic_flat_64.c | 4
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c | 151
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-8.c | 108
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-2.c | 144
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.tcp | 52
 kernel/xenomai-v3.2.4/lib/boilerplate/heapmem.c | 728
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/pipe-1.c | 139
 kernel/xenomai-v3.2.4/utils/net/rtiwconfig.c | 236
 kernel/drivers/clocksource/timer-ti-dm-systimer.c | 29
 kernel/kernel/xenomai/rtdm/Makefile | 1
 kernel/arch/arm64/xenomai/ipipe/machine.c | 1
 kernel/kernel/xenomai/rtdm/internal.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/can/README | 143
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_input.c | 159
 kernel/arch/arm/include/asm/vdso/gettimeofday.h | 60
 kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h | 44
 kernel/xenomai-v3.2.4/lib/analogy/sys.c | 213
 kernel/include/xenomai/version.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c | 3708
 kernel/xenomai-v3.2.4/lib/trank/init.c | 37
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c | 524
 kernel/drivers/xenomai/net/stack/include/ipv4/icmp.h | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/thread.h | 32
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c | 1
 kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-1.c | 61
 kernel/xenomai-v3.2.4/doc/asciidoc/MIGRATION.adoc | 1935
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Kconfig | 75
 kernel/xenomai-v3.2.4/include/rtdm/uapi/Makefile.am | 18
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/Makefile.am | 10
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcfg | 135
 kernel/drivers/pinctrl/samsung/pinctrl-samsung.c | 22
 kernel/include/xenomai/pipeline/lock.h | 1
 kernel/kernel/xenomai/rtdm/wrappers.c | 1
 kernel/drivers/xenomai/net/stack/include/rtnet_internal.h | 1
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/wrappers.h | 1
 kernel/drivers/pinctrl/samsung/pinctrl-samsung.h | 2
 kernel/xenomai-v3.2.4/include/alchemy/sem.h | 108
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall32.h | 1
 kernel/xenomai-v3.2.4/lib/copperplate/traceobj.c | 330
 kernel/arch/x86/include/asm/syscall.h | 5
 kernel/kernel/trace/trace_sched_wakeup.c | 2
 kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c | 1
 kernel/xenomai-v3.2.4/utils/can/rtcanrecv.c | 324
 kernel/xenomai-v3.2.4/include/alchemy/pipe.h | 87
 kernel/include/xenomai/rtdm/analogy/transfer.h | 1
 kernel/xenomai-v3.2.4/include/vxworks/tickLib.h | 41
 kernel/xenomai-v3.2.4/lib/boilerplate/obstack.c | 356
 kernel/arch/arm/kernel/smp_twd.c | 7
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/Makefile | 5
 kernel/include/linux/lockdep.h | 111
 kernel/xenomai-v3.2.4/lib/smokey/init.c | 577
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig | 5
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h | 1
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c | 1
 kernel/include/xenomai/rtdm/analogy/channel_range.h | 1
 kernel/drivers/xenomai/net/stack/include/rtskb.h | 1
 kernel/include/linux/sched/coredump.h | 1
 kernel/xenomai-v3.2.4/config/acinclude.m4 | 579
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h | 425
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/Makefile.am | 10
 kernel/drivers/xenomai/can/peak_canfd/Makefile | 1
 kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/fpu-stress.c | 95
 kernel/xenomai-v3.2.4/utils/ps/rtps.c | 91
 kernel/xenomai-v3.2.4/lib/trank/COPYING | 458
 kernel/include/xenomai/rtdm/uapi/analogy.h | 1
 kernel/xenomai-v3.2.4/lib/cobalt/malloc-nowrap.c | 30
 kernel/arch/arm64/kernel/Makefile | 1
 kernel/include/xenomai/rtdm/serial.h | 1
 kernel/security/selinux/hooks.c | 4
 kernel/drivers/xenomai/serial/mpc52xx_uart.c | 1
 kernel/drivers/xenomai/net/drivers/8139too.c | 1
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt_numa.h | 277
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/syscall.c | 53
 kernel/xenomai-v3.2.4/kernel/cobalt/sched.c | 1493
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c | 156
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig | 72
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c | 5987
 kernel/drivers/gpu/ipu-v3/ipu-common.c | 1
 kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c | 37
 kernel/xenomai-v3.2.4/lib/copperplate/regd/sysregfs.h | 60
 kernel/include/xenomai/cobalt/kernel/assert.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/Makefile.am | 5
 kernel/xenomai-v3.2.4/lib/vxworks/README | 66
 kernel/drivers/xenomai/net/stack/rtmac/nomac/Kconfig | 1
 kernel/include/xenomai/cobalt/uapi/mutex.h | 1
 kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.h | 1
 kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h | 44
 kernel/arch/x86/kernel/apic/vector.c | 72
 kernel/xenomai-v3.2.4/kernel/cobalt/heap.c | 863
 kernel/xenomai-v3.2.4/kernel/cobalt/udev/rtdm.rules | 2
 kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c | 894
 kernel/arch/x86/kernel/process.c | 16
 kernel/xenomai-v3.2.4/Makefile.am | 78
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/Makefile.am | 2
 kernel/drivers/xenomai/net/drivers/e1000e/phy.c | 1
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h | 1
 kernel/xenomai-v3.2.4/include/rtdm/rtdm.h | 59
 kernel/xenomai-v3.2.4/kernel/cobalt/clock.c | 830
 kernel/arch/arm64/Makefile | 3
 kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-common.conf.in | 879
 kernel/xenomai-v3.2.4/include/xenomai/tunables.h | 24
 kernel/kernel/sched/wait.c | 2
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c | 990
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/trace.h | 23
 kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-mercury.c | 127
 kernel/xenomai-v3.2.4/testsuite/switchtest/switchtest.c | 1572
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-1.c | 97
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_syms.c | 36
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c | 1
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-7.c | 42
 kernel/drivers/tty/serial/amba-pl011.c | 39
 kernel/arch/arm64/include/asm/mmu_context.h | 34
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.c | 319
 kernel/include/linux/sched.h | 10
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c | 2693
 kernel/xenomai-v3.2.4/lib/mercury/Makefile.am | 9
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.h | 32
 kernel/drivers/xenomai/net/stack/ipv4/tcp/Kconfig | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/calibration.h | 63
 kernel/drivers/xenomai/net/drivers/igb/e1000_regs.h | 1
 kernel/xenomai-v3.2.4/lib/copperplate/semobj.c | 354
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c | 1
 kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 8
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c | 392
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c | 803
 kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c | 653
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig | 42
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c | 893
 kernel/drivers/xenomai/can/sja1000/Kconfig | 1
 kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h | 1
 kernel/include/linux/clockchips.h | 71
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h | 61
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h | 57
 kernel/drivers/xenomai/ipc/iddp.c | 1
 kernel/arch/arm/include/asm/dovetail.h | 61
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c | 1132
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/eth.c | 131
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/machine.c | 44
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcap | 39
 kernel/drivers/base/regmap/regmap-irq.c | 1
 kernel/drivers/xenomai/net/stack/include/rtwlan.h | 1
 kernel/xenomai-v3.2.4/scripts/bootstrap | 3
 kernel/xenomai-v3.2.4/utils/chkkconf/checkconfig.c | 331
 kernel/include/linux/skbuff.h | 69
 kernel/drivers/xenomai/gpiopwm/gpiopwm.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c | 455
 kernel/arch/arm64/include/asm/dovetail.h | 69
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/wrappers.h | 24
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_module.c | 80
 kernel/kernel/irq/manage.c | 89
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h | 118
 kernel/drivers/xenomai/net/stack/include/rtdev_mgr.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/serial/Kconfig | 79
 kernel/include/linux/irq_pipeline.h | 145
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-1.c | 151
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h | 1
 kernel/xenomai-v3.2.4/include/cobalt/mqueue.h | 74
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h | 22
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/wrappers.h | 23
 kernel/include/xenomai/cobalt/kernel/heap.h | 1
 kernel/kernel/xenomai/lock.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/machine.h | 72
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-4.c | 69
 kernel/drivers/xenomai/analogy/national_instruments/ni_670x.c | 1
 kernel/kernel/xenomai/pipeline/syscall.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_worker.c | 231
 kernel/kernel/power/hibernate.c | 3
 kernel/xenomai-v3.2.4/scripts/xeno-config-mercury.in | 221
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile | 5
 kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h | 360
 kernel/drivers/xenomai/analogy/device.c | 1
 kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-echo.c | 248
 kernel/arch/arm/common/mcpm_entry.c | 12
 kernel/drivers/xenomai/net/addons/proxy.c | 1
 kernel/include/xenomai/pipeline/irq.h | 1
 kernel/xenomai-v3.2.4/include/vxworks/kernLib.h | 39
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/Makefile.am | 28
 kernel/xenomai-v3.2.4/utils/analogy/insn_bits.c | 227
 kernel/drivers/xenomai/net/drivers/freescale/fec_main.c | 1
 kernel/include/linux/fs.h | 10
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h | 1361
 kernel/xenomai-v3.2.4/include/trank/native/Makefile.am | 16
 kernel/xenomai-v3.2.4/include/psos/tunables.h | 49
 kernel/xenomai-v3.2.4/lib/alchemy/pipe.c | 675
 kernel/include/linux/kernel.h | 8
 kernel/lib/smp_processor_id.c | 4
 kernel/arch/arm/include/asm/cmpxchg.h | 8
 kernel/xenomai-v3.2.4/scripts/xeno-config-cobalt.in | 292
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c | 334
 kernel/drivers/xenomai/serial/16550A.c | 1
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h | 42
 kernel/include/xenomai/cobalt/kernel/ppd.h | 1
 kernel/xenomai-v3.2.4/include/trank/native/heap.h | 28
 kernel/xenomai-v3.2.4/include/rtdm/Makefile.am | 20
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac.h | 92
 kernel/include/xenomai/cobalt/kernel/sched-quota.h | 1
 kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-label.c | 329
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTmac.spec | 44
 kernel/drivers/xenomai/net/drivers/freescale/fec.h | 1
 kernel/xenomai-v3.2.4/lib/alchemy/pipe.h | 40
 kernel/xenomai-v3.2.4/kernel/cobalt/arith.c | 65
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h | 446
 kernel/drivers/xenomai/can/sja1000/rtcan_adv_pci.c | 1
 kernel/include/xenomai/cobalt/uapi/kernel/urw.h | 1
 kernel/drivers/xenomai/net/stack/include/ipv4/ip_sock.h | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c | 1985
 kernel/drivers/xenomai/net/stack/include/ipv4/route.h | 1
 kernel/xenomai-v3.2.4/testsuite/gpiobench/gpiobench.c | 680
 kernel/drivers/xenomai/testing/Makefile | 1
 kernel/net/core/skbuff.c | 115
 kernel/xenomai-v3.2.4/include/vxworks/Makefile.am | 18
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/syscall.h | 133
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/arp.h | 51
 kernel/arch/arm/mm/cache-l2x0.c | 47
 kernel/arch/x86/mm/tlb.c | 50
 kernel/kernel/xenomai/sched-weak.c | 1
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c | 1515
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/switch.S | 219
 kernel/xenomai-v3.2.4/lib/vxworks/sysLib.c | 53
 kernel/arch/arm/kernel/traps.c | 2
 kernel/xenomai-v3.2.4/kernel/cobalt/select.c | 461
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-test.adoc | 47
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/tcp.h | 50
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h | 711
 kernel/arch/arm64/xenomai/ipipe/thread.c | 1
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/clocktest.adoc | 53
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c | 41
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c | 628
 kernel/xenomai-v3.2.4/lib/cobalt/cond.c | 689
 kernel/kernel/xenomai/posix/timer.c | 1
 kernel/lib/vdso/Kconfig | 8
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanconfig.adoc | 63
kernel/kernel/xenomai/posix/timer.h | 1 kernel/include/asm-generic/xenomai/machine.h | 1 kernel/include/linux/kvm_host.h | 55 kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-stream.c | 254 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/Makefile.am | 2 kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/heapmem.c | 51 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/arith.h | 243 kernel/xenomai-v3.2.4/include/boilerplate/shared-list.h | 346 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_output.c | 267 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h | 33 kernel/kernel/locking/lockdep.c | 213 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h | 226 kernel/modules.builtin.modinfo | 0 kernel/xenomai-v3.2.4/include/copperplate/cluster.h | 308 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/arith.h | 34 kernel/xenomai-v3.2.4/lib/copperplate/threadobj.c | 1835 kernel/drivers/xenomai/net/drivers/rt_at91_ether.h | 1 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/tsc.h | 38 kernel/drivers/xenomai/net/Kconfig | 1 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h | 1 kernel/drivers/gpio/gpio-omap.c | 6 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c | 798 kernel/arch/x86/include/asm/irq_stack.h | 7 kernel/drivers/xenomai/net/stack/ipv4/Kconfig | 1 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-1.c | 32 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h | 38 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/time.h | 16 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h | 1 kernel/net/sched/Kconfig | 23 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c | 52 kernel/kernel/dovetail.c | 450 kernel/xenomai-v3.2.4/lib/boilerplate/init/bootstrap.c | 172 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-3.c | 75 kernel/kernel/xenomai/rtdm/fd.c | 1 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000.h | 1 kernel/drivers/xenomai/can/Kconfig | 1 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h | 168 kernel/drivers/xenomai/Makefile | 1 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/smi.c | 1 kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c | 308 kernel/arch/x86/kvm/vmx/vmx.c | 37 kernel/kernel/xenomai/pipeline/intr.c | 1 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c | 2106 kernel/xenomai-v3.2.4/include/rtdm/gpiopwm.h | 24 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/thread.h | 101 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h | 1 kernel/include/asm-generic/irq_pipeline.h | 109 kernel/drivers/xenomai/net/drivers/experimental/Kconfig | 1 kernel/arch/arm/include/asm/assembler.h | 16 kernel/drivers/xenomai/net/drivers/e1000e/lib.c | 1 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h | 1192 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_input.h | 45 kernel/drivers/clocksource/arm_global_timer.c | 41 kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi | 2 kernel/xenomai-v3.2.4/include/cobalt/stdlib.h | 40 kernel/include/xenomai/cobalt/uapi/asm-generic/syscall.h | 1 kernel/xenomai-v3.2.4/lib/vxworks/semLib.h | 57 kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c | 502 kernel/xenomai-v3.2.4/lib/vxworks/semLib.c | 480 kernel/xenomai-v3.2.4/include/rtdm/serial.h | 79 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile | 13 
kernel/include/linux/spinlock_pipeline.h | 387 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile | 16 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c | 66 kernel/kernel/xenomai/posix/Makefile | 1 kernel/xenomai-v3.2.4/config/apirev | 1 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/stack_mgr.h | 95 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h | 115 kernel/xenomai-v3.2.4/utils/autotune/autotune.c | 338 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c | 1536 kernel/include/xenomai/rtdm/analogy/subdevice.h | 1 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c | 3184 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Kconfig | 23 kernel/drivers/xenomai/analogy/transfer.c | 1 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h | 39 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/Makefile.am | 17 kernel/drivers/xenomai/can/rtcan_module.c | 1 kernel/arch/arm/include/asm/mmu_context.h | 40 kernel/xenomai-v3.2.4/.gitignore | 17 kernel/arch/arm/kernel/entry-common.S | 79 kernel/drivers/soc/ti/ti_sci_inta_msi.c | 1 kernel/kernel/xenomai/rtdm/drvlib.c | 1 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip.h | 490 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_ioctl.h | 33 kernel/drivers/irqchip/irq-gic-v2m.c | 3 kernel/arch/arm/Kconfig | 9 kernel/include/xenomai/cobalt/kernel/sched-rt.h | 1 kernel/xenomai-v3.2.4/testsuite/smokey/tsc/Makefile.am | 8 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h | 603 kernel/xenomai-v3.2.4/include/xenomai/Makefile.am | 6 kernel/drivers/pci/controller/pcie-brcmstb.c | 2 kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run.c | 678 kernel/arch/x86/kernel/dumpstack.c | 12 kernel/drivers/xenomai/net/drivers/eth1394.c | 1 kernel/xenomai-v3.2.4/lib/vxworks/errnoLib.c | 144 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h | 66 kernel/include/dovetail/poll.h | 12 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/wrappers.h | 25 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/machine.h | 1 kernel/include/xenomai/linux/stdarg.h | 1 kernel/arch/x86/include/asm/irqflags.h | 71 kernel/xenomai-v3.2.4/include/cobalt/uapi/Makefile.am | 18 kernel/include/xenomai/cobalt/kernel/clock.h | 1 kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-config.adoc | 217 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Makefile | 7 kernel/include/xenomai/cobalt/kernel/schedqueue.h | 1 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/arp.c | 212 kernel/xenomai-v3.2.4/doc/gitdoc/Makefile.am | 43 kernel/kernel/trace/trace_branch.c | 4 kernel/xenomai-v3.2.4/kernel/drivers/udd/Makefile | 5 kernel/arch/x86/kernel/cpu/mshyperv.c | 6 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h | 9 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4_chrdev.h | 94 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/syscall.h | 81 kernel/drivers/clocksource/exynos_mct.c | 50 kernel/kernel/trace/trace_clock.c | 4 kernel/xenomai-v3.2.4/lib/alchemy/reference.h | 22 kernel/xenomai-v3.2.4/lib/alchemy/timer.c | 113 kernel/xenomai-v3.2.4/include/alchemy/timer.h | 125 kernel/xenomai-v3.2.4/lib/alchemy/buffer.h | 49 kernel/xenomai-v3.2.4/lib/alchemy/timer.h | 25 kernel/drivers/xenomai/net/drivers/experimental/Makefile | 1 kernel/xenomai-v3.2.4/utils/net/rtcfg.c | 562 kernel/xenomai-v3.2.4/.clang-format | 493 kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/rtdm.c | 202 
kernel/kernel/xenomai/Kconfig | 1 kernel/drivers/xenomai/net/drivers/Makefile | 1 kernel/drivers/xenomai/net/stack/include/ipv4_chrdev.h | 1 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c | 1 kernel/include/xenomai/cobalt/kernel/arith.h | 1 kernel/xenomai-v3.2.4/demo/alchemy/Makefile.am | 32 kernel/include/asm-generic/xenomai/dovetail/thread.h | 1 kernel/arch/x86/kernel/idt.c | 4 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h | 1 kernel/kernel/irq/cpuhotplug.c | 5 kernel/drivers/xenomai/net/drivers/r8169.c | 1 kernel/drivers/xenomai/net/drivers/Kconfig | 1 kernel/drivers/xenomai/gpio/gpio-zynq7000.c | 1 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c | 190 kernel/drivers/xenomai/analogy/Kconfig | 1 kernel/drivers/xenomai/net/stack/ipv4/ip_fragment.c | 1 kernel/drivers/xenomai/spi/spi-omap2-mcspi-rt.c | 1 kernel/xenomai-v3.2.4/config/docbook.m4 | 170 kernel/arch/x86/include/asm/fpu/internal.h | 31 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h | 27 kernel/xenomai-v3.2.4/testsuite/smokey/main.c | 53 kernel/drivers/spmi/spmi-pmic-arb.c | 4 kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h | 42 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_proto.c | 407 kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-1.c | 149 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/features.h | 30 kernel/kernel/time/vsyscall.c | 31 kernel/xenomai-v3.2.4/include/trank/native/types.h | 25 kernel/xenomai-v3.2.4/lib/alchemy/buffer.c | 953 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c | 1828 kernel/drivers/misc/atemsys-main/COPYING | 339 kernel/include/linux/irq.h | 50 kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h | 135 kernel/xenomai-v3.2.4/include/cobalt/signal.h | 61 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/API.CHANGES | 53 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h | 154 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_vnic.h | 59 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/features.h | 30 kernel/xenomai-v3.2.4/utils/corectl/corectl.c | 153 kernel/drivers/irqchip/irq-sunxi-nmi.c | 4 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/arith.h | 142 kernel/kernel/time/clocksource.c | 30 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h | 24 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/tick.h | 18 kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h | 106 kernel/drivers/xenomai/testing/timerbench.c | 1 kernel/include/asm-generic/xenomai/pci_ids.h | 1 kernel/xenomai-v3.2.4/lib/copperplate/registry.c | 1022 kernel/include/xenomai/rtdm/uapi/gpiopwm.h | 1 kernel/xenomai-v3.2.4/include/mercury/pthread.h | 21 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-5.c | 107 kernel/xenomai-v3.2.4/lib/cobalt/signal.c | 128 kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/Makefile.am | 10 kernel/kernel/xenomai/registry.c | 1 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/calibration.h | 38 kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig | 81 kernel/drivers/tty/serial/samsung_tty.c | 4 kernel/drivers/xenomai/analogy/national_instruments/Kconfig | 1 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/Makefile.am | 13 kernel/arch/arm/include/asm/thread_info.h | 20 kernel/xenomai-v3.2.4/lib/alchemy/sem.h | 39 kernel/xenomai-v3.2.4/lib/copperplate/heapobj-pshared.c | 1269 kernel/drivers/irqchip/irq-omap-intc.c | 2 kernel/xenomai-v3.2.4/lib/alchemy/sem.c | 576 
kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_proc.h | 63 kernel/xenomai-v3.2.4/utils/net/tdmacfg.c | 332 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_file.h | 43 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c | 1 kernel/drivers/xenomai/can/sja1000/rtcan_esd_pci.c | 1 kernel/drivers/clocksource/timer-sun4i.c | 5 kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h | 1 kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h | 39 kernel/xenomai-v3.2.4/kernel/drivers/can/CREDITS | 37 kernel/drivers/xenomai/analogy/intel/parport.c | 1 kernel/include/xenomai/cobalt/uapi/kernel/trace.h | 1 kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c | 1 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h | 150 kernel/drivers/xenomai/can/peak_canfd/Kconfig | 1 kernel/drivers/xenomai/spi/Makefile | 1 kernel/arch/arm64/boot/dts/broadcom/Makefile | 1 kernel/drivers/xenomai/net/stack/rtmac/rtmac_disc.c | 1 kernel/xenomai-v3.2.4/include/trank/rtdm/rtcan.h | 23 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h | 82 kernel/xenomai-v3.2.4/demo/posix/cyclictest/cyclictest.c | 2269 kernel/mm/mprotect.c | 4 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/Makefile.am | 2 kernel/xenomai-v3.2.4/include/cobalt/sys/time.h | 39 kernel/include/xenomai/pipeline/kevents.h | 1 kernel/xenomai-v3.2.4/lib/boilerplate/COPYING | 458 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/calibration.h | 71 kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.c | 1 kernel/drivers/xenomai/net/drivers/tulip/tulip.h | 1 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/af_packet.c | 659 kernel/include/xenomai/rtdm/testing.h | 1 kernel/include/linux/intel-iommu.h | 2 kernel/include/xenomai/pipeline/pipeline.h | 1 kernel/include/xenomai/cobalt/kernel/stat.h | 1 kernel/xenomai-v3.2.4/include/cobalt/sys/socket.h | 88 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c | 1 kernel/net/socket.c | 127 kernel/xenomai-v3.2.4/include/vxworks/errnoLib.h | 83 kernel/drivers/gpio/gpio-pca953x.c | 1 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h | 1 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h | 15 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h | 109 kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.h | 1 kernel/xenomai-v3.2.4/lib/cobalt/timerfd.c | 63 kernel/kernel/time/hrtimer.c | 1 kernel/xenomai-v3.2.4/include/rtdm/gpio.h | 24 kernel/xenomai-v3.2.4/utils/analogy/analogy_config.c | 305 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.h | 53 kernel/drivers/xenomai/serial/Makefile | 1 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h | 25 kernel/drivers/xenomai/serial/Kconfig | 1 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/client.c | 298 kernel/xenomai-v3.2.4/lib/cobalt/arch/Makefile.am | 4 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.c | 198 kernel/drivers/xenomai/can/rtcan_dev.h | 1 kernel/drivers/xenomai/can/rtcan_dev.c | 1 kernel/drivers/xenomai/net/stack/include/rtnet_checksum.h | 1 kernel/drivers/xenomai/net/drivers/igb/igb_hwmon.c | 1 kernel/include/trace/events/cobalt-posix.h | 1 kernel/xenomai-v3.2.4/utils/can/Makefile.am | 36 kernel/include/linux/irqdesc.h | 27 kernel/xenomai-v3.2.4/include/boilerplate/time.h | 101 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h | 31 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c | 543 
kernel/include/xenomai/cobalt/kernel/sched.h | 1 kernel/xenomai-v3.2.4/demo/alchemy/altency.c | 699 kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts | 1676 kernel/arch/x86/kernel/apic/apic_numachip.c | 4 kernel/xenomai-v3.2.4/testsuite/smokey/y2038/syscall-tests.c | 1203 kernel/kernel/xenomai/pipe.c | 1 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.eth1394 | 65 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/Makefile.am | 13 kernel/xenomai-v3.2.4/include/boilerplate/list.h | 36 kernel/mm/kasan/report.c | 6 kernel/arch/x86/entry/common.c | 32 kernel/include/xenomai/rtdm/uapi/net.h | 1 kernel/fs/file.c | 8 kernel/arch/arm64/xenomai/dovetail/Makefile | 1 2,407 files changed, 417,045 insertions(+), 2,671 deletions(-) diff --git a/kernel/Documentation/dovetail.rst b/kernel/Documentation/dovetail.rst new file mode 100644 index 0000000..5d37b04 --- /dev/null +++ b/kernel/Documentation/dovetail.rst @@ -0,0 +1,30 @@ +======================== +Introduction to Dovetail +======================== + +:Author: Philippe Gerum +:Date: 08.04.2020 + +Using Linux as a host for lightweight software cores specialized in +delivering very short and bounded response times has been a popular +way of supporting real-time applications in the embedded space over +the years. + +In this so-called *dual kernel* design, the time-critical work is +immediately delegated to a small companion core running out-of-band +with respect to the regular, in-band kernel activities. Applications +run in user space, obtaining real-time services from the +core. Alternatively, when there is no real-time requirement, threads +can still use the rich GPOS feature set Linux provides such as +networking, data storage or GUIs. + +*Dovetail* introduces a high-priority execution stage into the main +kernel logic reserved for such a companion core to run on. At any +time, out-of-band activities from this stage can preempt the common, +in-band work. A companion core can be implemented as a driver, +which connects to the main kernel via the Dovetail interface for +delivering ultra-low latency scheduling capabilities to applications. + +Dovetail is fully described at https://evlproject.org/dovetail/. +The reference implementation of a Dovetail-based companion core is +maintained at https://evlproject.org/core/. diff --git a/kernel/Makefile b/kernel/Makefile index 8bac90b..dba458a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 10 -SUBLEVEL = 160 +SUBLEVEL = 161 EXTRAVERSION = NAME = Dare mighty things diff --git a/kernel/arch/Kconfig b/kernel/arch/Kconfig index c3a40c5..42eb62f 100644 --- a/kernel/arch/Kconfig +++ b/kernel/arch/Kconfig @@ -229,6 +229,9 @@ config HAVE_NMI bool +config HAVE_PERCPU_PREEMPT_COUNT + bool + # # An arch should select this if it provides all these things: # diff --git a/kernel/arch/arm/Kconfig b/kernel/arch/arm/Kconfig index f6ce22c..57afcf1 100644 --- a/kernel/arch/arm/Kconfig +++ b/kernel/arch/arm/Kconfig @@ -236,6 +236,11 @@ config ARCH_MTD_XIP bool +# Limited I-pipe compat (syscall routing only).
+config IPIPE_COMPAT + bool + select DOVETAIL_LEGACY_SYSCALL_RANGE + config ARM_PATCH_PHYS_VIRT bool "Patch physical to virtual translations at runtime" if EMBEDDED default y @@ -585,6 +590,8 @@ config ARCH_MULTI_V6_V7 bool select MIGHT_HAVE_CACHE_L2X0 + select HAVE_IRQ_PIPELINE + select HAVE_DOVETAIL if CPU_HAS_ASID config ARCH_MULTI_CPU_AUTO def_bool !(ARCH_MULTI_V4 || ARCH_MULTI_V4T || ARCH_MULTI_V6_V7) @@ -1237,6 +1244,8 @@ MultiThreading at a cost of slightly increased overhead in some places. If unsure say N here. +source "kernel/Kconfig.dovetail" + config HAVE_ARM_SCU bool help diff --git a/kernel/arch/arm/common/mcpm_entry.c b/kernel/arch/arm/common/mcpm_entry.c index 8a9aeeb..53c3be5 100644 --- a/kernel/arch/arm/common/mcpm_entry.c +++ b/kernel/arch/arm/common/mcpm_entry.c @@ -206,7 +206,7 @@ * Since this is called with IRQs enabled, and no arch_spin_lock_irq * variant exists, we need to disable IRQs manually here. */ - local_irq_disable(); + hard_local_irq_disable(); arch_spin_lock(&mcpm_lock); cpu_is_down = !mcpm_cpu_use_count[cluster][cpu]; @@ -230,7 +230,7 @@ ret = platform_ops->cpu_powerup(cpu, cluster); arch_spin_unlock(&mcpm_lock); - local_irq_enable(); + hard_local_irq_enable(); return ret; } @@ -349,7 +349,7 @@ mpidr = read_cpuid_mpidr(); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - local_irq_save(flags); + flags = hard_local_irq_save(); arch_spin_lock(&mcpm_lock); cpu_was_down = !mcpm_cpu_use_count[cluster][cpu]; @@ -363,7 +363,7 @@ platform_ops->cpu_is_up(cpu, cluster); arch_spin_unlock(&mcpm_lock); - local_irq_restore(flags); + hard_local_irq_restore(flags); return 0; } @@ -402,7 +402,7 @@ * infrastructure. Let's play it safe by using cpu_pm_enter() * in case the CPU init code path resets the VFP or similar. 
*/ - local_irq_disable(); + hard_local_irq_disable(); local_fiq_disable(); ret = cpu_pm_enter(); if (!ret) { @@ -410,7 +410,7 @@ cpu_pm_exit(); } local_fiq_enable(); - local_irq_enable(); + hard_local_irq_enable(); if (ret) pr_err("%s returned %d\n", __func__, ret); return ret; diff --git a/kernel/arch/arm/include/asm/assembler.h b/kernel/arch/arm/include/asm/assembler.h index 8ff20f9..e2df9c4 100644 --- a/kernel/arch/arm/include/asm/assembler.h +++ b/kernel/arch/arm/include/asm/assembler.h @@ -122,7 +122,7 @@ .if \save stmdb sp!, {r0-r3, ip, lr} .endif - bl trace_hardirqs_off + bl trace_hardirqs_off_pipelined .if \save ldmia sp!, {r0-r3, ip, lr} .endif @@ -138,13 +138,25 @@ .if \save stmdb sp!, {r0-r3, ip, lr} .endif - bl\cond trace_hardirqs_on + bl\cond trace_hardirqs_on_pipelined .if \save ldmia sp!, {r0-r3, ip, lr} .endif #endif .endm + .macro disable_irq_if_pipelined +#ifdef CONFIG_IRQ_PIPELINE + disable_irq_notrace +#endif + .endm + + .macro enable_irq_if_pipelined +#ifdef CONFIG_IRQ_PIPELINE + enable_irq_notrace +#endif + .endm + .macro disable_irq, save=1 disable_irq_notrace asm_trace_hardirqs_off \save diff --git a/kernel/arch/arm/include/asm/atomic.h b/kernel/arch/arm/include/asm/atomic.h index 455eb19..0cf92e5 100644 --- a/kernel/arch/arm/include/asm/atomic.h +++ b/kernel/arch/arm/include/asm/atomic.h @@ -164,9 +164,9 @@ { \ unsigned long flags; \ \ - raw_local_irq_save(flags); \ + flags = hard_local_irq_save(); \ v->counter c_op i; \ - raw_local_irq_restore(flags); \ + hard_local_irq_restore(flags); \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ @@ -175,10 +175,10 @@ unsigned long flags; \ int val; \ \ - raw_local_irq_save(flags); \ + flags = hard_local_irq_save(); \ v->counter c_op i; \ val = v->counter; \ - raw_local_irq_restore(flags); \ + hard_local_irq_restore(flags); \ \ return val; \ } @@ -189,10 +189,10 @@ unsigned long flags; \ int val; \ \ - raw_local_irq_save(flags); \ + flags = hard_local_irq_save(); \ val = v->counter; \ v->counter c_op i; \ - raw_local_irq_restore(flags); \ + hard_local_irq_restore(flags); \ \ return val; \ } @@ -202,11 +202,11 @@ int ret; unsigned long flags; - raw_local_irq_save(flags); + flags = hard_local_irq_save(); ret = v->counter; if (likely(ret == old)) v->counter = new; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); return ret; } diff --git a/kernel/arch/arm/include/asm/bitops.h b/kernel/arch/arm/include/asm/bitops.h index c92e42a..9779f32 100644 --- a/kernel/arch/arm/include/asm/bitops.h +++ b/kernel/arch/arm/include/asm/bitops.h @@ -40,9 +40,9 @@ p += BIT_WORD(bit); - raw_local_irq_save(flags); + flags = hard_local_irq_save(); *p |= mask; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); } static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) @@ -52,9 +52,9 @@ p += BIT_WORD(bit); - raw_local_irq_save(flags); + flags = hard_local_irq_save(); *p &= ~mask; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); } static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) @@ -64,9 +64,9 @@ p += BIT_WORD(bit); - raw_local_irq_save(flags); + flags = hard_local_irq_save(); *p ^= mask; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); } static inline int @@ -78,10 +78,10 @@ p += BIT_WORD(bit); - raw_local_irq_save(flags); + flags = hard_local_irq_save(); res = *p; *p = res | mask; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); return (res & mask) != 0; } @@ -95,10 +95,10 @@ p += BIT_WORD(bit); - 
raw_local_irq_save(flags); + flags = hard_local_irq_save(); res = *p; *p = res & ~mask; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); return (res & mask) != 0; } @@ -112,10 +112,10 @@ p += BIT_WORD(bit); - raw_local_irq_save(flags); + flags = hard_local_irq_save(); res = *p; *p = res ^ mask; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); return (res & mask) != 0; } diff --git a/kernel/arch/arm/include/asm/cmpxchg.h b/kernel/arch/arm/include/asm/cmpxchg.h index 8b701f8..60ccad3 100644 --- a/kernel/arch/arm/include/asm/cmpxchg.h +++ b/kernel/arch/arm/include/asm/cmpxchg.h @@ -77,17 +77,17 @@ #error SMP is not supported on this platform #endif case 1: - raw_local_irq_save(flags); + flags = hard_local_irq_save(); ret = *(volatile unsigned char *)ptr; *(volatile unsigned char *)ptr = x; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); break; case 4: - raw_local_irq_save(flags); + flags = hard_local_irq_save(); ret = *(volatile unsigned long *)ptr; *(volatile unsigned long *)ptr = x; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); break; #else case 1: diff --git a/kernel/arch/arm/include/asm/dovetail.h b/kernel/arch/arm/include/asm/dovetail.h new file mode 100644 index 0000000..f8fe64a --- /dev/null +++ b/kernel/arch/arm/include/asm/dovetail.h @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2016 Philippe Gerum. + */ +#ifndef _ASM_ARM_DOVETAIL_H +#define _ASM_ARM_DOVETAIL_H + +/* ARM traps */ +#define ARM_TRAP_ACCESS 0 /* Data or instruction access exception */ +#define ARM_TRAP_SECTION 1 /* Section fault */ +#define ARM_TRAP_DABT 2 /* Generic data abort */ +#define ARM_TRAP_PABT 3 /* Prefetch abort */ +#define ARM_TRAP_BREAK 4 /* Instruction breakpoint */ +#define ARM_TRAP_FPU 5 /* Floating point exception */ +#define ARM_TRAP_VFP 6 /* VFP floating point exception */ +#define ARM_TRAP_UNDEFINSTR 7 /* Undefined instruction */ +#define ARM_TRAP_ALIGNMENT 8 /* Unaligned access exception */ + +#if !defined(__ASSEMBLY__) + +#ifdef CONFIG_DOVETAIL + +static inline void arch_dovetail_exec_prepare(void) +{ } + +static inline void arch_dovetail_switch_prepare(bool leave_inband) +{ } + +static inline void arch_dovetail_switch_finish(bool enter_inband) +{ } + +#endif + +/* + * Pass the trap event to the companion core. Return true if running + * in-band afterwards. + */ +#define mark_cond_trap_entry(__trapnr, __regs) \ + ({ \ + oob_trap_notify(__trapnr, __regs); \ + running_inband(); \ + }) + +/* + * Pass the trap event to the companion core. We expect the current + * context to be running on the in-band stage upon return so that our + * caller can tread on common kernel code. 
+ */ +#define mark_trap_entry(__trapnr, __regs) \ + do { \ + bool __ret = mark_cond_trap_entry(__trapnr, __regs); \ + BUG_ON(dovetail_debug() && !__ret); \ + } while (0) + +#define mark_trap_exit(__trapnr, __regs) \ + oob_trap_unwind(__trapnr, __regs) + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_ARM_DOVETAIL_H */ diff --git a/kernel/arch/arm/include/asm/efi.h b/kernel/arch/arm/include/asm/efi.h index 3ee4f43..dedafc6 100644 --- a/kernel/arch/arm/include/asm/efi.h +++ b/kernel/arch/arm/include/asm/efi.h @@ -37,7 +37,11 @@ static inline void efi_set_pgd(struct mm_struct *mm) { + unsigned long flags; + + protect_inband_mm(flags); check_and_switch_context(mm, NULL); + unprotect_inband_mm(flags); } void efi_virtmap_load(void); diff --git a/kernel/arch/arm/include/asm/irq_pipeline.h b/kernel/arch/arm/include/asm/irq_pipeline.h new file mode 100644 index 0000000..5970c6d --- /dev/null +++ b/kernel/arch/arm/include/asm/irq_pipeline.h @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>. + */ +#ifndef _ASM_ARM_IRQ_PIPELINE_H +#define _ASM_ARM_IRQ_PIPELINE_H + +#include <asm-generic/irq_pipeline.h> + +#ifdef CONFIG_IRQ_PIPELINE + +/* + * In order to cope with the limited number of SGIs available to us, + * In-band IPI messages are multiplexed over SGI0, whereas out-of-band + * IPIs are directly mapped to SGI1-2. + */ +#define OOB_NR_IPI 2 +#define OOB_IPI_OFFSET 1 /* SGI1 */ +#define TIMER_OOB_IPI (ipi_irq_base + OOB_IPI_OFFSET) +#define RESCHEDULE_OOB_IPI (TIMER_OOB_IPI + 1) + +extern int ipi_irq_base; + +static inline notrace +unsigned long arch_irqs_virtual_to_native_flags(int stalled) +{ + return (!!stalled) << IRQMASK_I_POS; +} + +static inline notrace +unsigned long arch_irqs_native_to_virtual_flags(unsigned long flags) +{ + return (!!hard_irqs_disabled_flags(flags)) << IRQMASK_i_POS; +} + +static inline notrace unsigned long arch_local_irq_save(void) +{ + int stalled = inband_irq_save(); + barrier(); + return arch_irqs_virtual_to_native_flags(stalled); +} + +static inline notrace void arch_local_irq_enable(void) +{ + barrier(); + inband_irq_enable(); +} + +static inline notrace void arch_local_irq_disable(void) +{ + inband_irq_disable(); + barrier(); +} + +static inline notrace unsigned long arch_local_save_flags(void) +{ + int stalled = inband_irqs_disabled(); + barrier(); + return arch_irqs_virtual_to_native_flags(stalled); +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return native_irqs_disabled_flags(flags); +} + +static inline notrace void arch_local_irq_restore(unsigned long flags) +{ + inband_irq_restore(arch_irqs_disabled_flags(flags)); + barrier(); +} + +static inline +void arch_save_timer_regs(struct pt_regs *dst, struct pt_regs *src) +{ + dst->ARM_cpsr = src->ARM_cpsr; + dst->ARM_pc = src->ARM_pc; +} + +static inline bool arch_steal_pipelined_tick(struct pt_regs *regs) +{ + return !!(regs->ARM_cpsr & IRQMASK_I_BIT); +} + +static inline int arch_enable_oob_stage(void) +{ + return 0; +} + +#define arch_kentry_get_irqstate(__regs) \ + ({ \ + to_svc_pt_regs(__regs)->irqstate; \ + }) + +#define arch_kentry_set_irqstate(__regs, __irqstate) \ + do { \ + to_svc_pt_regs(__regs)->irqstate = __irqstate; \ + } while (0) + +#else /* !CONFIG_IRQ_PIPELINE */ + +static inline unsigned long arch_local_irq_save(void) +{ + return native_irq_save(); +} + +static inline void arch_local_irq_enable(void) +{ + native_irq_enable(); +} + +static inline void arch_local_irq_disable(void) +{ + 
native_irq_disable(); +} + +static inline unsigned long arch_local_save_flags(void) +{ + return native_save_flags(); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + native_irq_restore(flags); +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return native_irqs_disabled_flags(flags); +} + +#endif /* !CONFIG_IRQ_PIPELINE */ + +#endif /* _ASM_ARM_IRQ_PIPELINE_H */ diff --git a/kernel/arch/arm/include/asm/irqflags.h b/kernel/arch/arm/include/asm/irqflags.h index aeec7f2..3c67ce2 100644 --- a/kernel/arch/arm/include/asm/irqflags.h +++ b/kernel/arch/arm/include/asm/irqflags.h @@ -5,6 +5,7 @@ #ifdef __KERNEL__ #include <asm/ptrace.h> +#include <asm/barrier.h> /* * CPU interrupt mask handling. @@ -13,41 +14,44 @@ #define IRQMASK_REG_NAME_R "primask" #define IRQMASK_REG_NAME_W "primask" #define IRQMASK_I_BIT 1 +#define IRQMASK_I_POS 0 #else #define IRQMASK_REG_NAME_R "cpsr" #define IRQMASK_REG_NAME_W "cpsr_c" #define IRQMASK_I_BIT PSR_I_BIT +#define IRQMASK_I_POS 7 #endif +#define IRQMASK_i_POS 31 #if __LINUX_ARM_ARCH__ >= 6 #define arch_local_irq_save arch_local_irq_save -static inline unsigned long arch_local_irq_save(void) +static inline unsigned long native_irq_save(void) { unsigned long flags; asm volatile( - " mrs %0, " IRQMASK_REG_NAME_R " @ arch_local_irq_save\n" + " mrs %0, " IRQMASK_REG_NAME_R " @ native_irq_save\n" " cpsid i" : "=r" (flags) : : "memory", "cc"); return flags; } #define arch_local_irq_enable arch_local_irq_enable -static inline void arch_local_irq_enable(void) +static inline void native_irq_enable(void) { asm volatile( - " cpsie i @ arch_local_irq_enable" + " cpsie i @ native_irq_enable" : : : "memory", "cc"); } #define arch_local_irq_disable arch_local_irq_disable -static inline void arch_local_irq_disable(void) +static inline void native_irq_disable(void) { asm volatile( - " cpsid i @ arch_local_irq_disable" + " cpsid i @ native_irq_disable" : : : "memory", "cc"); @@ -69,12 +73,12 @@ * Save the current interrupt enable state & disable IRQs */ #define arch_local_irq_save arch_local_irq_save -static inline unsigned long arch_local_irq_save(void) +static inline unsigned long native_irq_save(void) { unsigned long flags, temp; asm volatile( - " mrs %0, cpsr @ arch_local_irq_save\n" + " mrs %0, cpsr @ native_irq_save\n" " orr %1, %0, #128\n" " msr cpsr_c, %1" : "=r" (flags), "=r" (temp) @@ -87,11 +91,11 @@ * Enable IRQs */ #define arch_local_irq_enable arch_local_irq_enable -static inline void arch_local_irq_enable(void) +static inline void native_irq_enable(void) { unsigned long temp; asm volatile( - " mrs %0, cpsr @ arch_local_irq_enable\n" + " mrs %0, cpsr @ native_irq_enable\n" " bic %0, %0, #128\n" " msr cpsr_c, %0" : "=r" (temp) @@ -103,11 +107,11 @@ * Disable IRQs */ #define arch_local_irq_disable arch_local_irq_disable -static inline void arch_local_irq_disable(void) +static inline void native_irq_disable(void) { unsigned long temp; asm volatile( - " mrs %0, cpsr @ arch_local_irq_disable\n" + " mrs %0, cpsr @ native_irq_disable\n" " orr %0, %0, #128\n" " msr cpsr_c, %0" : "=r" (temp) @@ -149,15 +153,22 @@ #define local_abt_disable() do { } while (0) #endif +static inline void native_irq_sync(void) +{ + native_irq_enable(); + isb(); + native_irq_disable(); +} + /* * Save the current interrupt enable state. 
*/ #define arch_local_save_flags arch_local_save_flags -static inline unsigned long arch_local_save_flags(void) +static inline unsigned long native_save_flags(void) { unsigned long flags; asm volatile( - " mrs %0, " IRQMASK_REG_NAME_R " @ local_save_flags" + " mrs %0, " IRQMASK_REG_NAME_R " @ native_save_flags" : "=r" (flags) : : "memory", "cc"); return flags; } @@ -166,21 +177,28 @@ * restore saved IRQ & FIQ state */ #define arch_local_irq_restore arch_local_irq_restore -static inline void arch_local_irq_restore(unsigned long flags) +static inline void native_irq_restore(unsigned long flags) { asm volatile( - " msr " IRQMASK_REG_NAME_W ", %0 @ local_irq_restore" + " msr " IRQMASK_REG_NAME_W ", %0 @ native_irq_restore" : : "r" (flags) : "memory", "cc"); } #define arch_irqs_disabled_flags arch_irqs_disabled_flags -static inline int arch_irqs_disabled_flags(unsigned long flags) +static inline int native_irqs_disabled_flags(unsigned long flags) { return flags & IRQMASK_I_BIT; } +static inline bool native_irqs_disabled(void) +{ + unsigned long flags = native_save_flags(); + return native_irqs_disabled_flags(flags); +} + +#include <asm/irq_pipeline.h> #include <asm-generic/irqflags.h> #endif /* ifdef __KERNEL__ */ diff --git a/kernel/arch/arm/include/asm/mmu_context.h b/kernel/arch/arm/include/asm/mmu_context.h index f99ed52..32cded3 100644 --- a/kernel/arch/arm/include/asm/mmu_context.h +++ b/kernel/arch/arm/include/asm/mmu_context.h @@ -14,6 +14,7 @@ #include <linux/sched.h> #include <linux/mm_types.h> #include <linux/preempt.h> +#include <linux/irq_pipeline.h> #include <asm/cacheflush.h> #include <asm/cachetype.h> @@ -72,6 +73,7 @@ static inline void finish_arch_post_lock_switch(void) { struct mm_struct *mm = current->mm; + unsigned long flags; if (mm && mm->context.switch_pending) { /* @@ -83,7 +85,9 @@ preempt_disable(); if (mm->context.switch_pending) { mm->context.switch_pending = 0; + protect_inband_mm(flags); cpu_switch_mm(mm->pgd, mm); + unprotect_inband_mm(flags); } preempt_enable_no_resched(); } @@ -102,7 +106,7 @@ #endif /* CONFIG_CPU_HAS_ASID */ #define destroy_context(mm) do { } while(0) -#define activate_mm(prev,next) switch_mm(prev, next, NULL) +#define activate_mm(prev,next) __switch_mm(prev, next, NULL) /* * This is called when "tsk" is about to enter lazy TLB mode. @@ -118,15 +122,9 @@ { } -/* - * This is the actual mm switch as far as the scheduler - * is concerned. No registers are touched. We avoid - * calling the CPU specific function when the mm hasn't - * actually changed. - */ static inline void -switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) +__switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) { #ifdef CONFIG_MMU unsigned int cpu = smp_processor_id(); @@ -149,6 +147,30 @@ #endif } +/* + * This is the actual mm switch as far as the scheduler + * is concerned. No registers are touched. We avoid + * calling the CPU specific function when the mm hasn't + * actually changed. 
+ */ +static inline void +switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned long flags; + + protect_inband_mm(flags); + __switch_mm(prev, next, tsk); + unprotect_inband_mm(flags); +} + #define deactivate_mm(tsk,mm) do { } while (0) +static inline void +switch_oob_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + __switch_mm(prev, next, tsk); +} + #endif diff --git a/kernel/arch/arm/include/asm/outercache.h b/kernel/arch/arm/include/asm/outercache.h index 3364637..811978d 100644 --- a/kernel/arch/arm/include/asm/outercache.h +++ b/kernel/arch/arm/include/asm/outercache.h @@ -78,8 +78,13 @@ */ static inline void outer_flush_all(void) { - if (outer_cache.flush_all) + unsigned long flags; + + if (outer_cache.flush_all) { + flags = hard_cond_local_irq_save(); outer_cache.flush_all(); + hard_cond_local_irq_restore(flags); + } } /** diff --git a/kernel/arch/arm/include/asm/ptrace.h b/kernel/arch/arm/include/asm/ptrace.h index 73c83f4..5b4f5c4 100644 --- a/kernel/arch/arm/include/asm/ptrace.h +++ b/kernel/arch/arm/include/asm/ptrace.h @@ -20,6 +20,9 @@ struct pt_regs regs; u32 dacr; u32 addr_limit; +#ifdef CONFIG_IRQ_PIPELINE + long irqstate; +#endif }; #define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs) diff --git a/kernel/arch/arm/include/asm/syscall.h b/kernel/arch/arm/include/asm/syscall.h index fd02761..eec5bca 100644 --- a/kernel/arch/arm/include/asm/syscall.h +++ b/kernel/arch/arm/include/asm/syscall.h @@ -63,6 +63,11 @@ memcpy(args, ®s->ARM_r0 + 1, 5 * sizeof(args[0])); } +static inline unsigned long syscall_get_arg0(struct pt_regs *regs) +{ + return regs->ARM_ORIG_r0; +} + static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, const unsigned long *args) diff --git a/kernel/arch/arm/include/asm/thread_info.h b/kernel/arch/arm/include/asm/thread_info.h index eb7ce27..7c84af4 100644 --- a/kernel/arch/arm/include/asm/thread_info.h +++ b/kernel/arch/arm/include/asm/thread_info.h @@ -21,6 +21,7 @@ struct task_struct; +#include <dovetail/thread_info.h> #include <asm/types.h> typedef unsigned long mm_segment_t; @@ -45,6 +46,7 @@ */ struct thread_info { unsigned long flags; /* low level flags */ + __u32 local_flags; /* local (synchronous) flags */ int preempt_count; /* 0 => preemptable, <0 => bug */ mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ @@ -65,15 +67,19 @@ #ifdef CONFIG_ARM_THUMBEE unsigned long thumbee_state; /* ThumbEE Handler Base register */ #endif + struct oob_thread_state oob_state; /* co-kernel thread state */ }; #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ .flags = 0, \ + .local_flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ } + +#define ti_local_flags(__ti) ((__ti)->local_flags) /* * how to get the thread information struct from C @@ -142,6 +148,8 @@ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 +#define TIF_MAYDAY 21 /* emergency trap pending */ +#define TIF_RETUSER 22 /* INBAND_TASK_RETUSER is pending */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) @@ -153,6 +161,8 @@ #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) +#define _TIF_MAYDAY (1 << TIF_MAYDAY) +#define _TIF_RETUSER (1 << TIF_RETUSER) /* Checks for any syscall work in entry-common.S */ 
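The new local_flags word and the ti_local_flags() accessor above are what the pipeline entry code consults to tell which execution stage a thread is running on. As a minimal sketch of how such a query could look — assuming the _TLF_OOB bit defined further down in this thread_info.h hunk, and with thread_runs_oob() being a hypothetical helper name for illustration, not one added by this patch:

    /*
     * Sketch only: test the local (synchronous) thread flags for
     * out-of-band execution. The patch itself provides just the
     * ti_local_flags() accessor and the _TLF_* bit definitions.
     */
    static inline bool thread_runs_oob(void)
    {
            return !!(ti_local_flags(current_thread_info()) & _TLF_OOB);
    }
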
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ @@ -163,7 +173,15 @@ */ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NOTIFY_SIGNAL) + _TIF_NOTIFY_SIGNAL | _TIF_RETUSER) + +/* + * Local (synchronous) thread flags. + */ +#define _TLF_OOB 0x0001 +#define _TLF_DOVETAIL 0x0002 +#define _TLF_OFFSTAGE 0x0004 +#define _TLF_OOBTRAP 0x0008 #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/kernel/arch/arm/include/asm/trace/exceptions.h b/kernel/arch/arm/include/asm/trace/exceptions.h new file mode 100644 index 0000000..bdb666b --- /dev/null +++ b/kernel/arch/arm/include/asm/trace/exceptions.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM exceptions + +#if !defined(_TRACE_EXCEPTIONS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EXCEPTIONS_H + +#include <linux/tracepoint.h> +#include <asm/ptrace.h> +#include <asm/dovetail.h> + +#define __trace_trap(__sym) { __sym, #__sym } + +#define trace_trap_symbolic(__trapnr) \ + __print_symbolic(__trapnr, \ + __trace_trap(ARM_TRAP_ACCESS), \ + __trace_trap(ARM_TRAP_SECTION), \ + __trace_trap(ARM_TRAP_DABT), \ + __trace_trap(ARM_TRAP_PABT), \ + __trace_trap(ARM_TRAP_BREAK), \ + __trace_trap(ARM_TRAP_FPU), \ + __trace_trap(ARM_TRAP_VFP), \ + __trace_trap(ARM_TRAP_UNDEFINSTR), \ + __trace_trap(ARM_TRAP_ALIGNMENT)) + +DECLARE_EVENT_CLASS(ARM_trap_event, + TP_PROTO(int trapnr, struct pt_regs *regs), + TP_ARGS(trapnr, regs), + + TP_STRUCT__entry( + __field(int, trapnr) + __field(struct pt_regs *, regs) + ), + + TP_fast_assign( + __entry->trapnr = trapnr; + __entry->regs = regs; + ), + + TP_printk("%s mode trap: %s", + user_mode(__entry->regs) ? "user" : "kernel", + trace_trap_symbolic(__entry->trapnr)) +); + +DEFINE_EVENT(ARM_trap_event, ARM_trap_entry, + TP_PROTO(int trapnr, struct pt_regs *regs), + TP_ARGS(trapnr, regs) +); + +DEFINE_EVENT(ARM_trap_event, ARM_trap_exit, + TP_PROTO(int trapnr, struct pt_regs *regs), + TP_ARGS(trapnr, regs) +); + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH asm/trace +#define TRACE_INCLUDE_FILE exceptions +#endif /* _TRACE_EXCEPTIONS_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/arch/arm/include/asm/vdso/gettimeofday.h b/kernel/arch/arm/include/asm/vdso/gettimeofday.h index 2134cbd..eadbcde 100644 --- a/kernel/arch/arm/include/asm/vdso/gettimeofday.h +++ b/kernel/arch/arm/include/asm/vdso/gettimeofday.h @@ -142,6 +142,66 @@ return __get_datapage(); } +#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO + +extern struct vdso_priv *__get_privpage(void); + +static __always_inline struct vdso_priv *__arch_get_vdso_priv(void) +{ + return __get_privpage(); +} + +static __always_inline long clock_open_device(const char *path, int mode) +{ + register u32 r0 asm("r0") = (u32)path; + register u32 r1 asm("r1") = (u32)mode; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_open; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r"(r0), "r"(r1), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +long clock_ioctl_device(int fd, unsigned int cmd, long arg) +{ + register u32 r0 asm("r0") = (u32)fd; + register u32 r1 asm("r1") = (u32)cmd; + register u32 r2 asm("r2") = (u32)arg; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_ioctl; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r"(r0), "r"(r1), "r"(r2), "r"(nr) + : "memory"); + + return ret; 
+} + +static __always_inline long clock_close_device(int fd) +{ + register u32 r0 asm("r0") = (u32)fd; + register long ret asm ("r0"); + register long nr asm("r7") = __NR_close; + + asm volatile( + " swi #0\n" + : "=r" (ret) + : "r"(r0), "r"(nr) + : "memory"); + + return ret; +} + +#endif /* CONFIG_GENERIC_CLOCKSOURCE_VDSO */ + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/kernel/arch/arm/kernel/Makefile b/kernel/arch/arm/kernel/Makefile index 79588b5..5d9267c 100644 --- a/kernel/arch/arm/kernel/Makefile +++ b/kernel/arch/arm/kernel/Makefile @@ -92,6 +92,11 @@ head-y := head$(MMUEXT).o obj-$(CONFIG_DEBUG_LL) += debug.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +ifeq ($(CONFIG_DEBUG_LL),y) +obj-$(CONFIG_RAW_PRINTK) += raw_printk.o +endif + +obj-$(CONFIG_IRQ_PIPELINE) += irq_pipeline.o # This is executed very early using a temporary stack when no memory allocator # nor global data is available. Everything has to be allocated on the stack. diff --git a/kernel/arch/arm/kernel/asm-offsets.c b/kernel/arch/arm/kernel/asm-offsets.c index 70993af..6dd7a8f 100644 --- a/kernel/arch/arm/kernel/asm-offsets.c +++ b/kernel/arch/arm/kernel/asm-offsets.c @@ -42,6 +42,8 @@ #endif BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); + DEFINE(TI_SYSCALL, offsetof(struct thread_info, syscall)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); @@ -51,6 +53,7 @@ DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp)); DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); + DEFINE(TI_OOB_MASK, STAGE_MASK); #ifdef CONFIG_VFP DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); #ifdef CONFIG_SMP @@ -161,6 +164,7 @@ BLANK(); #ifdef CONFIG_VDSO DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store)); + DEFINE(VDSO_PRIV_SIZE, PAGE_SIZE); #endif BLANK(); #ifdef CONFIG_ARM_MPU diff --git a/kernel/arch/arm/kernel/entry-armv.S b/kernel/arch/arm/kernel/entry-armv.S index 030351d..a3932c6 100644 --- a/kernel/arch/arm/kernel/entry-armv.S +++ b/kernel/arch/arm/kernel/entry-armv.S @@ -5,6 +5,7 @@ * Copyright (C) 1996,1997,1998 Russell King. * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk) * nommu support by Hyok S. Choi (hyok.choi@samsung.com) + * Copyright (C) 2005 Stelian Pop. * * Low-level vector interface routines * @@ -32,16 +33,24 @@ #include "entry-header.S" #include <asm/entry-macro-multi.S> #include <asm/probes.h> +#include <asm/dovetail.h> /* * Interrupt handling. 
*/ .macro irq_handler #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER - ldr r1, =handle_arch_irq mov r0, sp badr lr, 9997f +#ifdef CONFIG_IRQ_PIPELINE + ldr r1, =handle_arch_irq_pipelined + mov pc, r1 +#else + ldr r1, =handle_arch_irq ldr pc, [r1] +#endif +#elif CONFIG_IRQ_PIPELINE +#error "Legacy IRQ handling not pipelined" #else arch_irq_handler_default #endif @@ -183,7 +192,10 @@ uaccess_entry tsk, r0, r1, r2, \uaccess .if \trace -#ifdef CONFIG_TRACE_IRQFLAGS +#ifdef CONFIG_IRQ_PIPELINE + mov r0, sp + bl kentry_enter_pipelined +#elif defined(CONFIG_TRACE_IRQFLAGS) bl trace_hardirqs_off #endif .endif @@ -203,6 +215,10 @@ __irq_svc: svc_entry irq_handler +#ifdef CONFIG_IRQ_PIPELINE + tst r0, r0 @ skip epilogue if oob or in-band stalled + beq 1f +#endif #ifdef CONFIG_PREEMPTION ldr r8, [tsk, #TI_PREEMPT] @ get preempt count @@ -213,6 +229,7 @@ blne svc_preempt #endif +1: svc_exit r5, irq = 1 @ return from exception UNWIND(.fnend ) ENDPROC(__irq_svc) @@ -222,7 +239,7 @@ #ifdef CONFIG_PREEMPTION svc_preempt: mov r8, lr -1: bl preempt_schedule_irq @ irq en/disable is done inside +1: bl arm_preempt_schedule_irq @ irq en/disable is done inside ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS tst r0, #_TIF_NEED_RESCHED reteq r8 @ go again @@ -252,6 +269,16 @@ #else svc_entry #endif +#ifdef CONFIG_DOVETAIL + get_thread_info tsk + ldr r0, [tsk, #TI_PREEMPT] @ get preempt count + tst r0, #TI_OOB_MASK @ oob stage? + beq 1f + mov r0, #ARM_TRAP_UNDEFINSTR + mov r1, sp @ r1 = ®s + bl __oob_trap_notify +1: +#endif mov r1, #4 @ PC correction to apply THUMB( tst r5, #PSR_T_BIT ) @ exception taken in Thumb mode? @@ -261,6 +288,15 @@ __und_svc_finish: get_thread_info tsk +#ifdef CONFIG_DOVETAIL + ldr r0, [tsk, #TI_PREEMPT] @ get preempt count + tst r0, #TI_OOB_MASK @ oob stage? + beq 1f + mov r0, #ARM_TRAP_UNDEFINSTR + mov r1, sp @ r1 = ®s + bl __oob_trap_unwind +1: +#endif ldr r5, [sp, #S_PSR] @ Get SVC cpsr svc_exit r5 @ return from exception UNWIND(.fnend ) @@ -391,7 +427,7 @@ .if \trace #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_off + bl trace_hardirqs_off_pipelined #endif ct_user_exit save = 0 .endif @@ -427,8 +463,12 @@ usr_entry kuser_cmpxchg_check irq_handler - get_thread_info tsk mov why, #0 +#ifdef CONFIG_IRQ_PIPELINE + tst r0, r0 + beq fast_ret_to_user @ skip epilogue if oob (in-band cannot be stalled) +#endif + get_thread_info tsk b ret_to_user_from_irq UNWIND(.fnend ) ENDPROC(__irq_usr) @@ -721,7 +761,7 @@ UNWIND(.cantunwind ) get_thread_info tsk mov why, #0 - b ret_to_user + ret_to_user_pipelined r1 UNWIND(.fnend ) ENDPROC(__pabt_usr) ENDPROC(ret_from_exception) diff --git a/kernel/arch/arm/kernel/entry-common.S b/kernel/arch/arm/kernel/entry-common.S index 9b3c737..7a75a49 100644 --- a/kernel/arch/arm/kernel/entry-common.S +++ b/kernel/arch/arm/kernel/entry-common.S @@ -3,6 +3,7 @@ * linux/arch/arm/kernel/entry-common.S * * Copyright (C) 2000 Russell King + * Copyright (C) 2005 Stelian Pop. 
*/ #include <asm/assembler.h> @@ -12,6 +13,7 @@ #include <asm/memory.h> #ifdef CONFIG_AEABI #include <asm/unistd-oabi.h> +#include <uapi/asm-generic/dovetail.h> #endif .equ NR_syscalls, __NR_syscalls @@ -54,6 +56,8 @@ blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing movs r1, r1, lsl #16 + ldr r2, =#_TIF_SYSCALL_WORK | _TIF_WORK_MASK + ands r2, r1, r2 bne fast_work_pending @@ -91,6 +95,8 @@ blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing movs r1, r1, lsl #16 + ldr r2, =#_TIF_SYSCALL_WORK | _TIF_WORK_MASK + ands r2, r1, r2 beq no_work_pending UNWIND(.fnend ) ENDPROC(ret_fast_syscall) @@ -132,6 +138,8 @@ blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] movs r1, r1, lsl #16 + ldr r2, =#_TIF_WORK_MASK + ands r2, r1, r2 bne slow_work_pending no_work_pending: asm_trace_hardirqs_on save = 0 @@ -143,6 +151,10 @@ restore_user_regs fast = 0, offset = 0 ENDPROC(ret_to_user_from_irq) ENDPROC(ret_to_user) +ENTRY(fast_ret_to_user) + disable_irq_notrace @ disable interrupts + b no_work_pending +ENDPROC(fast_ret_to_user) /* * This is how we return from a fork. @@ -265,6 +277,9 @@ eor scno, scno, #__NR_SYSCALL_BASE @ check OS number #endif get_thread_info tsk +#ifdef CONFIG_DOVETAIL + str scno, [tsk, #TI_SYSCALL] +#endif /* * Reload the registers that may have been corrupted on entry to * the syscall assembly (by tracing or context tracking.) @@ -272,6 +287,70 @@ TRACE( ldmia sp, {r0 - r3} ) local_restart: +#ifdef CONFIG_DOVETAIL + ldr r10, [tsk, #TI_LOCAL_FLAGS] @ tsk(r10) is callee-saved +#ifdef CONFIG_IPIPE_COMPAT + ldr r0, =#0xf0042 @ old syscall signature + cmp scno, r0 + bne 1f + add scno, scno, #__OOB_SYSCALL_BIT @ force in oob marker + b fastcall_try +1: +#endif +#ifdef CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE + ldr r0, =#__OOB_SYSCALL_BIT + ands r0, scno, r0 + bne fastcall_try +#endif + cmp scno, #__NR_prctl + bne slow_path + ldr r0, [sp, #S_OLD_R0] + tst r0, #__OOB_SYSCALL_BIT + beq slow_path +fastcall_try: + tst r10, #_TLF_OOB + beq slow_path + mov r0, sp @ regs + bl handle_oob_syscall + ldr r10, [tsk, #TI_LOCAL_FLAGS] + tst r0, r0 + beq slow_path + tst r10, #_TLF_OOB + bne fastcall_exit_check @ check for MAYDAY + bl sync_inband_irqs + b ret_slow_syscall +fastcall_exit_check: + ldr r10, [tsk, #TI_FLAGS] + tst r10, #_TIF_MAYDAY + beq fast_ret_to_user + mov r0, sp + bl dovetail_call_mayday + b fast_ret_to_user +slow_path: + tst r10, #_TLF_DOVETAIL + bne pipeline_syscall +#ifdef CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE + ldr r0, =#__OOB_SYSCALL_BIT + ands r0, scno, r0 + bne pipeline_syscall +#endif + cmp scno, #__NR_prctl + bne root_syscall + ldr r0, [sp, #S_OLD_R0] + tst r0, #__OOB_SYSCALL_BIT + beq root_syscall +pipeline_syscall: + mov r0, sp @ regs + bl __pipeline_syscall + ldr r10, [tsk, #TI_LOCAL_FLAGS] + tst r10, #_TLF_OOB + bne fast_ret_to_user + cmp r0, #0 + bgt ret_slow_syscall +root_syscall: + ldmia sp, { r0 - r3 } +#endif /* CONFIG_DOVETAIL */ + ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing stmdb sp!, {r4, r5} @ push fifth and sixth args diff --git a/kernel/arch/arm/kernel/entry-header.S b/kernel/arch/arm/kernel/entry-header.S index 40db0f9..da1251c 100644 --- a/kernel/arch/arm/kernel/entry-header.S +++ b/kernel/arch/arm/kernel/entry-header.S @@ -203,15 +203,21 @@ .macro svc_exit, rpsr, irq = 0 .if \irq != 0 @ IRQs already off -#ifdef CONFIG_TRACE_IRQFLAGS @ The parent context IRQs must have been enabled to get here in @ the first place, so there's no point checking the PSR I bit. 
+#ifdef CONFIG_IRQ_PIPELINE + mov r0, sp + bl kentry_exit_pipelined +#elif defined(CONFIG_TRACE_IRQFLAGS) bl trace_hardirqs_on #endif .else @ IRQs off again before pulling preserved data off the stack disable_irq_notrace -#ifdef CONFIG_TRACE_IRQFLAGS +#ifdef CONFIG_IRQ_PIPELINE + mov r0, sp + bl kentry_exit_pipelined +#elif defined(CONFIG_TRACE_IRQFLAGS) tst \rpsr, #PSR_I_BIT bleq trace_hardirqs_on tst \rpsr, #PSR_I_BIT @@ -402,6 +408,19 @@ .endm /* + * Branch to the exception epilogue, skipping the in-band work + * if running over the out-of-band interrupt stage. + */ + .macro ret_to_user_pipelined, tmp +#ifdef CONFIG_IRQ_PIPELINE + ldr \tmp, [tsk, #TI_LOCAL_FLAGS] + tst \tmp, #_TLF_OOB + bne fast_ret_to_user +#endif + b ret_to_user + .endm + +/* * These are the registers used in the syscall handler, and allow us to * have in theory up to 7 arguments to a function - r0 to r6. * diff --git a/kernel/arch/arm/kernel/irq.c b/kernel/arch/arm/kernel/irq.c index 698b6f6..0c3b893 100644 --- a/kernel/arch/arm/kernel/irq.c +++ b/kernel/arch/arm/kernel/irq.c @@ -23,6 +23,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> +#include <linux/irq_pipeline.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/init.h> @@ -97,6 +98,14 @@ uniphier_cache_init(); } +#ifdef CONFIG_IRQ_PIPELINE +asmlinkage int __exception_irq_entry +handle_arch_irq_pipelined(struct pt_regs *regs) +{ + return handle_irq_pipelined(regs); +} +#endif + #ifdef CONFIG_SPARSE_IRQ int __init arch_probe_nr_irqs(void) { diff --git a/kernel/arch/arm/kernel/irq_pipeline.c b/kernel/arch/arm/kernel/irq_pipeline.c new file mode 100644 index 0000000..aa12dce --- /dev/null +++ b/kernel/arch/arm/kernel/irq_pipeline.c @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>. + */ +#include <linux/irq.h> +#include <linux/irq_pipeline.h> + +void arch_do_IRQ_pipelined(struct irq_desc *desc) +{ + struct pt_regs *regs = raw_cpu_ptr(&irq_pipeline.tick_regs); + unsigned int irq = irq_desc_get_irq(desc); + + __handle_domain_irq(NULL, irq, false, regs); +} + +void __init arch_irq_pipeline_init(void) +{ + /* no per-arch init. */ +} diff --git a/kernel/arch/arm/kernel/patch.c b/kernel/arch/arm/kernel/patch.c index e9e828b..35c7285 100644 --- a/kernel/arch/arm/kernel/patch.c +++ b/kernel/arch/arm/kernel/patch.c @@ -17,7 +17,7 @@ }; #ifdef CONFIG_MMU -static DEFINE_RAW_SPINLOCK(patch_lock); +static DEFINE_HARD_SPINLOCK(patch_lock); static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) { diff --git a/kernel/arch/arm/kernel/process.c b/kernel/arch/arm/kernel/process.c index 47a30ff..d8da49a 100644 --- a/kernel/arch/arm/kernel/process.c +++ b/kernel/arch/arm/kernel/process.c @@ -71,6 +71,7 @@ arm_pm_idle(); else cpu_do_idle(); + hard_cond_local_irq_enable(); raw_local_irq_enable(); } @@ -448,3 +449,28 @@ return ret; } #endif + +#ifdef CONFIG_IRQ_PIPELINE + +/* + * When pipelining interrupts, we have to reconcile the hardware and + * the virtual states. Hard irqs are off on entry while the current + * stage has to be unstalled: fix this up by stalling the in-band + * stage on entry, unstalling on exit. 
+ */ +asmlinkage void __sched arm_preempt_schedule_irq(void) +{ + WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall()); + stall_inband_nocheck(); + preempt_schedule_irq(); + unstall_inband_nocheck(); +} + +#else + +asmlinkage void __sched arm_preempt_schedule_irq(void) +{ + preempt_schedule_irq(); +} + +#endif diff --git a/kernel/arch/arm/kernel/ptrace.c b/kernel/arch/arm/kernel/ptrace.c index 2771e68..1074670 100644 --- a/kernel/arch/arm/kernel/ptrace.c +++ b/kernel/arch/arm/kernel/ptrace.c @@ -205,7 +205,9 @@ static int break_trap(struct pt_regs *regs, unsigned int instr) { + mark_trap_entry(ARM_TRAP_BREAK, regs); ptrace_break(regs); + mark_trap_exit(ARM_TRAP_BREAK, regs); return 0; } diff --git a/kernel/arch/arm/kernel/raw_printk.c b/kernel/arch/arm/kernel/raw_printk.c new file mode 100644 index 0000000..9024b77 --- /dev/null +++ b/kernel/arch/arm/kernel/raw_printk.c @@ -0,0 +1,30 @@ +#include <linux/kernel.h> +#include <linux/console.h> +#include <linux/init.h> + +/* + * If both CONFIG_DEBUG_LL and CONFIG_RAW_PRINTK are set, create a + * console device sending the raw output to printascii(). + */ +void printascii(const char *s); + +static void raw_console_write(struct console *co, + const char *s, unsigned count) +{ + printascii(s); +} + +static struct console raw_console = { + .name = "rawcon", + .write_raw = raw_console_write, + .flags = CON_PRINTBUFFER | CON_ENABLED, + .index = -1, +}; + +static int __init raw_console_init(void) +{ + register_console(&raw_console); + + return 0; +} +console_initcall(raw_console_init); diff --git a/kernel/arch/arm/kernel/signal.c b/kernel/arch/arm/kernel/signal.c index a3a38d0..da78d2f 100644 --- a/kernel/arch/arm/kernel/signal.c +++ b/kernel/arch/arm/kernel/signal.c @@ -8,6 +8,7 @@ #include <linux/random.h> #include <linux/signal.h> #include <linux/personality.h> +#include <linux/irq_pipeline.h> #include <linux/uaccess.h> #include <linux/tracehook.h> #include <linux/uprobes.h> @@ -639,16 +640,36 @@ return 0; } +static inline void do_retuser(void) +{ + unsigned int thread_flags; + + if (dovetailing()) { + thread_flags = current_thread_info()->flags; + if (thread_flags & _TIF_RETUSER) + inband_retuser_notify(); + } +} + asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { + WARN_ON_ONCE(irq_pipeline_debug() && + (irqs_disabled() || running_oob())); + /* * The assembly code enters us with IRQs off, but it hasn't * informed the tracing code of that for efficiency reasons. * Update the trace code with the current status. */ - trace_hardirqs_off(); + if (!irqs_pipelined()) + trace_hardirqs_off(); do { + if (irqs_pipelined()) { + local_irq_disable(); + hard_cond_local_irq_enable(); + } + if (likely(thread_flags & _TIF_NEED_RESCHED)) { schedule(); } else { @@ -658,6 +679,7 @@ if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs, syscall); if (unlikely(restart)) { + do_retuser(); /* * Restart without handlers. 
* Deal with it without leaving @@ -672,10 +694,16 @@ tracehook_notify_resume(regs); rseq_handle_notify_resume(NULL, regs); } + do_retuser(); } - local_irq_disable(); + hard_local_irq_disable(); + + /* RETUSER might have switched oob */ + if (!running_inband()) + break; + thread_flags = current_thread_info()->flags; - } while (thread_flags & _TIF_WORK_MASK); + } while (inband_irq_pending() || (thread_flags & _TIF_WORK_MASK)); return 0; } diff --git a/kernel/arch/arm/kernel/smp.c b/kernel/arch/arm/kernel/smp.c index 123432b..bdb4f7e 100644 --- a/kernel/arch/arm/kernel/smp.c +++ b/kernel/arch/arm/kernel/smp.c @@ -84,7 +84,7 @@ MAX_IPI }; -static int ipi_irq_base __read_mostly; +int ipi_irq_base __read_mostly; static int nr_ipi __read_mostly = NR_IPI; static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly; @@ -329,7 +329,7 @@ idle_task_exit(); - local_irq_disable(); + local_irq_disable_full(); /* * Flush the data out of the L1 cache for this CPU. This must be @@ -421,6 +421,13 @@ local_flush_tlb_all(); /* + * irq_pipeline: debug_smp_processor_id() accesses percpu + * data. + */ + if (irqs_pipelined()) + set_my_cpu_offset(per_cpu_offset(raw_smp_processor_id())); + + /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ @@ -463,7 +470,7 @@ complete(&cpu_running); - local_irq_enable(); + local_irq_enable_full(); local_fiq_enable(); local_abt_enable(); @@ -539,6 +546,8 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); +static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu); + void show_ipi_list(struct seq_file *p, int prec) { unsigned int cpu, i; @@ -553,7 +562,7 @@ seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); for_each_online_cpu(cpu) - seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu)); + seq_printf(p, "%10u ", get_ipi_count(irq, cpu)); seq_printf(p, " %s\n", ipi_types[i]); } @@ -606,7 +615,7 @@ set_cpu_online(cpu, false); local_fiq_disable(); - local_irq_disable(); + local_irq_disable_full(); while (1) { cpu_relax(); @@ -695,12 +704,85 @@ { struct pt_regs *old_regs = set_irq_regs(regs); + /* + * We don't support legacy IPI delivery when pipelining + * interrupts. + */ + WARN_ON_ONCE(irqs_pipelined()); + irq_enter(); do_handle_IPI(ipinr); irq_exit(); set_irq_regs(old_regs); } + +static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr) +{ + trace_ipi_raise(target, ipi_types[ipinr]); + __ipi_send_mask(ipi_desc[ipinr], target); +} + +#ifdef CONFIG_IRQ_PIPELINE + +static DEFINE_PER_CPU(unsigned long, ipi_messages); + +static DEFINE_PER_CPU(unsigned int [MAX_IPI], ipi_counts); + +static irqreturn_t ipi_handler(int irq, void *data) +{ + unsigned long *pmsg; + unsigned int ipinr; + + /* + * Decode in-band IPIs (0..MAX_IPI - 1) multiplexed over + * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own + * individual handler. + */ + pmsg = raw_cpu_ptr(&ipi_messages); + while (*pmsg) { + ipinr = ffs(*pmsg) - 1; + clear_bit(ipinr, pmsg); + __this_cpu_inc(ipi_counts[ipinr]); + do_handle_IPI(ipinr); + } + + return IRQ_HANDLED; +} + +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) +{ + unsigned int cpu; + + /* regular in-band IPI (multiplexed over SGI0). 
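+ * Setting an already-pending bit simply coalesces messages: one
+ * SGI0 delivery drains every pending type in ipi_handler() above,
+ * so e.g. two back-to-back IPI_RESCHEDULE requests may be handled
+ * as one, which the in-band IPI types tolerate by design.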
*/ + for_each_cpu(cpu, target) + set_bit(ipinr, &per_cpu(ipi_messages, cpu)); + + wmb(); + __smp_cross_call(target, 0); +} + +static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu) +{ + return per_cpu(ipi_counts[irq - ipi_irq_base], cpu); +} + +void irq_send_oob_ipi(unsigned int irq, + const struct cpumask *cpumask) +{ + unsigned int sgi = irq - ipi_irq_base; + + if (WARN_ON(irq_pipeline_debug() && + (sgi < OOB_IPI_OFFSET || + sgi >= OOB_IPI_OFFSET + OOB_NR_IPI))) + return; + + /* Out-of-band IPI (SGI1-2). */ + __smp_cross_call(cpumask, sgi); +} +EXPORT_SYMBOL_GPL(irq_send_oob_ipi); + +#else static irqreturn_t ipi_handler(int irq, void *data) { @@ -710,9 +792,15 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) { - trace_ipi_raise_rcuidle(target, ipi_types[ipinr]); - __ipi_send_mask(ipi_desc[ipinr], target); + __smp_cross_call(target, ipinr); } + +static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu) +{ + return kstat_irqs_cpu(irq, cpu); +} + +#endif /* CONFIG_IRQ_PIPELINE */ static void ipi_setup(int cpu) { @@ -727,18 +815,25 @@ void __init set_smp_ipi_range(int ipi_base, int n) { - int i; + int i, inband_nr_ipi; WARN_ON(n < MAX_IPI); nr_ipi = min(n, MAX_IPI); + /* + * irq_pipeline: the in-band stage traps SGI0 only, + * over which IPI messages are multiplexed. Other SGIs + * are available for exchanging out-of-band IPIs. + */ + inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi; for (i = 0; i < nr_ipi; i++) { - int err; + if (i < inband_nr_ipi) { + int err; - err = request_percpu_irq(ipi_base + i, ipi_handler, - "IPI", &irq_stat); - WARN_ON(err); - + err = request_percpu_irq(ipi_base + i, ipi_handler, + "IPI", &irq_stat); + WARN_ON(err); + } ipi_desc[i] = irq_to_desc(ipi_base + i); irq_set_status_flags(ipi_base + i, IRQ_HIDDEN); diff --git a/kernel/arch/arm/kernel/smp_twd.c b/kernel/arch/arm/kernel/smp_twd.c index 9a14f72..8377f1d 100644 --- a/kernel/arch/arm/kernel/smp_twd.c +++ b/kernel/arch/arm/kernel/smp_twd.c @@ -31,7 +31,7 @@ static struct clock_event_device __percpu *twd_evt; static unsigned int twd_features = - CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE; static int twd_ppi; static int twd_shutdown(struct clock_event_device *clk) @@ -182,7 +182,7 @@ struct clock_event_device *evt = dev_id; if (twd_timer_ack()) { - evt->event_handler(evt); + clockevents_handle_event(evt); return IRQ_HANDLED; } @@ -279,7 +279,8 @@ goto out_free; } - err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt); + err = __request_percpu_irq(twd_ppi, twd_handler, + IRQF_TIMER, "twd", twd_evt); if (err) { pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err); goto out_free; diff --git a/kernel/arch/arm/kernel/traps.c b/kernel/arch/arm/kernel/traps.c index a531afa..e33ea9c 100644 --- a/kernel/arch/arm/kernel/traps.c +++ b/kernel/arch/arm/kernel/traps.c @@ -406,7 +406,7 @@ #endif static LIST_HEAD(undef_hook); -static DEFINE_RAW_SPINLOCK(undef_lock); +static DEFINE_HARD_SPINLOCK(undef_lock); void register_undef_hook(struct undef_hook *hook) { diff --git a/kernel/arch/arm/kernel/vdso.c b/kernel/arch/arm/kernel/vdso.c index fddd08a..557fb35 100644 --- a/kernel/arch/arm/kernel/vdso.c +++ b/kernel/arch/arm/kernel/vdso.c @@ -32,7 +32,10 @@ extern char vdso_start[], vdso_end[]; -/* Total number of pages needed for the data and text portions of the VDSO. */ +/* + * Total number of pages needed for the data, private and text
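+ * portions of the VDSO: one VPRIV page, one VVAR (data) page, then
+ * the text pages, in that order; vdso_init() below sizes this as
+ * 2 + text_pages.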
+ */ unsigned int vdso_total_pages __ro_after_init; /* @@ -53,8 +56,8 @@ unsigned long new_size = new_vma->vm_end - new_vma->vm_start; unsigned long vdso_size; - /* without VVAR page */ - vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT; + /* without VVAR and VPRIV pages */ + vdso_size = (vdso_total_pages - 2) << PAGE_SHIFT; if (vdso_size != new_size) return -EINVAL; @@ -180,8 +183,10 @@ /* If the virtual counter is absent or non-functional we don't * want programs to incur the slight additional overhead of * dispatching through the VDSO only to fall back to syscalls. + * However, if clocksources supporting generic MMIO access can + * be reached via the vDSO, keep this fast path enabled. */ - if (!cntvct_ok) { + if (!cntvct_ok && !IS_ENABLED(CONFIG_GENERIC_CLOCKSOURCE_VDSO)) { vdso_nullpatch_one(&einfo, "__vdso_gettimeofday"); vdso_nullpatch_one(&einfo, "__vdso_clock_gettime"); vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64"); @@ -219,16 +224,26 @@ vdso_text_mapping.pages = vdso_text_pagelist; - vdso_total_pages = 1; /* for the data/vvar page */ + vdso_total_pages = 2; /* for the data/vvar and vpriv pages */ vdso_total_pages += text_pages; cntvct_ok = cntvct_functional(); patch_vdso(vdso_start); +#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO + vdso_data->cs_type_seq = CLOCKSOURCE_VDSO_NONE << 16 | 1; +#endif return 0; } arch_initcall(vdso_init); + +static int install_vpriv(struct mm_struct *mm, unsigned long addr) +{ + return mmap_region(NULL, addr, PAGE_SIZE, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0, NULL) != addr ? -EINVAL : 0; +} static int install_vvar(struct mm_struct *mm, unsigned long addr) { @@ -237,8 +252,13 @@ vma = _install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_MAYREAD, &vdso_data_mapping); + if (IS_ERR(vma)) + return PTR_ERR(vma); - return PTR_ERR_OR_ZERO(vma); + if (cache_is_vivt()) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + return vma->vm_start != addr ? -EINVAL : 0; } /* assumes mmap_lock is write-locked */ @@ -252,18 +272,29 @@ if (vdso_text_pagelist == NULL) return; - if (install_vvar(mm, addr)) + if (install_vpriv(mm, addr)) { + pr_err("cannot map VPRIV at expected address!\n"); return; + } - /* Account for vvar page. */ + /* Account for the private storage. */ addr += PAGE_SIZE; - len = (vdso_total_pages - 1) << PAGE_SHIFT; + if (install_vvar(mm, addr)) { + WARN(1, "cannot map VVAR at expected address!\n"); + return; + } + + /* Account for vvar and vpriv pages. 
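+ * (install_vvar() above also maps the VVAR page non-cacheable on
+ * VIVT caches, presumably to keep the kernel and user views of the
+ * vDSO data coherent without explicit cache maintenance.)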
*/ + addr += PAGE_SIZE; + len = (vdso_total_pages - 2) << PAGE_SHIFT; vma = _install_special_mapping(mm, addr, len, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, &vdso_text_mapping); - if (!IS_ERR(vma)) + if (IS_ERR(vma) || vma->vm_start != addr) + WARN(1, "cannot map VDSO at expected address!\n"); + else mm->context.vdso = addr; } diff --git a/kernel/arch/arm/mach-imx/gpc.c b/kernel/arch/arm/mach-imx/gpc.c index ebc4339..189642e 100644 --- a/kernel/arch/arm/mach-imx/gpc.c +++ b/kernel/arch/arm/mach-imx/gpc.c @@ -62,28 +62,38 @@ void imx_gpc_pre_suspend(bool arm_power_off) { void __iomem *reg_imr1 = gpc_base + GPC_IMR1; + unsigned long flags; int i; /* Tell GPC to power off ARM core when suspend */ if (arm_power_off) imx_gpc_set_arm_power_in_lpm(arm_power_off); + flags = hard_cond_local_irq_save(); + for (i = 0; i < IMR_NUM; i++) { gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4); writel_relaxed(~gpc_wake_irqs[i], reg_imr1 + i * 4); } + + hard_cond_local_irq_restore(flags); } void imx_gpc_post_resume(void) { void __iomem *reg_imr1 = gpc_base + GPC_IMR1; + unsigned long flags; int i; /* Keep ARM core powered on for other low-power modes */ imx_gpc_set_arm_power_in_lpm(false); + flags = hard_cond_local_irq_save(); + for (i = 0; i < IMR_NUM; i++) writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4); + + hard_cond_local_irq_restore(flags); } static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on) @@ -105,21 +115,31 @@ void imx_gpc_mask_all(void) { void __iomem *reg_imr1 = gpc_base + GPC_IMR1; + unsigned long flags; int i; + + flags = hard_cond_local_irq_save(); for (i = 0; i < IMR_NUM; i++) { gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4); writel_relaxed(~0, reg_imr1 + i * 4); } + + hard_cond_local_irq_restore(flags); } void imx_gpc_restore_all(void) { void __iomem *reg_imr1 = gpc_base + GPC_IMR1; + unsigned long flags; int i; + + flags = hard_cond_local_irq_save(); for (i = 0; i < IMR_NUM; i++) writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4); + + hard_cond_local_irq_restore(flags); } void imx_gpc_hwirq_unmask(unsigned int hwirq) @@ -167,6 +187,7 @@ #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, #endif + .flags = IRQCHIP_PIPELINE_SAFE, }; static int imx_gpc_domain_translate(struct irq_domain *d, diff --git a/kernel/arch/arm/mm/alignment.c b/kernel/arch/arm/mm/alignment.c index bcefe3f..6018226 100644 --- a/kernel/arch/arm/mm/alignment.c +++ b/kernel/arch/arm/mm/alignment.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> +#include <linux/dovetail.h> #include <asm/cp15.h> #include <asm/system_info.h> @@ -807,10 +808,12 @@ u16 tinstr = 0; int isize = 4; int thumb2_32b = 0; - int fault; + int fault, ret = 0; if (interrupts_enabled(regs)) - local_irq_enable(); + hard_local_irq_enable(); + + mark_trap_entry(ARM_TRAP_ALIGNMENT, regs); instrptr = instruction_pointer(regs); @@ -938,7 +941,7 @@ if (thumb_mode(regs)) regs->ARM_cpsr = it_advance(regs->ARM_cpsr); - return 0; + goto out; bad_or_fault: if (type == TYPE_ERROR) @@ -947,7 +950,7 @@ * We got a fault - fix it up, or die. */ do_bad_area(addr, fsr, regs); - return 0; + goto out; swp: pr_err("Alignment trap: not handling swp instruction\n"); @@ -961,7 +964,8 @@ isize << 1, isize == 2 ? tinstr : instr, instrptr); ai_skipped += 1; - return 1; + ret = 1; + goto out; user: ai_user += 1; @@ -992,12 +996,15 @@ * entry-common.S) and disable the alignment trap only if * there is no work pending for this thread. 
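 * Hard-disabling (rather than only virtually masking) interrupts
 * here keeps the _TIF_WORK_MASK test and the set_cr() update
 * atomic against the interrupt pipeline as well.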
*/ - raw_local_irq_disable(); + hard_local_irq_disable(); if (!(current_thread_info()->flags & _TIF_WORK_MASK)) set_cr(cr_no_alignment); } - return 0; +out: + mark_trap_exit(ARM_TRAP_ALIGNMENT, regs); + + return ret; } static int __init noalign_setup(char *__unused) diff --git a/kernel/arch/arm/mm/cache-l2x0.c b/kernel/arch/arm/mm/cache-l2x0.c index 43d91bf..b2af3e0 100644 --- a/kernel/arch/arm/mm/cache-l2x0.c +++ b/kernel/arch/arm/mm/cache-l2x0.c @@ -38,7 +38,7 @@ static void __iomem *l2x0_base; static const struct l2c_init_data *l2x0_data; -static DEFINE_RAW_SPINLOCK(l2x0_lock); +static DEFINE_HARD_SPINLOCK(l2x0_lock); static u32 l2x0_way_mask; /* Bitmask of active ways */ static u32 l2x0_size; static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; @@ -47,6 +47,19 @@ static bool l2x0_bresp_disable; static bool l2x0_flz_disable; + +#ifdef CONFIG_IRQ_PIPELINE +#define CACHE_RANGE_ATOMIC_MAX 512UL +static int l2x0_wa = -1; +static int __init l2x0_setup_wa(char *str) +{ + l2x0_wa = !!simple_strtol(str, NULL, 0); + return 0; +} +early_param("l2x0_write_allocate", l2x0_setup_wa); +#else +#define CACHE_RANGE_ATOMIC_MAX 4096UL +#endif /* * Common code for all cache controllers. @@ -120,11 +133,11 @@ l2x0_data->unlock(base, num_lock); - local_irq_save(flags); + flags = hard_local_irq_save(); __l2c_op_way(base + L2X0_INV_WAY); writel_relaxed(0, base + sync_reg_offset); l2c_wait_mask(base + sync_reg_offset, 1); - local_irq_restore(flags); + hard_local_irq_restore(flags); l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL); } @@ -225,7 +238,7 @@ { void __iomem *base = l2x0_base; - BUG_ON(!irqs_disabled()); + BUG_ON(!hard_irqs_disabled()); __l2c_op_way(base + L2X0_CLEAN_INV_WAY); __l2c210_cache_sync(base); @@ -284,10 +297,10 @@ static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, unsigned long end, unsigned long flags) { - raw_spinlock_t *lock = &l2x0_lock; + typeof(l2x0_lock) *lock = &l2x0_lock; while (start < end) { - unsigned long blk_end = start + min(end - start, 4096UL); + unsigned long blk_end = start + min(end - start, CACHE_RANGE_ATOMIC_MAX); while (start < blk_end) { l2c_wait_mask(reg, 1); @@ -498,13 +511,13 @@ static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) { - raw_spinlock_t *lock = &l2x0_lock; + typeof(l2x0_lock) *lock = &l2x0_lock; unsigned long flags; void __iomem *base = l2x0_base; raw_spin_lock_irqsave(lock, flags); while (start < end) { - unsigned long blk_end = start + min(end - start, 4096UL); + unsigned long blk_end = start + min(end - start, CACHE_RANGE_ATOMIC_MAX); l2c_set_debug(base, 0x03); while (start < blk_end) { @@ -800,6 +813,24 @@ if (aux_val & aux_mask) pr_alert("L2C: platform provided aux values permit register corruption.\n"); +#ifdef CONFIG_IRQ_PIPELINE + if (!l2x0_wa) { + /* + * Disable WA by setting bit 23 in the auxiliary + * control register. 
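+ * This path is taken when "l2x0_write_allocate=0" is passed on
+ * the kernel command line (see l2x0_setup_wa() above), trading
+ * some cache efficiency for a bounded worst-case latency.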
+ */ + aux_mask &= ~L220_AUX_CTRL_FWA_MASK; + aux_val &= ~L220_AUX_CTRL_FWA_MASK; + aux_val |= 1 << L220_AUX_CTRL_FWA_SHIFT; + pr_warn("%s: irq_pipeline: write-allocate disabled via command line\n", + data->type); + } else if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L220 || + ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310 && + (cache_id & L2X0_CACHE_ID_RTL_MASK) < L310_CACHE_ID_RTL_R3P2)) + pr_alert("%s: irq_pipeline: write-allocate enabled, may induce high latency\n", + data->type); +#endif + old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); aux &= aux_mask; aux |= aux_val; diff --git a/kernel/arch/arm/mm/context.c b/kernel/arch/arm/mm/context.c index b7525b4..0cf14bd 100644 --- a/kernel/arch/arm/mm/context.c +++ b/kernel/arch/arm/mm/context.c @@ -39,7 +39,7 @@ #define ASID_FIRST_VERSION (1ULL << ASID_BITS) #define NUM_USER_ASIDS ASID_FIRST_VERSION -static DEFINE_RAW_SPINLOCK(cpu_asid_lock); +static DEFINE_HARD_SPINLOCK(cpu_asid_lock); static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); @@ -237,8 +237,11 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) { unsigned long flags; - unsigned int cpu = smp_processor_id(); + unsigned int cpu = raw_smp_processor_id(); + bool need_flush; u64 asid; + + WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled()); if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) __check_vmalloc_seq(mm); @@ -263,15 +266,16 @@ atomic64_set(&mm->context.id, asid); } - if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { - local_flush_bp_all(); - local_flush_tlb_all(); - } - + need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending); atomic64_set(&per_cpu(active_asids, cpu), asid); cpumask_set_cpu(cpu, mm_cpumask(mm)); raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + if (need_flush) { + local_flush_bp_all(); + local_flush_tlb_all(); + } + switch_mm_fastpath: cpu_switch_mm(mm->pgd, mm); } diff --git a/kernel/arch/arm/mm/fault.c b/kernel/arch/arm/mm/fault.c index efa4020..e23d0ff 100644 --- a/kernel/arch/arm/mm/fault.c +++ b/kernel/arch/arm/mm/fault.c @@ -9,6 +9,7 @@ #include <linux/signal.h> #include <linux/mm.h> #include <linux/hardirq.h> +#include <linux/irq_pipeline.h> #include <linux/init.h> #include <linux/kprobes.h> #include <linux/uaccess.h> @@ -21,10 +22,68 @@ #include <asm/system_misc.h> #include <asm/system_info.h> #include <asm/tlbflush.h> +#include <asm/dovetail.h> +#define CREATE_TRACE_POINTS +#include <asm/trace/exceptions.h> #include "fault.h" #ifdef CONFIG_MMU + +#ifdef CONFIG_IRQ_PIPELINE +/* + * We need to synchronize the virtual interrupt state with the hard + * interrupt state we received on entry, then turn hardirqs back on to + * allow code which does not require strict serialization to be + * preempted by an out-of-band activity. + */ +static inline +unsigned long fault_entry(int exception, struct pt_regs *regs) +{ + unsigned long flags; + + trace_ARM_trap_entry(exception, regs); + + flags = hard_local_save_flags(); + + /* + * The companion core must demote the current context to + * in-band stage if running oob on entry. 
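+ *
+ * Every MMU fault handler below brackets its work with this pair,
+ * along the lines of (sketch):
+ *
+ *	irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
+ *	... fault handling, preemptible by out-of-band activity ...
+ *	fault_exit(ARM_TRAP_ACCESS, regs, irqflags);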
+ */ + mark_trap_entry(exception, regs); + + if (raw_irqs_disabled_flags(flags)) { + stall_inband(); + trace_hardirqs_off(); + } + + hard_local_irq_enable(); + + return flags; +} + +static inline +void fault_exit(int exception, struct pt_regs *regs, + unsigned long flags) +{ + WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()); + + /* + * We expect kentry_exit_pipelined() to clear the stall bit if + * kentry_enter_pipelined() observed it that way. + */ + mark_trap_exit(exception, regs); + trace_ARM_trap_exit(exception, regs); + hard_local_irq_restore(flags); +} + +#else /* !CONFIG_IRQ_PIPELINE */ + +#define fault_entry(__exception, __regs) ({ 0; }) +#define fault_exit(__exception, __regs, __flags) \ + do { (void)(__flags); } while (0) + +#endif /* !CONFIG_IRQ_PIPELINE */ /* * This is useful to dump out the page tables associated with @@ -96,6 +155,15 @@ pr_cont("\n"); } #else /* CONFIG_MMU */ +unsigned long fault_entry(int exception, struct pt_regs *regs) +{ + return 0; +} + +static inline void fault_exit(int exception, struct pt_regs *regs, + unsigned long combo) +{ } + void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr) { } #endif /* CONFIG_MMU */ @@ -116,6 +184,7 @@ /* * No handler, we'll have to terminate things with extreme prejudice. */ + irq_pipeline_oops(); bust_spinlocks(1); pr_alert("8<--- cut here ---\n"); pr_alert("Unable to handle kernel %s at virtual address %08lx\n", @@ -168,14 +237,22 @@ { struct task_struct *tsk = current; struct mm_struct *mm = tsk->active_mm; + unsigned long irqflags; /* * If we are in kernel mode at this point, we * have no context to handle this fault with. */ - if (user_mode(regs)) + if (user_mode(regs)) { + irqflags = fault_entry(ARM_TRAP_ACCESS, regs); __do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs); - else + fault_exit(ARM_TRAP_ACCESS, regs, irqflags); + } else + /* + * irq_pipeline: kernel faults are either quickly + * recoverable via fixup, or lethal. In both cases, we + * can skip the interrupt state synchronization. 
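+ * (The fixup path inherits the entry state unchanged, and the
+ * lethal path goes through irq_pipeline_oops() before dying, so
+ * nothing is lost by skipping the sync here.)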
+ */ __do_kernel_fault(mm, addr, fsr, regs); } @@ -244,9 +321,12 @@ int sig, code; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; + unsigned long irqflags; + + irqflags = fault_entry(ARM_TRAP_ACCESS, regs); if (kprobe_page_fault(regs, fsr)) - return 0; + goto out; tsk = current; mm = tsk->mm; @@ -302,7 +382,7 @@ if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) goto no_context; - return 0; + goto out; } if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) { @@ -318,7 +398,7 @@ * Handle the "normal" case first - VM_FAULT_MAJOR */ if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS)))) - return 0; + goto out; /* * If we are in kernel mode at this point, we @@ -334,7 +414,7 @@ * got oom-killed) */ pagefault_out_of_memory(); - return 0; + goto out; } if (fault & VM_FAULT_SIGBUS) { @@ -355,10 +435,13 @@ } __do_user_fault(addr, fsr, sig, code, regs); - return 0; + goto out; no_context: __do_kernel_fault(mm, addr, fsr, regs); +out: + fault_exit(ARM_TRAP_ACCESS, regs, irqflags); + return 0; } #else /* CONFIG_MMU */ @@ -396,6 +479,8 @@ p4d_t *p4d, *p4d_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; + + WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled()); if (addr < TASK_SIZE) return do_page_fault(addr, fsr, regs); @@ -470,7 +555,11 @@ static int do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { + unsigned long irqflags; + + irqflags = fault_entry(ARM_TRAP_SECTION, regs); do_bad_area(addr, fsr, regs); + fault_exit(ARM_TRAP_SECTION, regs, irqflags); return 0; } #endif /* CONFIG_ARM_LPAE */ @@ -518,10 +607,12 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { const struct fsr_info *inf = fsr_info + fsr_fs(fsr); + unsigned long irqflags; if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) return; + irqflags = fault_entry(ARM_TRAP_DABT, regs); pr_alert("8<--- cut here ---\n"); pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", inf->name, fsr, addr); @@ -529,6 +620,7 @@ arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr, fsr, 0); + fault_exit(ARM_TRAP_DABT, regs, irqflags); } void __init @@ -548,15 +640,18 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) { const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); + unsigned long irqflags; if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) return; + irqflags = fault_entry(ARM_TRAP_PABT, regs); pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", inf->name, ifsr, addr); arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr, ifsr, 0); + fault_exit(ARM_TRAP_PABT, regs, irqflags); } /* diff --git a/kernel/arch/arm/vdso/datapage.S b/kernel/arch/arm/vdso/datapage.S index 9cd73b7..9beb76d 100644 --- a/kernel/arch/arm/vdso/datapage.S +++ b/kernel/arch/arm/vdso/datapage.S @@ -5,6 +5,8 @@ .align 2 .L_vdso_data_ptr: .long _start - . - VDSO_DATA_SIZE +.L_vdso_priv_ptr: + .long _start - . 
- VDSO_DATA_SIZE - VDSO_PRIV_SIZE ENTRY(__get_datapage) .fnstart @@ -14,3 +16,12 @@ bx lr .fnend ENDPROC(__get_datapage) + +ENTRY(__get_privpage) + .fnstart + adr r0, .L_vdso_priv_ptr + ldr r1, [r0] + add r0, r0, r1 + bx lr + .fnend +ENDPROC(__get_privpage) diff --git a/kernel/arch/arm/vfp/entry.S b/kernel/arch/arm/vfp/entry.S index 27b0a1f..2e6680c 100644 --- a/kernel/arch/arm/vfp/entry.S +++ b/kernel/arch/arm/vfp/entry.S @@ -23,6 +23,7 @@ @ ENTRY(do_vfp) inc_preempt_count r10, r4 + disable_irq_if_pipelined ldr r4, .LCvfp ldr r11, [r10, #TI_CPU] @ CPU number add r10, r10, #TI_VFPSTATE @ r10 = workspace @@ -30,6 +31,7 @@ ENDPROC(do_vfp) ENTRY(vfp_null_entry) + enable_irq_if_pipelined dec_preempt_count_ti r10, r4 ret lr ENDPROC(vfp_null_entry) diff --git a/kernel/arch/arm/vfp/vfphw.S b/kernel/arch/arm/vfp/vfphw.S index d5837bf..d512f9f 100644 --- a/kernel/arch/arm/vfp/vfphw.S +++ b/kernel/arch/arm/vfp/vfphw.S @@ -170,6 +170,7 @@ @ out before setting an FPEXC that @ stops us reading stuff VFPFMXR FPEXC, r1 @ Restore FPEXC last + enable_irq_if_pipelined sub r2, r2, #4 @ Retry current instruction - if Thumb str r2, [sp, #S_PC] @ mode it's two 16-bit instructions, @ else it's one 32-bit instruction, so @@ -199,6 +200,7 @@ @ Fall into hand on to next handler - appropriate coproc instr @ not recognised by VFP + enable_irq_if_pipelined DBGSTR "not VFP" dec_preempt_count_ti r10, r4 ret lr diff --git a/kernel/arch/arm/vfp/vfpmodule.c b/kernel/arch/arm/vfp/vfpmodule.c index 2cb355c..de099bc 100644 --- a/kernel/arch/arm/vfp/vfpmodule.c +++ b/kernel/arch/arm/vfp/vfpmodule.c @@ -14,10 +14,12 @@ #include <linux/signal.h> #include <linux/sched/signal.h> #include <linux/smp.h> +#include <linux/dovetail.h> #include <linux/init.h> #include <linux/uaccess.h> #include <linux/user.h> #include <linux/export.h> +#include <linux/smp.h> #include <asm/cp15.h> #include <asm/cputype.h> @@ -90,6 +92,7 @@ static void vfp_thread_flush(struct thread_info *thread) { union vfp_state *vfp = &thread->vfpstate; + unsigned long flags; unsigned int cpu; /* @@ -100,11 +103,11 @@ * Do this first to ensure that preemption won't overwrite our * state saving should access to the VFP be enabled at this point. */ - cpu = get_cpu(); + cpu = hard_get_cpu(flags); if (vfp_current_hw_state[cpu] == vfp) vfp_current_hw_state[cpu] = NULL; fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); - put_cpu(); + hard_put_cpu(flags); memset(vfp, 0, sizeof(union vfp_state)); @@ -119,11 +122,12 @@ { /* release case: Per-thread VFP cleanup. */ union vfp_state *vfp = &thread->vfpstate; - unsigned int cpu = get_cpu(); + unsigned long flags; + unsigned int cpu = hard_get_cpu(flags); if (vfp_current_hw_state[cpu] == vfp) vfp_current_hw_state[cpu] = NULL; - put_cpu(); + hard_put_cpu(flags); } static void vfp_thread_copy(struct thread_info *thread) @@ -159,6 +163,7 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) { struct thread_info *thread = v; + unsigned long flags; u32 fpexc; #ifdef CONFIG_SMP unsigned int cpu; @@ -166,6 +171,7 @@ switch (cmd) { case THREAD_NOTIFY_SWITCH: + flags = hard_cond_local_irq_save(); fpexc = fmrx(FPEXC); #ifdef CONFIG_SMP @@ -185,6 +191,7 @@ * old state. 
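 * With Dovetail, this whole sequence runs within a
 * hard_cond_local_irq_save() section, so an out-of-band preemption
 * cannot observe a half-switched VFP state.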
*/ fmxr(FPEXC, fpexc & ~FPEXC_EN); + hard_cond_local_irq_restore(flags); break; case THREAD_NOTIFY_FLUSH: @@ -248,7 +255,10 @@ if (exceptions == VFP_EXCEPTION_ERROR) { vfp_panic("unhandled bounce", inst); - vfp_raise_sigfpe(FPE_FLTINV, regs); + if (mark_cond_trap_entry(ARM_TRAP_VFP, regs)) { + vfp_raise_sigfpe(FPE_FLTINV, regs); + mark_trap_exit(ARM_TRAP_VFP, regs); + } return; } @@ -322,7 +332,7 @@ */ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) { - u32 fpscr, orig_fpscr, fpsid, exceptions; + u32 fpscr, orig_fpscr, fpsid, exceptions, next_trigger = 0; pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); @@ -352,6 +362,7 @@ /* * Synchronous exception, emulate the trigger instruction */ + hard_cond_local_irq_enable(); goto emulate; } @@ -364,7 +375,18 @@ trigger = fmrx(FPINST); regs->ARM_pc -= 4; #endif - } else if (!(fpexc & FPEXC_DEX)) { + if (fpexc & FPEXC_FP2V) { + /* + * The barrier() here prevents fpinst2 being read + * before the condition above. + */ + barrier(); + next_trigger = fmrx(FPINST2); + } + } + hard_cond_local_irq_enable(); + + if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) { /* * Illegal combination of bits. It can be caused by an * unallocated VFP instruction but with FPSCR.IXE set and not @@ -404,18 +426,14 @@ if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V)) goto exit; - /* - * The barrier() here prevents fpinst2 being read - * before the condition above. - */ - barrier(); - trigger = fmrx(FPINST2); + trigger = next_trigger; emulate: exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); if (exceptions) vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); exit: + hard_cond_local_irq_enable(); preempt_enable(); } @@ -515,7 +533,8 @@ */ void vfp_sync_hwstate(struct thread_info *thread) { - unsigned int cpu = get_cpu(); + unsigned long flags; + unsigned int cpu = hard_get_cpu(flags); if (vfp_state_in_hw(cpu, thread)) { u32 fpexc = fmrx(FPEXC); @@ -528,17 +547,18 @@ fmxr(FPEXC, fpexc); } - put_cpu(); + hard_put_cpu(flags); } /* Ensure that the thread reloads the hardware VFP state on the next use. 
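 * As elsewhere in this file, hard_get_cpu()/hard_put_cpu() stand in
 * for get_cpu()/put_cpu(), keeping the per-CPU VFP state consistent
 * against out-of-band preemption, not only in-band migration.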
*/ void vfp_flush_hwstate(struct thread_info *thread) { - unsigned int cpu = get_cpu(); + unsigned long flags; + unsigned int cpu = hard_get_cpu(flags); vfp_force_reload(cpu, thread); - put_cpu(); + hard_put_cpu(flags); } /* diff --git a/kernel/arch/arm64/Kconfig b/kernel/arch/arm64/Kconfig index 88ac9f0..d453ece 100644 --- a/kernel/arch/arm64/Kconfig +++ b/kernel/arch/arm64/Kconfig @@ -176,6 +176,8 @@ select HAVE_FUNCTION_GRAPH_TRACER select HAVE_GCC_PLUGINS select HAVE_HW_BREAKPOINT if PERF_EVENTS + select HAVE_IRQ_PIPELINE + select HAVE_DOVETAIL select HAVE_IRQ_TIME_ACCOUNTING select HAVE_NMI select HAVE_PATA_PLATFORM @@ -1178,6 +1180,8 @@ config CC_HAVE_SHADOW_CALL_STACK def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) +source "kernel/Kconfig.dovetail" + config PARAVIRT bool "Enable paravirtualization code" help diff --git a/kernel/arch/arm64/Makefile b/kernel/arch/arm64/Makefile index b3a57c5..c77b376 100644 --- a/kernel/arch/arm64/Makefile +++ b/kernel/arch/arm64/Makefile @@ -219,3 +219,6 @@ CLEAN_DIRS += out CLEAN_FILES += boot.img kernel.img resource.img zboot.img + +KBUILD_CFLAGS += -I$(srctree)/arch/$(SRCARCH)/xenomai/include -I$(srctree)/arch/$(SRCARCH)/xenomai/dovetail/include -I$(srctree)/include/xenomai +core-$(CONFIG_XENOMAI) += arch/arm64/xenomai/dovetail/ diff --git a/kernel/arch/arm64/boot/dts/broadcom/Makefile b/kernel/arch/arm64/boot/dts/broadcom/Makefile index cb7de8d..7f86784 100644 --- a/kernel/arch/arm64/boot/dts/broadcom/Makefile +++ b/kernel/arch/arm64/boot/dts/broadcom/Makefile @@ -3,6 +3,7 @@ bcm2837-rpi-3-a-plus.dtb \ bcm2837-rpi-3-b.dtb \ bcm2837-rpi-3-b-plus.dtb \ + bcm2837-rpi-3-b-nobt.dtb \ bcm2837-rpi-cm3-io3.dtb subdir-y += northstar2 diff --git a/kernel/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts b/kernel/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts new file mode 100644 index 0000000..43f9d0f --- /dev/null +++ b/kernel/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts @@ -0,0 +1,12 @@ +/dts-v1/; +#include "bcm2837-rpi-3-b.dts" + +&uart0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart0_gpio32>; +}; + +&uart1 { + status = "disabled"; +}; diff --git a/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi b/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi index 282038e..fc812b5 100755 --- a/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi +++ b/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi @@ -27,7 +27,7 @@ }; es8316_sound: es8316-sound { - status = "okay"; + status = "disabled"; compatible = "rockchip,multicodecs-card"; rockchip,card-name = "rockchip-es8316"; rockchip,format = "i2s"; @@ -235,7 +235,7 @@ BT,reset_gpio = <&gpio0 RK_PB2 GPIO_ACTIVE_HIGH>; //BT_DISABLE_GPIO0_B2_u_1V8 //BT,wake_gpio = <&gpio3 RK_PA1 GPIO_ACTIVE_HIGH>;//HOST_WAKE_BT_H //BT,wake_host_irq = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;//BT_WAKE_HOST_H - status = "okay"; + status = "disabled"; }; wireless_wlan: wireless-wlan { @@ -245,7 +245,7 @@ // pinctrl-0 = <&wifi_host_wake_irq>; // WIFI,host_wake_irq = <&gpio2 RK_PB5 GPIO_ACTIVE_HIGH>; //GPIO2_B5_u_1V8_WF-BT_WAKEUP_IN // WIFI,poweren_gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>; - status = "okay"; + status = "disabled"; }; ndj_io_init { @@ -253,45 +253,13 @@ pinctrl-names = "default"; pinctrl-0 = <&ndj_io_gpio>; - vcc_12v { - gpio_num = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>; - gpio_function = <0>; - };//VCC12_IO_EN_GPIO0_D3_u_3V3 - - vcc_3v { - gpio_num = <&gpio4 RK_PA1 GPIO_ACTIVE_HIGH>; - gpio_function = <0>; - };//VCC3_IO_EN_GPIO4_A1_d_3V3 + hub_5V_reset { gpio_num = 
<&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; gpio_function = <3>; };//HUB_RESET_GPIO4_B6_d_3V3 - 4g_power { - gpio_num = <&gpio0 RK_PC6 GPIO_ACTIVE_HIGH>; - gpio_function = <0>; - };//4G_PWREN_GPIO3_C7_u_3V3 - - 5g_power { - gpio_num =<&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>; - gpio_function = <0>; - }; - - wake_wifi_bt { - gpio_num = <&gpio2 RK_PB5 GPIO_ACTIVE_LOW>; - gpio_function = <0>; - };//GPIO2_B5_u_1V8_WF-BT_WAKEUP_IN - - air_mode_4g { - gpio_num = <&gpio2 RK_PB4 GPIO_ACTIVE_LOW>; - gpio_function = <0>; - }; //GPIO2_B4_u_1V8_4G_AIR_MODE_IN - - reset_4g { - gpio_num = <&gpio2 RK_PC3 GPIO_ACTIVE_LOW>; - gpio_function = <3>; - }; //GPIO2_C3_d_1V8_4G_RESET_N_IN }; @@ -439,7 +407,7 @@ }; &dsi0_in_vp3 { - status = "okay"; + status = "disabled"; }; /* @@ -472,6 +440,31 @@ //pinctrl-0 = <&lcd_rst_gpio>; }; +&gmac0 { + /* Use rgmii-rxid mode to disable rx delay inside Soc */ + phy-mode = "rgmii-rxid"; + clock_in_out = "output"; + + snps,reset-gpio = <&gpio2 RK_PB5 GPIO_ACTIVE_LOW>; + snps,reset-active-low; + /* Reset time is 20ms, 100ms for rtl8211f */ + snps,reset-delays-us = <0 20000 100000>; + + pinctrl-names = "default"; + pinctrl-0 = <&gmac0_miim + &gmac0_tx_bus2 + &gmac0_rx_bus2 + &gmac0_rgmii_clk + &gmac0_rgmii_bus + ð0_pins + &gmac0_clkinout>; + tx_delay = <0x44>; + /* rx_delay = <0x4f>; */ + + phy-handle = <&rgmii_phy0>; + status = "okay"; +}; + &gmac1 { /* Use rgmii-rxid mode to disable rx delay inside Soc */ phy-mode = "rgmii-rxid"; @@ -492,7 +485,7 @@ tx_delay = <0x43>; /* rx_delay = <0x3f>; */ - phy-handle = <&rgmii_phy>; + phy-handle = <&rgmii_phy1>; status = "okay"; }; @@ -524,7 +517,7 @@ /* Should work with at least 128MB cma reserved above. */ &hdmirx_ctrler { - status = "okay"; + status = "disabled"; #sound-dai-cells = <1>; /* Effective level used to trigger HPD: 0-low, 1-high */ @@ -824,12 +817,18 @@ }; &mdio1 { - rgmii_phy: phy@1 { + rgmii_phy1: phy@1 { compatible = "ethernet-phy-ieee802.3-c22"; reg = <0x1>; }; }; +&mdio0 { + rgmii_phy0: phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0x1>; + }; +}; &mipi_dcphy1 { @@ -838,7 +837,7 @@ &pcie2x1l2 { phys = <&combphy0_ps PHY_TYPE_PCIE>; - reset-gpios = <&gpio3 RK_PD0 GPIO_ACTIVE_HIGH>;//PCIE20x1_2_RSTn_GPIO3_D0_3V3 + reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>;//PCIE20x1_2_RSTn_GPIO3_D0_3V3 vpcie3v3-supply = <&vcc3v3_pcie30>; status = "okay"; };//MINIPCIE @@ -847,7 +846,7 @@ phys = <&combphy2_psu PHY_TYPE_PCIE>; reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;//PCIEX1_1_PERSTn_M1_L vpcie3v3-supply = <&vcc3v3_pcie30>; - status = "disabled"; + status = "okay"; };//M.2 WIFI6 &pcie2x1l0 { @@ -976,20 +975,7 @@ ndj_io_init{ ndj_io_gpio: ndj_io_gpio_col{ rockchip,pins = - <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>, - <4 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>, - <4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>, - <0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>, - <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>, - <2 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>, - <4 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>, //vcc_5v - <1 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>, //SPI0_MISO_M2_1V8 41 - <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>, //SPI4_MISO_M2_1V8 32 - <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>, //SPI0_MOSI_M2_3V3 42 - <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>, //SPI4_MOSI_M2_1V8 33 - <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>, //SPI0_CLK_M2_1V8 43 - <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>, //SPI4_CLK_M2_1V8 34 - <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>, //SPI0_CS0_M2_1V8 44 + <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>; //SPI4_CS0_M2_1V8 35 }; }; @@ -1049,7 +1035,7 @@ pinctrl-names = "default"; 
pinctrl-0 = <&sdiom0_pins>; sd-uhs-sdr104; - status = "okay"; + status = "disabled"; } &sdmmc { @@ -1109,7 +1095,7 @@ #endif &uart1 { - status = "okay"; + status = "disabled"; // dma-names = "tx", "rx"; // use DMA transfer mode pinctrl-names = "default"; pinctrl-0 = <&uart1m0_xfer>; @@ -1123,7 +1109,7 @@ &uart4 { - status = "okay"; + status = "disabled"; pinctrl-names = "default"; pinctrl-0 = <&uart4m0_xfer>; }; @@ -1141,7 +1127,7 @@ }; &uart7 { - status = "okay"; + status = "disabled"; pinctrl-names = "default"; pinctrl-0 = <&uart7m1_xfer>; }; @@ -1154,7 +1140,7 @@ &uart9 { - status = "okay"; + status = "disabled"; pinctrl-names = "default"; pinctrl-0 = <&uart9m0_xfer &uart9m0_ctsn>; }; diff --git a/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts b/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts index e3ba742..7ab118b 100644 --- a/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts +++ b/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts @@ -95,7 +95,7 @@ #clock-cells = <0x0>; clock-frequency = <0x29d7ab80>; clock-output-names = "spll"; - phandle = <0x1ec>; + phandle = <0x1f4>; }; xin32k { @@ -103,7 +103,7 @@ #clock-cells = <0x0>; clock-frequency = <0x8000>; clock-output-names = "xin32k"; - phandle = <0x1ed>; + phandle = <0x1f5>; }; xin24m { @@ -111,7 +111,7 @@ #clock-cells = <0x0>; clock-frequency = <0x16e3600>; clock-output-names = "xin24m"; - phandle = <0x1ee>; + phandle = <0x1f6>; }; hclk_vo1@fd7c08ec { @@ -131,7 +131,7 @@ clocks = <0x2 0x1bc>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1ef>; + phandle = <0x1f7>; }; hclk_vo0@fd7c08dc { @@ -151,7 +151,7 @@ clocks = <0x2 0x264>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f0>; + phandle = <0x1f8>; }; hclk_nvm@fd7c087c { @@ -181,7 +181,7 @@ clocks = <0x2 0x1e1>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f1>; + phandle = <0x1f9>; }; aclk_isp1_pre@fd7c0868 { @@ -191,7 +191,7 @@ clocks = <0x2 0x1e0>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f2>; + phandle = <0x1fa>; }; aclk_rkvdec0_pre@fd7c08a0 { @@ -201,7 +201,7 @@ clocks = <0x2 0x1bc>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f3>; + phandle = <0x1fb>; }; hclk_rkvdec0_pre@fd7c08a0 { @@ -211,7 +211,7 @@ clocks = <0x2 0x1be>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f4>; + phandle = <0x1fc>; }; aclk_rkvdec1_pre@fd7c08a4 { @@ -221,7 +221,7 @@ clocks = <0x2 0x1bc>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f5>; + phandle = <0x1fd>; }; hclk_rkvdec1_pre@fd7c08a4 { @@ -231,7 +231,7 @@ clocks = <0x2 0x1be>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f6>; + phandle = <0x1fe>; }; aclk_jpeg_decoder_pre@fd7c08b0 { @@ -241,7 +241,7 @@ clocks = <0x2 0x1bc>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f7>; + phandle = <0x1ff>; }; aclk_rkvenc1_pre@fd7c08c0 { @@ -251,7 +251,7 @@ clocks = <0x2 0x1c5>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f8>; + phandle = <0x200>; }; hclk_rkvenc1_pre@fd7c08c0 { @@ -261,7 +261,7 @@ clocks = <0x2 0x1c4>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1f9>; + phandle = <0x201>; }; aclk_hdcp0_pre@fd7c08dc { @@ -271,7 +271,7 @@ clocks = <0x2 0x26c>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1fa>; + phandle = <0x202>; }; aclk_hdcp1_pre@fd7c08ec { @@ -281,7 +281,7 @@ clocks = <0x2 0x263>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1fb>; + phandle =
<0x203>; }; pclk_av1_pre@fd7c0910 { @@ -291,7 +291,7 @@ clocks = <0x2 0x1be>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1fc>; + phandle = <0x204>; }; aclk_av1_pre@fd7c0910 { @@ -301,7 +301,7 @@ clocks = <0x2 0x1bc>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1fd>; + phandle = <0x205>; }; hclk_sdio_pre@fd7c092c { @@ -311,7 +311,7 @@ clocks = <0x3>; #power-domain-cells = <0x1>; #clock-cells = <0x0>; - phandle = <0x1fe>; + phandle = <0x206>; }; pclk_vo0_grf@fd7c08dc { @@ -337,7 +337,7 @@ #clock-cells = <0x0>; clock-frequency = <0x0>; clock-output-names = "i2s0_mclkin"; - phandle = <0x1ff>; + phandle = <0x207>; }; mclkin-i2s1 { @@ -345,7 +345,7 @@ #clock-cells = <0x0>; clock-frequency = <0x0>; clock-output-names = "i2s1_mclkin"; - phandle = <0x200>; + phandle = <0x208>; }; mclkin-i2s2 { @@ -353,7 +353,7 @@ #clock-cells = <0x0>; clock-frequency = <0x0>; clock-output-names = "i2s2_mclkin"; - phandle = <0x201>; + phandle = <0x209>; }; mclkin-i2s3 { @@ -361,7 +361,7 @@ #clock-cells = <0x0>; clock-frequency = <0x0>; clock-output-names = "i2s3_mclkin"; - phandle = <0x202>; + phandle = <0x20a>; }; mclkout-i2s0@fd58c318 { @@ -383,7 +383,7 @@ clock-output-names = "i2s1_mclkout_to_io"; rockchip,bit-shift = <0x1>; rockchip,bit-set-to-disable; - phandle = <0x203>; + phandle = <0x20b>; }; mclkout-i2s1@fd58a000 { @@ -393,7 +393,7 @@ #clock-cells = <0x0>; clock-output-names = "i2s1m1_mclkout_to_io"; rockchip,bit-shift = <0x6>; - phandle = <0x204>; + phandle = <0x20c>; }; mclkout-i2s2@fd58c318 { @@ -404,7 +404,7 @@ clock-output-names = "i2s2_mclkout_to_io"; rockchip,bit-shift = <0x2>; rockchip,bit-set-to-disable; - phandle = <0x205>; + phandle = <0x20d>; }; mclkout-i2s3@fd58c318 { @@ -415,7 +415,7 @@ clock-output-names = "i2s3_mclkout_to_io"; rockchip,bit-shift = <0x7>; rockchip,bit-set-to-disable; - phandle = <0x206>; + phandle = <0x20e>; }; }; @@ -1420,7 +1420,7 @@ compatible = "arm,armv8-pmuv3"; interrupts = <0x1 0x7 0x8>; interrupt-affinity = <0x6 0x7 0x8 0x9 0xa 0xb 0xc 0xd>; - phandle = <0x207>; + phandle = <0x20f>; }; cpuinfo { @@ -1435,7 +1435,7 @@ phys = <0x2f 0x30>; phy-names = "dcphy0", "dcphy1"; status = "disabled"; - phandle = <0x208>; + phandle = <0x210>; }; csi2-dcphy1 { @@ -1444,7 +1444,7 @@ phys = <0x2f 0x30>; phy-names = "dcphy0", "dcphy1"; status = "disabled"; - phandle = <0x209>; + phandle = <0x211>; }; csi2-dphy0 { @@ -1453,7 +1453,7 @@ phys = <0x2f 0x30>; phy-names = "dcphy0", "dcphy1"; status = "disabled"; - phandle = <0x20a>; + phandle = <0x212>; }; csi2-dphy1 { @@ -1462,7 +1462,7 @@ phys = <0x2f 0x30>; phy-names = "dcphy0", "dcphy1"; status = "disabled"; - phandle = <0x20b>; + phandle = <0x213>; }; csi2-dphy2 { @@ -1471,7 +1471,7 @@ phys = <0x2f 0x30>; phy-names = "dcphy0", "dcphy1"; status = "disabled"; - phandle = <0x20c>; + phandle = <0x214>; }; csi2-dphy3 { @@ -1480,7 +1480,7 @@ phys = <0x2f 0x30>; phy-names = "dcphy0", "dcphy1"; status = "disabled"; - phandle = <0x20d>; + phandle = <0x215>; }; csi2-dphy4 { @@ -1489,7 +1489,7 @@ phys = <0x2f 0x30>; phy-names = "dcphy0", "dcphy1"; status = "disabled"; - phandle = <0x20e>; + phandle = <0x216>; }; csi2-dphy5 { @@ -1498,7 +1498,7 @@ phys = <0x2f 0x30>; phy-names = "dcphy0", "dcphy1"; status = "disabled"; - phandle = <0x20f>; + phandle = <0x217>; }; display-subsystem { @@ -1506,7 +1506,7 @@ ports = <0x31>; memory-region = <0x32>; memory-region-names = "drm-logo"; - phandle = <0x210>; + phandle = <0x218>; route { @@ -1517,7 +1517,7 @@ logo,mode = "center"; charge_logo,mode = "center"; 
connect = <0x33>; - phandle = <0x211>; + phandle = <0x219>; }; route-dsi0 { @@ -1527,7 +1527,7 @@ logo,mode = "center"; charge_logo,mode = "center"; connect = <0x34>; - phandle = <0x212>; + phandle = <0x21a>; }; route-dsi1 { @@ -1537,7 +1537,7 @@ logo,mode = "center"; charge_logo,mode = "center"; connect = <0x35>; - phandle = <0x213>; + phandle = <0x21b>; }; route-edp0 { @@ -1547,7 +1547,7 @@ logo,mode = "center"; charge_logo,mode = "center"; connect = <0x36>; - phandle = <0x214>; + phandle = <0x21c>; }; route-edp1 { @@ -1557,7 +1557,7 @@ logo,mode = "center"; charge_logo,mode = "center"; connect = <0x37>; - phandle = <0x215>; + phandle = <0x21d>; }; route-hdmi0 { @@ -1567,7 +1567,7 @@ logo,mode = "center"; charge_logo,mode = "center"; connect = <0x38>; - phandle = <0x216>; + phandle = <0x21e>; }; route-rgb { @@ -1577,7 +1577,7 @@ logo,mode = "center"; charge_logo,mode = "center"; connect = <0x39>; - phandle = <0x217>; + phandle = <0x21f>; }; route-dp1 { @@ -1587,7 +1587,7 @@ logo,mode = "center"; charge_logo,mode = "center"; connect = <0x3a>; - phandle = <0x218>; + phandle = <0x220>; }; route-hdmi1 { @@ -1597,7 +1597,7 @@ logo,mode = "center"; charge_logo,mode = "center"; connect = <0x3b>; - phandle = <0x219>; + phandle = <0x221>; }; }; }; @@ -1617,7 +1617,7 @@ status = "okay"; center-supply = <0x3e>; mem-supply = <0x3f>; - phandle = <0x21a>; + phandle = <0x222>; }; dmc-opp-table { @@ -1706,7 +1706,7 @@ arm,smc-id = <0x82000010>; #address-cells = <0x1>; #size-cells = <0x0>; - phandle = <0x21b>; + phandle = <0x223>; protocol@14 { reg = <0x14>; @@ -1726,13 +1726,13 @@ sdei { compatible = "arm,sdei-1.0"; method = "smc"; - phandle = <0x21c>; + phandle = <0x224>; }; optee { compatible = "linaro,optee-tz"; method = "smc"; - phandle = <0x21d>; + phandle = <0x225>; }; }; @@ -1744,49 +1744,49 @@ mipi-dcphy-dummy { status = "disabled"; - phandle = <0x21e>; + phandle = <0x226>; }; mipi0-csi2 { compatible = "rockchip,rk3588-mipi-csi2"; rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>; status = "disabled"; - phandle = <0x21f>; + phandle = <0x227>; }; mipi1-csi2 { compatible = "rockchip,rk3588-mipi-csi2"; rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>; status = "disabled"; - phandle = <0x220>; + phandle = <0x228>; }; mipi2-csi2 { compatible = "rockchip,rk3588-mipi-csi2"; rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>; status = "disabled"; - phandle = <0x221>; + phandle = <0x229>; }; mipi3-csi2 { compatible = "rockchip,rk3588-mipi-csi2"; rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>; status = "disabled"; - phandle = <0x222>; + phandle = <0x22a>; }; mipi4-csi2 { compatible = "rockchip,rk3588-mipi-csi2"; rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>; status = "disabled"; - phandle = <0x223>; + phandle = <0x22b>; }; mipi5-csi2 { compatible = "rockchip,rk3588-mipi-csi2"; rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>; status = "disabled"; - phandle = <0x224>; + phandle = <0x22c>; }; mpp-srv { @@ -1814,7 +1814,7 @@ compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4b>; status = "disabled"; - phandle = <0x225>; + phandle = <0x22d>; }; rkcif-mipi-lvds { @@ -1829,28 +1829,28 @@ compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4c>; status = "disabled"; - phandle = <0x226>; + phandle = <0x22e>; }; rkcif-mipi-lvds-sditf-vir1 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4c>; status = "disabled"; - phandle = <0x227>; + phandle = <0x22f>; }; rkcif-mipi-lvds-sditf-vir2 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4c>; status = "disabled"; - phandle = <0x228>; + phandle = <0x230>; }; 
rkcif-mipi-lvds-sditf-vir3 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4c>; status = "disabled"; - phandle = <0x229>; + phandle = <0x231>; }; rkcif-mipi-lvds1 { @@ -1865,28 +1865,28 @@ compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4d>; status = "disabled"; - phandle = <0x22a>; + phandle = <0x232>; }; rkcif-mipi-lvds1-sditf-vir1 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4d>; status = "disabled"; - phandle = <0x22b>; + phandle = <0x233>; }; rkcif-mipi-lvds1-sditf-vir2 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4d>; status = "disabled"; - phandle = <0x22c>; + phandle = <0x234>; }; rkcif-mipi-lvds1-sditf-vir3 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4d>; status = "disabled"; - phandle = <0x22d>; + phandle = <0x235>; }; rkcif-mipi-lvds2 { @@ -1901,28 +1901,28 @@ compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4e>; status = "disabled"; - phandle = <0x22e>; + phandle = <0x236>; }; rkcif-mipi-lvds2-sditf-vir1 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4e>; status = "disabled"; - phandle = <0x22f>; + phandle = <0x237>; }; rkcif-mipi-lvds2-sditf-vir2 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4e>; status = "disabled"; - phandle = <0x230>; + phandle = <0x238>; }; rkcif-mipi-lvds2-sditf-vir3 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4e>; status = "disabled"; - phandle = <0x231>; + phandle = <0x239>; }; rkcif-mipi-lvds3 { @@ -1937,98 +1937,98 @@ compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4f>; status = "disabled"; - phandle = <0x232>; + phandle = <0x23a>; }; rkcif-mipi-lvds3-sditf-vir1 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4f>; status = "disabled"; - phandle = <0x233>; + phandle = <0x23b>; }; rkcif-mipi-lvds3-sditf-vir2 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4f>; status = "disabled"; - phandle = <0x234>; + phandle = <0x23c>; }; rkcif-mipi-lvds3-sditf-vir3 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x4f>; status = "disabled"; - phandle = <0x235>; + phandle = <0x23d>; }; rkisp0-vir0 { compatible = "rockchip,rkisp-vir"; rockchip,hw = <0x50>; status = "disabled"; - phandle = <0x236>; + phandle = <0x23e>; }; rkisp0-vir1 { compatible = "rockchip,rkisp-vir"; rockchip,hw = <0x50>; status = "disabled"; - phandle = <0x237>; + phandle = <0x23f>; }; rkisp0-vir2 { compatible = "rockchip,rkisp-vir"; rockchip,hw = <0x50>; status = "disabled"; - phandle = <0x238>; + phandle = <0x240>; }; rkisp0-vir3 { compatible = "rockchip,rkisp-vir"; rockchip,hw = <0x50>; status = "disabled"; - phandle = <0x239>; + phandle = <0x241>; }; rkisp1-vir0 { compatible = "rockchip,rkisp-vir"; rockchip,hw = <0x51>; status = "disabled"; - phandle = <0x23a>; + phandle = <0x242>; }; rkisp1-vir1 { compatible = "rockchip,rkisp-vir"; rockchip,hw = <0x51>; status = "disabled"; - phandle = <0x23b>; + phandle = <0x243>; }; rkisp1-vir2 { compatible = "rockchip,rkisp-vir"; rockchip,hw = <0x51>; status = "disabled"; - phandle = <0x23c>; + phandle = <0x244>; }; rkisp1-vir3 { compatible = "rockchip,rkisp-vir"; rockchip,hw = <0x51>; status = "disabled"; - phandle = <0x23d>; + phandle = <0x245>; }; rkispp0-vir0 { compatible = "rockchip,rk3588-rkispp-vir"; rockchip,hw = <0x52>; status = "disabled"; - phandle = <0x23e>; + phandle = <0x246>; }; rkispp1-vir0 { compatible = "rockchip,rk3588-rkispp-vir"; rockchip,hw = <0x53>; status = "disabled"; - phandle = <0x23f>; + phandle = <0x247>; }; rkvenc-ccu { @@ -2043,24 +2043,24 @@ rockchip,sleep-debug-en = <0x1>; rockchip,sleep-mode-config = 
<0x1000608>; rockchip,wakeup-config = <0x100>; - phandle = <0x240>; + phandle = <0x248>; }; rockchip-system-monitor { compatible = "rockchip,system-monitor"; rockchip,thermal-zone = "soc-thermal"; - phandle = <0x241>; + phandle = <0x249>; }; thermal-zones { - phandle = <0x242>; + phandle = <0x24a>; soc-thermal { polling-delay-passive = <0x14>; polling-delay = <0x3e8>; sustainable-power = <0x834>; thermal-sensors = <0x54 0x0>; - phandle = <0x243>; + phandle = <0x24b>; trips { @@ -2068,7 +2068,7 @@ temperature = <0x124f8>; hysteresis = <0x7d0>; type = "passive"; - phandle = <0x244>; + phandle = <0x24c>; }; trip-point-1 { @@ -2082,7 +2082,7 @@ temperature = <0x1c138>; hysteresis = <0x7d0>; type = "critical"; - phandle = <0x245>; + phandle = <0x24d>; }; }; @@ -2118,42 +2118,42 @@ polling-delay-passive = <0x14>; polling-delay = <0x3e8>; thermal-sensors = <0x54 0x1>; - phandle = <0x246>; + phandle = <0x24e>; }; bigcore1-thermal { polling-delay-passive = <0x14>; polling-delay = <0x3e8>; thermal-sensors = <0x54 0x2>; - phandle = <0x247>; + phandle = <0x24f>; }; littlecore-thermal { polling-delay-passive = <0x14>; polling-delay = <0x3e8>; thermal-sensors = <0x54 0x3>; - phandle = <0x248>; + phandle = <0x250>; }; center-thermal { polling-delay-passive = <0x14>; polling-delay = <0x3e8>; thermal-sensors = <0x54 0x4>; - phandle = <0x249>; + phandle = <0x251>; }; gpu-thermal { polling-delay-passive = <0x14>; polling-delay = <0x3e8>; thermal-sensors = <0x54 0x5>; - phandle = <0x24a>; + phandle = <0x252>; }; npu-thermal { polling-delay-passive = <0x14>; polling-delay = <0x3e8>; thermal-sensors = <0x54 0x6>; - phandle = <0x24b>; + phandle = <0x253>; }; }; @@ -2371,7 +2371,7 @@ #size-cells = <0x2>; ranges; status = "okay"; - phandle = <0x24c>; + phandle = <0x254>; usb@fc000000 { compatible = "snps,dwc3"; @@ -2394,7 +2394,7 @@ snps,parkmode-disable-ss-quirk; quirk-skip-phy-init; status = "okay"; - phandle = <0x24d>; + phandle = <0x255>; }; }; @@ -2409,7 +2409,7 @@ phy-names = "usb2-phy"; power-domains = <0x57 0x1f>; status = "okay"; - phandle = <0x24e>; + phandle = <0x256>; }; usb@fc840000 { @@ -2436,7 +2436,7 @@ phy-names = "usb2-phy"; power-domains = <0x57 0x1f>; status = "okay"; - phandle = <0x24f>; + phandle = <0x257>; }; usb@fc8c0000 { @@ -2459,7 +2459,7 @@ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync"; #iommu-cells = <0x1>; status = "disabled"; - phandle = <0x250>; + phandle = <0x258>; }; iommu@fcb00000 { @@ -2469,7 +2469,7 @@ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync"; #iommu-cells = <0x1>; status = "disabled"; - phandle = <0x251>; + phandle = <0x259>; }; usbhost3_0 { @@ -2480,7 +2480,7 @@ #size-cells = <0x2>; ranges; status = "okay"; - phandle = <0x252>; + phandle = <0x25a>; usb@fcd00000 { compatible = "snps,dwc3"; @@ -2500,14 +2500,14 @@ snps,parkmode-disable-hs-quirk; snps,parkmode-disable-ss-quirk; status = "okay"; - phandle = <0x253>; + phandle = <0x25b>; }; }; syscon@fd588000 { compatible = "rockchip,rk3588-pmu0-grf", "syscon", "simple-mfd"; reg = <0x0 0xfd588000 0x0 0x2000>; - phandle = <0x254>; + phandle = <0x25c>; reboot-mode { compatible = "syscon-reboot-mode"; @@ -2522,7 +2522,7 @@ mode-panic = <0x5242c307>; mode-watchdog = <0x5242c308>; mode-quiescent = <0x5242c30e>; - phandle = <0x255>; + phandle = <0x25d>; }; }; @@ -2542,7 +2542,7 @@ pinctrl-names = "default"; pinctrl-0 = <0x67>; status = "disabled"; - phandle = <0x256>; + phandle = <0x25e>; ports { #address-cells = <0x1>; @@ -2697,7 +2697,7 @@ reg = <0x0 0xfd5d8000 0x0 0x4000>; #address-cells = <0x1>; 
#size-cells = <0x1>; - phandle = <0x257>; + phandle = <0x25f>; usb2-phy@8000 { compatible = "rockchip,rk3588-usb2phy"; @@ -2726,7 +2726,7 @@ reg = <0x0 0xfd5dc000 0x0 0x4000>; #address-cells = <0x1>; #size-cells = <0x1>; - phandle = <0x258>; + phandle = <0x260>; usb2-phy@c000 { compatible = "rockchip,rk3588-usb2phy"; @@ -2796,7 +2796,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "okay"; - phandle = <0x259>; + phandle = <0x261>; rk8602@42 { compatible = "rockchip,rk8602"; @@ -2849,7 +2849,7 @@ pinctrl-names = "default"; pinctrl-0 = <0x70>; status = "disabled"; - phandle = <0x25a>; + phandle = <0x262>; }; pwm@fd8b0000 { @@ -2862,7 +2862,7 @@ clocks = <0x2 0x2a5 0x2 0x2a4>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x25b>; + phandle = <0x263>; }; pwm@fd8b0010 { @@ -2875,7 +2875,7 @@ clocks = <0x2 0x2a5 0x2 0x2a4>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x25c>; + phandle = <0x264>; }; pwm@fd8b0020 { @@ -2888,7 +2888,7 @@ clocks = <0x2 0x2a5 0x2 0x2a4>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x25d>; + phandle = <0x265>; }; pwm@fd8b0030 { @@ -2901,7 +2901,7 @@ clocks = <0x2 0x2a5 0x2 0x2a4>; clock-names = "pwm", "pclk"; status = "okay"; - phandle = <0x1c5>; + phandle = <0x1ce>; }; power-management@fd8d8000 { @@ -3180,7 +3180,7 @@ status = "okay"; rknpu-supply = <0xa6>; mem-supply = <0xa6>; - phandle = <0x25e>; + phandle = <0x266>; }; npu-opp-table { @@ -3378,7 +3378,7 @@ rockchip,resetgroup-node = <0x0>; power-domains = <0x57 0x15>; status = "okay"; - phandle = <0x25f>; + phandle = <0x267>; }; vdpu@fdb50400 { @@ -3401,7 +3401,7 @@ rockchip,resetgroup-node = <0x0>; power-domains = <0x57 0x15>; status = "okay"; - phandle = <0x260>; + phandle = <0x268>; }; iommu@fdb50800 { @@ -3437,7 +3437,7 @@ rockchip,taskqueue-node = <0x0>; rockchip,resetgroup-node = <0x0>; status = "okay"; - phandle = <0x261>; + phandle = <0x269>; }; rga@fdb60000 { @@ -3450,7 +3450,7 @@ power-domains = <0x57 0x16>; iommus = <0xac>; status = "okay"; - phandle = <0x262>; + phandle = <0x26a>; }; iommu@fdb60f00 { @@ -3476,7 +3476,7 @@ power-domains = <0x57 0x1e>; iommus = <0xad>; status = "okay"; - phandle = <0x263>; + phandle = <0x26b>; }; iommu@fdb70f00 { @@ -3501,7 +3501,7 @@ clock-names = "aclk_rga2", "hclk_rga2", "clk_rga2"; power-domains = <0x57 0x15>; status = "okay"; - phandle = <0x264>; + phandle = <0x26c>; }; jpegd@fdb90000 { @@ -3522,7 +3522,7 @@ rockchip,taskqueue-node = <0x1>; power-domains = <0x57 0x15>; status = "okay"; - phandle = <0x265>; + phandle = <0x26d>; }; iommu@fdb90480 { @@ -3558,7 +3558,7 @@ rockchip,ccu = <0xb0>; power-domains = <0x57 0x15>; status = "okay"; - phandle = <0x266>; + phandle = <0x26e>; }; iommu@fdba0800 { @@ -3594,7 +3594,7 @@ rockchip,ccu = <0xb0>; power-domains = <0x57 0x15>; status = "okay"; - phandle = <0x267>; + phandle = <0x26f>; }; iommu@fdba4800 { @@ -3630,7 +3630,7 @@ rockchip,ccu = <0xb0>; power-domains = <0x57 0x15>; status = "okay"; - phandle = <0x268>; + phandle = <0x270>; }; iommu@fdba8800 { @@ -3666,7 +3666,7 @@ rockchip,ccu = <0xb0>; power-domains = <0x57 0x15>; status = "okay"; - phandle = <0x269>; + phandle = <0x271>; }; iommu@fdbac800 { @@ -3701,7 +3701,7 @@ rockchip,taskqueue-node = <0x6>; iommus = <0xb4>; status = "okay"; - phandle = <0x26a>; + phandle = <0x272>; }; iommu@fdbb0800 { @@ -3740,7 +3740,7 @@ status = "okay"; venc-supply = <0xb8>; mem-supply = <0xb8>; - phandle = <0x26b>; + phandle = <0x273>; }; iommu@fdbdf000 { @@ -3782,7 +3782,7 @@ status = "okay"; venc-supply = <0xb8>; 
mem-supply = <0xb8>; - phandle = <0x26c>; + phandle = <0x274>; }; iommu@fdbef000 { @@ -3862,7 +3862,7 @@ rockchip,rcb-min-width = <0x200>; power-domains = <0x57 0xe>; status = "okay"; - phandle = <0x26d>; + phandle = <0x275>; }; iommu@fdc38700 { @@ -3908,7 +3908,7 @@ rockchip,rcb-min-width = <0x200>; power-domains = <0x57 0xf>; status = "okay"; - phandle = <0x26e>; + phandle = <0x276>; }; iommu@fdc48700 { @@ -3946,7 +3946,7 @@ rockchip,taskqueue-node = <0xb>; power-domains = <0x57 0x17>; status = "disabled"; - phandle = <0x26f>; + phandle = <0x277>; }; iommu@fdca0000 { @@ -3972,7 +3972,7 @@ power-domains = <0x57 0x1c>; iommus = <0xc3>; status = "disabled"; - phandle = <0x270>; + phandle = <0x278>; }; rkisp@fdcb0000 { @@ -4238,7 +4238,7 @@ rockchip,vo1-grf = <0xcc>; rockchip,pmu = <0xcd>; status = "okay"; - phandle = <0x271>; + phandle = <0x279>; ports { #address-cells = <0x1>; @@ -4251,7 +4251,7 @@ reg = <0x0>; rockchip,plane-mask = <0x5>; rockchip,primary-plane = <0x2>; - phandle = <0x272>; + phandle = <0x27a>; endpoint@0 { reg = <0x0>; @@ -4296,7 +4296,7 @@ reg = <0x1>; rockchip,plane-mask = <0xa>; rockchip,primary-plane = <0x3>; - phandle = <0x273>; + phandle = <0x27b>; endpoint@0 { reg = <0x0>; @@ -4343,7 +4343,7 @@ assigned-clock-parents = <0x2 0x4>; rockchip,plane-mask = <0x140>; rockchip,primary-plane = <0x8>; - phandle = <0x274>; + phandle = <0x27c>; endpoint@0 { reg = <0x0>; @@ -4400,7 +4400,7 @@ reg = <0x3>; rockchip,plane-mask = <0x280>; rockchip,primary-plane = <0x9>; - phandle = <0x275>; + phandle = <0x27d>; endpoint@0 { reg = <0x0>; @@ -4450,7 +4450,7 @@ power-domains = <0x57 0x19>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x1cd>; + phandle = <0x1d6>; }; i2s@fddc0000 { @@ -4469,7 +4469,7 @@ rockchip,playback-only; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x276>; + phandle = <0x27e>; }; spdif-tx@fdde0000 { @@ -4485,7 +4485,7 @@ power-domains = <0x57 0x1a>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x277>; + phandle = <0x27f>; }; i2s@fddf0000 { @@ -4506,7 +4506,7 @@ rockchip,playback-only; #sound-dai-cells = <0x0>; status = "okay"; - phandle = <0x1c9>; + phandle = <0x1d2>; }; i2s@fddfc000 { @@ -4525,7 +4525,7 @@ rockchip,capture-only; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x278>; + phandle = <0x280>; }; spdif-rx@fde08000 { @@ -4543,7 +4543,7 @@ reset-names = "spdifrx-m"; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x279>; + phandle = <0x281>; }; dsi@fde20000 { @@ -4561,7 +4561,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "disabled"; - phandle = <0x27a>; + phandle = <0x282>; ports { #address-cells = <0x1>; @@ -4571,7 +4571,7 @@ reg = <0x0>; #address-cells = <0x1>; #size-cells = <0x0>; - phandle = <0x27b>; + phandle = <0x283>; endpoint@0 { reg = <0x0>; @@ -4583,7 +4583,7 @@ endpoint@1 { reg = <0x1>; remote-endpoint = <0x34>; - status = "okay"; + status = "disabled"; phandle = <0xe2>; }; }; @@ -4614,11 +4614,11 @@ dsi,lanes = <0x4>; panel-init-sequence = <0x390004ff 0x98810315 0x20100 0x15000202 0x150002 0x3531500 0x204d315 0x20500 0x15000206 0xd150002 0x7081500 0x2080015 0x20900 0x1500020a 0x150002 0xb001500 0x20c0015 0x20d00 0x1500020e 0x150002 0xf281500 0x2102815 0x21100 0x15000212 0x150002 0x13001500 0x2140015 0x21500 0x15000216 0x150002 0x17001500 0x2180015 0x21900 0x1500021a 0x150002 0x1b001500 0x21c0015 0x21d00 0x1500021e 0x40150002 0x1f801500 0x2200615 0x22101 0x15000222 0x150002 0x23001500 0x2240015 0x22500 0x15000226 0x150002 0x27001500 0x2283315 0x22933 0x1500022a 
0x150002 0x2b001500 0x22c0015 0x22d00 0x1500022e 0x150002 0x2f001500 0x2300015 0x23100 0x15000232 0x150002 0x33001500 0x2340315 0x23500 0x15000236 0x150002 0x37001500 0x2389615 0x23900 0x1500023a 0x150002 0x3b001500 0x23c0015 0x23d00 0x1500023e 0x150002 0x3f001500 0x2400015 0x24100 0x15000242 0x150002 0x43001500 0x2440015 0x25000 0x15000251 0x23150002 0x52451500 0x2536715 0x25489 0x15000255 0xab150002 0x56011500 0x2572315 0x25845 0x15000259 0x67150002 0x5a891500 0x25bab15 0x25ccd 0x1500025d 0xef150002 0x5e001500 0x25f0815 0x26008 0x15000261 0x6150002 0x62061500 0x2630115 0x26401 0x15000265 0x150002 0x66001500 0x2670215 0x26815 0x15000269 0x15150002 0x6a141500 0x26b1415 0x26c0d 0x1500026d 0xd150002 0x6e0c1500 0x26f0c15 0x2700f 0x15000271 0xf150002 0x720e1500 0x2730e15 0x27402 0x15000275 0x8150002 0x76081500 0x2770615 0x27806 0x15000279 0x1150002 0x7a011500 0x27b0015 0x27c00 0x1500027d 0x2150002 0x7e151500 0x27f1515 0x28014 0x15000281 0x14150002 0x820d1500 0x2830d15 0x2840c 0x15000285 0xc150002 0x860f1500 0x2870f15 0x2880e 0x15000289 0xe150002 0x8a023900 0x4ff9881 0x4150002 0xc53a1500 0x26e2b15 0x26f37 0x1500023a 0x24150002 0x8d1a1500 0x287ba15 0x2b2d1 0x15000288 0xb150002 0x38011500 0x2390015 0x2b502 0x15000231 0x25150002 0x3b983900 0x4ff9881 0x1150002 0x220a1500 0x2310015 0x2533d 0x15000255 0x3d150002 0x50851500 0x2518015 0x26006 0x15000262 0x20150002 0xa0001500 0x2a12115 0x2a235 0x150002a3 0x19150002 0xa41e1500 0x2a53315 0x2a627 0x150002a7 0x26150002 0xa8af1500 0x2a91b15 0x2aa27 0x150002ab 0x8d150002 0xac1a1500 0x2ad1b15 0x2ae50 0x150002af 0x26150002 0xb02b1500 0x2b15415 0x2b25e 0x150002b3 0x23150002 0xc0001500 0x2c12115 0x2c235 0x150002c3 0x19150002 0xc41e1500 0x2c53315 0x2c627 0x150002c7 0x26150002 0xc8af1500 0x2c91b15 0x2ca27 0x150002cb 0x8d150002 0xcc1a1500 0x2cd1b15 0x2ce50 0x150002cf 0x26150002 0xd02b1500 0x2d15415 0x2d25e 0x150002d3 0x23390004 0xff988100 0x15780111 0x15050129>; panel-exit-sequence = <0x5000128 0x5000110>; - phandle = <0x27c>; + phandle = <0x284>; display-timings { native-mode = <0xea>; - phandle = <0x27d>; + phandle = <0x285>; timing0 { clock-frequency = <0x41cdb40>; @@ -4669,7 +4669,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "disabled"; - phandle = <0x27e>; + phandle = <0x286>; ports { #address-cells = <0x1>; @@ -4679,7 +4679,7 @@ reg = <0x0>; #address-cells = <0x1>; #size-cells = <0x0>; - phandle = <0x27f>; + phandle = <0x287>; endpoint@0 { reg = <0x0>; @@ -4722,11 +4722,11 @@ panel-init-sequence = [23 00 02 fe 21 23 00 02 04 00 23 00 02 00 64 23 00 02 2a 00 23 00 02 26 64 23 00 02 54 00 23 00 02 50 64 23 00 02 7b 00 23 00 02 77 64 23 00 02 a2 00 23 00 02 9d 64 23 00 02 c9 00 23 00 02 c5 64 23 00 02 01 71 23 00 02 27 71 23 00 02 51 71 23 00 02 78 71 23 00 02 9e 71 23 00 02 c6 71 23 00 02 02 89 23 00 02 28 89 23 00 02 52 89 23 00 02 79 89 23 00 02 9f 89 23 00 02 c7 89 23 00 02 03 9e 23 00 02 29 9e 23 00 02 53 9e 23 00 02 7a 9e 23 00 02 a0 9e 23 00 02 c8 9e 23 00 02 09 00 23 00 02 05 b0 23 00 02 31 00 23 00 02 2b b0 23 00 02 5a 00 23 00 02 55 b0 23 00 02 80 00 23 00 02 7c b0 23 00 02 a7 00 23 00 02 a3 b0 23 00 02 ce 00 23 00 02 ca b0 23 00 02 06 c0 23 00 02 2d c0 23 00 02 56 c0 23 00 02 7d c0 23 00 02 a4 c0 23 00 02 cb c0 23 00 02 07 cf 23 00 02 2f cf 23 00 02 58 cf 23 00 02 7e cf 23 00 02 a5 cf 23 00 02 cc cf 23 00 02 08 dd 23 00 02 30 dd 23 00 02 59 dd 23 00 02 7f dd 23 00 02 a6 dd 23 00 02 cd dd 23 00 02 0e 15 23 00 02 0a e9 23 00 02 36 15 23 00 02 32 e9 23 00 02 5f 15 23 00 02 5b e9 23 00 02 85 15 23 00 02 81 e9 23 00 02 ad 15 23 00 02 a9 e9 23 
00 02 d3 15 23 00 02 cf e9 23 00 02 0b 14 23 00 02 33 14 23 00 02 5c 14 23 00 02 82 14 23 00 02 aa 14 23 00 02 d0 14 23 00 02 0c 36 23 00 02 34 36 23 00 02 5d 36 23 00 02 83 36 23 00 02 ab 36 23 00 02 d1 36 23 00 02 0d 6b 23 00 02 35 6b 23 00 02 5e 6b 23 00 02 84 6b 23 00 02 ac 6b 23 00 02 d2 6b 23 00 02 13 5a 23 00 02 0f 94 23 00 02 3b 5a 23 00 02 37 94 23 00 02 64 5a 23 00 02 60 94 23 00 02 8a 5a 23 00 02 86 94 23 00 02 b2 5a 23 00 02 ae 94 23 00 02 d8 5a 23 00 02 d4 94 23 00 02 10 d1 23 00 02 38 d1 23 00 02 61 d1 23 00 02 87 d1 23 00 02 af d1 23 00 02 d5 d1 23 00 02 11 04 23 00 02 39 04 23 00 02 62 04 23 00 02 88 04 23 00 02 b0 04 23 00 02 d6 04 23 00 02 12 05 23 00 02 3a 05 23 00 02 63 05 23 00 02 89 05 23 00 02 b1 05 23 00 02 d7 05 23 00 02 18 aa 23 00 02 14 36 23 00 02 42 aa 23 00 02 3d 36 23 00 02 69 aa 23 00 02 65 36 23 00 02 8f aa 23 00 02 8b 36 23 00 02 b7 aa 23 00 02 b3 36 23 00 02 dd aa 23 00 02 d9 36 23 00 02 15 74 23 00 02 3f 74 23 00 02 66 74 23 00 02 8c 74 23 00 02 b4 74 23 00 02 da 74 23 00 02 16 9f 23 00 02 40 9f 23 00 02 67 9f 23 00 02 8d 9f 23 00 02 b5 9f 23 00 02 db 9f 23 00 02 17 dc 23 00 02 41 dc 23 00 02 68 dc 23 00 02 8e dc 23 00 02 b6 dc 23 00 02 dc dc 23 00 02 1d ff 23 00 02 19 03 23 00 02 47 ff 23 00 02 43 03 23 00 02 6e ff 23 00 02 6a 03 23 00 02 94 ff 23 00 02 90 03 23 00 02 bc ff 23 00 02 b8 03 23 00 02 e2 ff 23 00 02 de 03 23 00 02 1a 35 23 00 02 44 35 23 00 02 6b 35 23 00 02 91 35 23 00 02 b9 35 23 00 02 df 35 23 00 02 1b 45 23 00 02 45 45 23 00 02 6c 45 23 00 02 92 45 23 00 02 ba 45 23 00 02 e0 45 23 00 02 1c 55 23 00 02 46 55 23 00 02 6d 55 23 00 02 93 55 23 00 02 bb 55 23 00 02 e1 55 23 00 02 22 ff 23 00 02 1e 68 23 00 02 4c ff 23 00 02 48 68 23 00 02 73 ff 23 00 02 6f 68 23 00 02 99 ff 23 00 02 95 68 23 00 02 c1 ff 23 00 02 bd 68 23 00 02 e7 ff 23 00 02 e3 68 23 00 02 1f 7e 23 00 02 49 7e 23 00 02 70 7e 23 00 02 96 7e 23 00 02 be 7e 23 00 02 e4 7e 23 00 02 20 97 23 00 02 4a 97 23 00 02 71 97 23 00 02 97 97 23 00 02 bf 97 23 00 02 e5 97 23 00 02 21 b5 23 00 02 4b b5 23 00 02 72 b5 23 00 02 98 b5 23 00 02 c0 b5 23 00 02 e6 b5 23 00 02 25 f0 23 00 02 23 e8 23 00 02 4f f0 23 00 02 4d e8 23 00 02 76 f0 23 00 02 74 e8 23 00 02 9c f0 23 00 02 9a e8 23 00 02 c4 f0 23 00 02 c2 e8 23 00 02 ea f0 23 00 02 e8 e8 23 00 02 24 ff 23 00 02 4e ff 23 00 02 75 ff 23 00 02 9b ff 23 00 02 c3 ff 23 00 02 e9 ff 23 00 02 fe 3d 23 00 02 00 04 23 00 02 fe 23 23 00 02 08 82 23 00 02 0a 00 23 00 02 0b 00 23 00 02 0c 01 23 00 02 16 00 23 00 02 18 02 23 00 02 1b 04 23 00 02 19 04 23 00 02 1c 81 23 00 02 1f 00 23 00 02 20 03 23 00 02 23 04 23 00 02 21 01 23 00 02 54 63 23 00 02 55 54 23 00 02 6e 45 23 00 02 6d 36 23 00 02 fe 3d 23 00 02 55 78 23 00 02 fe 20 23 00 02 26 30 23 00 02 fe 3d 23 00 02 20 71 23 00 02 50 8f 23 00 02 51 8f 23 00 02 fe 00 23 00 02 35 00 05 78 01 11 05 00 01 29]; panel-exit-sequence = <0x5000128 0x5000110>; power-supply = <0xef>; - phandle = <0x280>; + phandle = <0x288>; display-timings { native-mode = <0xf0>; - phandle = <0x281>; + phandle = <0x289>; timing0 { clock-frequency = <0x7de2900>; @@ -4773,7 +4773,7 @@ power-domains = <0x57 0x19>; rockchip,vo-grf = <0xf2>; status = "disabled"; - phandle = <0x282>; + phandle = <0x28a>; }; dp@fde50000 { @@ -4789,7 +4789,7 @@ power-domains = <0x57 0x19>; #sound-dai-cells = <0x1>; status = "disabled"; - phandle = <0x1ce>; + phandle = <0x1d7>; ports { #address-cells = <0x1>; @@ -4826,7 +4826,7 @@ reg = <0x1>; endpoint { - phandle = <0x283>; + phandle = <0x28b>; }; }; }; @@ -4843,7 +4843,7 @@ power-domains = <0x57 0x1a>; 
rockchip,vo-grf = <0xcc>; status = "disabled"; - phandle = <0x284>; + phandle = <0x28c>; }; hdmi@fde80000 { @@ -4865,7 +4865,7 @@ #sound-dai-cells = <0x0>; status = "okay"; enable-gpios = <0xfc 0x9 0x0>; - phandle = <0x1ca>; + phandle = <0x1d3>; ports { #address-cells = <0x1>; @@ -4875,7 +4875,7 @@ reg = <0x0>; #address-cells = <0x1>; #size-cells = <0x0>; - phandle = <0x285>; + phandle = <0x28d>; endpoint@0 { reg = <0x0>; @@ -4914,7 +4914,7 @@ power-domains = <0x57 0x1a>; rockchip,grf = <0xcc>; status = "disabled"; - phandle = <0x286>; + phandle = <0x28e>; ports { #address-cells = <0x1>; @@ -4951,7 +4951,7 @@ reg = <0x1>; endpoint { - phandle = <0x287>; + phandle = <0x28f>; }; }; }; @@ -5182,7 +5182,7 @@ qos@fdf67200 { compatible = "syscon"; reg = <0x0 0xfdf67200 0x0 0x20>; - phandle = <0x288>; + phandle = <0x290>; }; qos@fdf70000 { @@ -5284,7 +5284,7 @@ status = "okay"; reset-gpios = <0xfc 0x2 0x0>; vpcie3v3-supply = <0x105>; - phandle = <0x289>; + phandle = <0x291>; legacy-interrupt-controller { interrupt-controller; @@ -5324,10 +5324,10 @@ resets = <0x2 0x211 0x2 0x220>; reset-names = "pcie", "periph"; rockchip,pipe-grf = <0x6c>; - status = "disabled"; - reset-gpios = <0x108 0x18 0x0>; + status = "okay"; + reset-gpios = <0x108 0x19 0x0>; vpcie3v3-supply = <0x105>; - phandle = <0x28a>; + phandle = <0x292>; legacy-interrupt-controller { interrupt-controller; @@ -5344,7 +5344,7 @@ reg = <0x0 0xfe1c0000 0x0 0x10000>; rockchip,ethernet = <0x109>; status = "disabled"; - phandle = <0x28b>; + phandle = <0x293>; }; ethernet@fe1c0000 { @@ -5380,7 +5380,7 @@ compatible = "snps,dwmac-mdio"; #address-cells = <0x1>; #size-cells = <0x0>; - phandle = <0x28c>; + phandle = <0x294>; phy@1 { compatible = "ethernet-phy-ieee802.3-c22"; @@ -5423,8 +5423,8 @@ phys = <0x107 0x1>; phy-names = "sata-phy"; ports-implemented = <0x1>; - status = "okay"; - phandle = <0x28d>; + status = "disabled"; + phandle = <0x295>; }; sata@fe230000 { @@ -5438,7 +5438,7 @@ phy-names = "sata-phy"; ports-implemented = <0x1>; status = "okay"; - phandle = <0x28e>; + phandle = <0x296>; }; spi@fe2b0000 { @@ -5452,7 +5452,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "disabled"; - phandle = <0x28f>; + phandle = <0x297>; }; mmc@fe2c0000 { @@ -5476,7 +5476,7 @@ sd-uhs-sdr104; vqmmc-supply = <0x117>; vmmc-supply = <0x118>; - phandle = <0x290>; + phandle = <0x298>; }; mmc@fe2d0000 { @@ -5490,7 +5490,7 @@ pinctrl-names = "default"; pinctrl-0 = <0x119>; power-domains = <0x57 0x25>; - status = "okay"; + status = "disabled"; no-sd; no-mmc; bus-width = <0x4>; @@ -5501,7 +5501,7 @@ mmc-pwrseq = <0x11a>; non-removable; sd-uhs-sdr104; - phandle = <0x291>; + phandle = <0x299>; }; mmc@fe2e0000 { @@ -5523,7 +5523,7 @@ mmc-hs400-1_8v; mmc-hs400-enhanced-strobe; full-pwr-cycle-in-suspend; - phandle = <0x292>; + phandle = <0x29a>; }; crypto@fe370000 { @@ -5535,7 +5535,7 @@ resets = <0x11b 0xf>; reset-names = "crypto-rst"; status = "disabled"; - phandle = <0x293>; + phandle = <0x29b>; }; rng@fe378000 { @@ -5547,7 +5547,7 @@ resets = <0x11b 0x30>; reset-names = "reset"; status = "okay"; - phandle = <0x294>; + phandle = <0x29c>; }; i2s@fe470000 { @@ -5570,7 +5570,7 @@ pinctrl-2 = <0x11c 0x11d>; #sound-dai-cells = <0x0>; status = "okay"; - phandle = <0x1d7>; + phandle = <0x1e0>; }; i2s@fe480000 { @@ -5588,7 +5588,7 @@ pinctrl-0 = <0x121 0x122 0x123 0x124 0x125 0x126 0x127 0x128 0x129 0x12a>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x295>; + phandle = <0x29d>; }; i2s@fe490000 { @@ -5610,7 +5610,7 @@ #sound-dai-cells = <0x0>; 
status = "disabled"; rockchip,bclk-fs = <0x20>; - phandle = <0x1c7>; + phandle = <0x1d0>; }; i2s@fe4a0000 { @@ -5631,7 +5631,7 @@ pinctrl-2 = <0x135 0x136>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x296>; + phandle = <0x29e>; }; pdm@fe4b0000 { @@ -5647,7 +5647,7 @@ pinctrl-2 = <0x13c 0x13d>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x297>; + phandle = <0x29f>; }; pdm@fe4c0000 { @@ -5666,7 +5666,7 @@ pinctrl-2 = <0x143 0x144>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x298>; + phandle = <0x2a0>; }; vad@fe4d0000 { @@ -5681,7 +5681,7 @@ rockchip,mode = <0x0>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x299>; + phandle = <0x2a1>; }; spdif-tx@fe4e0000 { @@ -5699,7 +5699,7 @@ pinctrl-0 = <0x145>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x1d1>; + phandle = <0x1da>; }; spdif-tx@fe4f0000 { @@ -5717,7 +5717,7 @@ pinctrl-0 = <0x146>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x1d3>; + phandle = <0x1dc>; }; codec-digital@fe500000 { @@ -5734,14 +5734,14 @@ pinctrl-0 = <0x147>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x29a>; + phandle = <0x2a2>; }; hwspinlock@fe5a0000 { compatible = "rockchip,hwspinlock"; reg = <0x0 0xfe5a0000 0x0 0x100>; #hwlock-cells = <0x1>; - phandle = <0x29b>; + phandle = <0x2a3>; }; interrupt-controller@fe600000 { @@ -5807,7 +5807,7 @@ tx-fifo-depth = <0x1>; rx-fifo-depth = <0x6>; status = "okay"; - phandle = <0x29c>; + phandle = <0x2a4>; }; can@fea60000 { @@ -5823,7 +5823,7 @@ tx-fifo-depth = <0x1>; rx-fifo-depth = <0x6>; status = "okay"; - phandle = <0x29d>; + phandle = <0x2a5>; }; can@fea70000 { @@ -5839,7 +5839,7 @@ tx-fifo-depth = <0x1>; rx-fifo-depth = <0x6>; status = "disabled"; - phandle = <0x29e>; + phandle = <0x2a6>; }; decompress@fea80000 { @@ -5851,7 +5851,7 @@ resets = <0x2 0x118>; reset-names = "dresetn"; status = "disabled"; - phandle = <0x29f>; + phandle = <0x2a7>; }; i2c@fea90000 { @@ -5865,7 +5865,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "okay"; - phandle = <0x2a0>; + phandle = <0x2a8>; rk8602@42 { compatible = "rockchip,rk8602"; @@ -5898,7 +5898,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "disabled"; - phandle = <0x2a1>; + phandle = <0x2a9>; }; i2c@feab0000 { @@ -5912,7 +5912,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "okay"; - phandle = <0x2a2>; + phandle = <0x2aa>; es8316@10 { compatible = "everest,es8316"; @@ -5922,7 +5922,7 @@ pinctrl-names = "default"; pinctrl-0 = <0x14f>; #sound-dai-cells = <0x0>; - phandle = <0x1d8>; + phandle = <0x1e1>; }; }; @@ -5937,7 +5937,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "disabled"; - phandle = <0x2a3>; + phandle = <0x2ab>; light@47 { compatible = "ls_stk3332"; @@ -5949,7 +5949,7 @@ als_threshold_low = <0xa>; als_ctrl_gain = <0x2>; poll_delay_ms = <0x64>; - phandle = <0x2a4>; + phandle = <0x2ac>; }; proximity@47 { @@ -5962,7 +5962,7 @@ ps_ctrl_gain = <0x3>; ps_led_current = <0x4>; poll_delay_ms = <0x64>; - phandle = <0x2a5>; + phandle = <0x2ad>; }; icm_acc@68 { @@ -5974,7 +5974,7 @@ poll_delay_ms = <0x1e>; type = <0x2>; layout = <0x0>; - phandle = <0x2a6>; + phandle = <0x2ae>; }; icm_gyro@68 { @@ -5984,7 +5984,7 @@ poll_delay_ms = <0x1e>; type = <0x4>; layout = <0x0>; - phandle = <0x2a7>; + phandle = <0x2af>; }; }; @@ -5999,7 +5999,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "disabled"; - phandle = <0x2a8>; + phandle = <0x2b0>; gt1x@14 { compatible = "goodix,gt1x"; @@ -6009,7 +6009,7 @@ goodix,rst-gpio = <0x108 
0x11 0x0>; goodix,irq-gpio = <0x108 0x10 0x8>; power-supply = <0xef>; - phandle = <0x2a9>; + phandle = <0x2b1>; }; }; @@ -6019,7 +6019,7 @@ interrupts = <0x0 0x121 0x4>; clocks = <0x2 0x5c 0x2 0x5f>; clock-names = "pclk", "timer"; - phandle = <0x2aa>; + phandle = <0x2b2>; }; watchdog@feaf0000 { @@ -6029,7 +6029,7 @@ clock-names = "tclk", "pclk"; interrupts = <0x0 0x13b 0x4>; status = "disabled"; - phandle = <0x2ab>; + phandle = <0x2b3>; }; spi@feb00000 { @@ -6046,7 +6046,7 @@ pinctrl-0 = <0x153 0x154 0x155>; num-cs = <0x2>; status = "disabled"; - phandle = <0x2ac>; + phandle = <0x2b4>; }; spi@feb10000 { @@ -6063,7 +6063,7 @@ pinctrl-0 = <0x156 0x157 0x158>; num-cs = <0x2>; status = "disabled"; - phandle = <0x2ad>; + phandle = <0x2b5>; }; spi@feb20000 { @@ -6082,7 +6082,7 @@ status = "okay"; assigned-clocks = <0x2 0xa5>; assigned-clock-rates = <0xbebc200>; - phandle = <0x2ae>; + phandle = <0x2b6>; rk806single@0 { compatible = "rockchip,rk806"; @@ -6113,7 +6113,7 @@ vcc13-supply = <0x162>; vcc14-supply = <0x162>; vcca-supply = <0x6e>; - phandle = <0x2af>; + phandle = <0x2b7>; pwrkey { status = "okay"; @@ -6122,7 +6122,7 @@ pinctrl_rk806 { gpio-controller; #gpio-cells = <0x2>; - phandle = <0x2b0>; + phandle = <0x2b8>; rk806_dvs1_null { pins = "gpio_pwrctrl2"; @@ -6133,7 +6133,7 @@ rk806_dvs1_slp { pins = "gpio_pwrctrl1"; function = "pin_fun1"; - phandle = <0x2b1>; + phandle = <0x2b9>; }; rk806_dvs1_pwrdn { @@ -6145,7 +6145,7 @@ rk806_dvs1_rst { pins = "gpio_pwrctrl1"; function = "pin_fun3"; - phandle = <0x2b2>; + phandle = <0x2ba>; }; rk806_dvs2_null { @@ -6157,31 +6157,31 @@ rk806_dvs2_slp { pins = "gpio_pwrctrl2"; function = "pin_fun1"; - phandle = <0x2b3>; + phandle = <0x2bb>; }; rk806_dvs2_pwrdn { pins = "gpio_pwrctrl2"; function = "pin_fun2"; - phandle = <0x2b4>; + phandle = <0x2bc>; }; rk806_dvs2_rst { pins = "gpio_pwrctrl2"; function = "pin_fun3"; - phandle = <0x2b5>; + phandle = <0x2bd>; }; rk806_dvs2_dvs { pins = "gpio_pwrctrl2"; function = "pin_fun4"; - phandle = <0x2b6>; + phandle = <0x2be>; }; rk806_dvs2_gpio { pins = "gpio_pwrctrl2"; function = "pin_fun5"; - phandle = <0x2b7>; + phandle = <0x2bf>; }; rk806_dvs3_null { @@ -6193,31 +6193,31 @@ rk806_dvs3_slp { pins = "gpio_pwrctrl3"; function = "pin_fun1"; - phandle = <0x2b8>; + phandle = <0x2c0>; }; rk806_dvs3_pwrdn { pins = "gpio_pwrctrl3"; function = "pin_fun2"; - phandle = <0x2b9>; + phandle = <0x2c1>; }; rk806_dvs3_rst { pins = "gpio_pwrctrl3"; function = "pin_fun3"; - phandle = <0x2ba>; + phandle = <0x2c2>; }; rk806_dvs3_dvs { pins = "gpio_pwrctrl3"; function = "pin_fun4"; - phandle = <0x2bb>; + phandle = <0x2c3>; }; rk806_dvs3_gpio { pins = "gpio_pwrctrl3"; function = "pin_fun5"; - phandle = <0x2bc>; + phandle = <0x2c4>; }; }; @@ -6300,7 +6300,7 @@ regulator-always-on; regulator-boot-on; regulator-name = "vdd2_ddr_s3"; - phandle = <0x2bd>; + phandle = <0x2c5>; regulator-state-mem { regulator-on-in-suspend; @@ -6327,7 +6327,7 @@ regulator-min-microvolt = <0x325aa0>; regulator-max-microvolt = <0x325aa0>; regulator-name = "vcc_3v3_s3"; - phandle = <0x2be>; + phandle = <0x2c6>; regulator-state-mem { regulator-on-in-suspend; @@ -6339,7 +6339,7 @@ regulator-always-on; regulator-boot-on; regulator-name = "vddq_ddr_s0"; - phandle = <0x2bf>; + phandle = <0x2c7>; regulator-state-mem { regulator-off-in-suspend; @@ -6352,7 +6352,7 @@ regulator-min-microvolt = <0x1b7740>; regulator-max-microvolt = <0x1b7740>; regulator-name = "vcc_1v8_s3"; - phandle = <0x2c0>; + phandle = <0x2c8>; regulator-state-mem { regulator-on-in-suspend; @@ -6366,7 
+6366,7 @@ regulator-min-microvolt = <0x1b7740>; regulator-max-microvolt = <0x1b7740>; regulator-name = "avcc_1v8_s0"; - phandle = <0x1dd>; + phandle = <0x1e6>; regulator-state-mem { regulator-off-in-suspend; @@ -6393,7 +6393,7 @@ regulator-min-microvolt = <0x124f80>; regulator-max-microvolt = <0x124f80>; regulator-name = "avdd_1v2_s0"; - phandle = <0x2c1>; + phandle = <0x2c9>; regulator-state-mem { regulator-off-in-suspend; @@ -6406,7 +6406,7 @@ regulator-min-microvolt = <0x325aa0>; regulator-max-microvolt = <0x325aa0>; regulator-name = "vcc_3v3_s0"; - phandle = <0x2c2>; + phandle = <0x2ca>; regulator-state-mem { regulator-off-in-suspend; @@ -6432,7 +6432,7 @@ regulator-min-microvolt = <0x1b7740>; regulator-max-microvolt = <0x1b7740>; regulator-name = "pldo6_s3"; - phandle = <0x2c3>; + phandle = <0x2cb>; regulator-state-mem { regulator-on-in-suspend; @@ -6446,7 +6446,7 @@ regulator-min-microvolt = <0xb71b0>; regulator-max-microvolt = <0xb71b0>; regulator-name = "vdd_0v75_s3"; - phandle = <0x2c4>; + phandle = <0x2cc>; regulator-state-mem { regulator-on-in-suspend; @@ -6460,7 +6460,7 @@ regulator-min-microvolt = <0xcf850>; regulator-max-microvolt = <0xcf850>; regulator-name = "vdd_ddr_pll_s0"; - phandle = <0x2c5>; + phandle = <0x2cd>; regulator-state-mem { regulator-off-in-suspend; @@ -6474,7 +6474,7 @@ regulator-min-microvolt = <0xcc77c>; regulator-max-microvolt = <0xcc77c>; regulator-name = "avdd_0v75_s0"; - phandle = <0x1de>; + phandle = <0x1e7>; regulator-state-mem { regulator-off-in-suspend; @@ -6487,7 +6487,7 @@ regulator-min-microvolt = <0xcf850>; regulator-max-microvolt = <0xcf850>; regulator-name = "vdd_0v85_s0"; - phandle = <0x1dc>; + phandle = <0x1e5>; regulator-state-mem { regulator-off-in-suspend; @@ -6500,7 +6500,7 @@ regulator-min-microvolt = <0xb71b0>; regulator-max-microvolt = <0xb71b0>; regulator-name = "vdd_0v75_s0"; - phandle = <0x2c6>; + phandle = <0x2ce>; regulator-state-mem { regulator-off-in-suspend; @@ -6524,7 +6524,7 @@ pinctrl-0 = <0x163 0x164 0x165>; num-cs = <0x2>; status = "disabled"; - phandle = <0x2c7>; + phandle = <0x2cf>; }; serial@feb40000 { @@ -6538,8 +6538,8 @@ dmas = <0x6f 0x8 0x6f 0x9>; pinctrl-names = "default"; pinctrl-0 = <0x166>; - status = "okay"; - phandle = <0x2c8>; + status = "disabled"; + phandle = <0x2d0>; }; serial@feb50000 { @@ -6554,7 +6554,7 @@ pinctrl-names = "default"; pinctrl-0 = <0x167>; status = "disabled"; - phandle = <0x2c9>; + phandle = <0x2d1>; }; serial@feb60000 { @@ -6569,7 +6569,7 @@ pinctrl-names = "default"; pinctrl-0 = <0x168>; status = "disabled"; - phandle = <0x2ca>; + phandle = <0x2d2>; }; serial@feb70000 { @@ -6583,8 +6583,8 @@ dmas = <0xe5 0x9 0xe5 0xa>; pinctrl-names = "default"; pinctrl-0 = <0x169>; - status = "okay"; - phandle = <0x2cb>; + status = "disabled"; + phandle = <0x2d3>; }; serial@feb80000 { @@ -6599,7 +6599,7 @@ pinctrl-names = "default"; pinctrl-0 = <0x16a>; status = "okay"; - phandle = <0x2cc>; + phandle = <0x2d4>; }; serial@feb90000 { @@ -6614,7 +6614,7 @@ pinctrl-names = "default"; pinctrl-0 = <0x16b>; status = "okay"; - phandle = <0x2cd>; + phandle = <0x2d5>; }; serial@feba0000 { @@ -6628,8 +6628,8 @@ dmas = <0xe6 0x7 0xe6 0x8>; pinctrl-names = "default"; pinctrl-0 = <0x16c>; - status = "okay"; - phandle = <0x2ce>; + status = "disabled"; + phandle = <0x2d6>; }; serial@febb0000 { @@ -6644,7 +6644,7 @@ pinctrl-names = "default"; pinctrl-0 = <0x16d>; status = "disabled"; - phandle = <0x2cf>; + phandle = <0x2d7>; }; serial@febc0000 { @@ -6658,8 +6658,8 @@ dmas = <0xe6 0xb 0xe6 0xc>; pinctrl-names = 
"default"; pinctrl-0 = <0x16e 0x16f>; - status = "okay"; - phandle = <0x2d0>; + status = "disabled"; + phandle = <0x2d8>; }; pwm@febd0000 { @@ -6672,7 +6672,7 @@ clocks = <0x2 0x54 0x2 0x53>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2d1>; + phandle = <0x2d9>; }; pwm@febd0010 { @@ -6685,7 +6685,7 @@ clocks = <0x2 0x54 0x2 0x53>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2d2>; + phandle = <0x2da>; }; pwm@febd0020 { @@ -6698,7 +6698,7 @@ clocks = <0x2 0x54 0x2 0x53>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2d3>; + phandle = <0x2db>; }; pwm@febd0030 { @@ -6711,7 +6711,7 @@ clocks = <0x2 0x54 0x2 0x53>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2d4>; + phandle = <0x2dc>; }; pwm@febe0000 { @@ -6724,7 +6724,7 @@ clocks = <0x2 0x57 0x2 0x56>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2d5>; + phandle = <0x2dd>; }; pwm@febe0010 { @@ -6737,7 +6737,7 @@ clocks = <0x2 0x57 0x2 0x56>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2d6>; + phandle = <0x2de>; }; pwm@febe0020 { @@ -6750,7 +6750,7 @@ clocks = <0x2 0x57 0x2 0x56>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2d7>; + phandle = <0x2df>; }; pwm@febe0030 { @@ -6763,7 +6763,7 @@ clocks = <0x2 0x57 0x2 0x56>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x1c6>; + phandle = <0x1cf>; }; pwm@febf0000 { @@ -6776,7 +6776,7 @@ clocks = <0x2 0x5a 0x2 0x59>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2d8>; + phandle = <0x2e0>; }; pwm@febf0010 { @@ -6789,7 +6789,7 @@ clocks = <0x2 0x5a 0x2 0x59>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2d9>; + phandle = <0x2e1>; }; pwm@febf0020 { @@ -6802,7 +6802,7 @@ clocks = <0x2 0x5a 0x2 0x59>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2da>; + phandle = <0x2e2>; }; pwm@febf0030 { @@ -6815,7 +6815,7 @@ clocks = <0x2 0x5a 0x2 0x59>; clock-names = "pwm", "pclk"; status = "disabled"; - phandle = <0x2db>; + phandle = <0x2e3>; }; tsadc@fec00000 { @@ -6850,7 +6850,7 @@ reset-names = "saradc-apb"; status = "okay"; vref-supply = <0x17e>; - phandle = <0x1c4>; + phandle = <0x1cd>; }; mailbox@fec60000 { @@ -6861,7 +6861,7 @@ clock-names = "pclk_mailbox"; #mbox-cells = <0x1>; status = "disabled"; - phandle = <0x2dc>; + phandle = <0x2e4>; }; mailbox@fec70000 { @@ -6872,7 +6872,7 @@ clock-names = "pclk_mailbox"; #mbox-cells = <0x1>; status = "disabled"; - phandle = <0x2dd>; + phandle = <0x2e5>; }; i2c@fec80000 { @@ -6886,7 +6886,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "okay"; - phandle = <0x2de>; + phandle = <0x2e6>; nkmcu@15 { compatible = "nk_mcu"; @@ -6904,7 +6904,7 @@ interrupt-parent = <0x15b>; interrupts = <0x8 0x8>; wakeup-source; - phandle = <0x1df>; + phandle = <0x1e8>; }; eeprom@50 { @@ -6929,7 +6929,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "disabled"; - phandle = <0x2df>; + phandle = <0x2e7>; }; i2c@feca0000 { @@ -6943,7 +6943,7 @@ #address-cells = <0x1>; #size-cells = <0x0>; status = "disabled"; - phandle = <0x2e0>; + phandle = <0x2e8>; }; spi@fecb0000 { @@ -6960,7 +6960,7 @@ pinctrl-0 = <0x183 0x184 0x185>; num-cs = <0x2>; status = "disabled"; - phandle = <0x2e1>; + phandle = <0x2e9>; }; otp@fecc0000 { @@ -6972,7 +6972,7 @@ clock-names = "otpc", "apb", "arb", "phy"; resets = <0x2 0x12a 0x2 0x129 0x2 0x12b>; reset-names = "otpc", "apb", "arb"; - phandle = <0x2e2>; + phandle = <0x2ea>; cpu-code@2 { reg = <0x2 0x2>; @@ -7075,7 +7075,7 @@ 
vop-opp-info@61 { reg = <0x61 0x6>; - phandle = <0x2e3>; + phandle = <0x2eb>; }; venc-opp-info@67 { @@ -7092,7 +7092,7 @@ clock-names = "pclk_mailbox"; #mbox-cells = <0x1>; status = "disabled"; - phandle = <0x2e4>; + phandle = <0x2ec>; }; dma-controller@fed10000 { @@ -7150,7 +7150,7 @@ resets = <0x2 0x28 0x2 0x29 0x2 0x2a 0x2 0x2b 0x2 0x482>; reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb"; status = "okay"; - phandle = <0x2e5>; + phandle = <0x2ed>; dp-port { #phy-cells = <0x0>; @@ -7256,7 +7256,7 @@ #address-cells = <0x1>; #size-cells = <0x1>; ranges = <0x0 0x0 0xff001000 0xef000>; - phandle = <0x2e6>; + phandle = <0x2ee>; rkvdec-sram@0 { reg = <0x0 0x78000>; @@ -7313,7 +7313,7 @@ gpio-ranges = <0x191 0x0 0x40 0x20>; interrupt-controller; #interrupt-cells = <0x2>; - phandle = <0x1e8>; + phandle = <0x1be>; }; gpio@fec40000 { @@ -7360,13 +7360,13 @@ pcfg-pull-none-drv-level-0 { bias-disable; drive-strength = <0x0>; - phandle = <0x2e7>; + phandle = <0x2ef>; }; pcfg-pull-none-drv-level-1 { bias-disable; drive-strength = <0x1>; - phandle = <0x2e8>; + phandle = <0x2f0>; }; pcfg-pull-none-drv-level-2 { @@ -7378,31 +7378,31 @@ pcfg-pull-none-drv-level-3 { bias-disable; drive-strength = <0x3>; - phandle = <0x2e9>; + phandle = <0x2f1>; }; pcfg-pull-none-drv-level-4 { bias-disable; drive-strength = <0x4>; - phandle = <0x2ea>; + phandle = <0x2f2>; }; pcfg-pull-none-drv-level-5 { bias-disable; drive-strength = <0x5>; - phandle = <0x2eb>; + phandle = <0x2f3>; }; pcfg-pull-none-drv-level-6 { bias-disable; drive-strength = <0x6>; - phandle = <0x2ec>; + phandle = <0x2f4>; }; pcfg-pull-up-drv-level-0 { bias-pull-up; drive-strength = <0x0>; - phandle = <0x2ed>; + phandle = <0x2f5>; }; pcfg-pull-up-drv-level-1 { @@ -7420,19 +7420,19 @@ pcfg-pull-up-drv-level-3 { bias-pull-up; drive-strength = <0x3>; - phandle = <0x2ee>; + phandle = <0x2f6>; }; pcfg-pull-up-drv-level-4 { bias-pull-up; drive-strength = <0x4>; - phandle = <0x2ef>; + phandle = <0x2f7>; }; pcfg-pull-up-drv-level-5 { bias-pull-up; drive-strength = <0x5>; - phandle = <0x2f0>; + phandle = <0x2f8>; }; pcfg-pull-up-drv-level-6 { @@ -7444,55 +7444,55 @@ pcfg-pull-down-drv-level-0 { bias-pull-down; drive-strength = <0x0>; - phandle = <0x2f1>; + phandle = <0x2f9>; }; pcfg-pull-down-drv-level-1 { bias-pull-down; drive-strength = <0x1>; - phandle = <0x2f2>; + phandle = <0x2fa>; }; pcfg-pull-down-drv-level-2 { bias-pull-down; drive-strength = <0x2>; - phandle = <0x2f3>; + phandle = <0x2fb>; }; pcfg-pull-down-drv-level-3 { bias-pull-down; drive-strength = <0x3>; - phandle = <0x2f4>; + phandle = <0x2fc>; }; pcfg-pull-down-drv-level-4 { bias-pull-down; drive-strength = <0x4>; - phandle = <0x2f5>; + phandle = <0x2fd>; }; pcfg-pull-down-drv-level-5 { bias-pull-down; drive-strength = <0x5>; - phandle = <0x2f6>; + phandle = <0x2fe>; }; pcfg-pull-down-drv-level-6 { bias-pull-down; drive-strength = <0x6>; - phandle = <0x2f7>; + phandle = <0x2ff>; }; pcfg-pull-up-smt { bias-pull-up; input-schmitt-enable; - phandle = <0x2f8>; + phandle = <0x300>; }; pcfg-pull-down-smt { bias-pull-down; input-schmitt-enable; - phandle = <0x2f9>; + phandle = <0x301>; }; pcfg-pull-none-smt { @@ -7505,7 +7505,7 @@ bias-disable; drive-strength = <0x0>; input-schmitt-enable; - phandle = <0x2fa>; + phandle = <0x302>; }; pcfg-pull-none-drv-level-1-smt { @@ -7519,21 +7519,21 @@ bias-disable; drive-strength = <0x2>; input-schmitt-enable; - phandle = <0x2fb>; + phandle = <0x303>; }; pcfg-pull-none-drv-level-3-smt { bias-disable; drive-strength = <0x3>; input-schmitt-enable; - phandle = 
<0x2fc>; + phandle = <0x304>; }; pcfg-pull-none-drv-level-4-smt { bias-disable; drive-strength = <0x4>; input-schmitt-enable; - phandle = <0x2fd>; + phandle = <0x305>; }; pcfg-pull-none-drv-level-5-smt { @@ -7547,53 +7547,53 @@ bias-disable; drive-strength = <0x6>; input-schmitt-enable; - phandle = <0x2fe>; + phandle = <0x306>; }; pcfg-output-high { output-high; - phandle = <0x2ff>; + phandle = <0x307>; }; pcfg-output-high-pull-up { output-high; bias-pull-up; - phandle = <0x300>; + phandle = <0x308>; }; pcfg-output-high-pull-down { output-high; bias-pull-down; - phandle = <0x301>; + phandle = <0x309>; }; pcfg-output-high-pull-none { output-high; bias-disable; - phandle = <0x302>; + phandle = <0x30a>; }; pcfg-output-low { output-low; - phandle = <0x303>; + phandle = <0x30b>; }; pcfg-output-low-pull-up { output-low; bias-pull-up; - phandle = <0x304>; + phandle = <0x30c>; }; pcfg-output-low-pull-down { output-low; bias-pull-down; - phandle = <0x305>; + phandle = <0x30d>; }; pcfg-output-low-pull-none { output-low; bias-disable; - phandle = <0x306>; + phandle = <0x30e>; }; auddsm { @@ -7621,7 +7621,7 @@ can0m1-pins { rockchip,pins = <0x4 0x1d 0x9 0x192 0x4 0x1c 0x9 0x192>; - phandle = <0x307>; + phandle = <0x30f>; }; }; @@ -7629,7 +7629,7 @@ can1m0-pins { rockchip,pins = <0x3 0xd 0x9 0x192 0x3 0xe 0x9 0x192>; - phandle = <0x308>; + phandle = <0x310>; }; can1m1-pins { @@ -7647,7 +7647,7 @@ can2m1-pins { rockchip,pins = <0x0 0x1c 0xa 0x192 0x0 0x1d 0xa 0x192>; - phandle = <0x309>; + phandle = <0x311>; }; }; @@ -7655,22 +7655,22 @@ cif-clk { rockchip,pins = <0x4 0xc 0x1 0x192>; - phandle = <0x30a>; + phandle = <0x312>; }; cif-dvp-clk { rockchip,pins = <0x4 0x8 0x1 0x192 0x4 0xa 0x1 0x192 0x4 0xb 0x1 0x192>; - phandle = <0x30b>; + phandle = <0x313>; }; cif-dvp-bus16 { rockchip,pins = <0x3 0x14 0x1 0x192 0x3 0x15 0x1 0x192 0x3 0x16 0x1 0x192 0x3 0x17 0x1 0x192 0x3 0x18 0x1 0x192 0x3 0x19 0x1 0x192 0x3 0x1a 0x1 0x192 0x3 0x1b 0x1 0x192>; - phandle = <0x30c>; + phandle = <0x314>; }; cif-dvp-bus8 { rockchip,pins = <0x4 0x0 0x1 0x192 0x4 0x1 0x1 0x192 0x4 0x2 0x1 0x192 0x4 0x3 0x1 0x192 0x4 0x4 0x1 0x192 0x4 0x5 0x1 0x192 0x4 0x6 0x1 0x192 0x4 0x7 0x1 0x192>; - phandle = <0x30d>; + phandle = <0x315>; }; }; @@ -7678,17 +7678,17 @@ clk32k-in { rockchip,pins = <0x0 0xa 0x1 0x192>; - phandle = <0x30e>; + phandle = <0x316>; }; clk32k-out0 { rockchip,pins = <0x0 0xa 0x2 0x192>; - phandle = <0x30f>; + phandle = <0x317>; }; clk32k-out1 { rockchip,pins = <0x2 0x15 0x1 0x192>; - phandle = <0x310>; + phandle = <0x318>; }; }; @@ -7696,7 +7696,7 @@ cpu-pins { rockchip,pins = <0x0 0x19 0x2 0x192 0x0 0x1d 0x2 0x192>; - phandle = <0x311>; + phandle = <0x319>; }; }; @@ -7704,7 +7704,7 @@ ddrphych0-pins { rockchip,pins = <0x4 0x0 0x7 0x192 0x4 0x1 0x7 0x192 0x4 0x2 0x7 0x192 0x4 0x3 0x7 0x192>; - phandle = <0x312>; + phandle = <0x31a>; }; }; @@ -7712,7 +7712,7 @@ ddrphych1-pins { rockchip,pins = <0x4 0x4 0x7 0x192 0x4 0x5 0x7 0x192 0x4 0x6 0x7 0x192 0x4 0x7 0x7 0x192>; - phandle = <0x313>; + phandle = <0x31b>; }; }; @@ -7720,7 +7720,7 @@ ddrphych2-pins { rockchip,pins = <0x4 0x8 0x7 0x192 0x4 0x9 0x7 0x192 0x4 0xa 0x7 0x192 0x4 0xb 0x7 0x192>; - phandle = <0x314>; + phandle = <0x31c>; }; }; @@ -7728,7 +7728,7 @@ ddrphych3-pins { rockchip,pins = <0x4 0xc 0x7 0x192 0x4 0xd 0x7 0x192 0x4 0xe 0x7 0x192 0x4 0xf 0x7 0x192>; - phandle = <0x315>; + phandle = <0x31d>; }; }; @@ -7736,17 +7736,17 @@ dp0m0-pins { rockchip,pins = <0x4 0xc 0x5 0x192>; - phandle = <0x316>; + phandle = <0x31e>; }; dp0m1-pins { rockchip,pins = <0x0 0x14 
0xa 0x192>; - phandle = <0x317>; + phandle = <0x31f>; }; dp0m2-pins { rockchip,pins = <0x1 0x0 0x5 0x192>; - phandle = <0x318>; + phandle = <0x320>; }; }; @@ -7759,12 +7759,12 @@ dp1m1-pins { rockchip,pins = <0x0 0x15 0xa 0x192>; - phandle = <0x319>; + phandle = <0x321>; }; dp1m2-pins { rockchip,pins = <0x1 0x1 0x5 0x192>; - phandle = <0x31a>; + phandle = <0x322>; }; }; @@ -7772,27 +7772,27 @@ emmc-rstnout { rockchip,pins = <0x2 0x3 0x1 0x192>; - phandle = <0x31b>; + phandle = <0x323>; }; emmc-bus8 { rockchip,pins = <0x2 0x18 0x1 0x193 0x2 0x19 0x1 0x193 0x2 0x1a 0x1 0x193 0x2 0x1b 0x1 0x193 0x2 0x1c 0x1 0x193 0x2 0x1d 0x1 0x193 0x2 0x1e 0x1 0x193 0x2 0x1f 0x1 0x193>; - phandle = <0x31c>; + phandle = <0x324>; }; emmc-clk { rockchip,pins = <0x2 0x1 0x1 0x193>; - phandle = <0x31d>; + phandle = <0x325>; }; emmc-cmd { rockchip,pins = <0x2 0x0 0x1 0x193>; - phandle = <0x31e>; + phandle = <0x326>; }; emmc-data-strobe { rockchip,pins = <0x2 0x2 0x1 0x192>; - phandle = <0x31f>; + phandle = <0x327>; }; }; @@ -7800,7 +7800,7 @@ eth1-pins { rockchip,pins = <0x3 0x6 0x1 0x192>; - phandle = <0x320>; + phandle = <0x328>; }; }; @@ -7808,32 +7808,32 @@ fspim0-pins { rockchip,pins = <0x2 0x0 0x2 0x193 0x2 0x1e 0x2 0x193 0x2 0x18 0x2 0x193 0x2 0x19 0x2 0x193 0x2 0x1a 0x2 0x193 0x2 0x1b 0x2 0x193>; - phandle = <0x321>; + phandle = <0x329>; }; fspim0-cs1 { rockchip,pins = <0x2 0x1f 0x2 0x193>; - phandle = <0x322>; + phandle = <0x32a>; }; fspim2-pins { rockchip,pins = <0x3 0x5 0x5 0x193 0x3 0x14 0x2 0x193 0x3 0x0 0x5 0x193 0x3 0x1 0x5 0x193 0x3 0x2 0x5 0x193 0x3 0x3 0x5 0x193>; - phandle = <0x323>; + phandle = <0x32b>; }; fspim2-cs1 { rockchip,pins = <0x3 0x15 0x2 0x193>; - phandle = <0x324>; + phandle = <0x32c>; }; fspim1-pins { rockchip,pins = <0x2 0xb 0x3 0x193 0x2 0xc 0x3 0x193 0x2 0x6 0x3 0x193 0x2 0x7 0x3 0x193 0x2 0x8 0x3 0x193 0x2 0x9 0x3 0x193>; - phandle = <0x325>; + phandle = <0x32d>; }; fspim1-cs1 { rockchip,pins = <0x2 0xd 0x3 0x193>; - phandle = <0x326>; + phandle = <0x32e>; }; }; @@ -7846,7 +7846,7 @@ gmac1-clkinout { rockchip,pins = <0x3 0xe 0x1 0x192>; - phandle = <0x327>; + phandle = <0x32f>; }; gmac1-rx-bus2 { @@ -7871,22 +7871,22 @@ gmac1-ppsclk { rockchip,pins = <0x3 0x11 0x1 0x192>; - phandle = <0x328>; + phandle = <0x330>; }; gmac1-ppstrig { rockchip,pins = <0x3 0x10 0x1 0x192>; - phandle = <0x329>; + phandle = <0x331>; }; gmac1-ptp-ref-clk { rockchip,pins = <0x3 0xf 0x1 0x192>; - phandle = <0x32a>; + phandle = <0x332>; }; gmac1-txer { rockchip,pins = <0x3 0xa 0x1 0x192>; - phandle = <0x32b>; + phandle = <0x333>; }; }; @@ -7894,7 +7894,7 @@ gpu-pins { rockchip,pins = <0x0 0x15 0x2 0x192>; - phandle = <0x32c>; + phandle = <0x334>; }; }; @@ -7902,22 +7902,22 @@ hdmim0-rx-cec { rockchip,pins = <0x4 0xd 0x5 0x192>; - phandle = <0x32d>; + phandle = <0x335>; }; hdmim0-rx-hpdin { rockchip,pins = <0x4 0xe 0x5 0x192>; - phandle = <0x32e>; + phandle = <0x336>; }; hdmim0-rx-scl { rockchip,pins = <0x0 0x1a 0xb 0x192>; - phandle = <0x32f>; + phandle = <0x337>; }; hdmim0-rx-sda { rockchip,pins = <0x0 0x19 0xb 0x192>; - phandle = <0x330>; + phandle = <0x338>; }; hdmim0-tx0-cec { @@ -7952,52 +7952,52 @@ hdmim1-rx-cec { rockchip,pins = <0x3 0x19 0x5 0x192>; - phandle = <0x331>; + phandle = <0x339>; }; hdmim1-rx-hpdin { rockchip,pins = <0x3 0x1c 0x5 0x192>; - phandle = <0x332>; + phandle = <0x33a>; }; hdmim1-rx-scl { rockchip,pins = <0x3 0x1a 0x5 0x196>; - phandle = <0x333>; + phandle = <0x33b>; }; hdmim1-rx-sda { rockchip,pins = <0x3 0x1b 0x5 0x196>; - phandle = <0x334>; + phandle = <0x33c>; }; 
hdmim1-tx0-cec { rockchip,pins = <0x0 0x19 0xd 0x192>; - phandle = <0x335>; + phandle = <0x33d>; }; hdmim1-tx0-hpd { rockchip,pins = <0x3 0x1c 0x3 0x192>; - phandle = <0x336>; + phandle = <0x33e>; }; hdmim1-tx0-scl { rockchip,pins = <0x0 0x1d 0xb 0x194>; - phandle = <0x337>; + phandle = <0x33f>; }; hdmim1-tx0-sda { rockchip,pins = <0x0 0x1c 0xb 0x195>; - phandle = <0x338>; + phandle = <0x340>; }; hdmim1-tx1-cec { rockchip,pins = <0x0 0x1a 0xd 0x192>; - phandle = <0x339>; + phandle = <0x341>; }; hdmim1-tx1-hpd { rockchip,pins = <0x3 0xf 0x5 0x192>; - phandle = <0x33a>; + phandle = <0x342>; }; hdmim1-tx1-scl { @@ -8012,32 +8012,32 @@ hdmim2-rx-cec { rockchip,pins = <0x1 0xf 0x5 0x192>; - phandle = <0x33b>; + phandle = <0x343>; }; hdmim2-rx-hpdin { rockchip,pins = <0x1 0xe 0x5 0x192>; - phandle = <0x33c>; + phandle = <0x344>; }; hdmim2-rx-scl { rockchip,pins = <0x1 0x1e 0x5 0x192>; - phandle = <0x33d>; + phandle = <0x345>; }; hdmim2-rx-sda { rockchip,pins = <0x1 0x1f 0x5 0x192>; - phandle = <0x33e>; + phandle = <0x346>; }; hdmim2-tx0-scl { rockchip,pins = <0x3 0x17 0x5 0x194>; - phandle = <0x33f>; + phandle = <0x347>; }; hdmim2-tx0-sda { rockchip,pins = <0x3 0x18 0x5 0x195>; - phandle = <0x340>; + phandle = <0x348>; }; hdmim2-tx1-cec { @@ -8047,62 +8047,62 @@ hdmim2-tx1-scl { rockchip,pins = <0x1 0x4 0x5 0x194>; - phandle = <0x341>; + phandle = <0x349>; }; hdmim2-tx1-sda { rockchip,pins = <0x1 0x3 0x5 0x195>; - phandle = <0x342>; + phandle = <0x34a>; }; hdmi-debug0 { rockchip,pins = <0x1 0x7 0x7 0x192>; - phandle = <0x343>; + phandle = <0x34b>; }; hdmi-debug1 { rockchip,pins = <0x1 0x8 0x7 0x192>; - phandle = <0x344>; + phandle = <0x34c>; }; hdmi-debug2 { rockchip,pins = <0x1 0x9 0x7 0x192>; - phandle = <0x345>; + phandle = <0x34d>; }; hdmi-debug3 { rockchip,pins = <0x1 0xa 0x7 0x192>; - phandle = <0x346>; + phandle = <0x34e>; }; hdmi-debug4 { rockchip,pins = <0x1 0xb 0x7 0x192>; - phandle = <0x347>; + phandle = <0x34f>; }; hdmi-debug5 { rockchip,pins = <0x1 0xc 0x7 0x192>; - phandle = <0x348>; + phandle = <0x350>; }; hdmi-debug6 { rockchip,pins = <0x1 0x0 0x7 0x192>; - phandle = <0x349>; + phandle = <0x351>; }; hdmim0-tx1-cec { rockchip,pins = <0x2 0x14 0x4 0x192>; - phandle = <0x34a>; + phandle = <0x352>; }; hdmim0-tx1-scl { rockchip,pins = <0x2 0xd 0x4 0x192>; - phandle = <0x34b>; + phandle = <0x353>; }; hdmim0-tx1-sda { rockchip,pins = <0x2 0xc 0x4 0x192>; - phandle = <0x34c>; + phandle = <0x354>; }; hdmirx-det { @@ -8115,7 +8115,7 @@ i2c0m0-xfer { rockchip,pins = <0x0 0xb 0x2 0x196 0x0 0x6 0x2 0x196>; - phandle = <0x34d>; + phandle = <0x355>; }; i2c0m2-xfer { @@ -8125,7 +8125,7 @@ i2c0m1-xfer { rockchip,pins = <0x4 0x15 0x9 0x196 0x4 0x16 0x9 0x196>; - phandle = <0x34e>; + phandle = <0x356>; }; }; @@ -8133,12 +8133,12 @@ i2c1m0-xfer { rockchip,pins = <0x0 0xd 0x9 0x196 0x0 0xe 0x9 0x196>; - phandle = <0x34f>; + phandle = <0x357>; }; i2c1m1-xfer { rockchip,pins = <0x0 0x8 0x2 0x196 0x0 0x9 0x2 0x196>; - phandle = <0x350>; + phandle = <0x358>; }; i2c1m2-xfer { @@ -8148,12 +8148,12 @@ i2c1m3-xfer { rockchip,pins = <0x2 0x1c 0x9 0x196 0x2 0x1d 0x9 0x196>; - phandle = <0x351>; + phandle = <0x359>; }; i2c1m4-xfer { rockchip,pins = <0x1 0x1a 0x9 0x196 0x1 0x1b 0x9 0x196>; - phandle = <0x352>; + phandle = <0x35a>; }; }; @@ -8166,22 +8166,22 @@ i2c2m2-xfer { rockchip,pins = <0x2 0x3 0x9 0x196 0x2 0x2 0x9 0x196>; - phandle = <0x353>; + phandle = <0x35b>; }; i2c2m3-xfer { rockchip,pins = <0x1 0x15 0x9 0x196 0x1 0x14 0x9 0x196>; - phandle = <0x354>; + phandle = <0x35c>; }; i2c2m4-xfer { 
rockchip,pins = <0x1 0x1 0x9 0x196 0x1 0x0 0x9 0x196>; - phandle = <0x355>; + phandle = <0x35d>; }; i2c2m1-xfer { rockchip,pins = <0x2 0x11 0x9 0x196 0x2 0x10 0x9 0x196>; - phandle = <0x356>; + phandle = <0x35e>; }; }; @@ -8194,22 +8194,22 @@ i2c3m1-xfer { rockchip,pins = <0x3 0xf 0x9 0x196 0x3 0x10 0x9 0x196>; - phandle = <0x357>; + phandle = <0x35f>; }; i2c3m2-xfer { rockchip,pins = <0x4 0x4 0x9 0x196 0x4 0x5 0x9 0x196>; - phandle = <0x358>; + phandle = <0x360>; }; i2c3m4-xfer { rockchip,pins = <0x4 0x18 0x9 0x196 0x4 0x19 0x9 0x196>; - phandle = <0x359>; + phandle = <0x361>; }; i2c3m3-xfer { rockchip,pins = <0x2 0xa 0x9 0x196 0x2 0xb 0x9 0x196>; - phandle = <0x35a>; + phandle = <0x362>; }; }; @@ -8217,22 +8217,22 @@ i2c4m0-xfer { rockchip,pins = <0x3 0x6 0x9 0x196 0x3 0x5 0x9 0x196>; - phandle = <0x35b>; + phandle = <0x363>; }; i2c4m2-xfer { rockchip,pins = <0x0 0x15 0x9 0x196 0x0 0x14 0x9 0x196>; - phandle = <0x35c>; + phandle = <0x364>; }; i2c4m3-xfer { rockchip,pins = <0x1 0x3 0x9 0x196 0x1 0x2 0x9 0x196>; - phandle = <0x35d>; + phandle = <0x365>; }; i2c4m4-xfer { rockchip,pins = <0x1 0x17 0x9 0x196 0x1 0x16 0x9 0x196>; - phandle = <0x35e>; + phandle = <0x366>; }; i2c4m1-xfer { @@ -8250,22 +8250,22 @@ i2c5m1-xfer { rockchip,pins = <0x4 0xe 0x9 0x196 0x4 0xf 0x9 0x196>; - phandle = <0x35f>; + phandle = <0x367>; }; i2c5m2-xfer { rockchip,pins = <0x4 0x6 0x9 0x196 0x4 0x7 0x9 0x196>; - phandle = <0x360>; + phandle = <0x368>; }; i2c5m3-xfer { rockchip,pins = <0x1 0xe 0x9 0x196 0x1 0xf 0x9 0x196>; - phandle = <0x361>; + phandle = <0x369>; }; i2c5m4-xfer { rockchip,pins = <0x2 0xe 0x9 0x196 0x2 0xf 0x9 0x196>; - phandle = <0x362>; + phandle = <0x36a>; }; }; @@ -8278,22 +8278,22 @@ i2c6m1-xfer { rockchip,pins = <0x1 0x13 0x9 0x196 0x1 0x12 0x9 0x196>; - phandle = <0x363>; + phandle = <0x36b>; }; i2c6m3-xfer { rockchip,pins = <0x4 0x9 0x9 0x196 0x4 0x8 0x9 0x196>; - phandle = <0x364>; + phandle = <0x36c>; }; i2c6m4-xfer { rockchip,pins = <0x3 0x1 0x9 0x196 0x3 0x0 0x9 0x196>; - phandle = <0x365>; + phandle = <0x36d>; }; i2c6m2-xfer { rockchip,pins = <0x2 0x13 0x9 0x196 0x2 0x12 0x9 0x196>; - phandle = <0x366>; + phandle = <0x36e>; }; }; @@ -8306,17 +8306,17 @@ i2c7m2-xfer { rockchip,pins = <0x3 0x1a 0x9 0x196 0x3 0x1b 0x9 0x196>; - phandle = <0x367>; + phandle = <0x36f>; }; i2c7m3-xfer { rockchip,pins = <0x4 0xa 0x9 0x196 0x4 0xb 0x9 0x196>; - phandle = <0x368>; + phandle = <0x370>; }; i2c7m1-xfer { rockchip,pins = <0x4 0x13 0x9 0x196 0x4 0x14 0x9 0x196>; - phandle = <0x369>; + phandle = <0x371>; }; }; @@ -8329,22 +8329,22 @@ i2c8m2-xfer { rockchip,pins = <0x1 0x1e 0x9 0x196 0x1 0x1f 0x9 0x196>; - phandle = <0x36a>; + phandle = <0x372>; }; i2c8m3-xfer { rockchip,pins = <0x4 0x10 0x9 0x196 0x4 0x11 0x9 0x196>; - phandle = <0x36b>; + phandle = <0x373>; }; i2c8m4-xfer { rockchip,pins = <0x3 0x12 0x9 0x196 0x3 0x13 0x9 0x196>; - phandle = <0x36c>; + phandle = <0x374>; }; i2c8m1-xfer { rockchip,pins = <0x2 0x8 0x9 0x196 0x2 0x9 0x9 0x196>; - phandle = <0x36d>; + phandle = <0x375>; }; }; @@ -8377,17 +8377,17 @@ i2s0-sdi1 { rockchip,pins = <0x1 0x1b 0x2 0x192>; - phandle = <0x36e>; + phandle = <0x376>; }; i2s0-sdi2 { rockchip,pins = <0x1 0x1a 0x2 0x192>; - phandle = <0x36f>; + phandle = <0x377>; }; i2s0-sdi3 { rockchip,pins = <0x1 0x19 0x2 0x192>; - phandle = <0x370>; + phandle = <0x378>; }; i2s0-sdo0 { @@ -8397,17 +8397,17 @@ i2s0-sdo1 { rockchip,pins = <0x1 0x18 0x1 0x192>; - phandle = <0x371>; + phandle = <0x379>; }; i2s0-sdo2 { rockchip,pins = <0x1 0x19 0x1 0x192>; - phandle = <0x372>; + 
phandle = <0x37a>; }; i2s0-sdo3 { rockchip,pins = <0x1 0x1a 0x1 0x192>; - phandle = <0x373>; + phandle = <0x37b>; }; }; @@ -8420,7 +8420,7 @@ i2s1m0-mclk { rockchip,pins = <0x4 0x0 0x3 0x196>; - phandle = <0x374>; + phandle = <0x37c>; }; i2s1m0-sclk { @@ -8470,57 +8470,57 @@ i2s1m1-lrck { rockchip,pins = <0x0 0xf 0x1 0x196>; - phandle = <0x375>; + phandle = <0x37d>; }; i2s1m1-mclk { rockchip,pins = <0x0 0xd 0x1 0x196>; - phandle = <0x376>; + phandle = <0x37e>; }; i2s1m1-sclk { rockchip,pins = <0x0 0xe 0x1 0x196>; - phandle = <0x377>; + phandle = <0x37f>; }; i2s1m1-sdi0 { rockchip,pins = <0x0 0x15 0x1 0x192>; - phandle = <0x378>; + phandle = <0x380>; }; i2s1m1-sdi1 { rockchip,pins = <0x0 0x16 0x1 0x192>; - phandle = <0x379>; + phandle = <0x381>; }; i2s1m1-sdi2 { rockchip,pins = <0x0 0x17 0x1 0x192>; - phandle = <0x37a>; + phandle = <0x382>; }; i2s1m1-sdi3 { rockchip,pins = <0x0 0x18 0x1 0x192>; - phandle = <0x37b>; + phandle = <0x383>; }; i2s1m1-sdo0 { rockchip,pins = <0x0 0x19 0x1 0x192>; - phandle = <0x37c>; + phandle = <0x384>; }; i2s1m1-sdo1 { rockchip,pins = <0x0 0x1a 0x1 0x192>; - phandle = <0x37d>; + phandle = <0x385>; }; i2s1m1-sdo2 { rockchip,pins = <0x0 0x1c 0x1 0x192>; - phandle = <0x37e>; + phandle = <0x386>; }; i2s1m1-sdo3 { rockchip,pins = <0x0 0x1d 0x1 0x192>; - phandle = <0x37f>; + phandle = <0x387>; }; }; @@ -8538,7 +8538,7 @@ i2s2m1-mclk { rockchip,pins = <0x3 0xc 0x3 0x196>; - phandle = <0x380>; + phandle = <0x388>; }; i2s2m1-sclk { @@ -8548,17 +8548,17 @@ i2s2m1-sdi { rockchip,pins = <0x3 0xa 0x3 0x192>; - phandle = <0x381>; + phandle = <0x389>; }; i2s2m1-sdo { rockchip,pins = <0x3 0xb 0x3 0x192>; - phandle = <0x382>; + phandle = <0x38a>; }; i2s2m0-idle { rockchip,pins = <0x2 0x10 0x0 0x192 0x2 0xf 0x0 0x192>; - phandle = <0x383>; + phandle = <0x38b>; }; i2s2m0-lrck { @@ -8568,7 +8568,7 @@ i2s2m0-mclk { rockchip,pins = <0x2 0xe 0x2 0x196>; - phandle = <0x384>; + phandle = <0x38c>; }; i2s2m0-sclk { @@ -8601,7 +8601,7 @@ i2s3-mclk { rockchip,pins = <0x3 0x0 0x3 0x196>; - phandle = <0x385>; + phandle = <0x38d>; }; i2s3-sclk { @@ -8624,17 +8624,17 @@ jtagm0-pins { rockchip,pins = <0x4 0x1a 0x5 0x192 0x4 0x1b 0x5 0x192>; - phandle = <0x386>; + phandle = <0x38e>; }; jtagm1-pins { rockchip,pins = <0x4 0x18 0x5 0x192 0x4 0x19 0x5 0x192>; - phandle = <0x387>; + phandle = <0x38f>; }; jtagm2-pins { rockchip,pins = <0x0 0xd 0x2 0x192 0x0 0xe 0x2 0x192>; - phandle = <0x388>; + phandle = <0x390>; }; }; @@ -8642,7 +8642,7 @@ litcpu-pins { rockchip,pins = <0x0 0x1b 0x1 0x192>; - phandle = <0x389>; + phandle = <0x391>; }; }; @@ -8650,12 +8650,12 @@ mcum0-pins { rockchip,pins = <0x4 0x1c 0x5 0x192 0x4 0x1d 0x5 0x192>; - phandle = <0x38a>; + phandle = <0x392>; }; mcum1-pins { rockchip,pins = <0x3 0x1c 0x6 0x192 0x3 0x1d 0x6 0x192>; - phandle = <0x38b>; + phandle = <0x393>; }; }; @@ -8663,62 +8663,62 @@ mipim0-camera0-clk { rockchip,pins = <0x4 0x9 0x1 0x192>; - phandle = <0x38c>; + phandle = <0x394>; }; mipim0-camera1-clk { rockchip,pins = <0x1 0xe 0x2 0x192>; - phandle = <0x38d>; + phandle = <0x395>; }; mipim0-camera2-clk { rockchip,pins = <0x1 0xf 0x2 0x192>; - phandle = <0x38e>; + phandle = <0x396>; }; mipim0-camera3-clk { rockchip,pins = <0x1 0x1e 0x2 0x192>; - phandle = <0x38f>; + phandle = <0x397>; }; mipim0-camera4-clk { rockchip,pins = <0x1 0x1f 0x2 0x192>; - phandle = <0x390>; + phandle = <0x398>; }; mipim1-camera0-clk { rockchip,pins = <0x3 0x5 0x4 0x192>; - phandle = <0x391>; + phandle = <0x399>; }; mipim1-camera1-clk { rockchip,pins = <0x3 0x6 0x4 0x192>; - phandle = <0x392>; + 
phandle = <0x39a>; }; mipim1-camera2-clk { rockchip,pins = <0x3 0x7 0x4 0x192>; - phandle = <0x393>; + phandle = <0x39b>; }; mipim1-camera3-clk { rockchip,pins = <0x3 0x8 0x4 0x192>; - phandle = <0x394>; + phandle = <0x39c>; }; mipim1-camera4-clk { rockchip,pins = <0x3 0x9 0x4 0x192>; - phandle = <0x395>; + phandle = <0x39d>; }; mipi-te0 { rockchip,pins = <0x3 0x12 0x2 0x192>; - phandle = <0x396>; + phandle = <0x39e>; }; mipi-te1 { rockchip,pins = <0x3 0x13 0x2 0x192>; - phandle = <0x397>; + phandle = <0x39f>; }; }; @@ -8726,7 +8726,7 @@ npu-pins { rockchip,pins = <0x0 0x16 0x2 0x192>; - phandle = <0x398>; + phandle = <0x3a0>; }; }; @@ -8734,17 +8734,17 @@ pcie20x1m0-pins { rockchip,pins = <0x3 0x17 0x4 0x192 0x3 0x19 0x4 0x192 0x3 0x18 0x4 0x192>; - phandle = <0x399>; + phandle = <0x3a1>; }; pcie20x1m1-pins { rockchip,pins = <0x4 0xf 0x4 0x192 0x4 0x11 0x4 0x192 0x4 0x10 0x4 0x192>; - phandle = <0x39a>; + phandle = <0x3a2>; }; pcie20x1-2-button-rstn { rockchip,pins = <0x4 0xb 0x4 0x192>; - phandle = <0x39b>; + phandle = <0x3a3>; }; }; @@ -8752,7 +8752,7 @@ pcie30phy-pins { rockchip,pins = <0x1 0x14 0x4 0x192 0x1 0x19 0x4 0x192>; - phandle = <0x39c>; + phandle = <0x3a4>; }; }; @@ -8760,27 +8760,27 @@ pcie30x1m0-pins { rockchip,pins = <0x0 0x10 0xc 0x192 0x0 0x15 0xc 0x192 0x0 0x14 0xc 0x192 0x0 0xd 0xc 0x192 0x0 0xf 0xc 0x192 0x0 0xe 0xc 0x192>; - phandle = <0x39d>; + phandle = <0x3a5>; }; pcie30x1m1-pins { rockchip,pins = <0x4 0x3 0x4 0x192 0x4 0x5 0x4 0x192 0x4 0x4 0x4 0x192 0x4 0x0 0x4 0x192 0x4 0x2 0x4 0x192 0x4 0x1 0x4 0x192>; - phandle = <0x39e>; + phandle = <0x3a6>; }; pcie30x1m2-pins { rockchip,pins = <0x1 0xd 0x4 0x192 0x1 0xc 0x4 0x192 0x1 0xb 0x4 0x192 0x1 0x0 0x4 0x192 0x1 0x7 0x4 0x192 0x1 0x1 0x4 0x192>; - phandle = <0x39f>; + phandle = <0x3a7>; }; pcie30x1-0-button-rstn { rockchip,pins = <0x4 0x9 0x4 0x192>; - phandle = <0x3a0>; + phandle = <0x3a8>; }; pcie30x1-1-button-rstn { rockchip,pins = <0x4 0xa 0x4 0x192>; - phandle = <0x3a1>; + phandle = <0x3a9>; }; }; @@ -8788,27 +8788,27 @@ pcie30x2m0-pins { rockchip,pins = <0x0 0x19 0xc 0x192 0x0 0x1c 0xc 0x192 0x0 0x1a 0xc 0x192>; - phandle = <0x3a2>; + phandle = <0x3aa>; }; pcie30x2m1-pins { rockchip,pins = <0x4 0x6 0x4 0x192 0x4 0x8 0x4 0x192 0x4 0x7 0x4 0x192>; - phandle = <0x3a3>; + phandle = <0x3ab>; }; pcie30x2m2-pins { rockchip,pins = <0x3 0x1a 0x4 0x192 0x3 0x1c 0x4 0x192 0x3 0x1b 0x4 0x192>; - phandle = <0x3a4>; + phandle = <0x3ac>; }; pcie30x2m3-pins { rockchip,pins = <0x1 0x1f 0x4 0x192 0x1 0xf 0x4 0x192 0x1 0xe 0x4 0x192>; - phandle = <0x3a5>; + phandle = <0x3ad>; }; pcie30x2-button-rstn { rockchip,pins = <0x3 0x11 0x4 0x192>; - phandle = <0x3a6>; + phandle = <0x3ae>; }; }; @@ -8816,27 +8816,27 @@ pcie30x4m0-pins { rockchip,pins = <0x0 0x16 0xc 0x192 0x0 0x18 0xc 0x192 0x0 0x17 0xc 0x192>; - phandle = <0x3a7>; + phandle = <0x3af>; }; pcie30x4m1-pins { rockchip,pins = <0x4 0xc 0x4 0x192 0x4 0xe 0x4 0x192 0x4 0xd 0x4 0x192>; - phandle = <0x3a8>; + phandle = <0x3b0>; }; pcie30x4m2-pins { rockchip,pins = <0x3 0x14 0x4 0x192 0x3 0x16 0x4 0x192 0x3 0x15 0x4 0x192>; - phandle = <0x3a9>; + phandle = <0x3b1>; }; pcie30x4m3-pins { rockchip,pins = <0x1 0x8 0x4 0x192 0x1 0xa 0x4 0x192 0x1 0x9 0x4 0x192>; - phandle = <0x3aa>; + phandle = <0x3b2>; }; pcie30x4-button-rstn { rockchip,pins = <0x3 0x1d 0x4 0x192>; - phandle = <0x3ab>; + phandle = <0x3b3>; }; pcie30x4-clkreqn-m1 { @@ -8884,37 +8884,37 @@ pdm0m1-clk { rockchip,pins = <0x0 0x10 0x2 0x192>; - phandle = <0x3ac>; + phandle = <0x3b4>; }; pdm0m1-clk1 { rockchip,pins = <0x0 
0x14 0x2 0x192>; - phandle = <0x3ad>; + phandle = <0x3b5>; }; pdm0m1-idle { rockchip,pins = <0x0 0x10 0x0 0x192 0x0 0x14 0x0 0x192>; - phandle = <0x3ae>; + phandle = <0x3b6>; }; pdm0m1-sdi0 { rockchip,pins = <0x0 0x17 0x2 0x192>; - phandle = <0x3af>; + phandle = <0x3b7>; }; pdm0m1-sdi1 { rockchip,pins = <0x0 0x18 0x2 0x192>; - phandle = <0x3b0>; + phandle = <0x3b8>; }; pdm0m1-sdi2 { rockchip,pins = <0x0 0x1c 0x2 0x192>; - phandle = <0x3b1>; + phandle = <0x3b9>; }; pdm0m1-sdi3 { rockchip,pins = <0x0 0x1e 0x2 0x192>; - phandle = <0x3b2>; + phandle = <0x3ba>; }; }; @@ -8957,37 +8957,37 @@ pdm1m1-clk { rockchip,pins = <0x1 0xc 0x2 0x192>; - phandle = <0x3b3>; + phandle = <0x3bb>; }; pdm1m1-clk1 { rockchip,pins = <0x1 0xb 0x2 0x192>; - phandle = <0x3b4>; + phandle = <0x3bc>; }; pdm1m1-idle { rockchip,pins = <0x1 0xc 0x0 0x192 0x1 0xb 0x0 0x192>; - phandle = <0x3b5>; + phandle = <0x3bd>; }; pdm1m1-sdi0 { rockchip,pins = <0x1 0x7 0x2 0x192>; - phandle = <0x3b6>; + phandle = <0x3be>; }; pdm1m1-sdi1 { rockchip,pins = <0x1 0x8 0x2 0x192>; - phandle = <0x3b7>; + phandle = <0x3bf>; }; pdm1m1-sdi2 { rockchip,pins = <0x1 0x9 0x2 0x192>; - phandle = <0x3b8>; + phandle = <0x3c0>; }; pdm1m1-sdi3 { rockchip,pins = <0x1 0xa 0x2 0x192>; - phandle = <0x3b9>; + phandle = <0x3c1>; }; }; @@ -9003,7 +9003,7 @@ pmu-pins { rockchip,pins = <0x0 0x5 0x3 0x192>; - phandle = <0x3ba>; + phandle = <0x3c2>; }; }; @@ -9016,12 +9016,12 @@ pwm0m1-pins { rockchip,pins = <0x1 0x1a 0xb 0x192>; - phandle = <0x3bb>; + phandle = <0x3c3>; }; pwm0m2-pins { rockchip,pins = <0x1 0x2 0xb 0x192>; - phandle = <0x3bc>; + phandle = <0x3c4>; }; }; @@ -9034,12 +9034,12 @@ pwm1m1-pins { rockchip,pins = <0x1 0x1b 0xb 0x192>; - phandle = <0x3bd>; + phandle = <0x3c5>; }; pwm1m2-pins { rockchip,pins = <0x1 0x3 0xb 0x192>; - phandle = <0x3be>; + phandle = <0x3c6>; }; }; @@ -9052,12 +9052,12 @@ pwm2m1-pins { rockchip,pins = <0x3 0x9 0xb 0x192>; - phandle = <0x3bf>; + phandle = <0x3c7>; }; pwm2m2-pins { rockchip,pins = <0x4 0x12 0xb 0x192>; - phandle = <0x3c0>; + phandle = <0x3c8>; }; }; @@ -9065,7 +9065,7 @@ pwm3m0-pins { rockchip,pins = <0x0 0x1c 0x3 0x192>; - phandle = <0x3c1>; + phandle = <0x3c9>; }; pwm3m1-pins { @@ -9075,12 +9075,12 @@ pwm3m2-pins { rockchip,pins = <0x1 0x12 0xb 0x192>; - phandle = <0x3c2>; + phandle = <0x3ca>; }; pwm3m3-pins { rockchip,pins = <0x1 0x7 0xb 0x192>; - phandle = <0x3c3>; + phandle = <0x3cb>; }; }; @@ -9093,7 +9093,7 @@ pwm4m1-pins { rockchip,pins = <0x4 0x13 0xb 0x192>; - phandle = <0x3c4>; + phandle = <0x3cc>; }; }; @@ -9106,12 +9106,12 @@ pwm5m1-pins { rockchip,pins = <0x0 0x16 0xb 0x192>; - phandle = <0x3c5>; + phandle = <0x3cd>; }; pwm5m2-pins { rockchip,pins = <0x4 0x14 0xb 0x192>; - phandle = <0x3c6>; + phandle = <0x3ce>; }; }; @@ -9124,12 +9124,12 @@ pwm6m1-pins { rockchip,pins = <0x4 0x11 0xb 0x192>; - phandle = <0x3c7>; + phandle = <0x3cf>; }; pwm6m2-pins { rockchip,pins = <0x4 0x15 0xb 0x192>; - phandle = <0x3c8>; + phandle = <0x3d0>; }; }; @@ -9142,17 +9142,17 @@ pwm7m1-pins { rockchip,pins = <0x4 0x1c 0xb 0x192>; - phandle = <0x3c9>; + phandle = <0x3d1>; }; pwm7m2-pins { rockchip,pins = <0x1 0x13 0xb 0x192>; - phandle = <0x3ca>; + phandle = <0x3d2>; }; pwm7m3-pins { rockchip,pins = <0x4 0x16 0xb 0x192>; - phandle = <0x3cb>; + phandle = <0x3d3>; }; }; @@ -9165,12 +9165,12 @@ pwm8m1-pins { rockchip,pins = <0x4 0x18 0xb 0x192>; - phandle = <0x3cc>; + phandle = <0x3d4>; }; pwm8m2-pins { rockchip,pins = <0x3 0x18 0xb 0x192>; - phandle = <0x3cd>; + phandle = <0x3d5>; }; }; @@ -9183,12 +9183,12 @@ pwm9m1-pins 
{ rockchip,pins = <0x4 0x19 0xb 0x192>; - phandle = <0x3ce>; + phandle = <0x3d6>; }; pwm9m2-pins { rockchip,pins = <0x3 0x19 0xb 0x192>; - phandle = <0x3cf>; + phandle = <0x3d7>; }; }; @@ -9196,12 +9196,12 @@ pwm10m0-pins { rockchip,pins = <0x3 0x0 0xb 0x192>; - phandle = <0x3d0>; + phandle = <0x3d8>; }; pwm10m1-pins { rockchip,pins = <0x4 0x1b 0xb 0x192>; - phandle = <0x3d1>; + phandle = <0x3d9>; }; pwm10m2-pins { @@ -9214,17 +9214,17 @@ pwm11m0-pins { rockchip,pins = <0x3 0x1 0xb 0x192>; - phandle = <0x3d2>; + phandle = <0x3da>; }; pwm11m1-pins { rockchip,pins = <0x4 0xc 0xb 0x192>; - phandle = <0x3d3>; + phandle = <0x3db>; }; pwm11m2-pins { rockchip,pins = <0x1 0x14 0xb 0x192>; - phandle = <0x3d4>; + phandle = <0x3dc>; }; pwm11m3-pins { @@ -9242,7 +9242,7 @@ pwm12m1-pins { rockchip,pins = <0x4 0xd 0xb 0x192>; - phandle = <0x3d5>; + phandle = <0x3dd>; }; }; @@ -9255,12 +9255,12 @@ pwm13m1-pins { rockchip,pins = <0x4 0xe 0xb 0x192>; - phandle = <0x3d6>; + phandle = <0x3de>; }; pwm13m2-pins { rockchip,pins = <0x1 0xf 0xb 0x192>; - phandle = <0x3d7>; + phandle = <0x3df>; }; }; @@ -9273,12 +9273,12 @@ pwm14m1-pins { rockchip,pins = <0x4 0xa 0xb 0x192>; - phandle = <0x3d8>; + phandle = <0x3e0>; }; pwm14m2-pins { rockchip,pins = <0x1 0x1e 0xb 0x192>; - phandle = <0x3d9>; + phandle = <0x3e1>; }; }; @@ -9291,17 +9291,17 @@ pwm15m1-pins { rockchip,pins = <0x4 0xb 0xb 0x192>; - phandle = <0x3da>; + phandle = <0x3e2>; }; pwm15m2-pins { rockchip,pins = <0x1 0x16 0xb 0x192>; - phandle = <0x3db>; + phandle = <0x3e3>; }; pwm15m3-pins { rockchip,pins = <0x1 0x1f 0xb 0x192>; - phandle = <0x3dc>; + phandle = <0x3e4>; }; }; @@ -9309,7 +9309,7 @@ refclk-pins { rockchip,pins = <0x0 0x0 0x1 0x192>; - phandle = <0x3dd>; + phandle = <0x3e5>; }; }; @@ -9317,7 +9317,7 @@ sata-pins { rockchip,pins = <0x0 0x16 0xd 0x192 0x0 0x1c 0xd 0x192 0x0 0x1d 0xd 0x192>; - phandle = <0x3de>; + phandle = <0x3e6>; }; }; @@ -9325,12 +9325,12 @@ sata0m0-pins { rockchip,pins = <0x4 0xe 0x6 0x192>; - phandle = <0x3df>; + phandle = <0x3e7>; }; sata0m1-pins { rockchip,pins = <0x1 0xb 0x6 0x192>; - phandle = <0x3e0>; + phandle = <0x3e8>; }; }; @@ -9338,12 +9338,12 @@ sata1m0-pins { rockchip,pins = <0x4 0xd 0x6 0x192>; - phandle = <0x3e1>; + phandle = <0x3e9>; }; sata1m1-pins { rockchip,pins = <0x1 0x1 0x6 0x192>; - phandle = <0x3e2>; + phandle = <0x3ea>; }; }; @@ -9351,12 +9351,12 @@ sata2m0-pins { rockchip,pins = <0x4 0x9 0x6 0x192>; - phandle = <0x3e3>; + phandle = <0x3eb>; }; sata2m1-pins { rockchip,pins = <0x1 0xf 0x6 0x192>; - phandle = <0x3e4>; + phandle = <0x3ec>; }; }; @@ -9364,7 +9364,7 @@ sdiom1-pins { rockchip,pins = <0x3 0x5 0x2 0x192 0x3 0x4 0x2 0x197 0x3 0x0 0x2 0x197 0x3 0x1 0x2 0x197 0x3 0x2 0x2 0x197 0x3 0x3 0x2 0x197>; - phandle = <0x3e5>; + phandle = <0x3ed>; }; sdiom0-pins { @@ -9397,7 +9397,7 @@ sdmmc-pwren { rockchip,pins = <0x0 0x5 0x2 0x192>; - phandle = <0x3e6>; + phandle = <0x3ee>; }; }; @@ -9410,7 +9410,7 @@ spdif0m1-tx { rockchip,pins = <0x4 0xc 0x6 0x192>; - phandle = <0x3e7>; + phandle = <0x3ef>; }; }; @@ -9423,12 +9423,12 @@ spdif1m1-tx { rockchip,pins = <0x4 0x9 0x2 0x192>; - phandle = <0x3e8>; + phandle = <0x3f0>; }; spdif1m2-tx { rockchip,pins = <0x4 0x11 0x3 0x192>; - phandle = <0x3e9>; + phandle = <0x3f1>; }; }; @@ -9451,47 +9451,47 @@ spi0m1-pins { rockchip,pins = <0x4 0x2 0x8 0x199 0x4 0x0 0x8 0x199 0x4 0x1 0x8 0x199>; - phandle = <0x3ea>; + phandle = <0x3f2>; }; spi0m1-cs0 { rockchip,pins = <0x4 0xa 0x8 0x199>; - phandle = <0x3eb>; + phandle = <0x3f3>; }; spi0m1-cs1 { rockchip,pins = <0x4 0x9 0x8 
0x199>; - phandle = <0x3ec>; + phandle = <0x3f4>; }; spi0m2-pins { rockchip,pins = <0x1 0xb 0x8 0x199 0x1 0x9 0x8 0x199 0x1 0xa 0x8 0x199>; - phandle = <0x3ed>; + phandle = <0x3f5>; }; spi0m2-cs0 { rockchip,pins = <0x1 0xc 0x8 0x199>; - phandle = <0x3ee>; + phandle = <0x3f6>; }; spi0m2-cs1 { rockchip,pins = <0x1 0xd 0x8 0x199>; - phandle = <0x3ef>; + phandle = <0x3f7>; }; spi0m3-pins { rockchip,pins = <0x3 0x1b 0x8 0x199 0x3 0x19 0x8 0x199 0x3 0x1a 0x8 0x199>; - phandle = <0x3f0>; + phandle = <0x3f8>; }; spi0m3-cs0 { rockchip,pins = <0x3 0x1c 0x8 0x199>; - phandle = <0x3f1>; + phandle = <0x3f9>; }; spi0m3-cs1 { rockchip,pins = <0x3 0x1d 0x8 0x199>; - phandle = <0x3f2>; + phandle = <0x3fa>; }; }; @@ -9514,32 +9514,32 @@ spi1m2-pins { rockchip,pins = <0x1 0x1a 0x8 0x199 0x1 0x18 0x8 0x199 0x1 0x19 0x8 0x199>; - phandle = <0x3f3>; + phandle = <0x3fb>; }; spi1m2-cs0 { rockchip,pins = <0x1 0x1b 0x8 0x199>; - phandle = <0x3f4>; + phandle = <0x3fc>; }; spi1m2-cs1 { rockchip,pins = <0x1 0x1d 0x8 0x199>; - phandle = <0x3f5>; + phandle = <0x3fd>; }; spi1m0-pins { rockchip,pins = <0x2 0x10 0x8 0x19a 0x2 0x11 0x8 0x19a 0x2 0x12 0x8 0x19a>; - phandle = <0x3f6>; + phandle = <0x3fe>; }; spi1m0-cs0 { rockchip,pins = <0x2 0x13 0x8 0x19a>; - phandle = <0x3f7>; + phandle = <0x3ff>; }; spi1m0-cs1 { rockchip,pins = <0x2 0x14 0x8 0x19a>; - phandle = <0x3f8>; + phandle = <0x400>; }; }; @@ -9547,32 +9547,32 @@ spi2m0-pins { rockchip,pins = <0x1 0x6 0x8 0x199 0x1 0x4 0x8 0x199 0x1 0x5 0x8 0x199>; - phandle = <0x3f9>; + phandle = <0x401>; }; spi2m0-cs0 { rockchip,pins = <0x1 0x7 0x8 0x199>; - phandle = <0x3fa>; + phandle = <0x402>; }; spi2m0-cs1 { rockchip,pins = <0x1 0x8 0x8 0x199>; - phandle = <0x3fb>; + phandle = <0x403>; }; spi2m1-pins { rockchip,pins = <0x4 0x6 0x8 0x199 0x4 0x4 0x8 0x199 0x4 0x5 0x8 0x199>; - phandle = <0x3fc>; + phandle = <0x404>; }; spi2m1-cs0 { rockchip,pins = <0x4 0x7 0x8 0x199>; - phandle = <0x3fd>; + phandle = <0x405>; }; spi2m1-cs1 { rockchip,pins = <0x4 0x8 0x8 0x199>; - phandle = <0x3fe>; + phandle = <0x406>; }; spi2m2-pins { @@ -9587,7 +9587,7 @@ spi2m2-cs1 { rockchip,pins = <0x0 0x8 0x1 0x19a>; - phandle = <0x3ff>; + phandle = <0x407>; }; }; @@ -9610,47 +9610,47 @@ spi3m2-pins { rockchip,pins = <0x0 0x1b 0x8 0x199 0x0 0x18 0x8 0x199 0x0 0x1a 0x8 0x199>; - phandle = <0x400>; + phandle = <0x408>; }; spi3m2-cs0 { rockchip,pins = <0x0 0x1c 0x8 0x199>; - phandle = <0x401>; + phandle = <0x409>; }; spi3m2-cs1 { rockchip,pins = <0x0 0x1d 0x8 0x199>; - phandle = <0x402>; + phandle = <0x40a>; }; spi3m3-pins { rockchip,pins = <0x3 0x18 0x8 0x199 0x3 0x16 0x8 0x199 0x3 0x17 0x8 0x199>; - phandle = <0x403>; + phandle = <0x40b>; }; spi3m3-cs0 { rockchip,pins = <0x3 0x14 0x8 0x199>; - phandle = <0x404>; + phandle = <0x40c>; }; spi3m3-cs1 { rockchip,pins = <0x3 0x15 0x8 0x199>; - phandle = <0x405>; + phandle = <0x40d>; }; spi3m0-pins { rockchip,pins = <0x4 0x16 0x8 0x19a 0x4 0x14 0x8 0x19a 0x4 0x15 0x8 0x19a>; - phandle = <0x406>; + phandle = <0x40e>; }; spi3m0-cs0 { rockchip,pins = <0x4 0x12 0x8 0x19a>; - phandle = <0x407>; + phandle = <0x40f>; }; spi3m0-cs1 { rockchip,pins = <0x4 0x13 0x8 0x19a>; - phandle = <0x408>; + phandle = <0x410>; }; }; @@ -9673,27 +9673,27 @@ spi4m1-pins { rockchip,pins = <0x3 0x2 0x8 0x199 0x3 0x0 0x8 0x199 0x3 0x1 0x8 0x199>; - phandle = <0x409>; + phandle = <0x411>; }; spi4m1-cs0 { rockchip,pins = <0x3 0x3 0x8 0x199>; - phandle = <0x40a>; + phandle = <0x412>; }; spi4m1-cs1 { rockchip,pins = <0x3 0x4 0x8 0x199>; - phandle = <0x40b>; + phandle = <0x413>; }; spi4m2-pins 
{ rockchip,pins = <0x1 0x2 0x8 0x199 0x1 0x0 0x8 0x199 0x1 0x1 0x8 0x199>; - phandle = <0x40c>; + phandle = <0x414>; }; spi4m2-cs0 { rockchip,pins = <0x1 0x3 0x8 0x199>; - phandle = <0x40d>; + phandle = <0x415>; }; }; @@ -9701,7 +9701,7 @@ tsadcm1-shut { rockchip,pins = <0x0 0x2 0x2 0x192>; - phandle = <0x40e>; + phandle = <0x416>; }; tsadc-shut { @@ -9711,7 +9711,7 @@ tsadc-shut-org { rockchip,pins = <0x0 0x1 0x1 0x192>; - phandle = <0x40f>; + phandle = <0x417>; }; }; @@ -9719,7 +9719,7 @@ uart0m0-xfer { rockchip,pins = <0x0 0x14 0x4 0x197 0x0 0x15 0x4 0x197>; - phandle = <0x410>; + phandle = <0x418>; }; uart0m1-xfer { @@ -9729,17 +9729,17 @@ uart0m2-xfer { rockchip,pins = <0x4 0x4 0xa 0x197 0x4 0x3 0xa 0x197>; - phandle = <0x411>; + phandle = <0x419>; }; uart0-ctsn { rockchip,pins = <0x0 0x19 0x4 0x192>; - phandle = <0x412>; + phandle = <0x41a>; }; uart0-rtsn { rockchip,pins = <0x0 0x16 0x4 0x192>; - phandle = <0x413>; + phandle = <0x41b>; }; }; @@ -9747,32 +9747,32 @@ uart1m1-xfer { rockchip,pins = <0x1 0xf 0xa 0x197 0x1 0xe 0xa 0x197>; - phandle = <0x414>; + phandle = <0x41c>; }; uart1m1-ctsn { rockchip,pins = <0x1 0x1f 0xa 0x192>; - phandle = <0x415>; + phandle = <0x41d>; }; uart1m1-rtsn { rockchip,pins = <0x1 0x1e 0xa 0x192>; - phandle = <0x416>; + phandle = <0x41e>; }; uart1m2-xfer { rockchip,pins = <0x0 0x1a 0xa 0x197 0x0 0x19 0xa 0x197>; - phandle = <0x417>; + phandle = <0x41f>; }; uart1m2-ctsn { rockchip,pins = <0x0 0x18 0xa 0x192>; - phandle = <0x418>; + phandle = <0x420>; }; uart1m2-rtsn { rockchip,pins = <0x0 0x17 0xa 0x192>; - phandle = <0x419>; + phandle = <0x421>; }; uart1m0-xfer { @@ -9782,12 +9782,12 @@ uart1m0-ctsn { rockchip,pins = <0x2 0x11 0xa 0x192>; - phandle = <0x41a>; + phandle = <0x422>; }; uart1m0-rtsn { rockchip,pins = <0x2 0x10 0xa 0x192>; - phandle = <0x41b>; + phandle = <0x423>; }; }; @@ -9795,7 +9795,7 @@ uart2m0-xfer { rockchip,pins = <0x0 0xe 0xa 0x197 0x0 0xd 0xa 0x197>; - phandle = <0x1eb>; + phandle = <0x1f3>; }; uart2m1-xfer { @@ -9805,17 +9805,17 @@ uart2m2-xfer { rockchip,pins = <0x3 0xa 0xa 0x197 0x3 0x9 0xa 0x197>; - phandle = <0x41c>; + phandle = <0x424>; }; uart2-ctsn { rockchip,pins = <0x3 0xc 0xa 0x192>; - phandle = <0x41d>; + phandle = <0x425>; }; uart2-rtsn { rockchip,pins = <0x3 0xb 0xa 0x192>; - phandle = <0x41e>; + phandle = <0x426>; }; }; @@ -9823,7 +9823,7 @@ uart3m0-xfer { rockchip,pins = <0x1 0x10 0xa 0x197 0x1 0x11 0xa 0x197>; - phandle = <0x41f>; + phandle = <0x427>; }; uart3m1-xfer { @@ -9833,17 +9833,17 @@ uart3m2-xfer { rockchip,pins = <0x4 0x6 0xa 0x197 0x4 0x5 0xa 0x197>; - phandle = <0x420>; + phandle = <0x428>; }; uart3-ctsn { rockchip,pins = <0x1 0x13 0xa 0x192>; - phandle = <0x421>; + phandle = <0x429>; }; uart3-rtsn { rockchip,pins = <0x1 0x12 0xa 0x192>; - phandle = <0x422>; + phandle = <0x42a>; }; }; @@ -9856,22 +9856,22 @@ uart4m1-xfer { rockchip,pins = <0x3 0x18 0xa 0x197 0x3 0x19 0xa 0x197>; - phandle = <0x423>; + phandle = <0x42b>; }; uart4m2-xfer { rockchip,pins = <0x1 0xa 0xa 0x197 0x1 0xb 0xa 0x197>; - phandle = <0x424>; + phandle = <0x42c>; }; uart4-ctsn { rockchip,pins = <0x1 0x17 0xa 0x192>; - phandle = <0x425>; + phandle = <0x42d>; }; uart4-rtsn { rockchip,pins = <0x1 0x15 0xa 0x192>; - phandle = <0x426>; + phandle = <0x42e>; }; }; @@ -9879,17 +9879,17 @@ uart5m0-xfer { rockchip,pins = <0x4 0x1c 0xa 0x197 0x4 0x1d 0xa 0x197>; - phandle = <0x427>; + phandle = <0x42f>; }; uart5m0-ctsn { rockchip,pins = <0x4 0x1a 0xa 0x192>; - phandle = <0x428>; + phandle = <0x430>; }; uart5m0-rtsn { rockchip,pins = <0x4 0x1b 0xa 
0x192>; - phandle = <0x429>; + phandle = <0x431>; }; uart5m1-xfer { @@ -9899,17 +9899,17 @@ uart5m1-ctsn { rockchip,pins = <0x2 0x2 0xa 0x192>; - phandle = <0x42a>; + phandle = <0x432>; }; uart5m1-rtsn { rockchip,pins = <0x2 0x3 0xa 0x192>; - phandle = <0x42b>; + phandle = <0x433>; }; uart5m2-xfer { rockchip,pins = <0x2 0x1c 0xa 0x197 0x2 0x1d 0xa 0x197>; - phandle = <0x42c>; + phandle = <0x434>; }; }; @@ -9917,17 +9917,17 @@ uart6m1-xfer { rockchip,pins = <0x1 0x0 0xa 0x197 0x1 0x1 0xa 0x197>; - phandle = <0x42d>; + phandle = <0x435>; }; uart6m1-ctsn { rockchip,pins = <0x1 0x3 0xa 0x192>; - phandle = <0x42e>; + phandle = <0x436>; }; uart6m1-rtsn { rockchip,pins = <0x1 0x2 0xa 0x192>; - phandle = <0x42f>; + phandle = <0x437>; }; uart6m2-xfer { @@ -9937,17 +9937,17 @@ uart6m0-xfer { rockchip,pins = <0x2 0x6 0xa 0x197 0x2 0x7 0xa 0x197>; - phandle = <0x430>; + phandle = <0x438>; }; uart6m0-ctsn { rockchip,pins = <0x2 0x9 0xa 0x192>; - phandle = <0x431>; + phandle = <0x439>; }; uart6m0-rtsn { rockchip,pins = <0x2 0x8 0xa 0x192>; - phandle = <0x432>; + phandle = <0x43a>; }; }; @@ -9960,32 +9960,32 @@ uart7m1-ctsn { rockchip,pins = <0x3 0x13 0xa 0x192>; - phandle = <0x433>; + phandle = <0x43b>; }; uart7m1-rtsn { rockchip,pins = <0x3 0x12 0xa 0x192>; - phandle = <0x434>; + phandle = <0x43c>; }; uart7m2-xfer { rockchip,pins = <0x1 0xc 0xa 0x197 0x1 0xd 0xa 0x197>; - phandle = <0x435>; + phandle = <0x43d>; }; uart7m0-xfer { rockchip,pins = <0x2 0xc 0xa 0x197 0x2 0xd 0xa 0x197>; - phandle = <0x436>; + phandle = <0x43e>; }; uart7m0-ctsn { rockchip,pins = <0x4 0x16 0xa 0x192>; - phandle = <0x437>; + phandle = <0x43f>; }; uart7m0-rtsn { rockchip,pins = <0x4 0x12 0xa 0x192>; - phandle = <0x438>; + phandle = <0x440>; }; }; @@ -9993,17 +9993,17 @@ uart8m0-xfer { rockchip,pins = <0x4 0x9 0xa 0x197 0x4 0x8 0xa 0x197>; - phandle = <0x439>; + phandle = <0x441>; }; uart8m0-ctsn { rockchip,pins = <0x4 0xb 0xa 0x192>; - phandle = <0x43a>; + phandle = <0x442>; }; uart8m0-rtsn { rockchip,pins = <0x4 0xa 0xa 0x192>; - phandle = <0x43b>; + phandle = <0x443>; }; uart8m1-xfer { @@ -10013,17 +10013,17 @@ uart8m1-ctsn { rockchip,pins = <0x3 0x5 0xa 0x192>; - phandle = <0x43c>; + phandle = <0x444>; }; uart8m1-rtsn { rockchip,pins = <0x3 0x4 0xa 0x192>; - phandle = <0x43d>; + phandle = <0x445>; }; uart8-xfer { rockchip,pins = <0x4 0x9 0xa 0x197>; - phandle = <0x43e>; + phandle = <0x446>; }; }; @@ -10031,32 +10031,32 @@ uart9m1-xfer { rockchip,pins = <0x4 0xd 0xa 0x197 0x4 0xc 0xa 0x197>; - phandle = <0x43f>; + phandle = <0x447>; }; uart9m1-ctsn { rockchip,pins = <0x4 0x1 0xa 0x192>; - phandle = <0x440>; + phandle = <0x448>; }; uart9m1-rtsn { rockchip,pins = <0x4 0x0 0xa 0x192>; - phandle = <0x441>; + phandle = <0x449>; }; uart9m2-xfer { rockchip,pins = <0x3 0x1c 0xa 0x197 0x3 0x1d 0xa 0x197>; - phandle = <0x442>; + phandle = <0x44a>; }; uart9m2-ctsn { rockchip,pins = <0x3 0x1b 0xa 0x192>; - phandle = <0x443>; + phandle = <0x44b>; }; uart9m2-rtsn { rockchip,pins = <0x3 0x1a 0xa 0x192>; - phandle = <0x444>; + phandle = <0x44c>; }; uart9m0-xfer { @@ -10071,7 +10071,7 @@ uart9m0-rtsn { rockchip,pins = <0x4 0x14 0xa 0x192>; - phandle = <0x1e4>; + phandle = <0x1ed>; }; }; @@ -10079,7 +10079,7 @@ vop-pins { rockchip,pins = <0x1 0x2 0x1 0x192>; - phandle = <0x445>; + phandle = <0x44d>; }; }; @@ -10087,7 +10087,7 @@ bt656-pins { rockchip,pins = <0x4 0x8 0x2 0x19b 0x4 0x0 0x2 0x19b 0x4 0x1 0x2 0x19b 0x4 0x2 0x2 0x19b 0x4 0x3 0x2 0x19b 0x4 0x4 0x2 0x19b 0x4 0x5 0x2 0x19b 0x4 0x6 0x2 0x19b 0x4 0x7 0x2 0x19b>; - phandle = <0x446>; + 
phandle = <0x44e>; }; }; @@ -10102,170 +10102,170 @@ pcfg-pull-none-drv-level-7 { bias-disable; drive-strength = <0x7>; - phandle = <0x447>; + phandle = <0x44f>; }; pcfg-pull-none-drv-level-8 { bias-disable; drive-strength = <0x8>; - phandle = <0x448>; + phandle = <0x450>; }; pcfg-pull-none-drv-level-9 { bias-disable; drive-strength = <0x9>; - phandle = <0x449>; + phandle = <0x451>; }; pcfg-pull-none-drv-level-10 { bias-disable; drive-strength = <0xa>; - phandle = <0x44a>; + phandle = <0x452>; }; pcfg-pull-none-drv-level-11 { bias-disable; drive-strength = <0xb>; - phandle = <0x44b>; + phandle = <0x453>; }; pcfg-pull-none-drv-level-12 { bias-disable; drive-strength = <0xc>; - phandle = <0x44c>; + phandle = <0x454>; }; pcfg-pull-none-drv-level-13 { bias-disable; drive-strength = <0xd>; - phandle = <0x44d>; + phandle = <0x455>; }; pcfg-pull-none-drv-level-14 { bias-disable; drive-strength = <0xe>; - phandle = <0x44e>; + phandle = <0x456>; }; pcfg-pull-none-drv-level-15 { bias-disable; drive-strength = <0xf>; - phandle = <0x44f>; + phandle = <0x457>; }; pcfg-pull-up-drv-level-7 { bias-pull-up; drive-strength = <0x7>; - phandle = <0x450>; + phandle = <0x458>; }; pcfg-pull-up-drv-level-8 { bias-pull-up; drive-strength = <0x8>; - phandle = <0x451>; + phandle = <0x459>; }; pcfg-pull-up-drv-level-9 { bias-pull-up; drive-strength = <0x9>; - phandle = <0x452>; + phandle = <0x45a>; }; pcfg-pull-up-drv-level-10 { bias-pull-up; drive-strength = <0xa>; - phandle = <0x453>; + phandle = <0x45b>; }; pcfg-pull-up-drv-level-11 { bias-pull-up; drive-strength = <0xb>; - phandle = <0x454>; + phandle = <0x45c>; }; pcfg-pull-up-drv-level-12 { bias-pull-up; drive-strength = <0xc>; - phandle = <0x455>; + phandle = <0x45d>; }; pcfg-pull-up-drv-level-13 { bias-pull-up; drive-strength = <0xd>; - phandle = <0x456>; + phandle = <0x45e>; }; pcfg-pull-up-drv-level-14 { bias-pull-up; drive-strength = <0xe>; - phandle = <0x457>; + phandle = <0x45f>; }; pcfg-pull-up-drv-level-15 { bias-pull-up; drive-strength = <0xf>; - phandle = <0x458>; + phandle = <0x460>; }; pcfg-pull-down-drv-level-7 { bias-pull-down; drive-strength = <0x7>; - phandle = <0x459>; + phandle = <0x461>; }; pcfg-pull-down-drv-level-8 { bias-pull-down; drive-strength = <0x8>; - phandle = <0x45a>; + phandle = <0x462>; }; pcfg-pull-down-drv-level-9 { bias-pull-down; drive-strength = <0x9>; - phandle = <0x45b>; + phandle = <0x463>; }; pcfg-pull-down-drv-level-10 { bias-pull-down; drive-strength = <0xa>; - phandle = <0x45c>; + phandle = <0x464>; }; pcfg-pull-down-drv-level-11 { bias-pull-down; drive-strength = <0xb>; - phandle = <0x45d>; + phandle = <0x465>; }; pcfg-pull-down-drv-level-12 { bias-pull-down; drive-strength = <0xc>; - phandle = <0x45e>; + phandle = <0x466>; }; pcfg-pull-down-drv-level-13 { bias-pull-down; drive-strength = <0xd>; - phandle = <0x45f>; + phandle = <0x467>; }; pcfg-pull-down-drv-level-14 { bias-pull-down; drive-strength = <0xe>; - phandle = <0x460>; + phandle = <0x468>; }; pcfg-pull-down-drv-level-15 { bias-pull-down; drive-strength = <0xf>; - phandle = <0x461>; + phandle = <0x469>; }; eth0 { eth0-pins { rockchip,pins = <0x2 0x13 0x1 0x192>; - phandle = <0x462>; + phandle = <0x1c4>; }; }; @@ -10273,52 +10273,52 @@ gmac0-miim { rockchip,pins = <0x4 0x14 0x1 0x192 0x4 0x15 0x1 0x192>; - phandle = <0x463>; + phandle = <0x1bf>; }; gmac0-clkinout { rockchip,pins = <0x4 0x13 0x1 0x192>; - phandle = <0x464>; + phandle = <0x1c5>; }; gmac0-rx-bus2 { rockchip,pins = <0x2 0x11 0x1 0x192 0x2 0x12 0x1 0x192 0x4 0x12 0x1 0x192>; - phandle = <0x465>; 
+ phandle = <0x1c1>; }; gmac0-tx-bus2 { rockchip,pins = <0x2 0xe 0x1 0x192 0x2 0xf 0x1 0x192 0x2 0x10 0x1 0x192>; - phandle = <0x466>; + phandle = <0x1c0>; }; gmac0-rgmii-clk { rockchip,pins = <0x2 0x8 0x1 0x192 0x2 0xb 0x1 0x192>; - phandle = <0x467>; + phandle = <0x1c2>; }; gmac0-rgmii-bus { rockchip,pins = <0x2 0x6 0x1 0x192 0x2 0x7 0x1 0x192 0x2 0x9 0x1 0x192 0x2 0xa 0x1 0x192>; - phandle = <0x468>; + phandle = <0x1c3>; }; gmac0-ppsclk { rockchip,pins = <0x2 0x14 0x1 0x192>; - phandle = <0x469>; + phandle = <0x46a>; }; gmac0-ppstring { rockchip,pins = <0x2 0xd 0x1 0x192>; - phandle = <0x46a>; + phandle = <0x46b>; }; gmac0-ptp-refclk { rockchip,pins = <0x2 0xc 0x1 0x192>; - phandle = <0x46b>; + phandle = <0x46c>; }; gmac0-txer { rockchip,pins = <0x4 0x16 0x1 0x192>; - phandle = <0x46c>; + phandle = <0x46d>; }; }; @@ -10326,17 +10326,17 @@ mipicsi0-pwr { rockchip,pins = <0x1 0x1a 0x0 0x192>; - phandle = <0x46d>; + phandle = <0x46e>; }; mipicsi1-pwr { rockchip,pins = <0x1 0x1b 0x0 0x192>; - phandle = <0x46e>; + phandle = <0x46f>; }; mipidcphy0-pwr { rockchip,pins = <0x2 0x14 0x0 0x192>; - phandle = <0x46f>; + phandle = <0x470>; }; }; @@ -10344,7 +10344,7 @@ vga-hpdin-l { rockchip,pins = <0x3 0x6 0x0 0x192>; - phandle = <0x470>; + phandle = <0x471>; }; }; @@ -10352,12 +10352,12 @@ hp-det { rockchip,pins = <0x3 0x1d 0x0 0x192>; - phandle = <0x1e1>; + phandle = <0x1ea>; }; spk-con { rockchip,pins = <0x4 0xc 0x0 0x192>; - phandle = <0x1d9>; + phandle = <0x1e2>; }; }; @@ -10373,7 +10373,7 @@ lcd-rst-gpio { rockchip,pins = <0x1 0x0 0x0 0x192>; - phandle = <0x471>; + phandle = <0x472>; }; }; @@ -10381,7 +10381,7 @@ wifi-enable-h { rockchip,pins = <0x1 0x16 0x0 0x197>; - phandle = <0x1e0>; + phandle = <0x1e9>; }; }; @@ -10397,7 +10397,7 @@ vcc5v0-host-en { rockchip,pins = <0x4 0x8 0x0 0x192>; - phandle = <0x1e3>; + phandle = <0x1ec>; }; }; @@ -10405,20 +10405,20 @@ uart9-gpios { rockchip,pins = <0x4 0x14 0x0 0x192>; - phandle = <0x1e6>; + phandle = <0x1ef>; }; bt-reset-gpio { rockchip,pins = <0x0 0xa 0x0 0x192>; - phandle = <0x1e5>; + phandle = <0x1ee>; }; }; ndj_io_init { ndj_io_gpio_col { - rockchip,pins = <0x0 0x1b 0x0 0x192 0x4 0x1 0x0 0x192 0x4 0xe 0x0 0x192 0x0 0x16 0x0 0x192 0x2 0xc 0x0 0x192 0x2 0x13 0x0 0x192 0x4 0xd 0x0 0x192 0x1 0x9 0x0 0x192 0x1 0x0 0x0 0x192 0x1 0xa 0x0 0x192 0x1 0x1 0x0 0x192 0x1 0xb 0x0 0x192 0x1 0x2 0x0 0x192 0x1 0xc 0x0 0x192 0x1 0x3 0x0 0x192>; - phandle = <0x1e7>; + rockchip,pins = <0x1 0x3 0x0 0x192>; + phandle = <0x1f0>; }; }; }; @@ -10435,28 +10435,28 @@ compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x19c>; status = "disabled"; - phandle = <0x472>; + phandle = <0x473>; }; rkcif-mipi-lvds4-sditf-vir1 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x19c>; status = "disabled"; - phandle = <0x473>; + phandle = <0x474>; }; rkcif-mipi-lvds4-sditf-vir2 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x19c>; status = "disabled"; - phandle = <0x474>; + phandle = <0x475>; }; rkcif-mipi-lvds4-sditf-vir3 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x19c>; status = "disabled"; - phandle = <0x475>; + phandle = <0x476>; }; rkcif-mipi-lvds5 { @@ -10471,28 +10471,28 @@ compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x19d>; status = "disabled"; - phandle = <0x476>; + phandle = <0x477>; }; rkcif-mipi-lvds5-sditf-vir1 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x19d>; status = "disabled"; - phandle = <0x477>; + phandle = <0x478>; }; rkcif-mipi-lvds5-sditf-vir2 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x19d>; 
status = "disabled"; - phandle = <0x478>; + phandle = <0x479>; }; rkcif-mipi-lvds5-sditf-vir3 { compatible = "rockchip,rkcif-sditf"; rockchip,cif = <0x19d>; status = "disabled"; - phandle = <0x479>; + phandle = <0x47a>; }; usbdrd3_1 { @@ -10503,7 +10503,7 @@ #size-cells = <0x2>; ranges; status = "okay"; - phandle = <0x47a>; + phandle = <0x47b>; usb@fc400000 { compatible = "snps,dwc3"; @@ -10521,26 +10521,26 @@ snps,dis-del-phy-power-chg-quirk; snps,dis-tx-ipgap-linecheck-quirk; status = "okay"; - phandle = <0x47b>; + phandle = <0x47c>; }; }; syscon@fd5b8000 { compatible = "rockchip,pcie30-phy-grf", "syscon"; reg = <0x0 0xfd5b8000 0x0 0x10000>; - phandle = <0x1c3>; + phandle = <0x1cc>; }; syscon@fd5c0000 { compatible = "rockchip,pipe-phy-grf", "syscon"; reg = <0x0 0xfd5c0000 0x0 0x100>; - phandle = <0x1c2>; + phandle = <0x1cb>; }; syscon@fd5cc000 { compatible = "rockchip,rk3588-usbdpphy-grf", "syscon"; reg = <0x0 0xfd5cc000 0x0 0x4000>; - phandle = <0x1c0>; + phandle = <0x1c9>; }; syscon@fd5d4000 { @@ -10548,7 +10548,7 @@ reg = <0x0 0xfd5d4000 0x0 0x4000>; #address-cells = <0x1>; #size-cells = <0x1>; - phandle = <0x1bf>; + phandle = <0x1c8>; usb2-phy@4000 { compatible = "rockchip,rk3588-usb2phy"; @@ -10562,7 +10562,7 @@ #clock-cells = <0x0>; rockchip,usbctrl-grf = <0x6a>; status = "okay"; - phandle = <0x1c1>; + phandle = <0x1ca>; otg-port { #phy-cells = <0x0>; @@ -10576,7 +10576,7 @@ syscon@fd5e4000 { compatible = "rockchip,rk3588-hdptxphy-grf", "syscon"; reg = <0x0 0xfd5e4000 0x0 0x100>; - phandle = <0x1be>; + phandle = <0x1c7>; }; spdif-tx@fddb8000 { @@ -10592,7 +10592,7 @@ power-domains = <0x57 0x19>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x1cf>; + phandle = <0x1d8>; }; i2s@fddc8000 { @@ -10611,7 +10611,7 @@ rockchip,playback-only; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x47c>; + phandle = <0x47d>; }; spdif-tx@fdde8000 { @@ -10627,7 +10627,7 @@ power-domains = <0x57 0x1a>; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x47d>; + phandle = <0x47e>; }; i2s@fddf4000 { @@ -10648,7 +10648,7 @@ rockchip,playback-only; #sound-dai-cells = <0x0>; status = "okay"; - phandle = <0x1cb>; + phandle = <0x1d4>; }; i2s@fddf8000 { @@ -10667,7 +10667,7 @@ rockchip,capture-only; #sound-dai-cells = <0x0>; status = "okay"; - phandle = <0x1db>; + phandle = <0x1e4>; }; i2s@fde00000 { @@ -10686,7 +10686,7 @@ rockchip,capture-only; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x47e>; + phandle = <0x47f>; }; spdif-rx@fde10000 { @@ -10704,7 +10704,7 @@ reset-names = "spdifrx-m"; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x47f>; + phandle = <0x480>; }; spdif-rx@fde18000 { @@ -10722,7 +10722,7 @@ reset-names = "spdifrx-m"; #sound-dai-cells = <0x0>; status = "disabled"; - phandle = <0x480>; + phandle = <0x481>; }; dp@fde60000 { @@ -10740,7 +10740,7 @@ status = "disabled"; pinctrl-names = "default"; pinctrl-0 = <0x1a1>; - phandle = <0x1d0>; + phandle = <0x1d9>; ports { #address-cells = <0x1>; @@ -10777,7 +10777,7 @@ reg = <0x1>; endpoint { - phandle = <0x481>; + phandle = <0x482>; }; }; }; @@ -10802,7 +10802,7 @@ #sound-dai-cells = <0x0>; status = "disabled"; enable-gpios = <0xfc 0xa 0x0>; - phandle = <0x1cc>; + phandle = <0x1d5>; ports { #address-cells = <0x1>; @@ -10812,7 +10812,7 @@ reg = <0x0>; #address-cells = <0x1>; #size-cells = <0x0>; - phandle = <0x482>; + phandle = <0x483>; endpoint@0 { reg = <0x0>; @@ -10852,7 +10852,7 @@ rockchip,grf = <0xcc>; status = "okay"; force-hpd; - phandle = <0x483>; + phandle = <0x484>; ports { 
#address-cells = <0x1>; @@ -10890,7 +10890,7 @@ endpoint { remote-endpoint = <0x1af>; - phandle = <0x1ea>; + phandle = <0x1f2>; }; }; }; @@ -10911,11 +10911,11 @@ reset-names = "rst_a", "rst_p", "rst_ref", "rst_biu"; pinctrl-0 = <0x1b0 0x1b1>; pinctrl-names = "default"; - status = "okay"; + status = "disabled"; #sound-dai-cells = <0x1>; hpd-trigger-level = <0x1>; hdmirx-det-gpios = <0x1b2 0xe 0x1>; - phandle = <0x1da>; + phandle = <0x1e3>; }; pcie@fe150000 { @@ -10952,7 +10952,7 @@ vpcie3v3-supply = <0x105>; pinctrl-names = "default"; pinctrl-0 = <0x1b6>; - phandle = <0x484>; + phandle = <0x485>; legacy-interrupt-controller { interrupt-controller; @@ -10994,7 +10994,7 @@ reset-names = "pcie", "periph"; rockchip,pipe-grf = <0x6c>; status = "disabled"; - phandle = <0x485>; + phandle = <0x486>; legacy-interrupt-controller { interrupt-controller; @@ -11037,7 +11037,7 @@ status = "okay"; reset-gpios = <0xfc 0x5 0x0>; vpcie3v3-supply = <0x105>; - phandle = <0x486>; + phandle = <0x487>; legacy-interrupt-controller { interrupt-controller; @@ -11054,11 +11054,11 @@ reg = <0x0 0xfe1b0000 0x0 0x10000>; rockchip,ethernet = <0x1ba>; status = "disabled"; - phandle = <0x487>; + phandle = <0x488>; }; ethernet@fe1b0000 { - compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a"; + compatible = "atemsys", "!!rockchip,rk3588-gmac", "!!snps,dwmac-4.20a"; reg = <0x0 0xfe1b0000 0x0 0x10000>; interrupts = <0x0 0xe3 0x4 0x0 0xe2 0x4>; interrupt-names = "macirq", "eth_wake_irq"; @@ -11074,14 +11074,29 @@ snps,axi-config = <0x1bb>; snps,mtl-rx-config = <0x1bc>; snps,mtl-tx-config = <0x1bd>; - status = "disabled"; + status = "okay"; + phy-mode = "rgmii-rxid"; + clock_in_out = "output"; + snps,reset-gpio = <0x1be 0xd 0x1>; + snps,reset-active-low; + snps,reset-delays-us = <0x0 0x4e20 0x186a0>; + pinctrl-names = "default"; + pinctrl-0 = <0x1bf 0x1c0 0x1c1 0x1c2 0x1c3 0x1c4 0x1c5>; + tx_delay = <0x44>; + phy-handle = <0x1c6>; phandle = <0x1ba>; mdio { compatible = "snps,dwmac-mdio"; #address-cells = <0x1>; #size-cells = <0x0>; - phandle = <0x488>; + phandle = <0x489>; + + phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0x1>; + phandle = <0x1c6>; + }; }; stmmac-axi-config { @@ -11119,7 +11134,7 @@ phy-names = "sata-phy"; ports-implemented = <0x1>; status = "disabled"; - phandle = <0x489>; + phandle = <0x48a>; }; phy@fed70000 { @@ -11129,7 +11144,7 @@ clock-names = "ref", "apb"; resets = <0x2 0x486 0x2 0xc003f 0x2 0xc0040 0x2 0xc0041>; reset-names = "apb", "init", "cmn", "lane"; - rockchip,grf = <0x1be>; + rockchip,grf = <0x1c7>; #phy-cells = <0x0>; status = "okay"; phandle = <0x1ac>; @@ -11142,7 +11157,7 @@ clock-names = "ref", "apb"; resets = <0x2 0x491 0x2 0x486 0x2 0xc003f 0x2 0xc0040 0x2 0xc0041 0x2 0x48f 0x2 0x490>; reset-names = "phy", "apb", "init", "cmn", "lane", "ropll", "lcpll"; - rockchip,grf = <0x1be>; + rockchip,grf = <0x1c7>; #phy-cells = <0x0>; status = "disabled"; phandle = <0x1a9>; @@ -11157,16 +11172,16 @@ phy@fed90000 { compatible = "rockchip,rk3588-usbdp-phy"; reg = <0x0 0xfed90000 0x0 0x10000>; - rockchip,u2phy-grf = <0x1bf>; + rockchip,u2phy-grf = <0x1c8>; rockchip,usb-grf = <0x6a>; - rockchip,usbdpphy-grf = <0x1c0>; + rockchip,usbdpphy-grf = <0x1c9>; rockchip,vo-grf = <0xf2>; - clocks = <0x2 0x2b6 0x2 0x280 0x2 0x26a 0x1c1>; + clocks = <0x2 0x2b6 0x2 0x280 0x2 0x26a 0x1ca>; clock-names = "refclk", "immortal", "pclk", "utmi"; resets = <0x2 0x2f 0x2 0x30 0x2 0x31 0x2 0x32 0x2 0x484>; reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb"; status = "okay"; - phandle = <0x48a>; + 
phandle = <0x48b>; dp-port { #phy-cells = <0x0>; @@ -11192,7 +11207,7 @@ resets = <0x2 0x20006 0x2 0x4d7>; reset-names = "combphy-apb", "combphy"; rockchip,pipe-grf = <0x6c>; - rockchip,pipe-phy-grf = <0x1c2>; + rockchip,pipe-phy-grf = <0x1cb>; rockchip,pcie1ln-sel-bits = <0x100 0x0 0x0 0x0>; status = "okay"; phandle = <0x1b9>; @@ -11207,7 +11222,7 @@ resets = <0x2 0x2000a>; reset-names = "phy"; rockchip,pipe-grf = <0x6c>; - rockchip,phy-grf = <0x1c3>; + rockchip,phy-grf = <0x1cc>; status = "disabled"; rockchip,pcie30-phymode = <0x4>; phandle = <0x1b5>; @@ -11215,11 +11230,11 @@ adc-keys { compatible = "adc-keys"; - io-channels = <0x1c4 0x1>; + io-channels = <0x1cd 0x1>; io-channel-names = "buttons"; keyup-threshold-microvolt = <0x1b7740>; poll-interval = <0x64>; - phandle = <0x48b>; + phandle = <0x48c>; vol-up-key { label = "volume up"; @@ -11250,7 +11265,7 @@ compatible = "pwm-backlight"; brightness-levels = <0x0 0x14 0x14 0x15 0x15 0x16 0x16 0x17 0x17 0x18 0x18 0x19 0x19 0x1a 0x1a 0x1b 0x1b 0x1c 0x1c 0x1d 0x1d 0x1e 0x1e 0x1f 0x1f 0x20 0x20 0x21 0x21 0x22 0x22 0x23 0x23 0x24 0x24 0x25 0x25 0x26 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff>; default-brightness-level = <0xc8>; - pwms = <0x1c5 0x0 0x61a8 0x0>; + pwms = <0x1ce 0x0 0x61a8 0x0>; status = "okay"; phandle = <0xee>; }; @@ -11259,7 +11274,7 @@ compatible = "pwm-backlight"; brightness-levels = <0x0 0x14 0x14 0x15 0x15 0x16 0x16 0x17 0x17 0x18 0x18 0x19 0x19 0x1a 0x1a 0x1b 0x1b 0x1c 0x1c 0x1d 0x1d 0x1e 0x1e 0x1f 0x1f 0x20 0x20 0x21 0x21 0x22 0x22 0x23 0x23 0x24 0x24 0x25 0x25 0x26 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf 0xe0 0xe1 0xe2 
0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff>; default-brightness-level = <0xc8>; - pwms = <0x1c6 0x0 0x61a8 0x0>; + pwms = <0x1cf 0x0 0x61a8 0x0>; status = "disabled"; phandle = <0xe9>; }; @@ -11268,7 +11283,7 @@ status = "disabled"; compatible = "delta,dfbmcs320"; #sound-dai-cells = <0x1>; - phandle = <0x1c8>; + phandle = <0x1d1>; }; bt-sound { @@ -11278,14 +11293,14 @@ simple-audio-card,bitclock-inversion = <0x0>; simple-audio-card,mclk-fs = <0x100>; simple-audio-card,name = "rockchip,bt"; - phandle = <0x48c>; + phandle = <0x48d>; simple-audio-card,cpu { - sound-dai = <0x1c7>; + sound-dai = <0x1d0>; }; simple-audio-card,codec { - sound-dai = <0x1c8 0x1>; + sound-dai = <0x1d1 0x1>; }; }; @@ -11294,10 +11309,10 @@ compatible = "rockchip,hdmi"; rockchip,mclk-fs = <0x80>; rockchip,card-name = "rockchip-hdmi0"; - rockchip,cpu = <0x1c9>; - rockchip,codec = <0x1ca>; + rockchip,cpu = <0x1d2>; + rockchip,codec = <0x1d3>; rockchip,jack-det; - phandle = <0x48d>; + phandle = <0x48e>; }; hdmi1-sound { @@ -11305,10 +11320,10 @@ compatible = "rockchip,hdmi"; rockchip,mclk-fs = <0x80>; rockchip,card-name = "rockchip-hdmi1"; - rockchip,cpu = <0x1cb>; - rockchip,codec = <0x1cc>; + rockchip,cpu = <0x1d4>; + rockchip,codec = <0x1d5>; rockchip,jack-det; - phandle = <0x48e>; + phandle = <0x48f>; }; dp0-sound { @@ -11316,10 +11331,10 @@ compatible = "rockchip,hdmi"; rockchip,card-name = "rockchip-dp0"; rockchip,mclk-fs = <0x200>; - rockchip,cpu = <0x1cd>; - rockchip,codec = <0x1ce 0x1>; + rockchip,cpu = <0x1d6>; + rockchip,codec = <0x1d7 0x1>; rockchip,jack-det; - phandle = <0x48f>; + phandle = <0x490>; }; dp1-sound { @@ -11327,20 +11342,20 @@ compatible = "rockchip,hdmi"; rockchip,card-name = "rockchip-dp1"; rockchip,mclk-fs = <0x200>; - rockchip,cpu = <0x1cf>; - rockchip,codec = <0x1d0 0x1>; + rockchip,cpu = <0x1d8>; + rockchip,codec = <0x1d9 0x1>; rockchip,jack-det; - phandle = <0x490>; + phandle = <0x491>; }; leds { compatible = "gpio-leds"; - phandle = <0x491>; + phandle = <0x492>; sys_led { gpios = <0x1b2 0x1e 0x0>; linux,default-trigger = "heartbeat"; - phandle = <0x492>; + phandle = <0x493>; }; }; @@ -11348,7 +11363,7 @@ status = "disabled"; compatible = "linux,spdif-dit"; #sound-dai-cells = <0x0>; - phandle = <0x1d2>; + phandle = <0x1db>; }; spdif-tx0-sound { @@ -11356,14 +11371,14 @@ compatible = "simple-audio-card"; simple-audio-card,mclk-fs = <0x80>; simple-audio-card,name = "rockchip,spdif-tx0"; - phandle = <0x493>; + phandle = <0x494>; simple-audio-card,cpu { - sound-dai = <0x1d1>; + sound-dai = <0x1da>; }; simple-audio-card,codec { - sound-dai = <0x1d2>; + sound-dai = <0x1db>; }; }; @@ -11371,7 +11386,7 @@ status = "disabled"; compatible = "linux,spdif-dit"; #sound-dai-cells = <0x0>; - phandle = <0x1d4>; + phandle = <0x1dd>; }; spdif-tx1-sound { @@ -11379,14 +11394,14 @@ compatible = "simple-audio-card"; simple-audio-card,mclk-fs = <0x80>; simple-audio-card,name = "rockchip,spdif-tx1"; - phandle = <0x494>; + phandle = <0x495>; simple-audio-card,cpu { - sound-dai = <0x1d3>; + sound-dai = <0x1dc>; }; simple-audio-card,codec { - sound-dai = <0x1d4>; + sound-dai = <0x1dd>; }; }; @@ -11401,7 +11416,7 @@ regulator-boot-on; regulator-min-microvolt = <0xb71b00>; regulator-max-microvolt = <0xb71b00>; - phandle = <0x1d5>; + phandle = <0x1de>; }; vcc5v0-sys { @@ -11411,7 +11426,7 @@ regulator-boot-on; regulator-min-microvolt = <0x4c4b40>; regulator-max-microvolt = <0x4c4b40>; - vin-supply = <0x1d5>; + 
vin-supply = <0x1de>; phandle = <0x6e>; }; @@ -11422,8 +11437,8 @@ regulator-boot-on; regulator-min-microvolt = <0x4c4b40>; regulator-max-microvolt = <0x4c4b40>; - vin-supply = <0x1d5>; - phandle = <0x1d6>; + vin-supply = <0x1de>; + phandle = <0x1df>; }; vcc5v0-usb { @@ -11433,8 +11448,8 @@ regulator-boot-on; regulator-min-microvolt = <0x4c4b40>; regulator-max-microvolt = <0x4c4b40>; - vin-supply = <0x1d6>; - phandle = <0x1e2>; + vin-supply = <0x1df>; + phandle = <0x1eb>; }; reserved-memory { @@ -11459,7 +11474,7 @@ drm-cubic-lut@00000000 { compatible = "rockchip,drm-cubic-lut"; reg = <0x0 0x0 0x0 0x0>; - phandle = <0x495>; + phandle = <0x496>; }; ramoops@110000 { @@ -11469,25 +11484,25 @@ console-size = <0x80000>; ftrace-size = <0x0>; pmsg-size = <0x50000>; - phandle = <0x496>; + phandle = <0x497>; }; }; es8316-sound { - status = "okay"; + status = "disabled"; compatible = "rockchip,multicodecs-card"; rockchip,card-name = "rockchip-es8316"; rockchip,format = "i2s"; rockchip,mclk-fs = <0x100>; - rockchip,cpu = <0x1d7>; - rockchip,codec = <0x1d8>; + rockchip,cpu = <0x1e0>; + rockchip,codec = <0x1e1>; poll-interval = <0x64>; - io-channels = <0x1c4 0x3>; + io-channels = <0x1cd 0x3>; io-channel-names = "adc-detect"; keyup-threshold-microvolt = <0x1b7740>; - pinctrl-0 = <0x1d9>; + pinctrl-0 = <0x1e2>; pinctrl-names = "default"; - phandle = <0x497>; + phandle = <0x498>; play-pause-key { label = "playpause"; @@ -11499,21 +11514,21 @@ pwm-fan { compatible = "pwm-fan"; #cooling-cells = <0x2>; - pwms = <0x1c5 0x0 0xc350 0x0>; + pwms = <0x1ce 0x0 0xc350 0x0>; cooling-levels = <0x0 0x32 0x64 0x96 0xc8 0xff>; rockchip,temp-trips = <0xc350 0x1 0xd6d8 0x2 0xea60 0x3 0xfde8 0x4 0x11170 0x5>; - phandle = <0x498>; + phandle = <0x499>; }; hdmiin-sound { compatible = "rockchip,hdmi"; rockchip,mclk-fs = <0x80>; rockchip,format = "i2s"; - rockchip,bitclock-master = <0x1da>; - rockchip,frame-master = <0x1da>; + rockchip,bitclock-master = <0x1e3>; + rockchip,frame-master = <0x1e3>; rockchip,card-name = "rockchip,hdmiin"; - rockchip,cpu = <0x1db>; - rockchip,codec = <0x1da 0x0>; + rockchip,cpu = <0x1e4>; + rockchip,codec = <0x1e3 0x0>; rockchip,jack-det; }; @@ -11524,8 +11539,8 @@ regulator-always-on; regulator-min-microvolt = <0xcf850>; regulator-max-microvolt = <0xcf850>; - vin-supply = <0x1dc>; - phandle = <0x499>; + vin-supply = <0x1e5>; + phandle = <0x49a>; }; pcie20-avdd1v8 { @@ -11535,8 +11550,8 @@ regulator-always-on; regulator-min-microvolt = <0x1b7740>; regulator-max-microvolt = <0x1b7740>; - vin-supply = <0x1dd>; - phandle = <0x49a>; + vin-supply = <0x1e6>; + phandle = <0x49b>; }; pcie30-avdd0v75 { @@ -11546,8 +11561,8 @@ regulator-always-on; regulator-min-microvolt = <0xb71b0>; regulator-max-microvolt = <0xb71b0>; - vin-supply = <0x1de>; - phandle = <0x49b>; + vin-supply = <0x1e7>; + phandle = <0x49c>; }; pcie30-avdd1v8 { @@ -11557,16 +11572,16 @@ regulator-always-on; regulator-min-microvolt = <0x1b7740>; regulator-max-microvolt = <0x1b7740>; - vin-supply = <0x1dd>; - phandle = <0x49c>; + vin-supply = <0x1e6>; + phandle = <0x49d>; }; sdio-pwrseq { compatible = "mmc-pwrseq-simple"; - clocks = <0x1df>; + clocks = <0x1e8>; clock-names = "ext_clock"; pinctrl-names = "default"; - pinctrl-0 = <0x1e0>; + pinctrl-0 = <0x1e9>; post-power-on-delay-ms = <0xc8>; reset-gpios = <0x1b2 0x16 0x1>; phandle = <0x11a>; @@ -11578,9 +11593,9 @@ headset_gpio = <0x108 0x1d 0x0>; spk_ctl_gpio = <0xfc 0xc 0x1>; pinctrl-names = "default"; - pinctrl-0 = <0x1e1>; - io-channels = <0x1c4 0x3>; - phandle = <0x49d>; + pinctrl-0 = 
<0x1ea>; + io-channels = <0x1cd 0x3>; + phandle = <0x49e>; }; vcc-1v1-nldo-s3 { @@ -11612,7 +11627,7 @@ regulator-max-microvolt = <0x325aa0>; enable-active-high; startup-delay-us = <0x1388>; - vin-supply = <0x1d5>; + vin-supply = <0x1de>; phandle = <0x105>; }; @@ -11625,9 +11640,9 @@ regulator-max-microvolt = <0x4c4b40>; enable-active-high; gpio = <0xfc 0x8 0x0>; - vin-supply = <0x1e2>; + vin-supply = <0x1eb>; pinctrl-names = "default"; - pinctrl-0 = <0x1e3>; + pinctrl-0 = <0x1ec>; phandle = <0x6b>; }; @@ -11640,61 +11655,31 @@ wireless-bluetooth { compatible = "bluetooth-platdata"; - clocks = <0x1df>; + clocks = <0x1e8>; clock-names = "ext_clock"; uart_rts_gpios = <0xfc 0x14 0x1>; pinctrl-names = "default", "rts_gpio"; - pinctrl-0 = <0x1e4 0x1e5>; - pinctrl-1 = <0x1e6>; + pinctrl-0 = <0x1ed 0x1ee>; + pinctrl-1 = <0x1ef>; BT,reset_gpio = <0x15b 0xa 0x0>; - status = "okay"; - phandle = <0x49e>; + status = "disabled"; + phandle = <0x49f>; }; wireless-wlan { compatible = "wlan-platdata"; wifi_chip_type = "ap6398s"; - status = "okay"; - phandle = <0x49f>; + status = "disabled"; + phandle = <0x4a0>; }; ndj_io_init { compatible = "nk_io_control"; pinctrl-names = "default"; - pinctrl-0 = <0x1e7>; - - vcc_12v { - gpio_num = <0x15b 0x1b 0x0>; - gpio_function = <0x0>; - }; - - vcc_3v { - gpio_num = <0xfc 0x1 0x0>; - gpio_function = <0x0>; - }; + pinctrl-0 = <0x1f0>; hub_5V_reset { gpio_num = <0xfc 0xe 0x0>; - gpio_function = <0x3>; - }; - - 4g_power { - gpio_num = <0x15b 0x16 0x0>; - gpio_function = <0x0>; - }; - - wake_wifi_bt { - gpio_num = <0x1e8 0xd 0x1>; - gpio_function = <0x0>; - }; - - air_mode_4g { - gpio_num = <0x1e8 0xc 0x1>; - gpio_function = <0x0>; - }; - - reset_4g { - gpio_num = <0x1e8 0x13 0x1>; gpio_function = <0x3>; }; }; @@ -11712,15 +11697,15 @@ bpc = <0x8>; prepare-delay-ms = <0xc8>; enable-delay-ms = <0x14>; - lvds-gpio0 = <0x1e8 0x15 0x0>; + lvds-gpio0 = <0x1be 0x15 0x0>; lvds-gpio1 = <0xfc 0x12 0x0>; lvds-gpio2 = <0xfc 0x13 0x0>; lvds-gpio3 = <0xfc 0x16 0x0>; nodka-lvds = <0xf>; - phandle = <0x4a0>; + phandle = <0x4a1>; display-timings { - native-mode = <0x1e9>; + native-mode = <0x1f1>; timing0 { clock-frequency = <0x459e440>; @@ -11736,14 +11721,14 @@ vsync-active = <0x0>; de-active = <0x0>; pixelclk-active = <0x0>; - phandle = <0x1e9>; + phandle = <0x1f1>; }; }; port { endpoint { - remote-endpoint = <0x1ea>; + remote-endpoint = <0x1f2>; phandle = <0x1af>; }; }; @@ -11751,19 +11736,19 @@ chosen { bootargs = "earlycon=uart8250,mmio32,0xfeb50000 console=ttyFIQ0 irqchip.gicv3_pseudo_nmi=0 root=PARTUUID=614e0000-0000 rw rootwait net.ifnames=0"; - phandle = <0x4a1>; + phandle = <0x4a2>; }; cspmu@fd10c000 { compatible = "rockchip,cspmu"; reg = <0x0 0xfd10c000 0x0 0x1000 0x0 0xfd10d000 0x0 0x1000 0x0 0xfd10e000 0x0 0x1000 0x0 0xfd10f000 0x0 0x1000 0x0 0xfd12c000 0x0 0x1000 0x0 0xfd12d000 0x0 0x1000 0x0 0xfd12e000 0x0 0x1000 0x0 0xfd12f000 0x0 0x1000>; - phandle = <0x4a2>; + phandle = <0x4a3>; }; debug@fd104000 { compatible = "rockchip,debug"; reg = <0x0 0xfd104000 0x0 0x1000 0x0 0xfd105000 0x0 0x1000 0x0 0xfd106000 0x0 0x1000 0x0 0xfd107000 0x0 0x1000 0x0 0xfd124000 0x0 0x1000 0x0 0xfd125000 0x0 0x1000 0x0 0xfd126000 0x0 0x1000 0x0 0xfd127000 0x0 0x1000>; - phandle = <0x4a3>; + phandle = <0x4a4>; }; fiq-debugger { @@ -11774,9 +11759,9 @@ rockchip,baudrate = <0x16e360>; interrupts = <0x0 0x1a7 0x8>; pinctrl-names = "default"; - pinctrl-0 = <0x1eb>; + pinctrl-0 = <0x1f3>; status = "okay"; - phandle = <0x4a4>; + phandle = <0x4a5>; }; __symbols__ { @@ -12159,7 +12144,7 @@ gmac_uio1 = 
"/uio@fe1c0000"; gmac1 = "/ethernet@fe1c0000"; mdio1 = "/ethernet@fe1c0000/mdio"; - rgmii_phy = "/ethernet@fe1c0000/mdio/phy@1"; + rgmii_phy1 = "/ethernet@fe1c0000/mdio/phy@1"; gmac1_stmmac_axi_setup = "/ethernet@fe1c0000/stmmac-axi-config"; gmac1_mtl_rx_setup = "/ethernet@fe1c0000/rx-queues-config"; gmac1_mtl_tx_setup = "/ethernet@fe1c0000/tx-queues-config"; @@ -12920,6 +12905,7 @@ gmac_uio0 = "/uio@fe1b0000"; gmac0 = "/ethernet@fe1b0000"; mdio0 = "/ethernet@fe1b0000/mdio"; + rgmii_phy0 = "/ethernet@fe1b0000/mdio/phy@1"; gmac0_stmmac_axi_setup = "/ethernet@fe1b0000/stmmac-axi-config"; gmac0_mtl_rx_setup = "/ethernet@fe1b0000/rx-queues-config"; gmac0_mtl_tx_setup = "/ethernet@fe1b0000/tx-queues-config"; diff --git a/kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi b/kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi index f344d09..e3c25d4 100755 --- a/kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi +++ b/kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi @@ -684,7 +684,7 @@ }; gmac0: ethernet@fe1b0000 { - compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a"; + compatible = "atemsys","!!rockchip,rk3588-gmac", "!!snps,dwmac-4.20a"; reg = <0x0 0xfe1b0000 0x0 0x10000>; interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>; diff --git a/kernel/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/kernel/arch/arm64/boot/dts/rockchip/rk3588s.dtsi old mode 100644 new mode 100755 diff --git a/kernel/arch/arm64/configs/rockchip_defconfig b/kernel/arch/arm64/configs/rockchip_defconfig index 2e6450c..ab294dc 100644 --- a/kernel/arch/arm64/configs/rockchip_defconfig +++ b/kernel/arch/arm64/configs/rockchip_defconfig @@ -98,7 +98,6 @@ CONFIG_PM_ADVANCED_DEBUG=y CONFIG_ENERGY_MODEL=y CONFIG_CPU_IDLE=y -CONFIG_ARM_CPUIDLE=y CONFIG_ARM_PSCI_CPUIDLE=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y @@ -108,6 +107,7 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_GOV_INTERACTIVE=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y CONFIG_CPUFREQ_DT=y CONFIG_ARM_ROCKCHIP_CPUFREQ=y CONFIG_ARM_SCMI_PROTOCOL=y @@ -920,8 +920,6 @@ CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ=y -CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ=y -CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP=y CONFIG_IIO=y CONFIG_IIO_BUFFER_CB=y CONFIG_ROCKCHIP_SARADC=y @@ -1028,3 +1026,4 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_ENABLE_DEFAULT_TRACERS=y # CONFIG_RUNTIME_TESTING_MENU is not set +CONFIG_DOVETAIL=y diff --git a/kernel/arch/arm64/configs/rockchip_linux_defconfig b/kernel/arch/arm64/configs/rockchip_linux_defconfig index 52fd3e9..da35330 100644 --- a/kernel/arch/arm64/configs/rockchip_linux_defconfig +++ b/kernel/arch/arm64/configs/rockchip_linux_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 5.10.160 Kernel Configuration +# Linux/arm64 5.10.161 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0" CONFIG_CC_IS_GCC=y @@ -59,6 +59,8 @@ CONFIG_IRQ_FORCED_THREADING=y CONFIG_SPARSE_IRQ=y # CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_HAVE_IRQ_PIPELINE=y +CONFIG_IRQ_PIPELINE=y # end of IRQ subsystem CONFIG_GENERIC_IRQ_MULTI_HANDLER=y @@ -122,8 +124,6 @@ # # Scheduler features # -CONFIG_UCLAMP_TASK=y -CONFIG_UCLAMP_BUCKETS_COUNT=20 # end of Scheduler features CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y @@ -140,7 +140,6 @@ CONFIG_FAIR_GROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y -CONFIG_UCLAMP_TASK_GROUP=y CONFIG_CGROUP_PIDS=y # CONFIG_CGROUP_RDMA is not set CONFIG_CGROUP_FREEZER=y @@ -191,6 +190,7 @@ CONFIG_POSIX_TIMERS=y CONFIG_PRINTK=y CONFIG_PRINTK_NMI=y +# CONFIG_RAW_PRINTK is not set CONFIG_BUG=y CONFIG_ELF_CORE=y CONFIG_BASE_FULL=y @@ -400,6 +400,9 @@ CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_HAVE_DOVETAIL=y +CONFIG_DOVETAIL=y +CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE=y # CONFIG_PARAVIRT is not set # CONFIG_PARAVIRT_TIME_ACCOUNTING is not set # CONFIG_KEXEC is not set @@ -509,43 +512,28 @@ # # CPU Idle # -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y -# CONFIG_CPU_IDLE_GOV_LADDER is not set -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set -CONFIG_DT_IDLE_STATES=y - -# -# ARM CPU Idle Drivers -# -CONFIG_ARM_CPUIDLE=y -CONFIG_ARM_PSCI_CPUIDLE=y -CONFIG_ARM_PSCI_CPUIDLE_DOMAIN=y -# end of ARM CPU Idle Drivers +# CONFIG_CPU_IDLE is not set # end of CPU Idle # # CPU Frequency scaling # CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_TIMES=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set # CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set # @@ -593,7 +581,6 @@ CONFIG_EFI_EARLYCON=y CONFIG_ARM_PSCI_FW=y -# CONFIG_ARM_PSCI_CHECKER is not set CONFIG_HAVE_ARM_SMCCC=y CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y CONFIG_ARM_SMCCC_SOC_ID=y @@ -660,6 +647,7 @@ CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y CONFIG_MMU_GATHER_TABLE_FREE=y CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM=y CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y CONFIG_HAVE_CMPXCHG_LOCAL=y @@ -850,6 +838,137 @@ # CONFIG_GKI_HIDDEN_DMA_CONFIGS is not set # CONFIG_GKI_HIDDEN_ETHERNET_CONFIGS is not set # CONFIG_GKI_HACKS_TO_FIX is not set +CONFIG_XENOMAI=y + +# +# Core features +# +# CONFIG_XENO_OPT_SCHED_CLASSES is not set +CONFIG_XENO_OPT_STATS=y +# CONFIG_XENO_OPT_SHIRQ is not set +CONFIG_XENO_OPT_RR_QUANTUM=1000 +CONFIG_XENO_OPT_AUTOTUNE=y +# 
CONFIG_XENO_OPT_SCALABLE_SCHED is not set +CONFIG_XENO_OPT_TIMER_LIST=y +# CONFIG_XENO_OPT_TIMER_RBTREE is not set +CONFIG_XENO_OPT_VFILE=y +# end of Core features + +# +# Sizes and static limits +# +CONFIG_XENO_OPT_REGISTRY_NRSLOTS=8192 +CONFIG_XENO_OPT_SYS_HEAPSZ=8192 +CONFIG_XENO_OPT_PRIVATE_HEAPSZ=512 +CONFIG_XENO_OPT_SHARED_HEAPSZ=512 +CONFIG_XENO_OPT_NRTIMERS=512 +# end of Sizes and static limits + +# +# Latency settings +# +CONFIG_XENO_OPT_TIMING_SCHEDLAT=0 +CONFIG_XENO_OPT_TIMING_KSCHEDLAT=0 +CONFIG_XENO_OPT_TIMING_IRQLAT=0 +# end of Latency settings + +# CONFIG_XENO_OPT_DEBUG is not set + +# +# Drivers +# +CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE=y +CONFIG_XENO_DRIVERS_AUTOTUNE=y + +# +# Serial drivers +# +# CONFIG_XENO_DRIVERS_16550A is not set +# end of Serial drivers + +# +# Testing drivers +# +CONFIG_XENO_DRIVERS_TIMERBENCH=y +CONFIG_XENO_DRIVERS_SWITCHTEST=y +CONFIG_XENO_DRIVERS_HEAPCHECK=y +# CONFIG_XENO_DRIVERS_RTDMTEST is not set +# end of Testing drivers + +# +# CAN drivers +# +# CONFIG_XENO_DRIVERS_CAN is not set +# end of CAN drivers + +# +# RTnet +# +# CONFIG_XENO_DRIVERS_NET is not set +# end of RTnet + +# +# ANALOGY drivers +# +# CONFIG_XENO_DRIVERS_ANALOGY is not set +# end of ANALOGY drivers + +# +# Real-time IPC drivers +# +# CONFIG_XENO_DRIVERS_RTIPC is not set +# end of Real-time IPC drivers + +# +# UDD support +# +# CONFIG_XENO_DRIVERS_UDD is not set +# end of UDD support + +# +# Real-time GPIO drivers +# +# CONFIG_XENO_DRIVERS_GPIO is not set +# end of Real-time GPIO drivers + +# +# GPIOPWM support +# +# CONFIG_XENO_DRIVERS_GPIOPWM is not set +# end of GPIOPWM support + +# +# Real-time SPI master drivers +# +# end of Real-time SPI master drivers +# end of Drivers + +CONFIG_XENO_ARCH_FPU=y +CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK=y + +# +# WARNING! Page migration (CONFIG_MIGRATION) may increase +# + +# +# latency. +# + +# +# WARNING! At least one of APM, CPU frequency scaling, ACPI 'processor' +# + +# +# or CPU idle features is enabled. Any of these options may +# + +# +# cause troubles with Xenomai. You should disable them. 
+# +CONFIG_XENO_VERSION_MAJOR=3 +CONFIG_XENO_VERSION_MINOR=2 +CONFIG_XENO_REVISION_LEVEL=4 +CONFIG_XENO_VERSION_STRING="3.2.4" CONFIG_FREEZER=y # @@ -924,7 +1043,6 @@ # end of Memory Management options CONFIG_NET=y -CONFIG_COMPAT_NETLINK_MESSAGES=y CONFIG_NET_INGRESS=y CONFIG_SKB_EXTENSIONS=y @@ -1361,10 +1479,6 @@ # CONFIG_AF_KCM is not set CONFIG_FIB_RULES=y CONFIG_WIRELESS=y -CONFIG_WIRELESS_EXT=y -CONFIG_WEXT_CORE=y -CONFIG_WEXT_PROC=y -CONFIG_WEXT_PRIV=y CONFIG_CFG80211=y # CONFIG_NL80211_TESTMODE is not set # CONFIG_CFG80211_DEVELOPER_WARNINGS is not set @@ -1952,55 +2066,43 @@ # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_AGERE is not set -CONFIG_NET_VENDOR_ALACRITECH=y -# CONFIG_SLICOSS is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_ALTEON is not set # CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -# CONFIG_ENA_ETHERNET is not set +# CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AMD is not set -CONFIG_NET_VENDOR_AQUANTIA=y -# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_VENDOR_ATHEROS is not set -CONFIG_NET_VENDOR_AURORA=y -# CONFIG_AURORA_NB8800 is not set +# CONFIG_NET_VENDOR_AURORA is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_BROCADE is not set -CONFIG_NET_VENDOR_CADENCE=y -# CONFIG_MACB is not set +# CONFIG_NET_VENDOR_CADENCE is not set # CONFIG_NET_VENDOR_CAVIUM is not set # CONFIG_NET_VENDOR_CHELSIO is not set # CONFIG_NET_VENDOR_CISCO is not set -CONFIG_NET_VENDOR_CORTINA=y -# CONFIG_GEMINI_ETHERNET is not set +# CONFIG_NET_VENDOR_CORTINA is not set # CONFIG_DNET is not set # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EZCHIP is not set -CONFIG_NET_VENDOR_GOOGLE=y -# CONFIG_GVE is not set +# CONFIG_NET_VENDOR_GOOGLE is not set # CONFIG_NET_VENDOR_HISILICON is not set -CONFIG_NET_VENDOR_HUAWEI=y -# CONFIG_HINIC is not set +# CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_JME is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MELLANOX is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICROCHIP is not set -CONFIG_NET_VENDOR_MICROSEMI=y +# CONFIG_NET_VENDOR_MICROSEMI is not set # CONFIG_NET_VENDOR_MYRI is not set # CONFIG_FEALNX is not set # CONFIG_NET_VENDOR_NATSEMI is not set -CONFIG_NET_VENDOR_NETERION=y -# CONFIG_S2IO is not set -# CONFIG_VXGE is not set -CONFIG_NET_VENDOR_NETRONOME=y -# CONFIG_NFP is not set -CONFIG_NET_VENDOR_NI=y -# CONFIG_NI_XGE_MANAGEMENT_ENET is not set +# CONFIG_NET_VENDOR_NETERION is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set # CONFIG_ETHOC is not set @@ -2015,8 +2117,8 @@ CONFIG_NET_VENDOR_REALTEK=y # CONFIG_8139CP is not set # CONFIG_8139TOO is not set -# CONFIG_R8168 is not set -CONFIG_R8169=y +CONFIG_R8168=y +# CONFIG_R8169 is not set # CONFIG_R8125 is not set # CONFIG_REALTEK_PGTOOL is not set # CONFIG_NET_VENDOR_RENESAS is not set @@ -2188,82 +2290,7 @@ # CONFIG_USB_VL600 is not set # CONFIG_USB_NET_CH9200 is not set # CONFIG_USB_NET_AQC111 is not set -CONFIG_WLAN=y -# CONFIG_WIRELESS_WDS is not set -# CONFIG_WLAN_VENDOR_ADMTEK is not set -CONFIG_ATH_COMMON=y -CONFIG_WLAN_VENDOR_ATH=y -# CONFIG_ATH_DEBUG is not set -# CONFIG_ATH5K is not set -# CONFIG_ATH5K_PCI is not set 
-CONFIG_ATH9K_HW=y -CONFIG_ATH9K_COMMON=y -CONFIG_ATH9K_BTCOEX_SUPPORT=y -CONFIG_ATH9K=y -CONFIG_ATH9K_PCI=y -# CONFIG_ATH9K_AHB is not set -# CONFIG_ATH9K_DEBUGFS is not set -# CONFIG_ATH9K_DYNACK is not set -# CONFIG_ATH9K_WOW is not set -CONFIG_ATH9K_RFKILL=y -# CONFIG_ATH9K_CHANNEL_CONTEXT is not set -CONFIG_ATH9K_PCOEM=y -# CONFIG_ATH9K_PCI_NO_EEPROM is not set -# CONFIG_ATH9K_HTC is not set -# CONFIG_ATH9K_HWRNG is not set -# CONFIG_CARL9170 is not set -# CONFIG_ATH6KL is not set -# CONFIG_AR5523 is not set -# CONFIG_WIL6210 is not set -# CONFIG_ATH10K is not set -# CONFIG_WCN36XX is not set -CONFIG_WLAN_VENDOR_ATMEL=y -# CONFIG_ATMEL is not set -# CONFIG_AT76C50X_USB is not set -CONFIG_WLAN_VENDOR_BROADCOM=y -# CONFIG_B43 is not set -# CONFIG_B43LEGACY is not set -# CONFIG_BRCMSMAC is not set -# CONFIG_BRCMFMAC is not set -# CONFIG_WLAN_VENDOR_CISCO is not set -# CONFIG_WLAN_VENDOR_INTEL is not set -# CONFIG_WLAN_VENDOR_INTERSIL is not set -# CONFIG_WLAN_VENDOR_MARVELL is not set -# CONFIG_WLAN_VENDOR_MEDIATEK is not set -CONFIG_WLAN_VENDOR_MICROCHIP=y -# CONFIG_WILC1000_SDIO is not set -# CONFIG_WILC1000_SPI is not set -# CONFIG_WLAN_VENDOR_RALINK is not set -# CONFIG_WLAN_VENDOR_REALTEK is not set -# CONFIG_WLAN_VENDOR_RSI is not set -# CONFIG_WLAN_VENDOR_ST is not set -# CONFIG_WLAN_VENDOR_TI is not set -# CONFIG_RTL8188EU is not set -# CONFIG_RTL8822BU is not set -# CONFIG_RTL8821CU is not set -# CONFIG_WLAN_VENDOR_ZYDAS is not set -# CONFIG_WLAN_VENDOR_QUANTENNA is not set -CONFIG_WL_ROCKCHIP=y -CONFIG_WIFI_BUILD_MODULE=y -# CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP is not set -# CONFIG_WIFI_GENERATE_RANDOM_MAC_ADDR is not set -CONFIG_BCMDHD=y -CONFIG_AP6XXX=m -CONFIG_BCMDHD_SDIO=y -# CONFIG_BCMDHD_PCIE is not set -CONFIG_BCMDHD_FW_PATH="/vendor/etc/firmware/fw_bcmdhd.bin" -CONFIG_BCMDHD_NVRAM_PATH="/vendor/etc/firmware/nvram.txt" -# CONFIG_BCMDHD_STATIC_IF is not set -# CONFIG_CYW_BCMDHD is not set -# CONFIG_INFINEON_DHD is not set -CONFIG_RTL8852BE=m -# CONFIG_RTL8852BU is not set -# CONFIG_RTL8821CS is not set -# CONFIG_SPARD_WLAN_SUPPORT is not set -# CONFIG_AIC_WLAN_SUPPORT is not set -# CONFIG_MAC80211_HWSIM is not set -# CONFIG_USB_NET_RNDIS_WLAN is not set -# CONFIG_VIRT_WIFI is not set +# CONFIG_WLAN is not set # # Enable WiMAX (Networking options) to see the WiMAX drivers @@ -4394,7 +4421,11 @@ CONFIG_DUMMY_CONSOLE=y CONFIG_DUMMY_CONSOLE_COLUMNS=80 CONFIG_DUMMY_CONSOLE_ROWS=25 -# CONFIG_FRAMEBUFFER_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set # end of Console display driver support # CONFIG_LOGO is not set @@ -5450,16 +5481,9 @@ # CONFIG_GREYBUS is not set CONFIG_STAGING=y -# CONFIG_PRISM2_USB is not set # CONFIG_COMEDI is not set -# CONFIG_RTL8192U is not set -# CONFIG_RTLLIB is not set -# CONFIG_RTL8723BS is not set -# CONFIG_R8712U is not set -# CONFIG_R8188EU is not set # CONFIG_RTS5208 is not set # CONFIG_VT6655 is not set -# CONFIG_VT6656 is not set # # IIO staging drivers @@ -5798,10 +5822,10 @@ # DEVFREQ Drivers # CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ=y -CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ=y +# CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ is not set CONFIG_PM_DEVFREQ_EVENT=y CONFIG_DEVFREQ_EVENT_ROCKCHIP_DFI=y -CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP=y +# CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP is not set CONFIG_EXTCON=y # @@ -7041,8 +7065,9 @@ CONFIG_GENERIC_GETTIMEOFDAY=y 
CONFIG_GENERIC_VDSO_TIME_NS=y CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y -CONFIG_FONT_AUTOSELECT=y CONFIG_SG_POOL=y CONFIG_ARCH_STACKWALK=y CONFIG_SBITMAP=y @@ -7149,6 +7174,8 @@ # end of Memory Debugging # CONFIG_DEBUG_SHIRQ is not set +# CONFIG_DEBUG_IRQ_PIPELINE is not set +# CONFIG_DEBUG_DOVETAIL is not set # # Debug Oops, Lockups and Hangs diff --git a/kernel/arch/arm64/configs/rockchip_linux_defconfig.rej b/kernel/arch/arm64/configs/rockchip_linux_defconfig.rej new file mode 100644 index 0000000..26ecf6b --- /dev/null +++ b/kernel/arch/arm64/configs/rockchip_linux_defconfig.rej @@ -0,0 +1,32 @@ +--- arch/arm64/configs/rockchip_linux_defconfig ++++ arch/arm64/configs/rockchip_linux_defconfig +@@ -71,7 +71,6 @@ CONFIG_PM_ADVANCED_DEBUG=y + CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y + CONFIG_ENERGY_MODEL=y + CONFIG_CPU_IDLE=y +-CONFIG_ARM_CPUIDLE=y + CONFIG_ARM_PSCI_CPUIDLE=y + CONFIG_CPU_FREQ=y + CONFIG_CPU_FREQ_STAT=y +@@ -81,6 +80,7 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y + CONFIG_CPU_FREQ_GOV_ONDEMAND=y + CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y + CONFIG_CPU_FREQ_GOV_INTERACTIVE=y ++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y + CONFIG_CPUFREQ_DT=y + CONFIG_ARM_ROCKCHIP_CPUFREQ=y + CONFIG_ARM_SCMI_PROTOCOL=y +@@ -537,8 +537,6 @@ CONFIG_DEVFREQ_GOV_PERFORMANCE=y + CONFIG_DEVFREQ_GOV_POWERSAVE=y + CONFIG_DEVFREQ_GOV_USERSPACE=y + CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ=y +-CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ=y +-CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP=y + CONFIG_IIO=y + CONFIG_ROCKCHIP_SARADC=y + CONFIG_SENSORS_ISL29018=y +@@ -634,3 +632,4 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60 + CONFIG_FUNCTION_TRACER=y + CONFIG_BLK_DEV_IO_TRACE=y + CONFIG_LKDTM=y ++CONFIG_DOVETAIL=y diff --git a/kernel/arch/arm64/include/asm/daifflags.h b/kernel/arch/arm64/include/asm/daifflags.h index cfdde3a..982c807 100644 --- a/kernel/arch/arm64/include/asm/daifflags.h +++ b/kernel/arch/arm64/include/asm/daifflags.h @@ -12,6 +12,12 @@ #include <asm/cpufeature.h> #include <asm/ptrace.h> +/* + * irq_pipeline: DAIF masking is only used in contexts where hard + * interrupt masking applies, so no need to virtualize for the inband + * stage here (the pipeline core does assume this). + */ + #define DAIF_PROCCTX 0 #define DAIF_PROCCTX_NOIRQ PSR_I_BIT #define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT) @@ -35,7 +41,7 @@ if (system_uses_irq_prio_masking()) gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); - trace_hardirqs_off(); + trace_hardirqs_off_pipelined(); } static inline unsigned long local_daif_save_flags(void) @@ -72,7 +78,7 @@ !(read_sysreg(daif) & PSR_I_BIT)); if (!irq_disabled) { - trace_hardirqs_on(); + trace_hardirqs_on_pipelined(); if (system_uses_irq_prio_masking()) { gic_write_pmr(GIC_PRIO_IRQON); @@ -117,7 +123,7 @@ write_sysreg(flags, daif); if (irq_disabled) - trace_hardirqs_off(); + trace_hardirqs_off_pipelined(); } /* @@ -129,7 +135,7 @@ unsigned long flags = regs->pstate & DAIF_MASK; if (interrupts_enabled(regs)) - trace_hardirqs_on(); + trace_hardirqs_on_pipelined(); if (system_uses_irq_prio_masking()) gic_write_pmr(regs->pmr_save); diff --git a/kernel/arch/arm64/include/asm/dovetail.h b/kernel/arch/arm64/include/asm/dovetail.h new file mode 100644 index 0000000..6fca696 --- /dev/null +++ b/kernel/arch/arm64/include/asm/dovetail.h @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>. 
+ */ +#ifndef _ASM_ARM64_DOVETAIL_H +#define _ASM_ARM64_DOVETAIL_H + +#include <asm/fpsimd.h> + +/* ARM64 traps */ +#define ARM64_TRAP_ACCESS 0 /* Data or instruction access exception */ +#define ARM64_TRAP_ALIGN 1 /* SP/PC alignment abort */ +#define ARM64_TRAP_SEA 2 /* Synchronous external abort */ +#define ARM64_TRAP_DEBUG 3 /* Debug trap */ +#define ARM64_TRAP_UNDI 4 /* Undefined instruction */ +#define ARM64_TRAP_UNDSE 5 /* Undefined synchronous exception */ +#define ARM64_TRAP_FPE 6 /* FPSIMD exception */ +#define ARM64_TRAP_SVE 7 /* SVE access trap */ +#define ARM64_TRAP_BTI 8 /* Branch target identification */ + +#ifdef CONFIG_DOVETAIL + +static inline void arch_dovetail_exec_prepare(void) +{ } + +static inline void arch_dovetail_switch_prepare(bool leave_inband) +{ } + +static inline void arch_dovetail_switch_finish(bool enter_inband) +{ + fpsimd_restore_current_oob(); +} + +/* + * 172 is __NR_prctl from unistd32 in ARM32 mode, without #inclusion + * hell. At the end of the day, this number is written in stone to + * honor the ABI stability promise anyway. + */ +#define arch_dovetail_is_syscall(__nr) \ + (is_compat_task() ? (__nr) == 172 : (__nr) == __NR_prctl) + +#endif + +/* + * Pass the trap event to the companion core. Return true if running + * in-band afterwards. + */ +#define mark_cond_trap_entry(__trapnr, __regs) \ + ({ \ + oob_trap_notify(__trapnr, __regs); \ + running_inband(); \ + }) + +/* + * Pass the trap event to the companion core. We expect the current + * context to be running on the in-band stage upon return so that our + * caller can tread on common kernel code. + */ +#define mark_trap_entry(__trapnr, __regs) \ + do { \ + bool __ret = mark_cond_trap_entry(__trapnr, __regs); \ + BUG_ON(dovetail_debug() && !__ret); \ + } while (0) + +#define mark_trap_exit(__trapnr, __regs) \ + oob_trap_unwind(__trapnr, __regs) + +#endif /* _ASM_ARM64_DOVETAIL_H */ diff --git a/kernel/arch/arm64/include/asm/efi.h b/kernel/arch/arm64/include/asm/efi.h index 16892f0..6f52727 100644 --- a/kernel/arch/arm64/include/asm/efi.h +++ b/kernel/arch/arm64/include/asm/efi.h @@ -115,6 +115,10 @@ static inline void efi_set_pgd(struct mm_struct *mm) { + unsigned long flags; + + protect_inband_mm(flags); + __switch_mm(mm); if (system_uses_ttbr0_pan()) { @@ -139,6 +143,8 @@ update_saved_ttbr0(current, current->active_mm); } } + + unprotect_inband_mm(flags); } void efi_virtmap_load(void); diff --git a/kernel/arch/arm64/include/asm/fpsimd.h b/kernel/arch/arm64/include/asm/fpsimd.h index 05c9c55..47417a0 100644 --- a/kernel/arch/arm64/include/asm/fpsimd.h +++ b/kernel/arch/arm64/include/asm/fpsimd.h @@ -43,6 +43,7 @@ extern void fpsimd_signal_preserve_current_state(void); extern void fpsimd_preserve_current_state(void); extern void fpsimd_restore_current_state(void); +extern void fpsimd_restore_current_oob(void); extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); extern void fpsimd_bind_task_to_cpu(void); diff --git a/kernel/arch/arm64/include/asm/irq_pipeline.h b/kernel/arch/arm64/include/asm/irq_pipeline.h new file mode 100644 index 0000000..5861ab3 --- /dev/null +++ b/kernel/arch/arm64/include/asm/irq_pipeline.h @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>. 
+ */ +#ifndef _ASM_ARM64_IRQ_PIPELINE_H +#define _ASM_ARM64_IRQ_PIPELINE_H + +#include <asm-generic/irq_pipeline.h> + +#ifdef CONFIG_IRQ_PIPELINE + +/* + * In order to cope with the limited number of SGIs available to us, + * in-band IPI messages are multiplexed over SGI0, whereas out-of-band + * IPIs are directly mapped to SGI1-2. + */ +#define OOB_NR_IPI 2 +#define OOB_IPI_OFFSET 1 /* SGI1 */ +#define TIMER_OOB_IPI (ipi_irq_base + OOB_IPI_OFFSET) +#define RESCHEDULE_OOB_IPI (TIMER_OOB_IPI + 1) + +extern int ipi_irq_base; + +static inline notrace +unsigned long arch_irqs_virtual_to_native_flags(int stalled) +{ + return (!!stalled) << IRQMASK_I_POS; +} + +static inline notrace +unsigned long arch_irqs_native_to_virtual_flags(unsigned long flags) +{ + return (!!hard_irqs_disabled_flags(flags)) << IRQMASK_i_POS; +} + +static inline notrace unsigned long arch_local_irq_save(void) +{ + int stalled = inband_irq_save(); + barrier(); + return arch_irqs_virtual_to_native_flags(stalled); +} + +static inline notrace void arch_local_irq_enable(void) +{ + barrier(); + inband_irq_enable(); +} + +static inline notrace void arch_local_irq_disable(void) +{ + inband_irq_disable(); + barrier(); +} + +static inline notrace unsigned long arch_local_save_flags(void) +{ + int stalled = inband_irqs_disabled(); + barrier(); + return arch_irqs_virtual_to_native_flags(stalled); +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return native_irqs_disabled_flags(flags); +} + +static inline notrace void arch_local_irq_restore(unsigned long flags) +{ + inband_irq_restore(arch_irqs_disabled_flags(flags)); + barrier(); +} + +static inline +void arch_save_timer_regs(struct pt_regs *dst, struct pt_regs *src) +{ + dst->pstate = src->pstate; + dst->pc = src->pc; +} + +static inline bool arch_steal_pipelined_tick(struct pt_regs *regs) +{ + return !!(regs->pstate & IRQMASK_I_BIT); +} + +static inline int arch_enable_oob_stage(void) +{ + return 0; +} + +/* + * We use neither the generic entry code nor + * kentry_enter/exit_pipelined yet. We still build a no-op version of + * the latter for now, until we eventually switch to using whichever + * of them is available first.
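The two translation helpers above are easy to misread: the virtual (in-band stall) state is encoded at bit 7 (IRQMASK_I_POS, mirroring PSR_I_BIT), while the native hard-masked state is reflected at bit 31 (IRQMASK_i_POS), so the two encodings can never collide. A stand-alone user-space model of that packing, with all names mocked from the constants above (this is not kernel code, just a compilable sanity check):

#include <assert.h>
#include <stdio.h>

/* Mocked from the asm/irqflags.h and irq_pipeline.h hunks above. */
#define PSR_I_BIT      0x00000080UL
#define IRQMASK_I_POS  7   /* mirrors PSR_I_BIT */
#define IRQMASK_i_POS  31  /* "virtual" disable bit, out of PSR range */

static unsigned long virtual_to_native(int stalled)
{
	/* A stalled in-band stage reads back as PSR.I set. */
	return (unsigned long)(!!stalled) << IRQMASK_I_POS;
}

static unsigned long native_to_virtual(unsigned long flags)
{
	/* Hard-disabled IRQs map to the out-of-range bit 31. */
	return (unsigned long)(!!(flags & PSR_I_BIT)) << IRQMASK_i_POS;
}

int main(void)
{
	assert(virtual_to_native(1) == PSR_I_BIT);
	assert(virtual_to_native(0) == 0);
	/* The two encodings never overlap: bit 7 vs bit 31. */
	assert((virtual_to_native(1) & native_to_virtual(PSR_I_BIT)) == 0);
	printf("flag translation model OK\n");
	return 0;
}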
+ */ +#define arch_kentry_get_irqstate(__regs) 0 + +#define arch_kentry_set_irqstate(__regs, __irqstate) \ + do { (void)__irqstate; } while (0) + +#else /* !CONFIG_IRQ_PIPELINE */ + +static inline unsigned long arch_local_irq_save(void) +{ + return native_irq_save(); +} + +static inline void arch_local_irq_enable(void) +{ + native_irq_enable(); +} + +static inline void arch_local_irq_disable(void) +{ + native_irq_disable(); +} + +static inline unsigned long arch_local_save_flags(void) +{ + return native_save_flags(); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + native_irq_restore(flags); +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return native_irqs_disabled_flags(flags); +} + +#endif /* !CONFIG_IRQ_PIPELINE */ + +static inline int arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(arch_local_save_flags()); +} + +#endif /* _ASM_ARM64_IRQ_PIPELINE_H */ diff --git a/kernel/arch/arm64/include/asm/irqflags.h b/kernel/arch/arm64/include/asm/irqflags.h index ff328e5..177c7e7 100644 --- a/kernel/arch/arm64/include/asm/irqflags.h +++ b/kernel/arch/arm64/include/asm/irqflags.h @@ -10,6 +10,10 @@ #include <asm/ptrace.h> #include <asm/sysreg.h> +#define IRQMASK_I_BIT PSR_I_BIT +#define IRQMASK_I_POS 7 +#define IRQMASK_i_POS 31 + /* * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai' @@ -26,7 +30,7 @@ /* * CPU interrupt mask handling. */ -static inline void arch_local_irq_enable(void) +static inline void native_irq_enable(void) { if (system_has_prio_mask_debugging()) { u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); @@ -35,7 +39,7 @@ } asm volatile(ALTERNATIVE( - "msr daifclr, #2 // arch_local_irq_enable", + "msr daifclr, #2 // native_irq_enable", __msr_s(SYS_ICC_PMR_EL1, "%0"), ARM64_HAS_IRQ_PRIO_MASKING) : @@ -45,7 +49,7 @@ pmr_sync(); } -static inline void arch_local_irq_disable(void) +static inline void native_irq_disable(void) { if (system_has_prio_mask_debugging()) { u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1); @@ -54,7 +58,7 @@ } asm volatile(ALTERNATIVE( - "msr daifset, #2 // arch_local_irq_disable", + "msr daifset, #2 // native_irq_disable", __msr_s(SYS_ICC_PMR_EL1, "%0"), ARM64_HAS_IRQ_PRIO_MASKING) : @@ -62,10 +66,17 @@ : "memory"); } +static inline void native_irq_sync(void) +{ + native_irq_enable(); + isb(); + native_irq_disable(); +} + /* * Save the current interrupt enable state. */ -static inline unsigned long arch_local_save_flags(void) +static inline unsigned long native_save_flags(void) { unsigned long flags; @@ -80,7 +91,7 @@ return flags; } -static inline int arch_irqs_disabled_flags(unsigned long flags) +static inline int native_irqs_disabled_flags(unsigned long flags) { int res; @@ -95,23 +106,18 @@ return res; } -static inline int arch_irqs_disabled(void) -{ - return arch_irqs_disabled_flags(arch_local_save_flags()); -} - -static inline unsigned long arch_local_irq_save(void) +static inline unsigned long native_irq_save(void) { unsigned long flags; - flags = arch_local_save_flags(); + flags = native_save_flags(); /* * There are too many states with IRQs disabled, just keep the current * state if interrupts are already disabled/masked. 
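With the CPU-level accessors demoted to native_*(), a plain local_irq_save() compiled under CONFIG_IRQ_PIPELINE only stalls the in-band stage; out-of-band interrupts keep flowing. Code that genuinely needs the CPU masked must say so with the hard_* variants, as the uaccess and fault hunks later in this patch do. A hypothetical driver-side sketch of the distinction (my_stats, my_timer and MY_TIMER_LOAD are made-up names; hard_local_irq_save()/hard_local_irq_restore() are assumed from the pipeline core):

/* Illustrative only, not part of the patch. */
struct my_stats { unsigned long events; };
struct my_timer { void __iomem *regs; u32 period; };
#define MY_TIMER_LOAD 0x00 /* made-up register offset */

static void update_inband_stats(struct my_stats *s)
{
	unsigned long flags;

	local_irq_save(flags);	/* virtual: stalls the in-band stage only */
	s->events++;		/* safe against in-band IRQs, but NOT
				 * against out-of-band handlers */
	local_irq_restore(flags);
}

static void program_shared_timer(struct my_timer *t)
{
	unsigned long flags;

	flags = hard_local_irq_save();	/* really masks the CPU: both stages */
	writel(t->period, t->regs + MY_TIMER_LOAD);
	hard_local_irq_restore(flags);
}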
*/ - if (!arch_irqs_disabled_flags(flags)) - arch_local_irq_disable(); + if (!native_irqs_disabled_flags(flags)) + native_irq_disable(); return flags; } @@ -119,7 +125,7 @@ /* * restore saved IRQ state */ -static inline void arch_local_irq_restore(unsigned long flags) +static inline void native_irq_restore(unsigned long flags) { asm volatile(ALTERNATIVE( "msr daif, %0", @@ -132,4 +138,12 @@ pmr_sync(); } +static inline bool native_irqs_disabled(void) +{ + unsigned long flags = native_save_flags(); + return native_irqs_disabled_flags(flags); +} + +#include <asm/irq_pipeline.h> + #endif /* __ASM_IRQFLAGS_H */ diff --git a/kernel/arch/arm64/include/asm/mmu_context.h b/kernel/arch/arm64/include/asm/mmu_context.h index cc58614..1b0551e 100644 --- a/kernel/arch/arm64/include/asm/mmu_context.h +++ b/kernel/arch/arm64/include/asm/mmu_context.h @@ -15,6 +15,7 @@ #include <linux/sched/hotplug.h> #include <linux/mm_types.h> #include <linux/pgtable.h> +#include <linux/irq_pipeline.h> #include <asm/cacheflush.h> #include <asm/cpufeature.h> @@ -112,6 +113,9 @@ static inline void cpu_uninstall_idmap(void) { struct mm_struct *mm = current->active_mm; + unsigned long flags; + + flags = hard_cond_local_irq_save(); cpu_set_reserved_ttbr0(); local_flush_tlb_all(); @@ -119,15 +123,23 @@ if (mm != &init_mm && !system_uses_ttbr0_pan()) cpu_switch_mm(mm->pgd, mm); + + hard_cond_local_irq_restore(flags); } static inline void cpu_install_idmap(void) { + unsigned long flags; + + flags = hard_cond_local_irq_save(); + cpu_set_reserved_ttbr0(); local_flush_tlb_all(); cpu_set_idmap_tcr_t0sz(); cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm); + + hard_cond_local_irq_restore(flags); } /* @@ -230,7 +242,7 @@ } static inline void -switch_mm(struct mm_struct *prev, struct mm_struct *next, +do_switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { if (prev != next) @@ -245,8 +257,26 @@ update_saved_ttbr0(tsk, next); } +static inline void +switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned long flags; + + protect_inband_mm(flags); + do_switch_mm(prev, next, tsk); + unprotect_inband_mm(flags); +} + #define deactivate_mm(tsk,mm) do { } while (0) -#define activate_mm(prev,next) switch_mm(prev, next, current) +#define activate_mm(prev,next) do_switch_mm(prev, next, current) + +static inline void +switch_oob_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + do_switch_mm(prev, next, tsk); +} static inline const struct cpumask * task_cpu_possible_mask(struct task_struct *p) diff --git a/kernel/arch/arm64/include/asm/ptrace.h b/kernel/arch/arm64/include/asm/ptrace.h index f834744..ac73af4 100644 --- a/kernel/arch/arm64/include/asm/ptrace.h +++ b/kernel/arch/arm64/include/asm/ptrace.h @@ -201,7 +201,13 @@ /* Only valid for some EL1 exceptions. 
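The switch_mm() wrapper above is the canonical shape for every mm-switching path touched by this patch: protect_inband_mm()/unprotect_inband_mm() (assumed to come with the Dovetail core via linux/irq_pipeline.h) hard-mask interrupts so the out-of-band stage can never observe a half-updated TTBR0/ASID pair, while activate_mm() and switch_oob_mm() call do_switch_mm() directly, presumably because their callers already run masked. A minimal sketch of the pattern:

/* Illustrative only; do_switch_mm() as in the hunk above. */
static inline void
guarded_switch_mm(struct mm_struct *prev, struct mm_struct *next,
		  struct task_struct *tsk)
{
	unsigned long flags;

	protect_inband_mm(flags);	/* hard-masks IRQs on this CPU */
	do_switch_mm(prev, next, tsk);	/* TTBR0/ASID update now atomic
					 * w.r.t. both pipeline stages */
	unprotect_inband_mm(flags);
}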
*/ u64 lockdep_hardirqs; +#ifdef CONFIG_IRQ_PIPELINE + u64 exit_rcu : 1, + oob_on_entry : 1, + stalled_on_entry : 1; +#else u64 exit_rcu; +#endif }; static inline bool in_syscall(struct pt_regs const *regs) diff --git a/kernel/arch/arm64/include/asm/syscall.h b/kernel/arch/arm64/include/asm/syscall.h index 03e2089..8827217 100644 --- a/kernel/arch/arm64/include/asm/syscall.h +++ b/kernel/arch/arm64/include/asm/syscall.h @@ -73,6 +73,11 @@ memcpy(args, ®s->regs[1], 5 * sizeof(args[0])); } +static inline unsigned long syscall_get_arg0(struct pt_regs *regs) +{ + return regs->orig_x0; +} + static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, const unsigned long *args) diff --git a/kernel/arch/arm64/include/asm/thread_info.h b/kernel/arch/arm64/include/asm/thread_info.h index cdcf307..1499c2c 100644 --- a/kernel/arch/arm64/include/asm/thread_info.h +++ b/kernel/arch/arm64/include/asm/thread_info.h @@ -14,6 +14,7 @@ struct task_struct; +#include <dovetail/thread_info.h> #include <asm/memory.h> #include <asm/stack_pointer.h> #include <asm/types.h> @@ -25,6 +26,7 @@ */ struct thread_info { unsigned long flags; /* low level flags */ + unsigned long local_flags; /* local (synchronous) flags */ mm_segment_t addr_limit; /* address limit */ #ifdef CONFIG_ARM64_SW_TTBR0_PAN u64 ttbr0; /* saved TTBR0_EL1 */ @@ -45,6 +47,7 @@ void *scs_base; void *scs_sp; #endif + struct oob_thread_state oob_state; }; #define thread_saved_pc(tsk) \ @@ -59,6 +62,8 @@ void arch_release_task_struct(struct task_struct *tsk); +#define ti_local_flags(__ti) ((__ti)->local_flags) + #endif #define TIF_SIGPENDING 0 /* signal pending */ @@ -69,11 +74,12 @@ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ #define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */ #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ -#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ +#define TIF_SYSCALL_TRACE 13 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ #define TIF_SECCOMP 11 /* syscall secure computing */ #define TIF_SYSCALL_EMU 12 /* syscall emulation active */ +#define TIF_RETUSER 8 /* INBAND_TASK_RETUSER is pending */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_FREEZE 19 #define TIF_RESTORE_SIGMASK 20 @@ -83,6 +89,7 @@ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ #define TIF_SSBD 25 /* Wants SSB mitigation */ #define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ +#define TIF_MAYDAY 27 /* Emergency trap pending */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) @@ -100,11 +107,13 @@ #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_MAYDAY (1 << TIF_MAYDAY) +#define _TIF_RETUSER (1 << TIF_RETUSER) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT | \ - _TIF_NOTIFY_SIGNAL) + _TIF_NOTIFY_SIGNAL | _TIF_RETUSER) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ @@ -126,4 +135,12 @@ INIT_SCS \ } +/* + * Local (synchronous) thread flags. 
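The TIF renumbering in the thread_info.h hunk above deserves a close look: TIF_SYSCALL_TRACE moves from bit 8 to the free slot 13 so that TIF_RETUSER can take bit 8, and _TIF_WORK_MASK grows _TIF_RETUSER while _TIF_SYSCALL_WORK keeps its five syscall-tracing bits. A stand-alone check of that layout (bit numbers copied from the hunk; compilable with any C11 compiler):

#include <stdio.h>

#define TIF_RETUSER            8   /* took the slot TIF_SYSCALL_TRACE had */
#define TIF_SYSCALL_AUDIT      9
#define TIF_SYSCALL_TRACEPOINT 10
#define TIF_SECCOMP            11
#define TIF_SYSCALL_EMU        12
#define TIF_SYSCALL_TRACE      13  /* relocated from bit 8 */

#define BIT(n) (1UL << (n))

_Static_assert(TIF_RETUSER != TIF_SYSCALL_TRACE, "bit collision");
_Static_assert((BIT(TIF_RETUSER) &
		(BIT(TIF_SYSCALL_TRACE) | BIT(TIF_SYSCALL_AUDIT) |
		 BIT(TIF_SYSCALL_TRACEPOINT) | BIT(TIF_SECCOMP) |
		 BIT(TIF_SYSCALL_EMU))) == 0,
	       "TIF_RETUSER must stay out of _TIF_SYSCALL_WORK");

int main(void)
{
	printf("TIF layout consistent\n");
	return 0;
}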
+ */ +#define _TLF_OOB 0x0001 +#define _TLF_DOVETAIL 0x0002 +#define _TLF_OFFSTAGE 0x0004 +#define _TLF_OOBTRAP 0x0008 + #endif /* __ASM_THREAD_INFO_H */ diff --git a/kernel/arch/arm64/include/asm/uaccess.h b/kernel/arch/arm64/include/asm/uaccess.h index 5d0111a..ceb30f4 100644 --- a/kernel/arch/arm64/include/asm/uaccess.h +++ b/kernel/arch/arm64/include/asm/uaccess.h @@ -113,7 +113,7 @@ { unsigned long flags, ttbr; - local_irq_save(flags); + flags = hard_local_irq_save(); ttbr = read_sysreg(ttbr1_el1); ttbr &= ~TTBR_ASID_MASK; /* reserved_pg_dir placed before swapper_pg_dir */ @@ -122,7 +122,7 @@ /* Set reserved ASID */ write_sysreg(ttbr, ttbr1_el1); isb(); - local_irq_restore(flags); + hard_local_irq_restore(flags); } static inline void __uaccess_ttbr0_enable(void) @@ -134,7 +134,7 @@ * variable and the MSR. A context switch could trigger an ASID * roll-over and an update of 'ttbr0'. */ - local_irq_save(flags); + flags = hard_local_irq_save(); ttbr0 = READ_ONCE(current_thread_info()->ttbr0); /* Restore active ASID */ @@ -147,7 +147,7 @@ /* Restore user page table */ write_sysreg(ttbr0, ttbr0_el1); isb(); - local_irq_restore(flags); + hard_local_irq_restore(flags); } static inline bool uaccess_ttbr0_disable(void) diff --git a/kernel/arch/arm64/include/asm/vdso.h b/kernel/arch/arm64/include/asm/vdso.h index f99dcb9..c63c5ac 100644 --- a/kernel/arch/arm64/include/asm/vdso.h +++ b/kernel/arch/arm64/include/asm/vdso.h @@ -13,6 +13,11 @@ #define VDSO_LBASE 0x0 #define __VVAR_PAGES 2 +#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO +#define __VPRIV_PAGES 1 +#else +#define __VPRIV_PAGES 0 +#endif #ifndef __ASSEMBLY__ diff --git a/kernel/arch/arm64/include/asm/vdso/gettimeofday.h b/kernel/arch/arm64/include/asm/vdso/gettimeofday.h index 4b4c0da..030fe8d 100644 --- a/kernel/arch/arm64/include/asm/vdso/gettimeofday.h +++ b/kernel/arch/arm64/include/asm/vdso/gettimeofday.h @@ -102,6 +102,71 @@ } #endif +#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO + +#include <uapi/linux/fcntl.h> + +extern struct vdso_priv _vdso_priv; /* vdso.lds.S */ + +static __always_inline struct vdso_priv *__arch_get_vdso_priv(void) +{ + return &_vdso_priv; +} + +static __always_inline int clock_open_device(const char *path, int mode) +{ + register int sc asm("w8") = __NR_openat; + register long ret asm("x0"); + register long x0 asm("x0") = AT_FDCWD; + register long x1 asm("x1") = (long)path; + register long x2 asm("x2") = mode; + + asm volatile( + "svc #0\n" + : "=r" (ret) + : "r" (sc), + "r" (x0), "r" (x1), "r" (x2) + : "cc", "memory"); + + return ret; +} + +static __always_inline int clock_ioctl_device(int fd, unsigned int cmd, long arg) +{ + register int sc asm("w8") = __NR_ioctl; + register long ret asm("x0"); + register long x0 asm("x0") = fd; + register long x1 asm("x1") = cmd; + register long x2 asm("x2") = arg; + + asm volatile( + "svc #0\n" + : "=r" (ret) + : "r" (sc), + "r" (x0), "r" (x1), "r" (x2) + : "cc", "memory"); + + return ret; +} + +static __always_inline int clock_close_device(int fd) +{ + register int sc asm("w8") = __NR_close; + register long ret asm("x0"); + register long x0 asm("x0") = fd; + + asm volatile( + "svc #0\n" + : "=r" (ret) + : "r" (sc), + "r" (x0) + : "cc", "memory"); + + return ret; +} + +#endif /* CONFIG_GENERIC_CLOCKSOURCE_VDSO */ + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/kernel/arch/arm64/include/dovetail/irq.h b/kernel/arch/arm64/include/dovetail/irq.h new file mode 120000 index 0000000..86483e7 --- /dev/null +++ b/kernel/arch/arm64/include/dovetail/irq.h @@ -0,0 
+1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h \ No newline at end of file diff --git a/kernel/arch/arm64/include/dovetail/thread_info.h b/kernel/arch/arm64/include/dovetail/thread_info.h new file mode 120000 index 0000000..e932ae3 --- /dev/null +++ b/kernel/arch/arm64/include/dovetail/thread_info.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h \ No newline at end of file diff --git a/kernel/arch/arm64/kernel/Makefile b/kernel/arch/arm64/kernel/Makefile index 64e84b7..6c4fd5b 100644 --- a/kernel/arch/arm64/kernel/Makefile +++ b/kernel/arch/arm64/kernel/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o obj-$(CONFIG_PARAVIRT) += paravirt.o +obj-$(CONFIG_IRQ_PIPELINE) += irq_pipeline.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \ diff --git a/kernel/arch/arm64/kernel/asm-offsets.c b/kernel/arch/arm64/kernel/asm-offsets.c index 93da876..26b54f4 100644 --- a/kernel/arch/arm64/kernel/asm-offsets.c +++ b/kernel/arch/arm64/kernel/asm-offsets.c @@ -29,6 +29,7 @@ DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); BLANK(); DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); + DEFINE(TSK_TI_LOCAL_FLAGS, offsetof(struct task_struct, thread_info.local_flags)); DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit)); #ifdef CONFIG_ARM64_SW_TTBR0_PAN diff --git a/kernel/arch/arm64/kernel/debug-monitors.c b/kernel/arch/arm64/kernel/debug-monitors.c index d7f904c..73382fc 100644 --- a/kernel/arch/arm64/kernel/debug-monitors.c +++ b/kernel/arch/arm64/kernel/debug-monitors.c @@ -232,7 +232,7 @@ return; if (interrupts_enabled(regs)) - local_irq_enable(); + local_irq_enable_full(); arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs), "User debug trap"); diff --git a/kernel/arch/arm64/kernel/entry-common.c b/kernel/arch/arm64/kernel/entry-common.c index 64cfe4a..120cff8 100644 --- a/kernel/arch/arm64/kernel/entry-common.c +++ b/kernel/arch/arm64/kernel/entry-common.c @@ -8,6 +8,7 @@ #include <linux/context_tracking.h> #include <linux/ptrace.h> #include <linux/thread_info.h> +#include <linux/irqstage.h> #include <asm/cpufeature.h> #include <asm/daifflags.h> @@ -21,7 +22,7 @@ * This is intended to match the logic in irqentry_enter(), handling the kernel * mode transitions only. */ -static void noinstr enter_from_kernel_mode(struct pt_regs *regs) +static void noinstr __enter_from_kernel_mode(struct pt_regs *regs) { regs->exit_rcu = false; @@ -41,11 +42,50 @@ mte_check_tfsr_entry(); } +static void noinstr enter_from_kernel_mode(struct pt_regs *regs) +{ +#ifdef CONFIG_IRQ_PIPELINE + /* + * CAUTION: we may switch in-band as a result of handling a + * trap, so if we are running out-of-band, we must make sure + * not to perform the RCU exit since we did not enter it in + * the first place. + */ + regs->oob_on_entry = running_oob(); + if (regs->oob_on_entry) { + regs->exit_rcu = false; + return; + } + + /* + * We trapped from kernel space running in-band, so we need to + * record the virtual interrupt state into the current + * register frame (regs->stalled_on_entry) in order to + * reinstate it from exit_to_kernel_mode().
Next we stall the + * in-band stage in order to mirror the current hardware state + * (i.e. hardirqs are off). + */ + regs->stalled_on_entry = test_and_stall_inband_nocheck(); +#endif + + __enter_from_kernel_mode(regs); + +#ifdef CONFIG_IRQ_PIPELINE + /* + * Our caller is going to inherit the hardware interrupt state + * from the trapped context once we have returned: if running + * in-band, align the stall bit on the upcoming state. + */ + if (running_inband() && interrupts_enabled(regs)) + unstall_inband_nocheck(); +#endif +} + /* * This is intended to match the logic in irqentry_exit(), handling the kernel * mode transitions only, and with preemption handled elsewhere. */ -static void noinstr exit_to_kernel_mode(struct pt_regs *regs) +static void noinstr __exit_to_kernel_mode(struct pt_regs *regs) { lockdep_assert_irqs_disabled(); @@ -67,8 +107,35 @@ } } +/* + * This is intended to match the logic in irqentry_exit(), handling the kernel + * mode transitions only, and with preemption handled elsewhere. + */ +static void noinstr exit_to_kernel_mode(struct pt_regs *regs) +{ + if (running_oob()) + return; + + __exit_to_kernel_mode(regs); + +#ifdef CONFIG_IRQ_PIPELINE + /* + * Reinstate the virtual interrupt state which was in effect + * on entry to the trap. + */ + if (!regs->oob_on_entry) { + if (regs->stalled_on_entry) + stall_inband_nocheck(); + else + unstall_inband_nocheck(); + } +#endif + return; +} + void noinstr arm64_enter_nmi(struct pt_regs *regs) { + /* irq_pipeline: running this code oob is ok. */ regs->lockdep_hardirqs = lockdep_hardirqs_enabled(); __nmi_enter(); @@ -99,18 +166,57 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) { - if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) + /* + * IRQ pipeline: the interrupt entry is special in that we may + * run the lockdep and RCU prologue/epilogue only if the IRQ + * is going to be dispatched to its handler on behalf of the + * current context, i.e. only if running in-band and + * unstalled. If so, we also have to reconcile the hardware + * and virtual interrupt states temporarily in order to run + * such prologue. + */ + if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) { arm64_enter_nmi(regs); - else - enter_from_kernel_mode(regs); + } else { +#ifdef CONFIG_IRQ_PIPELINE + if (running_inband()) { + regs->stalled_on_entry = test_inband_stall(); + if (!regs->stalled_on_entry) { + stall_inband_nocheck(); + __enter_from_kernel_mode(regs); + unstall_inband_nocheck(); + } + } +#else + __enter_from_kernel_mode(regs); +#endif + } } asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) { - if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) + if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) { arm64_exit_nmi(regs); - else - exit_to_kernel_mode(regs); + } else { +#ifdef CONFIG_IRQ_PIPELINE + /* + * See enter_el1_irq_or_nmi() for details. UGLY: we + * also have to tell the tracer that irqs are off, + * since sync_current_irq_stage() did the opposite on + * exit. Hopefully, at some point arm64 will convert + * to the generic entry code which exhibits a less + * convoluted logic. 
+ */ + if (running_inband() && !regs->stalled_on_entry) { + stall_inband_nocheck(); + trace_hardirqs_off(); + __exit_to_kernel_mode(regs); + unstall_inband_nocheck(); + } +#else + __exit_to_kernel_mode(regs); +#endif + } } static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr) @@ -231,20 +337,32 @@ asmlinkage void noinstr enter_from_user_mode(void) { - lockdep_hardirqs_off(CALLER_ADDR0); - CT_WARN_ON(ct_state() != CONTEXT_USER); - user_exit_irqoff(); - trace_hardirqs_off_finish(); + if (running_inband()) { + lockdep_hardirqs_off(CALLER_ADDR0); + WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall()); + CT_WARN_ON(ct_state() != CONTEXT_USER); + stall_inband_nocheck(); + user_exit_irqoff(); + unstall_inband_nocheck(); + trace_hardirqs_off_finish(); + } } asmlinkage void noinstr exit_to_user_mode(void) { - mte_check_tfsr_exit(); + if (running_inband()) { + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(CALLER_ADDR0); + user_enter_irqoff(); + lockdep_hardirqs_on(CALLER_ADDR0); + unstall_inband_nocheck(); + } +} - trace_hardirqs_on_prepare(); - lockdep_hardirqs_on_prepare(CALLER_ADDR0); - user_enter_irqoff(); - lockdep_hardirqs_on(CALLER_ADDR0); +asmlinkage void noinstr enter_el0_irq(void) +{ + if (running_inband() && !test_inband_stall()) + enter_from_user_mode(); } static void noinstr el0_da(struct pt_regs *regs, unsigned long esr) diff --git a/kernel/arch/arm64/kernel/entry.S b/kernel/arch/arm64/kernel/entry.S index 9f19e6b..49a7349 100644 --- a/kernel/arch/arm64/kernel/entry.S +++ b/kernel/arch/arm64/kernel/entry.S @@ -39,6 +39,12 @@ #endif .endm + .macro user_exit_el0_irq +#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS) + bl enter_el0_irq +#endif + .endm + .macro user_enter_irqoff #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS) bl exit_to_user_mode @@ -534,6 +540,21 @@ mov x24, scs_sp // preserve the original shadow stack #endif +#ifdef CONFIG_DOVETAIL + /* + * When the pipeline is enabled, context switches over the irq + * stack are allowed (for the co-kernel), and more interrupts + * can be taken over sibling stack contexts. So we need a not so + * subtle way of figuring out whether the irq stack was actually + * exited, which cannot depend on the current task pointer. + */ + adr_this_cpu x25, irq_nesting, x26 + ldr w26, [x25] + cmp w26, #0 + add w26, w26, #1 + str w26, [x25] + b.ne 9998f +#else /* * Compare sp with the base of the task stack. * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack, @@ -543,6 +564,7 @@ eor x25, x25, x19 and x25, x25, #~(THREAD_SIZE - 1) cbnz x25, 9998f +#endif ldr_this_cpu x25, irq_stack_ptr, x26 mov x26, #IRQ_STACK_SIZE @@ -563,11 +585,18 @@ * The callee-saved regs (x19-x29) should be preserved between * irq_stack_entry and irq_stack_exit, but note that kernel_entry * uses x20-x23 to store data for later use. + * IRQ_PIPELINE: caution, we have to preserve w0. */ .macro irq_stack_exit mov sp, x19 #ifdef CONFIG_SHADOW_CALL_STACK mov scs_sp, x24 +#endif +#ifdef CONFIG_DOVETAIL + adr_this_cpu x1, irq_nesting, x2 + ldr w2, [x1] + add w2, w2, #-1 + str w2, [x1] #endif .endm @@ -578,7 +607,15 @@ * Interrupt handling. 
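The entry.S hunk above replaces the stack-pointer heuristic with a per-CPU irq_nesting counter: with Dovetail, the co-kernel may context-switch while on the IRQ stack, so "does SP point into a task stack?" is no longer a reliable outermost-entry test. The counter discipline, modeled in plain C (single CPU, no SMP; mock of the irq_stack_entry/irq_stack_exit bookkeeping only):

#include <assert.h>
#include <stdio.h>

/* Per-CPU in the kernel; a plain int suffices for a one-CPU model. */
static int irq_nesting;

/* Mirrors irq_stack_entry: switch stacks only on the outermost entry. */
static int irq_enter_needs_stack_switch(void)
{
	return irq_nesting++ == 0;
}

/* Mirrors irq_stack_exit. */
static void irq_exit_model(void)
{
	irq_nesting--;
	assert(irq_nesting >= 0);
}

int main(void)
{
	assert(irq_enter_needs_stack_switch());   /* outermost IRQ: switch */
	assert(!irq_enter_needs_stack_switch());  /* nested: stay put */
	irq_exit_model();
	irq_exit_model();
	assert(irq_nesting == 0);
	printf("nesting model OK\n");
	return 0;
}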
*/ .macro irq_handler, handler:req +#ifdef CONFIG_IRQ_PIPELINE +# .if \handler == handle_arch_irq + ldr x1, =handle_arch_irq_pipelined +# .else +# .error "irq_pipeline: cannot handle interrupt" +# .endif +#else ldr_l x1, \handler +#endif mov x0, sp irq_stack_entry blr x1 @@ -616,6 +653,9 @@ irq_handler \handler +#ifdef CONFIG_IRQ_PIPELINE + cbz w0, 66f // skip epilogue if oob or in-band stalled +#endif #ifdef CONFIG_PREEMPTION ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count alternative_if ARM64_HAS_IRQ_PRIO_MASKING @@ -630,13 +670,13 @@ bl arm64_preempt_schedule_irq // irq en/disable is done inside 1: #endif - +66: mov x0, sp bl exit_el1_irq_or_nmi .endm .macro el0_interrupt_handler, handler:req - user_exit_irqoff + user_exit_el0_irq enable_da_f tbz x22, #55, 1f @@ -815,6 +855,9 @@ kernel_entry 0 el0_irq_naked: el0_interrupt_handler handle_arch_irq +#ifdef CONFIG_IRQ_PIPELINE + cbz w0, fast_ret_from_el0_irq // skip epilogue if oob +#endif b ret_to_user SYM_CODE_END(el0_irq) @@ -846,6 +889,11 @@ SYM_CODE_START_LOCAL(ret_to_user) disable_daif gic_prio_kentry_setup tmp=x3 +#ifdef CONFIG_IRQ_PIPELINE + ldr x0, [tsk, #TSK_TI_LOCAL_FLAGS] + tst x0, #_TLF_OOB + b.ne fast_ret_to_user +#endif #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif @@ -854,12 +902,22 @@ cbnz x2, work_pending finish_ret_to_user: user_enter_irqoff +ret_to_user_naked: enable_step_tsk x19, x2 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK bl stackleak_erase #endif kernel_exit 0 +#ifdef CONFIG_IRQ_PIPELINE +fast_ret_from_el0_irq: + disable_daif + gic_prio_kentry_setup tmp=x3 +fast_ret_to_user: + ldr x19, [tsk, #TSK_TI_FLAGS] + b ret_to_user_naked +#endif + /* * Ok, we need to do extra processing, enter the slow path. */ diff --git a/kernel/arch/arm64/kernel/fpsimd.c b/kernel/arch/arm64/kernel/fpsimd.c index 5335a6b..175353e 100644 --- a/kernel/arch/arm64/kernel/fpsimd.c +++ b/kernel/arch/arm64/kernel/fpsimd.c @@ -169,6 +169,42 @@ WARN_ON(busy); } +static void __put_cpu_fpsimd_context(void) +{ + bool busy = __this_cpu_xchg(fpsimd_context_busy, false); + + WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */ +} + +#ifdef CONFIG_DOVETAIL + +#define get_cpu_fpsimd_context(__flags) \ + do { \ + (__flags) = hard_preempt_disable(); \ + __get_cpu_fpsimd_context(); \ + } while (0) + +#define put_cpu_fpsimd_context(__flags) \ + do { \ + __put_cpu_fpsimd_context(); \ + hard_preempt_enable(__flags); \ + } while (0) + +void fpsimd_restore_current_oob(void) +{ + /* + * Restore the fpsimd context for the current task as it + * resumes from dovetail_context_switch(), which always happens + * on the out-of-band stage. Skip this for kernel threads + * which have no such context but always bear + * TIF_FOREIGN_FPSTATE. + */ + if (current->mm) + fpsimd_restore_current_state(); +} + +#else + /* * Claim ownership of the CPU FPSIMD context for use by the calling context. * @@ -178,19 +214,12 @@ * The double-underscore version must only be called if you know the task * can't be preempted. */ -static void get_cpu_fpsimd_context(void) -{ - local_bh_disable(); - __get_cpu_fpsimd_context(); -} - -static void __put_cpu_fpsimd_context(void) -{ - bool busy = __this_cpu_xchg(fpsimd_context_busy, false); - - WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */ -} - +#define get_cpu_fpsimd_context(__flags) \ + do { \ + preempt_disable(); \ + __get_cpu_fpsimd_context(); \ + (void)(__flags); \ + } while (0) /* * Release the CPU FPSIMD context. * @@ -198,12 +227,14 @@ * previously called, with no call to put_cpu_fpsimd_context() in the * meantime.
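Note the API change in the fpsimd.c hunk above: get_cpu_fpsimd_context()/put_cpu_fpsimd_context() become flags-carrying macros because the Dovetail variant pairs hard_preempt_disable()/hard_preempt_enable(), which must thread the saved hardware interrupt state through the caller; the !DOVETAIL variant keeps plain preempt_disable() and ignores the flags. Every caller converted by this patch follows one shape, sketched here (illustrative only; __fpsimd_save() is the helper introduced later in this file):

static void example_fpsimd_guard(void)
{
	unsigned long flags;

	get_cpu_fpsimd_context(flags);	/* under CONFIG_DOVETAIL: hard-masked
					 * and non-preemptible from here on */
	__fpsimd_save();		/* flush live FPSIMD regs to memory */
	put_cpu_fpsimd_context(flags);	/* restores the exact prior state */
}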
*/ -static void put_cpu_fpsimd_context(void) -{ - __put_cpu_fpsimd_context(); - local_bh_enable(); -} +#define put_cpu_fpsimd_context(__flags) \ + do { \ + __put_cpu_fpsimd_context(); \ + preempt_enable(); \ + (void)(__flags); \ + } while (0) +#endif /* !CONFIG_DOVETAIL */ static bool have_cpu_fpsimd_context(void) { return !preemptible() && __this_cpu_read(fpsimd_context_busy); @@ -283,7 +314,7 @@ static void task_fpsimd_load(void) { WARN_ON(!system_supports_fpsimd()); - WARN_ON(!have_cpu_fpsimd_context()); + WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context()); if (system_supports_sve() && test_thread_flag(TIF_SVE)) sve_load_state(sve_pffr(¤t->thread), @@ -297,14 +328,14 @@ * Ensure FPSIMD/SVE storage in memory for the loaded context is up to * date with respect to the CPU registers. */ -static void fpsimd_save(void) +static void __fpsimd_save(void) { struct fpsimd_last_state_struct const *last = this_cpu_ptr(&fpsimd_last_state); /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ WARN_ON(!system_supports_fpsimd()); - WARN_ON(!have_cpu_fpsimd_context()); + WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context()); if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { if (system_supports_sve() && test_thread_flag(TIF_SVE)) { @@ -324,6 +355,15 @@ } else fpsimd_save_state(last->st); } +} + +void fpsimd_save(void) +{ + unsigned long flags; + + flags = hard_cond_local_irq_save(); + __fpsimd_save(); + hard_cond_local_irq_restore(flags); } /* @@ -444,7 +484,7 @@ * task->thread.uw.fpsimd_state must be up to date before calling this * function. */ -static void fpsimd_to_sve(struct task_struct *task) +static void _fpsimd_to_sve(struct task_struct *task) { unsigned int vq; void *sst = task->thread.sve_state; @@ -455,6 +495,15 @@ vq = sve_vq_from_vl(task->thread.sve_vl); __fpsimd_to_sve(sst, fst, vq); +} + +static void fpsimd_to_sve(struct task_struct *task) +{ + unsigned long flags; + + flags = hard_cond_local_irq_save(); + _fpsimd_to_sve(task); + hard_cond_local_irq_restore(flags); } /* @@ -475,15 +524,20 @@ struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; unsigned int i; __uint128_t const *p; + unsigned long flags; if (!system_supports_sve()) return; + + flags = hard_cond_local_irq_save(); vq = sve_vq_from_vl(task->thread.sve_vl); for (i = 0; i < SVE_NUM_ZREGS; ++i) { p = (__uint128_t const *)ZREG(sst, vq, i); fst->vregs[i] = arm64_le128_to_cpu(*p); } + + hard_cond_local_irq_restore(flags); } #ifdef CONFIG_ARM64_SVE @@ -584,6 +638,8 @@ int sve_set_vector_length(struct task_struct *task, unsigned long vl, unsigned long flags) { + unsigned long irqflags = 0; + if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT | PR_SVE_SET_VL_ONEXEC)) return -EINVAL; @@ -621,9 +677,9 @@ * non-SVE thread. 
*/ if (task == current) { - get_cpu_fpsimd_context(); + get_cpu_fpsimd_context(irqflags); - fpsimd_save(); + __fpsimd_save(); } fpsimd_flush_task_state(task); @@ -631,7 +687,7 @@ sve_to_fpsimd(task); if (task == current) - put_cpu_fpsimd_context(); + put_cpu_fpsimd_context(irqflags); /* * Force reallocation of task SVE state to the correct size @@ -936,17 +992,21 @@ */ void do_sve_acc(unsigned int esr, struct pt_regs *regs) { + unsigned long flags; + + mark_trap_entry(ARM64_TRAP_SVE, regs); + /* Even if we chose not to use SVE, the hardware could still trap: */ if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); - return; + goto out; } sve_alloc(current); - get_cpu_fpsimd_context(); + get_cpu_fpsimd_context(flags); - fpsimd_save(); + __fpsimd_save(); /* Force ret_to_user to reload the registers: */ fpsimd_flush_task_state(current); @@ -955,7 +1015,9 @@ if (test_and_set_thread_flag(TIF_SVE)) WARN_ON(1); /* SVE access shouldn't have trapped */ - put_cpu_fpsimd_context(); + put_cpu_fpsimd_context(flags); +out: + mark_trap_exit(ARM64_TRAP_SVE, regs); } /* @@ -974,6 +1036,9 @@ { unsigned int si_code = FPE_FLTUNK; + if (!mark_cond_trap_entry(ARM64_TRAP_FPE, regs)) + return; + if (esr & ESR_ELx_FP_EXC_TFV) { if (esr & FPEXC_IOF) si_code = FPE_FLTINV; @@ -990,19 +1055,24 @@ send_sig_fault(SIGFPE, si_code, (void __user *)instruction_pointer(regs), current); + + mark_trap_exit(ARM64_TRAP_FPE, regs); } void fpsimd_thread_switch(struct task_struct *next) { bool wrong_task, wrong_cpu; + unsigned long flags; if (!system_supports_fpsimd()) return; + flags = hard_cond_local_irq_save(); + __get_cpu_fpsimd_context(); /* Save unsaved fpsimd state, if any: */ - fpsimd_save(); + __fpsimd_save(); /* * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's @@ -1017,16 +1087,19 @@ wrong_task || wrong_cpu); __put_cpu_fpsimd_context(); + + hard_cond_local_irq_restore(flags); } void fpsimd_flush_thread(void) { int vl, supported_vl; + unsigned long flags; if (!system_supports_fpsimd()) return; - get_cpu_fpsimd_context(); + get_cpu_fpsimd_context(flags); fpsimd_flush_task_state(current); memset(¤t->thread.uw.fpsimd_state, 0, @@ -1067,7 +1140,7 @@ current->thread.sve_vl_onexec = 0; } - put_cpu_fpsimd_context(); + put_cpu_fpsimd_context(flags); } /* @@ -1076,12 +1149,14 @@ */ void fpsimd_preserve_current_state(void) { + unsigned long flags; + if (!system_supports_fpsimd()) return; - get_cpu_fpsimd_context(); - fpsimd_save(); - put_cpu_fpsimd_context(); + get_cpu_fpsimd_context(flags); + __fpsimd_save(); + put_cpu_fpsimd_context(flags); } /* @@ -1123,7 +1198,7 @@ } } -void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, +static void __fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, unsigned int sve_vl) { struct fpsimd_last_state_struct *last = @@ -1137,6 +1212,18 @@ last->sve_vl = sve_vl; } +void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, + unsigned int sve_vl) +{ + unsigned long flags; + + WARN_ON(!in_softirq() && !irqs_disabled()); + + flags = hard_cond_local_irq_save(); + __fpsimd_bind_state_to_cpu(st, sve_state, sve_vl); + hard_cond_local_irq_restore(flags); +} + /* * Load the userland FPSIMD state of 'current' from memory, but only if the * FPSIMD state already held in the registers is /not/ the most recent FPSIMD @@ -1144,6 +1231,8 @@ */ void fpsimd_restore_current_state(void) { + unsigned long flags; + /* * For the tasks that were created before we detected the 
absence of * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(), @@ -1158,14 +1247,14 @@ return; } - get_cpu_fpsimd_context(); + get_cpu_fpsimd_context(flags); if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { task_fpsimd_load(); fpsimd_bind_task_to_cpu(); } - put_cpu_fpsimd_context(); + put_cpu_fpsimd_context(flags); } /* @@ -1175,21 +1264,23 @@ */ void fpsimd_update_current_state(struct user_fpsimd_state const *state) { + unsigned long flags; + if (WARN_ON(!system_supports_fpsimd())) return; - get_cpu_fpsimd_context(); + get_cpu_fpsimd_context(flags); current->thread.uw.fpsimd_state = *state; if (system_supports_sve() && test_thread_flag(TIF_SVE)) - fpsimd_to_sve(current); + _fpsimd_to_sve(current); task_fpsimd_load(); fpsimd_bind_task_to_cpu(); clear_thread_flag(TIF_FOREIGN_FPSTATE); - put_cpu_fpsimd_context(); + put_cpu_fpsimd_context(flags); } /* @@ -1239,9 +1330,9 @@ { if (!system_supports_fpsimd()) return; - WARN_ON(preemptible()); + WARN_ON(!hard_irqs_disabled() && preemptible()); __get_cpu_fpsimd_context(); - fpsimd_save(); + __fpsimd_save(); fpsimd_flush_cpu_state(); __put_cpu_fpsimd_context(); } @@ -1267,18 +1358,23 @@ */ void kernel_neon_begin(void) { + unsigned long flags; + if (WARN_ON(!system_supports_fpsimd())) return; BUG_ON(!may_use_simd()); - get_cpu_fpsimd_context(); + get_cpu_fpsimd_context(flags); /* Save unsaved fpsimd state, if any: */ - fpsimd_save(); + __fpsimd_save(); /* Invalidate any task state remaining in the fpsimd regs: */ fpsimd_flush_cpu_state(); + + if (dovetailing()) + hard_cond_local_irq_restore(flags); } EXPORT_SYMBOL(kernel_neon_begin); @@ -1293,10 +1389,12 @@ */ void kernel_neon_end(void) { + unsigned long flags = hard_local_save_flags(); + if (!system_supports_fpsimd()) return; - put_cpu_fpsimd_context(); + put_cpu_fpsimd_context(flags); } EXPORT_SYMBOL(kernel_neon_end); @@ -1386,9 +1484,13 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd, void *v) { + unsigned long flags; + switch (cmd) { case CPU_PM_ENTER: + flags = hard_cond_local_irq_save(); fpsimd_save_and_flush_cpu_state(); + hard_cond_local_irq_restore(flags); break; case CPU_PM_EXIT: break; diff --git a/kernel/arch/arm64/kernel/irq.c b/kernel/arch/arm64/kernel/irq.c index dfb1fea..e625e14 100644 --- a/kernel/arch/arm64/kernel/irq.c +++ b/kernel/arch/arm64/kernel/irq.c @@ -14,6 +14,7 @@ #include <linux/memory.h> #include <linux/smp.h> #include <linux/hardirq.h> +#include <linux/irq_pipeline.h> #include <linux/init.h> #include <linux/irqchip.h> #include <linux/kprobes.h> @@ -28,6 +29,15 @@ DEFINE_PER_CPU(unsigned long *, irq_stack_ptr); +#ifdef CONFIG_IRQ_PIPELINE + +asmlinkage int notrace +handle_arch_irq_pipelined(struct pt_regs *regs) +{ + return handle_irq_pipelined(regs); +} + +#endif DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr); diff --git a/kernel/arch/arm64/kernel/irq_pipeline.c b/kernel/arch/arm64/kernel/irq_pipeline.c new file mode 100644 index 0000000..cc1b354 --- /dev/null +++ b/kernel/arch/arm64/kernel/irq_pipeline.c @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>. + */ +#include <linux/irq.h> +#include <linux/irq_pipeline.h> + +/* irq_nesting tracks the interrupt nesting level for a CPU. 
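handle_arch_irq_pipelined() above hands every hardware IRQ to the pipeline core, which runs out-of-band handlers immediately (hard IRQs still off) and merely logs in-band IRQs; arch_do_IRQ_pipelined(), defined just below, is the replay hook invoked later once the in-band stage is unstalled. A toy single-CPU model of that two-phase flow (all names mocked; the real decision logic lives in the generic pipeline core, not here):

#include <stdio.h>

/* Toy model: 32 IRQ lines, one of them out-of-band. */
static unsigned int pending_inband;	/* log for deferred replay */
static int inband_stalled = 1;		/* stage stalled: no replay yet */

static void do_oob_irq(int irq)    { printf("oob handler, irq %d\n", irq); }
static void do_inband_irq(int irq) { printf("in-band handler, irq %d\n", irq); }

/* Entry path, hard IRQs off; mirrors the pipelined dispatch. */
static void handle_irq_pipelined_model(int irq, int is_oob)
{
	if (is_oob)
		do_oob_irq(irq);		/* immediate, low latency */
	else
		pending_inband |= 1U << irq;	/* defer to in-band stage */
}

/* Replay; mirrors arch_do_IRQ_pipelined() firing per logged IRQ. */
static void sync_inband_stage(void)
{
	if (inband_stalled)
		return;
	while (pending_inband) {
		int irq = __builtin_ctz(pending_inband);
		pending_inband &= pending_inband - 1;
		do_inband_irq(irq);
	}
}

int main(void)
{
	handle_irq_pipelined_model(3, 1);  /* oob: runs right away */
	handle_irq_pipelined_model(5, 0);  /* in-band: logged only */
	inband_stalled = 0;                /* local_irq_enable() equivalent */
	sync_inband_stage();               /* replays irq 5 */
	return 0;
}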
*/ +DEFINE_PER_CPU(int, irq_nesting); + +void arch_do_IRQ_pipelined(struct irq_desc *desc) +{ + struct pt_regs *regs = raw_cpu_ptr(&irq_pipeline.tick_regs); + unsigned int irq = irq_desc_get_irq(desc); + + __handle_domain_irq(NULL, irq, false, regs); +} + +void __init arch_irq_pipeline_init(void) +{ + /* no per-arch init. */ +} diff --git a/kernel/arch/arm64/kernel/process.c b/kernel/arch/arm64/kernel/process.c index c38a5ab..c2b328f 100644 --- a/kernel/arch/arm64/kernel/process.c +++ b/kernel/arch/arm64/kernel/process.c @@ -125,6 +125,7 @@ * tricks */ cpu_do_idle(); + hard_cond_local_irq_enable(); raw_local_irq_enable(); } @@ -824,8 +825,41 @@ core_initcall(tagged_addr_init); #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ +#ifdef CONFIG_IRQ_PIPELINE + +/* + * When pipelining interrupts, we have to reconcile the hardware and + * the virtual states. Hard irqs are off on entry while the current + * stage has to be unstalled: fix this up by stalling the in-band + * stage on entry, unstalling on exit. + */ +static inline void arm64_preempt_irq_enter(void) +{ + WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall()); + stall_inband(); + trace_hardirqs_off(); +} + +static inline void arm64_preempt_irq_exit(void) +{ + trace_hardirqs_on(); + unstall_inband(); +} + +#else + +static inline void arm64_preempt_irq_enter(void) +{ } + +static inline void arm64_preempt_irq_exit(void) +{ } + +#endif + asmlinkage void __sched arm64_preempt_schedule_irq(void) { + arm64_preempt_irq_enter(); + lockdep_assert_irqs_disabled(); /* @@ -838,6 +872,8 @@ */ if (system_capabilities_finalized()) preempt_schedule_irq(); + + arm64_preempt_irq_exit(); } #ifdef CONFIG_BINFMT_ELF diff --git a/kernel/arch/arm64/kernel/signal.c b/kernel/arch/arm64/kernel/signal.c index b6fbbd5..3bf8eae 100644 --- a/kernel/arch/arm64/kernel/signal.c +++ b/kernel/arch/arm64/kernel/signal.c @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/signal.h> +#include <linux/irq_pipeline.h> #include <linux/personality.h> #include <linux/freezer.h> #include <linux/stddef.h> @@ -914,19 +915,36 @@ restore_saved_sigmask(); } +static inline void do_retuser(void) +{ + unsigned long thread_flags; + + if (dovetailing()) { + thread_flags = current_thread_info()->flags; + if (thread_flags & _TIF_RETUSER) + inband_retuser_notify(); + } +} + asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) { + WARN_ON_ONCE(irq_pipeline_debug() && running_oob()); + + stall_inband(); + do { /* Check valid user FS if needed */ addr_limit_user_check(); if (thread_flags & _TIF_NEED_RESCHED) { /* Unmask Debug and SError for the next task */ - local_daif_restore(DAIF_PROCCTX_NOIRQ); + local_daif_restore(irqs_pipelined() ? DAIF_PROCCTX : + DAIF_PROCCTX_NOIRQ); schedule(); } else { + unstall_inband(); local_daif_restore(DAIF_PROCCTX); if (thread_flags & _TIF_UPROBE) @@ -948,11 +966,29 @@ if (thread_flags & _TIF_FOREIGN_FPSTATE) fpsimd_restore_current_state(); + + do_retuser(); + /* RETUSER might have switched oob */ + if (running_oob()) { + local_daif_mask(); + return; + } } + /* + * Dovetail: we may have restored the fpsimd state for + * current with no other opportunity to check for + * _TIF_FOREIGN_FPSTATE until we are back running on + * el0, so we must not take any interrupt until then, + * otherwise we may end up resuming with some OOB + * thread's fpsimd state. 
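The reworked do_notify_resume() above enforces one invariant: thread flags are always re-read with interrupts masked and the in-band stage stalled, so no work bit can slip in between the final check and kernel exit, and TIF_RETUSER is serviced via inband_retuser_notify() with a recheck for an out-of-band switch. Its control flow reduces to roughly the following model (mock flags, heavily simplified, single CPU):

#include <stdio.h>

#define TIF_NEED_RESCHED	(1 << 0)
#define TIF_SIGPENDING		(1 << 1)
#define TIF_RETUSER		(1 << 2)
#define WORK_MASK		(TIF_NEED_RESCHED | TIF_SIGPENDING | TIF_RETUSER)

static int thread_flags = TIF_SIGPENDING | TIF_RETUSER;

int main(void)
{
	do {
		if (thread_flags & TIF_NEED_RESCHED) {
			puts("schedule()");
			thread_flags &= ~TIF_NEED_RESCHED;
		} else {
			if (thread_flags & TIF_SIGPENDING) {
				puts("do_signal()");
				thread_flags &= ~TIF_SIGPENDING;
			}
			if (thread_flags & TIF_RETUSER) {
				puts("inband_retuser_notify()");
				thread_flags &= ~TIF_RETUSER;
				/* real code: bail out if now running oob */
			}
		}
		/* real code: local_daif_mask() + stall_inband() here, so
		 * re-reading the flags cannot race with an interrupt */
	} while (thread_flags & WORK_MASK);
	puts("kernel_exit to EL0");
	return 0;
}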
+ */ local_daif_mask(); + stall_inband(); thread_flags = READ_ONCE(current_thread_info()->flags); } while (thread_flags & _TIF_WORK_MASK); + + unstall_inband(); } unsigned long __ro_after_init signal_minsigstksz; diff --git a/kernel/arch/arm64/kernel/smp.c b/kernel/arch/arm64/kernel/smp.c index 581defe..ba10c60 100644 --- a/kernel/arch/arm64/kernel/smp.c +++ b/kernel/arch/arm64/kernel/smp.c @@ -86,7 +86,7 @@ NR_IPI }; -static int ipi_irq_base __read_mostly; +int ipi_irq_base __read_mostly; static int nr_ipi __read_mostly = NR_IPI; static struct irq_desc *ipi_desc[NR_IPI] __read_mostly; @@ -273,6 +273,7 @@ complete(&cpu_running); local_daif_restore(DAIF_PROCCTX); + local_irq_enable_full(); /* * OK, it's off to the idle thread for us @@ -811,6 +812,8 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); +static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu); + unsigned long irq_err_count; int arch_show_interrupts(struct seq_file *p, int prec) @@ -822,7 +825,7 @@ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : ""); for_each_online_cpu(cpu) - seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu)); + seq_printf(p, "%10u ", get_ipi_count(irq, cpu)); seq_printf(p, " %s\n", ipi_types[i]); } @@ -888,7 +891,7 @@ atomic_dec(&waiting_for_crash_ipi); - local_irq_disable(); + local_irq_disable_full(); sdei_mask_local_cpu(); if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) @@ -900,7 +903,7 @@ } /* - * Main handler for inter-processor interrupts + * Main handler for inter-processor interrupts on the in-band stage. */ static void do_handle_IPI(int ipinr) { @@ -963,6 +966,73 @@ trace_ipi_exit_rcuidle(ipi_types[ipinr]); } +static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr) +{ + trace_ipi_raise(target, ipi_types[ipinr]); + __ipi_send_mask(ipi_desc[ipinr], target); +} + +#ifdef CONFIG_IRQ_PIPELINE + +static DEFINE_PER_CPU(unsigned long, ipi_messages); + +static DEFINE_PER_CPU(unsigned int [NR_IPI], ipi_counts); + +static irqreturn_t ipi_handler(int irq, void *data) +{ + unsigned long *pmsg; + unsigned int ipinr; + + /* + * Decode in-band IPIs (0..NR_IPI - 1) multiplexed over + * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own + * individual handler. + */ + pmsg = raw_cpu_ptr(&ipi_messages); + while (*pmsg) { + ipinr = ffs(*pmsg) - 1; + clear_bit(ipinr, pmsg); + __this_cpu_inc(ipi_counts[ipinr]); + do_handle_IPI(ipinr); + } + + return IRQ_HANDLED; +} + +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) +{ + unsigned int cpu; + + /* regular in-band IPI (multiplexed over SGI0). */ + for_each_cpu(cpu, target) + set_bit(ipinr, &per_cpu(ipi_messages, cpu)); + + wmb(); + __smp_cross_call(target, 0); +} + +static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu) +{ + return per_cpu(ipi_counts[irq - ipi_irq_base], cpu); +} + +void irq_send_oob_ipi(unsigned int irq, + const struct cpumask *cpumask) +{ + unsigned int sgi = irq - ipi_irq_base; + + if (WARN_ON(irq_pipeline_debug() && + (sgi < OOB_IPI_OFFSET || + sgi >= OOB_IPI_OFFSET + OOB_NR_IPI))) + return; + + /* Out-of-band IPI (SGI1-2). 
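Since the in-band stage traps only SGI0, every in-band IPI type becomes a bit in a per-CPU ipi_messages word: the sender sets the bit and kicks SGI0, the receiver drains the word with ffs(), as the smp.c hunk above shows. The same mailbox protocol in stand-alone form (single CPU, no atomics; the kernel uses set_bit()/clear_bit() plus a write barrier and per-CPU storage):

#include <assert.h>
#include <stdio.h>

/* One mailbox word per CPU in the kernel; one word here. */
static unsigned long ipi_messages;

/* Sender side, mirrors smp_cross_call(): post message, kick SGI0. */
static void send_ipi(int ipinr)
{
	ipi_messages |= 1UL << ipinr;	/* set_bit() + wmb() in the kernel */
	/* __smp_cross_call(target, 0) would fire the hardware SGI here. */
}

/* Receiver side, mirrors ipi_handler() decoding multiplexed messages. */
static int decode_ipis(void)
{
	int handled = 0;

	while (ipi_messages) {
		int ipinr = __builtin_ffsl((long)ipi_messages) - 1; /* ffs() */
		ipi_messages &= ~(1UL << ipinr);	/* clear_bit() */
		printf("do_handle_IPI(%d)\n", ipinr);
		handled++;
	}
	return handled;
}

int main(void)
{
	send_ipi(0);	/* e.g. a reschedule request */
	send_ipi(3);	/* e.g. a cross-call */
	assert(decode_ipis() == 2);
	assert(ipi_messages == 0);
	return 0;
}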
*/ + __smp_cross_call(cpumask, sgi); +} +EXPORT_SYMBOL_GPL(irq_send_oob_ipi); + +#else + static irqreturn_t ipi_handler(int irq, void *data) { do_handle_IPI(irq - ipi_irq_base); @@ -971,9 +1041,15 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) { - trace_ipi_raise(target, ipi_types[ipinr]); - __ipi_send_mask(ipi_desc[ipinr], target); + __smp_cross_call(target, ipinr); } + +static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu) +{ + return kstat_irqs_cpu(irq, cpu); +} + +#endif /* CONFIG_IRQ_PIPELINE */ static void ipi_setup(int cpu) { @@ -1001,18 +1077,25 @@ void __init set_smp_ipi_range(int ipi_base, int n) { - int i; + int i, inband_nr_ipi; WARN_ON(n < NR_IPI); nr_ipi = min(n, NR_IPI); + /* + * irq_pipeline: the in-band stage traps SGI0 only, + * over which IPI messages are multiplexed. Other SGIs + * are available for exchanging out-of-band IPIs. + */ + inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi; for (i = 0; i < nr_ipi; i++) { - int err; + if (i < inband_nr_ipi) { + int err; - err = request_percpu_irq(ipi_base + i, ipi_handler, - "IPI", &cpu_number); - WARN_ON(err); - + err = request_percpu_irq(ipi_base + i, ipi_handler, + "IPI", &cpu_number); + WARN_ON(err); + } ipi_desc[i] = irq_to_desc(ipi_base + i); irq_set_status_flags(ipi_base + i, IRQ_HIDDEN); diff --git a/kernel/arch/arm64/kernel/syscall.c b/kernel/arch/arm64/kernel/syscall.c index 6f9839d..24ab737 100644 --- a/kernel/arch/arm64/kernel/syscall.c +++ b/kernel/arch/arm64/kernel/syscall.c @@ -2,6 +2,7 @@ #include <linux/compiler.h> #include <linux/context_tracking.h> +#include <linux/irqstage.h> #include <linux/errno.h> #include <linux/nospec.h> #include <linux/ptrace.h> @@ -94,6 +95,7 @@ const syscall_fn_t syscall_table[]) { unsigned long flags = current_thread_info()->flags; + int ret; regs->orig_x0 = regs->regs[0]; regs->syscallno = scno; @@ -117,9 +119,18 @@ */ cortex_a76_erratum_1463225_svc_handler(); + WARN_ON_ONCE(dovetail_debug() && + running_inband() && test_inband_stall()); local_daif_restore(DAIF_PROCCTX); - if (flags & _TIF_MTE_ASYNC_FAULT) { + ret = pipeline_syscall(scno, regs); + if (ret > 0) + return; + + if (ret < 0) + goto tail_work; + + if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) { /* * Process the asynchronous tag check fault before the actual * syscall. do_notify_resume() will send a signal to userspace @@ -159,11 +170,16 @@ * check again. However, if we were tracing entry, then we always trace * exit regardless, as the old entry assembly did. */ +tail_work: if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) { local_daif_mask(); + stall_inband(); flags = current_thread_info()->flags; - if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) + if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) { + unstall_inband(); return; + } + unstall_inband(); local_daif_restore(DAIF_PROCCTX); } diff --git a/kernel/arch/arm64/kernel/traps.c b/kernel/arch/arm64/kernel/traps.c index 49b4b7b..a670df1 100644 --- a/kernel/arch/arm64/kernel/traps.c +++ b/kernel/arch/arm64/kernel/traps.c @@ -15,6 +15,7 @@ #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/hardirq.h> +#include <linux/irqstage.h> #include <linux/kdebug.h> #include <linux/module.h> #include <linux/kexec.h> @@ -117,7 +118,7 @@ return ret; } -static DEFINE_RAW_SPINLOCK(die_lock); +static DEFINE_HARD_SPINLOCK(die_lock); /* * This function is protected against re-entrancy.
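The pipeline_syscall() return convention used in the syscall.c hunk above is three-valued: positive means the companion core fully handled the call out-of-band (fast return, no tracing), negative means handled but in-band tail work (signals, tracing) is still due, and zero means a regular Linux syscall. A compact model of that dispatch (stand-alone; the 0x10000000 range marker is purely hypothetical):

#include <stdio.h>

/* Mock of pipeline_syscall(): >0 handled oob, <0 handled with tail
 * work pending, ==0 not a co-kernel syscall. */
static int pipeline_syscall_model(int scno)
{
	if (scno >= 0x10000000)		/* hypothetical co-kernel range */
		return (scno & 1) ? -1 : 1;
	return 0;
}

static void el0_svc_common_model(int scno)
{
	int ret = pipeline_syscall_model(scno);

	if (ret > 0) {
		puts("fast oob return");
		return;
	}
	if (ret < 0)
		goto tail_work;

	printf("invoke_syscall(%d)\n", scno);
tail_work:
	puts("check thread flags / tracing");
}

int main(void)
{
	el0_svc_common_model(64);		/* plain Linux syscall path */
	el0_svc_common_model(0x10000000);	/* oob fast path */
	el0_svc_common_model(0x10000001);	/* oob + tail work */
	return 0;
}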
@@ -292,7 +293,7 @@ } static LIST_HEAD(undef_hook); -static DEFINE_RAW_SPINLOCK(undef_lock); +static DEFINE_HARD_SPINLOCK(undef_lock); void register_undef_hook(struct undef_hook *hook) { @@ -406,6 +407,13 @@ void do_undefinstr(struct pt_regs *regs) { + /* + * If the companion core did not switch us to in-band + * context, we may assume that it has handled the trap. + */ + if (running_oob()) + return; + /* check for AArch32 breakpoint instructions */ if (!aarch32_break_handler(regs)) return; @@ -415,14 +423,18 @@ trace_android_rvh_do_undefinstr(regs, user_mode(regs)); BUG_ON(!user_mode(regs)); + mark_trap_entry(ARM64_TRAP_UNDI, regs); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); + mark_trap_exit(ARM64_TRAP_UNDI, regs); } NOKPROBE_SYMBOL(do_undefinstr); void do_bti(struct pt_regs *regs) { BUG_ON(!user_mode(regs)); + mark_trap_entry(ARM64_TRAP_BTI, regs); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); + mark_trap_exit(ARM64_TRAP_BTI, regs); } NOKPROBE_SYMBOL(do_bti); @@ -492,9 +504,11 @@ return; } - if (ret) - arm64_notify_segfault(tagged_address); - else + if (ret) { + mark_trap_entry(ARM64_TRAP_ACCESS, regs); + arm64_notify_segfault(address); + mark_trap_exit(ARM64_TRAP_ACCESS, regs); + } else arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } @@ -540,8 +554,11 @@ rt = ESR_ELx_SYS64_ISS_RT(esr); sysreg = esr_sys64_to_sysreg(esr); - if (do_emulate_mrs(regs, sysreg, rt) != 0) + if (do_emulate_mrs(regs, sysreg, rt) != 0) { + mark_trap_entry(ARM64_TRAP_ACCESS, regs); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); + mark_trap_exit(ARM64_TRAP_ACCESS, regs); + } } static void wfi_handler(unsigned int esr, struct pt_regs *regs) @@ -768,6 +785,11 @@ */ asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr) { + /* + * Dovetail: Same as __do_kernel_fault(), don't bother + * restoring the in-band stage, this trap is fatal and we are + * already walking on thin ice.
+ */ arm64_enter_nmi(regs); console_verbose(); @@ -790,11 +812,13 @@ { unsigned long pc = instruction_pointer(regs); + mark_trap_entry(ARM64_TRAP_ACCESS, regs); current->thread.fault_address = 0; current->thread.fault_code = esr; arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc, "Bad EL0 synchronous exception"); + mark_trap_exit(ARM64_TRAP_ACCESS, regs); } #ifdef CONFIG_VMAP_STACK diff --git a/kernel/arch/arm64/kernel/vdso.c b/kernel/arch/arm64/kernel/vdso.c index debb899..9ddd257 100644 --- a/kernel/arch/arm64/kernel/vdso.c +++ b/kernel/arch/arm64/kernel/vdso.c @@ -43,6 +43,8 @@ VVAR_NR_PAGES, }; +#define VPRIV_NR_PAGES __VPRIV_PAGES + struct vdso_abi_info { const char *name; const char *vdso_code_start; @@ -123,6 +125,9 @@ vdso_pagelist[i] = pfn_to_page(pfn + i); vdso_info[abi].cm->pages = vdso_pagelist; +#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO + vdso_data->cs_type_seq = CLOCKSOURCE_VDSO_NONE << 16 | 1; +#endif return 0; } @@ -243,7 +248,8 @@ vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT; /* Be sure to map the data page */ - vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE; + vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE + + VPRIV_NR_PAGES * PAGE_SIZE; vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); if (IS_ERR_VALUE(vdso_base)) { @@ -251,6 +257,26 @@ goto up_fail; } + /* + * Install the vDSO mappings we need: + * + * +----------------+ + * | vpriv | PAGE_SIZE (private anon page if GENERIC_CLOCKSOURCE_VDSO) + * |----------------| + * | vvar | PAGE_SIZE (shared) + * |----------------| + * | text | text_pages * PAGE_SIZE (shared) + * | ... | + * +----------------+ + */ + if (VPRIV_NR_PAGES > 0 && mmap_region(NULL, vdso_base, PAGE_SIZE, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0, NULL) != vdso_base) { + ret = ERR_PTR(-EINVAL); + goto up_fail; + } + + vdso_base += VPRIV_NR_PAGES * PAGE_SIZE; /* Skip private area. */ ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE, VM_READ|VM_MAYREAD|VM_PFNMAP, vdso_info[abi].dm); diff --git a/kernel/arch/arm64/kernel/vdso/vdso.lds.S b/kernel/arch/arm64/kernel/vdso/vdso.lds.S index b840ab1..93ff9fa 100644 --- a/kernel/arch/arm64/kernel/vdso/vdso.lds.S +++ b/kernel/arch/arm64/kernel/vdso/vdso.lds.S @@ -21,6 +21,9 @@ #ifdef CONFIG_TIME_NS PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); #endif +#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO + PROVIDE(_vdso_priv = _vdso_data - __VPRIV_PAGES * PAGE_SIZE); +#endif . = VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/kernel/arch/arm64/kernel/vdso32/vdso.lds.S b/kernel/arch/arm64/kernel/vdso32/vdso.lds.S index 3348ce5..63354d1 100644 --- a/kernel/arch/arm64/kernel/vdso32/vdso.lds.S +++ b/kernel/arch/arm64/kernel/vdso32/vdso.lds.S @@ -21,6 +21,9 @@ #ifdef CONFIG_TIME_NS PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE); #endif +#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO + PROVIDE_HIDDEN(_vdso_priv = _vdso_data - __VPRIV_PAGES * PAGE_SIZE); +#endif . 
= VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/kernel/arch/arm64/mm/context.c b/kernel/arch/arm64/mm/context.c index 001737a..336aca9 100644 --- a/kernel/arch/arm64/mm/context.c +++ b/kernel/arch/arm64/mm/context.c @@ -18,7 +18,7 @@ #include <asm/tlbflush.h> static u32 asid_bits; -static DEFINE_RAW_SPINLOCK(cpu_asid_lock); +static DEFINE_HARD_SPINLOCK(cpu_asid_lock); static atomic64_t asid_generation; static unsigned long *asid_map; @@ -217,6 +217,9 @@ unsigned long flags; unsigned int cpu; u64 asid, old_active_asid; + bool need_flush; + + WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled()); if (system_supports_cnp()) cpu_set_reserved_ttbr0(); @@ -252,12 +255,14 @@ } cpu = smp_processor_id(); - if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) - local_flush_tlb_all(); + need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending); atomic64_set(this_cpu_ptr(&active_asids), asid); raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + if (need_flush) + local_flush_tlb_all(); + switch_mm_fastpath: arm64_apply_bp_hardening(); diff --git a/kernel/arch/arm64/mm/fault.c b/kernel/arch/arm64/mm/fault.c index 45e652d..d755cbb 100644 --- a/kernel/arch/arm64/mm/fault.c +++ b/kernel/arch/arm64/mm/fault.c @@ -264,11 +264,11 @@ (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT) return false; - local_irq_save(flags); + flags = hard_local_irq_save(); asm volatile("at s1e1r, %0" :: "r" (addr)); isb(); par = read_sysreg_par(); - local_irq_restore(flags); + hard_local_irq_restore(flags); /* * If we now have a valid translation, treat the translation fault as @@ -399,6 +399,12 @@ msg = "paging request"; } + /* + * Dovetail: Don't bother restoring the in-band stage in the + * non-recoverable fault case, we got busted and a full stage + * switch is likely to make things even worse. Try at least to + * get some debug output before panicking. + */ die_kernel_fault(msg, addr, esr, regs); } @@ -471,8 +477,10 @@ if (user_mode(regs)) { const struct fault_info *inf = esr_to_fault_info(esr); + mark_trap_entry(ARM64_TRAP_ACCESS, regs); set_thread_esr(addr, esr); arm64_force_sig_fault(inf->sig, inf->code, far, inf->name); + mark_trap_exit(ARM64_TRAP_ACCESS, regs); } else { __do_kernel_fault(addr, esr, regs); } @@ -536,6 +544,8 @@ if (kprobe_page_fault(regs, esr)) return 0; + + mark_trap_entry(ARM64_TRAP_ACCESS, regs); /* * If we're in an interrupt or have no user context, we must not take @@ -612,7 +622,7 @@ if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) goto no_context; - return 0; + goto out; } if (fault & VM_FAULT_RETRY) { @@ -637,7 +647,7 @@ */ if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS)))) - return 0; + goto out; /* * If we are in kernel mode at this point, we have no context to @@ -653,7 +663,7 @@ * oom-killed). */ pagefault_out_of_memory(); - return 0; + goto out; } inf = esr_to_fault_info(esr); @@ -682,10 +692,12 @@ far, inf->name); } - return 0; + goto out; no_context: __do_kernel_fault(addr, esr, regs); +out: + mark_trap_exit(ARM64_TRAP_ACCESS, regs); return 0; } @@ -731,6 +743,8 @@ const struct fault_info *inf; unsigned long siaddr; + mark_trap_entry(ARM64_TRAP_SEA, regs); + inf = esr_to_fault_info(esr); if (user_mode(regs) && apei_claim_sea(regs) == 0) { /* * APEI claimed this as a firmware-first notification. * Some processing deferred to task_work before ret_to_user().
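One subtlety in the mm/context.c hunk above: cpu_asid_lock becoming a hard spinlock means time spent under it now also delays out-of-band interrupts, so the expensive local_flush_tlb_all() is latched into need_flush inside the critical section and paid after the unlock. The before/after shape of that change, with mock locking (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool tlb_flush_pending = true;

static void hard_lock(void)   { puts("hard spin_lock: oob IRQs blocked"); }
static void hard_unlock(void) { puts("hard spin_unlock: oob IRQs flow"); }
static void flush_tlb(void)   { puts("local_flush_tlb_all (slow)"); }

static void check_and_switch_context_model(void)
{
	bool need_flush;

	hard_lock();
	need_flush = tlb_flush_pending;	/* latch inside the lock... */
	tlb_flush_pending = false;
	hard_unlock();

	if (need_flush)
		flush_tlb();		/* ...pay for it outside */
}

int main(void)
{
	check_and_switch_context_model();
	return 0;
}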
*/ - return 0; + goto out; } if (esr & ESR_ELx_FnV) { @@ -753,6 +767,8 @@ } trace_android_rvh_do_sea(regs, esr, siaddr, inf->name); arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); +out: + mark_trap_exit(ARM64_TRAP_SEA, regs); return 0; } @@ -845,6 +861,8 @@ if (!inf->fn(far, esr, regs)) return; + mark_trap_entry(ARM64_TRAP_ACCESS, regs); + if (!user_mode(regs)) { pr_alert("Unhandled fault at 0x%016lx\n", addr); trace_android_rvh_do_mem_abort(regs, esr, addr, inf->name); @@ -858,6 +876,7 @@ * address to the signal handler. */ arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr); + mark_trap_exit(ARM64_TRAP_ACCESS, regs); } NOKPROBE_SYMBOL(do_mem_abort); @@ -871,9 +890,12 @@ void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { trace_android_rvh_do_sp_pc_abort(regs, esr, addr, user_mode(regs)); + mark_trap_entry(ARM64_TRAP_ALIGN, regs); arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, addr, esr); + + mark_trap_exit(ARM64_TRAP_ALIGN, regs); } NOKPROBE_SYMBOL(do_sp_pc_abort); @@ -968,6 +990,8 @@ if (cortex_a76_erratum_1463225_debug_handler(regs)) return; + mark_trap_entry(ARM64_TRAP_DEBUG, regs); + debug_exception_enter(regs); if (user_mode(regs) && !is_ttbr0_addr(pc)) @@ -978,6 +1002,8 @@ } debug_exception_exit(regs); + + mark_trap_exit(ARM64_TRAP_DEBUG, regs); } NOKPROBE_SYMBOL(do_debug_exception); diff --git a/kernel/arch/arm64/xenomai/Kconfig b/kernel/arch/arm64/xenomai/Kconfig new file mode 120000 index 0000000..883810c --- /dev/null +++ b/kernel/arch/arm64/xenomai/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/Makefile b/kernel/arch/arm64/xenomai/dovetail/Makefile new file mode 120000 index 0000000..65b9c47 --- /dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/calibration.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/calibration.h new file mode 120000 index 0000000..a78ce4e --- /dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/calibration.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/features.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/features.h new file mode 120000 index 0000000..4aa781f --- /dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/features.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/fptest.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/fptest.h new file mode 120000 index 0000000..5f8f2ba --- /dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/fptest.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/machine.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/machine.h new file mode 120000 index 0000000..5357527 --- 
/dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/machine.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall.h new file mode 120000 index 0000000..74725b2 --- /dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall32.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall32.h new file mode 120000 index 0000000..f5d0dcb --- /dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall32.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/thread.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/thread.h new file mode 120000 index 0000000..c75bd9b --- /dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/thread.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/wrappers.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/wrappers.h new file mode 120000 index 0000000..1cc6d3f --- /dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/wrappers.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/dovetail/machine.c b/kernel/arch/arm64/xenomai/dovetail/machine.c new file mode 120000 index 0000000..9f2c965 --- /dev/null +++ b/kernel/arch/arm64/xenomai/dovetail/machine.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/arith.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/arith.h new file mode 120000 index 0000000..2f40f5f --- /dev/null +++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/arith.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/features.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/features.h new file mode 120000 index 0000000..aa1899e --- /dev/null +++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/features.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/fptest.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/fptest.h new file mode 120000 index 0000000..e11fa39 --- /dev/null +++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/fptest.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h \ No newline at end of file diff 
--git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/syscall.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/syscall.h new file mode 120000 index 0000000..55c4265 --- /dev/null +++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/syscall.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/tsc.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/tsc.h new file mode 120000 index 0000000..39f44c7 --- /dev/null +++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/tsc.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/Makefile b/kernel/arch/arm64/xenomai/ipipe/Makefile new file mode 120000 index 0000000..2591050 --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/calibration.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/calibration.h new file mode 120000 index 0000000..d7db202 --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/calibration.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/features.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/features.h new file mode 120000 index 0000000..f465a22 --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/features.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/fptest.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/fptest.h new file mode 120000 index 0000000..26e47be --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/fptest.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/machine.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/machine.h new file mode 120000 index 0000000..e7ecef4 --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/machine.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall.h new file mode 120000 index 0000000..ac8ecae --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall32.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall32.h new file mode 120000 index 0000000..b78d657 --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall32.h @@ -0,0 +1 @@ 
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/thread.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/thread.h new file mode 120000 index 0000000..352ebb9 --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/thread.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/wrappers.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/wrappers.h new file mode 120000 index 0000000..51a1bb5 --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/wrappers.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/machine.c b/kernel/arch/arm64/xenomai/ipipe/machine.c new file mode 120000 index 0000000..64a9700 --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/machine.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/syscall.c b/kernel/arch/arm64/xenomai/ipipe/syscall.c new file mode 120000 index 0000000..6ce0ebb --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/syscall.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c \ No newline at end of file diff --git a/kernel/arch/arm64/xenomai/ipipe/thread.c b/kernel/arch/arm64/xenomai/ipipe/thread.c new file mode 120000 index 0000000..5dc5094 --- /dev/null +++ b/kernel/arch/arm64/xenomai/ipipe/thread.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c \ No newline at end of file diff --git a/kernel/arch/x86/Kconfig b/kernel/arch/x86/Kconfig index 32536ff..3f5a5ad 100644 --- a/kernel/arch/x86/Kconfig +++ b/kernel/arch/x86/Kconfig @@ -29,6 +29,8 @@ select ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT select ARCH_USE_CMPXCHG_LOCKREF select HAVE_ARCH_SOFT_DIRTY + select HAVE_IRQ_PIPELINE + select HAVE_DOVETAIL select MODULES_USE_ELF_RELA select NEED_DMA_MAP_STATE select SWIOTLB @@ -208,6 +210,7 @@ select HAVE_MOVE_PMD select HAVE_MOVE_PUD select HAVE_NMI + select HAVE_PERCPU_PREEMPT_COUNT select HAVE_OPROFILE select HAVE_OPTPROBES select HAVE_PCSPKR_PLATFORM @@ -864,6 +867,7 @@ endif #HYPERVISOR_GUEST +source "kernel/Kconfig.dovetail" source "arch/x86/Kconfig.cpu" config HPET_TIMER diff --git a/kernel/arch/x86/entry/common.c b/kernel/arch/x86/entry/common.c index 93a3122..9fdc77a 100644 --- a/kernel/arch/x86/entry/common.c +++ b/kernel/arch/x86/entry/common.c @@ -40,6 +40,15 @@ { nr = syscall_enter_from_user_mode(regs, nr); + if (dovetailing()) { + if (nr == EXIT_SYSCALL_OOB) { + hard_local_irq_disable(); + return; + } + if (nr == EXIT_SYSCALL_TAIL) + goto done; + } + instrumentation_begin(); if (likely(nr < NR_syscalls)) { nr = array_index_nospec(nr, NR_syscalls); @@ -53,6 +62,7 @@ #endif } instrumentation_end(); +done: syscall_exit_to_user_mode(regs); } #endif @@ -89,11 +99,22 @@ * or may not be necessary, but it matches the old asm behavior. 
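 *
 * The Dovetail branch just below follows the same pattern in all
 * three syscall entry flavors of this file (a summary drawn from the
 * hunks themselves): syscall_enter_from_user_mode() may hand back
 * EXIT_SYSCALL_OOB when a co-kernel already handled the syscall from
 * the out-of-band stage, in which case we leave with hard irqs off,
 * or EXIT_SYSCALL_TAIL when only the exit work remains:
 *
 *	if (nr == EXIT_SYSCALL_OOB) {
 *		hard_local_irq_disable();
 *		return;
 *	}
 *	if (nr == EXIT_SYSCALL_TAIL)
 *		goto done;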
*/ nr = (unsigned int)syscall_enter_from_user_mode(regs, nr); + + if (dovetailing()) { + if (nr == EXIT_SYSCALL_OOB) { + hard_local_irq_disable(); + return; + } + if (nr == EXIT_SYSCALL_TAIL) + goto done; + } + instrumentation_begin(); do_syscall_32_irqs_on(regs, nr); instrumentation_end(); +done: syscall_exit_to_user_mode(regs); } @@ -136,9 +157,20 @@ /* The case truncates any ptrace induced syscall nr > 2^32 -1 */ nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr); + if (dovetailing()) { + if (nr == EXIT_SYSCALL_OOB) { + instrumentation_end(); + hard_local_irq_disable(); + return true; + } + if (nr == EXIT_SYSCALL_TAIL) + goto done; + } + /* Now this is just like a normal syscall. */ do_syscall_32_irqs_on(regs, nr); +done: instrumentation_end(); syscall_exit_to_user_mode(regs); return true; diff --git a/kernel/arch/x86/entry/entry_64.S b/kernel/arch/x86/entry/entry_64.S index 559c82b..1d07a15 100644 --- a/kernel/arch/x86/entry/entry_64.S +++ b/kernel/arch/x86/entry/entry_64.S @@ -417,6 +417,11 @@ * If hits in kernel mode then it needs to go through the paranoid * entry as the exception can hit any random state. No preemption * check on exit to keep the paranoid path simple. + * + * irq_pipeline: since those events are non-maskable in essence, + * we may assume NMI-type restrictions for their handlers, which + * means the latter may - and actually have to - run immediately + * regardless of the current stage. */ .macro idtentry_mce_db vector asmsym cfunc SYM_CODE_START(\asmsym) diff --git a/kernel/arch/x86/hyperv/hv_init.c b/kernel/arch/x86/hyperv/hv_init.c index 01860c0..fd30d6c 100644 --- a/kernel/arch/x86/hyperv/hv_init.c +++ b/kernel/arch/x86/hyperv/hv_init.c @@ -156,7 +156,8 @@ ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT; } -DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_REENLIGHTENMENT_VECTOR, + sysvec_hyperv_reenlightenment) { ack_APIC_irq(); inc_irq_stat(irq_hv_reenlightenment_count); diff --git a/kernel/arch/x86/include/asm/apic.h b/kernel/arch/x86/include/asm/apic.h index 3b4412c..e104d0d 100644 --- a/kernel/arch/x86/include/asm/apic.h +++ b/kernel/arch/x86/include/asm/apic.h @@ -437,7 +437,7 @@ extern void apic_ack_irq(struct irq_data *data); -static inline void ack_APIC_irq(void) +static inline void __ack_APIC_irq(void) { /* * ack_APIC_irq() actually gets compiled as a single instruction @@ -446,6 +446,11 @@ apic_eoi(); } +static inline void ack_APIC_irq(void) +{ + if (!irqs_pipelined()) + __ack_APIC_irq(); +} static inline bool lapic_vector_set_in_irr(unsigned int vector) { diff --git a/kernel/arch/x86/include/asm/dovetail.h b/kernel/arch/x86/include/asm/dovetail.h new file mode 100644 index 0000000..940726f --- /dev/null +++ b/kernel/arch/x86/include/asm/dovetail.h @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum. 
+ */ +#ifndef _ASM_X86_DOVETAIL_H +#define _ASM_X86_DOVETAIL_H + +#if !defined(__ASSEMBLY__) && defined(CONFIG_DOVETAIL) + +#include <asm/fpu/api.h> +#include <asm/io_bitmap.h> + +static inline void arch_dovetail_exec_prepare(void) +{ + clear_thread_flag(TIF_NEED_FPU_LOAD); +} + +static inline +void arch_dovetail_switch_prepare(bool leave_inband) +{ + if (leave_inband) + fpu__suspend_inband(); +} + +static inline +void arch_dovetail_switch_finish(bool enter_inband) +{ + unsigned int ti_work = READ_ONCE(current_thread_info()->flags); + + if (unlikely(ti_work & _TIF_IO_BITMAP)) + tss_update_io_bitmap(); + + if (enter_inband) { + fpu__resume_inband(); + } else { + if (unlikely(ti_work & _TIF_NEED_FPU_LOAD && + !(current->flags & PF_KTHREAD))) + switch_fpu_return(); + } +} + +#endif + +#endif /* _ASM_X86_DOVETAIL_H */ diff --git a/kernel/arch/x86/include/asm/fpu/api.h b/kernel/arch/x86/include/asm/fpu/api.h index 8b9bfaa..5ef1216 100644 --- a/kernel/arch/x86/include/asm/fpu/api.h +++ b/kernel/arch/x86/include/asm/fpu/api.h @@ -41,16 +41,25 @@ * fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in * a random state. */ -static inline void fpregs_lock(void) +static inline unsigned long fpregs_lock(void) { - preempt_disable(); - local_bh_disable(); + if (IS_ENABLED(CONFIG_IRQ_PIPELINE)) { + return hard_preempt_disable(); + } else { + preempt_disable(); + local_bh_disable(); + return 0; + } } -static inline void fpregs_unlock(void) +static inline void fpregs_unlock(unsigned long flags) { - local_bh_enable(); - preempt_enable(); + if (IS_ENABLED(CONFIG_IRQ_PIPELINE)) { + hard_preempt_enable(flags); + } else { + local_bh_enable(); + preempt_enable(); + } } #ifdef CONFIG_X86_DEBUG_FPU @@ -64,6 +73,10 @@ */ extern void switch_fpu_return(void); +/* For Dovetail context switching. */ +void fpu__suspend_inband(void); +void fpu__resume_inband(void); + /* * Query the presence of one or more xfeatures. Works on any legacy CPU as well. * diff --git a/kernel/arch/x86/include/asm/fpu/internal.h b/kernel/arch/x86/include/asm/fpu/internal.h index 70b9bc5..11d31cf 100644 --- a/kernel/arch/x86/include/asm/fpu/internal.h +++ b/kernel/arch/x86/include/asm/fpu/internal.h @@ -15,6 +15,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/dovetail.h> #include <asm/user.h> #include <asm/fpu/api.h> @@ -509,6 +510,32 @@ clear_thread_flag(TIF_NEED_FPU_LOAD); } +#ifdef CONFIG_DOVETAIL + +static inline void oob_fpu_set_preempt(struct fpu *fpu) +{ + fpu->preempted = 1; +} + +static inline void oob_fpu_clear_preempt(struct fpu *fpu) +{ + fpu->preempted = 0; +} + +static inline bool oob_fpu_preempted(struct fpu *old_fpu) +{ + return old_fpu->preempted; +} + +#else + +static inline bool oob_fpu_preempted(struct fpu *old_fpu) +{ + return false; +} + +#endif /* !CONFIG_DOVETAIL */ + /* * FPU state switching for scheduling. 
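 *
 * The oob_fpu_preempted() test added below ties into this. A sketch
 * of the assumed Dovetail flow: when an out-of-band task preempts a
 * kernel_fpu_begin/end() section, the in-band FPU context is flagged
 * via oob_fpu_set_preempt(), so switch_fpu_prepare() must not save
 * registers the out-of-band side already owns:
 *
 *	if (!oob_fpu_preempted(old_fpu))
 *		copy_fpregs_to_fpstate(old_fpu);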
* @@ -535,7 +562,9 @@ { struct fpu *old_fpu = &prev->thread.fpu; - if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) { + if (static_cpu_has(X86_FEATURE_FPU) && + !(prev->flags & PF_KTHREAD) && + !oob_fpu_preempted(old_fpu)) { if (!copy_fpregs_to_fpstate(old_fpu)) old_fpu->last_cpu = -1; else diff --git a/kernel/arch/x86/include/asm/fpu/types.h b/kernel/arch/x86/include/asm/fpu/types.h index f5a38a5..ce2bdeb 100644 --- a/kernel/arch/x86/include/asm/fpu/types.h +++ b/kernel/arch/x86/include/asm/fpu/types.h @@ -329,6 +329,18 @@ */ unsigned int last_cpu; +#ifdef CONFIG_DOVETAIL + /* + * @preempted: + * + * When Dovetail is enabled, this flag is set for the inband + * task context saved when entering a kernel_fpu_begin/end() + * section before the latter got preempted by an out-of-band + * task. + */ + unsigned char preempted : 1; +#endif + /* * @avx512_timestamp: * diff --git a/kernel/arch/x86/include/asm/i8259.h b/kernel/arch/x86/include/asm/i8259.h index 89789e8..facf1bc 100644 --- a/kernel/arch/x86/include/asm/i8259.h +++ b/kernel/arch/x86/include/asm/i8259.h @@ -26,7 +26,7 @@ #define SLAVE_ICW4_DEFAULT 0x01 #define PIC_ICW4_AEOI 2 -extern raw_spinlock_t i8259A_lock; +extern hard_spinlock_t i8259A_lock; /* the PIC may need a careful delay on some platforms, hence specific calls */ static inline unsigned char inb_pic(unsigned int port) diff --git a/kernel/arch/x86/include/asm/idtentry.h b/kernel/arch/x86/include/asm/idtentry.h index dc2a8b1..c0d1f94 100644 --- a/kernel/arch/x86/include/asm/idtentry.h +++ b/kernel/arch/x86/include/asm/idtentry.h @@ -174,6 +174,56 @@ #define DECLARE_IDTENTRY_IRQ(vector, func) \ DECLARE_IDTENTRY_ERRORCODE(vector, func) +#ifdef CONFIG_IRQ_PIPELINE + +struct irq_stage_data; + +struct irq_stage_data * +handle_irq_pipelined_prepare(struct pt_regs *regs); + +int handle_irq_pipelined_finish(struct irq_stage_data *prevd, + struct pt_regs *regs); + +void arch_pipeline_entry(struct pt_regs *regs, u8 vector); + +#define DECLARE_IDTENTRY_SYSVEC_PIPELINED(vector, func) \ + DECLARE_IDTENTRY_SYSVEC(vector, func); \ + __visible void __##func(struct pt_regs *regs) + +#define DEFINE_IDTENTRY_IRQ_PIPELINED(func) \ +__visible noinstr void func(struct pt_regs *regs, \ + unsigned long error_code) \ +{ \ + arch_pipeline_entry(regs, (u8)error_code); \ +} \ +static __always_inline void __##func(struct pt_regs *regs, u8 vector) + +/* + * In a pipelined model, the actual sysvec __handler() is directly + * instrumentable, just like it is in fact in the non-pipelined + * model. The indirect call via run_on_irqstack_cond() in + * DEFINE_IDTENTRY_SYSVEC() happens to hide the noinstr dependency + * from objtool in the latter case.
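+ *
+ * For illustration, DEFINE_IDTENTRY_SYSVEC_PIPELINED(LOCAL_TIMER_VECTOR,
+ * sysvec_apic_timer_interrupt) expands to the entry stub
+ *
+ *	__visible noinstr void sysvec_apic_timer_interrupt(struct pt_regs *regs)
+ *	{
+ *		arch_pipeline_entry(regs, LOCAL_TIMER_VECTOR);
+ *	}
+ *
+ * followed by the prototype of the instrumentable body,
+ * __sysvec_apic_timer_interrupt(), whose definition the handler
+ * supplies.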
+ */ +#define DEFINE_IDTENTRY_SYSVEC_PIPELINED(vector, func) \ +__visible noinstr void func(struct pt_regs *regs) \ +{ \ + arch_pipeline_entry(regs, vector); \ +} \ + \ +__visible void __##func(struct pt_regs *regs) + +#define DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(vector, func) \ + DEFINE_IDTENTRY_SYSVEC_PIPELINED(vector, func) + +#else /* !CONFIG_IRQ_PIPELINE */ + +#define DECLARE_IDTENTRY_SYSVEC_PIPELINED(vector, func) DECLARE_IDTENTRY_SYSVEC(vector, func) + +#define DEFINE_IDTENTRY_IRQ_PIPELINED(func) DEFINE_IDTENTRY_IRQ(func) +#define DEFINE_IDTENTRY_SYSVEC_PIPELINED(vector, func) DEFINE_IDTENTRY_SYSVEC(func) +#define DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(vector, func) DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) + /** * DEFINE_IDTENTRY_IRQ - Emit code for device interrupt IDT entry points * @func: Function name of the entry point @@ -204,6 +254,8 @@ } \ \ static __always_inline void __##func(struct pt_regs *regs, u8 vector) + +#endif /* !CONFIG_IRQ_PIPELINE */ /** * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points @@ -450,6 +502,9 @@ #define DECLARE_IDTENTRY_SYSVEC(vector, func) \ idtentry_sysvec vector func +#define DECLARE_IDTENTRY_SYSVEC_PIPELINED(vector, func) \ + DECLARE_IDTENTRY_SYSVEC(vector, func) + #ifdef CONFIG_X86_64 # define DECLARE_IDTENTRY_MCE(vector, func) \ idtentry_mce_db vector asm_##func func @@ -632,21 +687,25 @@ #ifdef CONFIG_X86_LOCAL_APIC DECLARE_IDTENTRY_SYSVEC(ERROR_APIC_VECTOR, sysvec_error_interrupt); DECLARE_IDTENTRY_SYSVEC(SPURIOUS_APIC_VECTOR, sysvec_spurious_apic_interrupt); -DECLARE_IDTENTRY_SYSVEC(LOCAL_TIMER_VECTOR, sysvec_apic_timer_interrupt); -DECLARE_IDTENTRY_SYSVEC(X86_PLATFORM_IPI_VECTOR, sysvec_x86_platform_ipi); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(LOCAL_TIMER_VECTOR, sysvec_apic_timer_interrupt); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(X86_PLATFORM_IPI_VECTOR, sysvec_x86_platform_ipi); #endif #ifdef CONFIG_SMP -DECLARE_IDTENTRY(RESCHEDULE_VECTOR, sysvec_reschedule_ipi); -DECLARE_IDTENTRY_SYSVEC(IRQ_MOVE_CLEANUP_VECTOR, sysvec_irq_move_cleanup); -DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR, sysvec_reboot); -DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single); -DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(RESCHEDULE_VECTOR, sysvec_reschedule_ipi); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(IRQ_MOVE_CLEANUP_VECTOR, sysvec_irq_move_cleanup); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(REBOOT_VECTOR, sysvec_reboot); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_VECTOR, sysvec_call_function); +#ifdef CONFIG_IRQ_PIPELINE +DECLARE_IDTENTRY_SYSVEC(RESCHEDULE_OOB_VECTOR, sysvec_reschedule_oob_ipi); +DECLARE_IDTENTRY_SYSVEC(TIMER_OOB_VECTOR, sysvec_timer_oob_ipi); +#endif #endif #ifdef CONFIG_X86_LOCAL_APIC # ifdef CONFIG_X86_MCE_THRESHOLD -DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold); +DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold); # endif # ifdef CONFIG_X86_MCE_AMD @@ -658,28 +717,28 @@ # endif # ifdef CONFIG_IRQ_WORK -DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(IRQ_WORK_VECTOR, sysvec_irq_work); # endif #endif #ifdef CONFIG_HAVE_KVM -DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi); -DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi); -DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi); 
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi); #endif #if IS_ENABLED(CONFIG_HYPERV) -DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback); -DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment); -DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0); #endif #if IS_ENABLED(CONFIG_ACRN_GUEST) -DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_acrn_hv_callback); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR, sysvec_acrn_hv_callback); #endif #ifdef CONFIG_XEN_PVHVM -DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_xen_hvm_callback); +DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR, sysvec_xen_hvm_callback); #endif #ifdef CONFIG_KVM_GUEST diff --git a/kernel/arch/x86/include/asm/irq_pipeline.h b/kernel/arch/x86/include/asm/irq_pipeline.h new file mode 100644 index 0000000..5fa0cce --- /dev/null +++ b/kernel/arch/x86/include/asm/irq_pipeline.h @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum <rpm@xenomai.org>. + */ +#ifndef _ASM_X86_IRQ_PIPELINE_H +#define _ASM_X86_IRQ_PIPELINE_H + +#include <asm-generic/irq_pipeline.h> + +#ifdef CONFIG_IRQ_PIPELINE + +#include <asm/ptrace.h> + +#define FIRST_SYSTEM_IRQ NR_IRQS +#define TIMER_OOB_IPI apicm_vector_irq(TIMER_OOB_VECTOR) +#define RESCHEDULE_OOB_IPI apicm_vector_irq(RESCHEDULE_OOB_VECTOR) +#define apicm_irq_vector(__irq) ((__irq) - FIRST_SYSTEM_IRQ + FIRST_SYSTEM_VECTOR) +#define apicm_vector_irq(__vec) ((__vec) - FIRST_SYSTEM_VECTOR + FIRST_SYSTEM_IRQ) + +#define X86_EFLAGS_SS_BIT 31 + +static inline notrace +unsigned long arch_irqs_virtual_to_native_flags(int stalled) +{ + return (!stalled) << X86_EFLAGS_IF_BIT; +} + +static inline notrace +unsigned long arch_irqs_native_to_virtual_flags(unsigned long flags) +{ + return hard_irqs_disabled_flags(flags) << X86_EFLAGS_SS_BIT; +} + +#ifndef CONFIG_PARAVIRT_XXL + +static inline notrace unsigned long arch_local_irq_save(void) +{ + int stalled = inband_irq_save(); + barrier(); + return arch_irqs_virtual_to_native_flags(stalled); +} + +static inline notrace void arch_local_irq_enable(void) +{ + barrier(); + inband_irq_enable(); +} + +static inline notrace void arch_local_irq_disable(void) +{ + inband_irq_disable(); + barrier(); +} + +static inline notrace unsigned long arch_local_save_flags(void) +{ + int stalled = inband_irqs_disabled(); + barrier(); + return arch_irqs_virtual_to_native_flags(stalled); +} + +static inline notrace void arch_local_irq_restore(unsigned long flags) +{ + inband_irq_restore(native_irqs_disabled_flags(flags)); + barrier(); +} + +#endif /* !CONFIG_PARAVIRT_XXL */ + +static inline +void arch_save_timer_regs(struct pt_regs *dst, struct pt_regs *src) +{ + dst->flags = src->flags; + dst->cs = src->cs; + dst->ip = src->ip; + dst->bp = src->bp; + dst->ss = src->ss; + dst->sp = src->sp; +} + +static inline bool arch_steal_pipelined_tick(struct pt_regs *regs) +{ + return !(regs->flags & X86_EFLAGS_IF); +} + +static inline int 
arch_enable_oob_stage(void) +{ + return 0; +} + +static inline void handle_arch_irq(struct pt_regs *regs) +{ } + +#else /* !CONFIG_IRQ_PIPELINE */ + +struct pt_regs; + +#ifndef CONFIG_PARAVIRT_XXL + +static inline notrace unsigned long arch_local_save_flags(void) +{ + return native_save_fl(); +} + +static inline notrace void arch_local_irq_restore(unsigned long flags) +{ + native_restore_fl(flags); +} + +static inline notrace void arch_local_irq_disable(void) +{ + native_irq_disable(); +} + +static inline notrace void arch_local_irq_enable(void) +{ + native_irq_enable(); +} + +/* + * For spinlocks, etc: + */ +static inline notrace unsigned long arch_local_irq_save(void) +{ + unsigned long flags = arch_local_save_flags(); + arch_local_irq_disable(); + return flags; +} + +#endif /* !CONFIG_PARAVIRT_XXL */ + +#endif /* !CONFIG_IRQ_PIPELINE */ + +#endif /* _ASM_X86_IRQ_PIPELINE_H */ diff --git a/kernel/arch/x86/include/asm/irq_stack.h b/kernel/arch/x86/include/asm/irq_stack.h index 7758169..58ad3c4 100644 --- a/kernel/arch/x86/include/asm/irq_stack.h +++ b/kernel/arch/x86/include/asm/irq_stack.h @@ -18,6 +18,13 @@ void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc), struct irq_desc *desc); +/* + * IRQ pipeline: only in-band (soft-)irq handlers have to run on the + * irqstack; oob irq handlers must be lean by design and can therefore + * run directly over the preempted context. The guarantee that the + * in-band stage is currently stalled on the current CPU is thus enough + * to update irq_count atomically. + */ static __always_inline void __run_on_irqstack(void (*func)(void)) { void *tos = __this_cpu_read(hardirq_stack_ptr); diff --git a/kernel/arch/x86/include/asm/irq_vectors.h b/kernel/arch/x86/include/asm/irq_vectors.h index 889f8b1..1e51dc4 100644 --- a/kernel/arch/x86/include/asm/irq_vectors.h +++ b/kernel/arch/x86/include/asm/irq_vectors.h @@ -106,10 +106,19 @@ #define LOCAL_TIMER_VECTOR 0xec +#ifdef CONFIG_IRQ_PIPELINE +#define TIMER_OOB_VECTOR 0xeb +#define RESCHEDULE_OOB_VECTOR 0xea +#define FIRST_SYSTEM_APIC_VECTOR RESCHEDULE_OOB_VECTOR +#define NR_APIC_VECTORS (NR_VECTORS - FIRST_SYSTEM_VECTOR) +#else +#define FIRST_SYSTEM_APIC_VECTOR LOCAL_TIMER_VECTOR +#endif + #define NR_VECTORS 256 #ifdef CONFIG_X86_LOCAL_APIC -#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR +#define FIRST_SYSTEM_VECTOR FIRST_SYSTEM_APIC_VECTOR #else #define FIRST_SYSTEM_VECTOR NR_VECTORS #endif diff --git a/kernel/arch/x86/include/asm/irqflags.h b/kernel/arch/x86/include/asm/irqflags.h index 8c86ede..ca2a870 100644 --- a/kernel/arch/x86/include/asm/irqflags.h +++ b/kernel/arch/x86/include/asm/irqflags.h @@ -35,8 +35,13 @@ return flags; } +static inline unsigned long native_save_flags(void) +{ + return native_save_fl(); +} + extern inline void native_restore_fl(unsigned long flags); -extern inline void native_restore_fl(unsigned long flags) +extern __always_inline void native_restore_fl(unsigned long flags) { asm volatile("push %0 ; popf" : /* no output */ @@ -52,6 +57,38 @@ static __always_inline void native_irq_enable(void) { asm volatile("sti": : :"memory"); +} + +static __always_inline void native_irq_sync(void) +{ + asm volatile("sti ; nop ; cli": : :"memory"); +} + +static __always_inline unsigned long native_irq_save(void) +{ + unsigned long flags; + + flags = native_save_flags(); + + native_irq_disable(); + + return flags; +} + +static __always_inline void native_irq_restore(unsigned long flags) +{ + return native_restore_fl(flags); +} + +static __always_inline int
native_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & X86_EFLAGS_IF); +} + +static __always_inline bool native_irqs_disabled(void) +{ + unsigned long flags = native_save_flags(); + return native_irqs_disabled_flags(flags); } static inline __cpuidle void native_safe_halt(void) @@ -73,26 +110,7 @@ #else #ifndef __ASSEMBLY__ #include <linux/types.h> - -static __always_inline unsigned long arch_local_save_flags(void) -{ - return native_save_fl(); -} - -static __always_inline void arch_local_irq_restore(unsigned long flags) -{ - native_restore_fl(flags); -} - -static __always_inline void arch_local_irq_disable(void) -{ - native_irq_disable(); -} - -static __always_inline void arch_local_irq_enable(void) -{ - native_irq_enable(); -} +#include <asm/irq_pipeline.h> /* * Used in the idle loop; sti takes one instruction cycle @@ -112,15 +130,6 @@ native_halt(); } -/* - * For spinlocks, etc: - */ -static __always_inline unsigned long arch_local_irq_save(void) -{ - unsigned long flags = arch_local_save_flags(); - arch_local_irq_disable(); - return flags; -} #else #define ENABLE_INTERRUPTS(x) sti @@ -149,7 +158,7 @@ #ifndef __ASSEMBLY__ static __always_inline int arch_irqs_disabled_flags(unsigned long flags) { - return !(flags & X86_EFLAGS_IF); + return native_irqs_disabled_flags(flags); } static __always_inline int arch_irqs_disabled(void) diff --git a/kernel/arch/x86/include/asm/mmu_context.h b/kernel/arch/x86/include/asm/mmu_context.h index d98016b..b20f7de 100644 --- a/kernel/arch/x86/include/asm/mmu_context.h +++ b/kernel/arch/x86/include/asm/mmu_context.h @@ -128,6 +128,13 @@ struct task_struct *tsk); #define switch_mm_irqs_off switch_mm_irqs_off +static inline void +switch_oob_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + switch_mm_irqs_off(prev, next, tsk); +} + #define activate_mm(prev, next) \ do { \ paravirt_activate_mm((prev), (next)); \ diff --git a/kernel/arch/x86/include/asm/pgtable.h b/kernel/arch/x86/include/asm/pgtable.h index 87de9f2..d4b8b84 100644 --- a/kernel/arch/x86/include/asm/pgtable.h +++ b/kernel/arch/x86/include/asm/pgtable.h @@ -137,6 +137,7 @@ static inline void write_pkru(u32 pkru) { struct pkru_state *pk; + unsigned long flags; if (!boot_cpu_has(X86_FEATURE_OSPKE)) return; @@ -148,11 +149,11 @@ * written to the CPU. The FPU restore on return to userland would * otherwise load the previous value again. 
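 *
 * Note the updated fpregs_lock() contract used below, per the
 * asm/fpu/api.h change earlier in this patch: with
 * CONFIG_IRQ_PIPELINE the function returns the hard interrupt state,
 * which the caller must hand back to fpregs_unlock():
 *
 *	flags = fpregs_lock();
 *	... update pkru/fpregs ...
 *	fpregs_unlock(flags);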
*/ - fpregs_lock(); + flags = fpregs_lock(); if (pk) pk->pkru = pkru; __write_pkru(pkru); - fpregs_unlock(); + fpregs_unlock(flags); } static inline int pte_young(pte_t pte) diff --git a/kernel/arch/x86/include/asm/special_insns.h b/kernel/arch/x86/include/asm/special_insns.h index 415693f..2035bbe 100644 --- a/kernel/arch/x86/include/asm/special_insns.h +++ b/kernel/arch/x86/include/asm/special_insns.h @@ -138,9 +138,9 @@ { unsigned long flags; - local_irq_save(flags); + flags = hard_local_irq_save(); asm_load_gs_index(selector); - local_irq_restore(flags); + hard_local_irq_restore(flags); } static inline unsigned long __read_cr4(void) diff --git a/kernel/arch/x86/include/asm/syscall.h b/kernel/arch/x86/include/asm/syscall.h index 7cbf733..7b2a464 100644 --- a/kernel/arch/x86/include/asm/syscall.h +++ b/kernel/arch/x86/include/asm/syscall.h @@ -128,6 +128,11 @@ } } +static inline unsigned long syscall_get_arg0(struct pt_regs *regs) +{ + return regs->di; +} + static inline void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, const unsigned long *args) diff --git a/kernel/arch/x86/include/asm/thread_info.h b/kernel/arch/x86/include/asm/thread_info.h index 012c8ee..ecd2701 100644 --- a/kernel/arch/x86/include/asm/thread_info.h +++ b/kernel/arch/x86/include/asm/thread_info.h @@ -52,16 +52,20 @@ struct task_struct; #include <asm/cpufeature.h> #include <linux/atomic.h> +#include <dovetail/thread_info.h> struct thread_info { unsigned long flags; /* low level flags */ u32 status; /* thread synchronous flags */ + struct oob_thread_state oob_state; /* co-kernel thread state */ }; #define INIT_THREAD_INFO(tsk) \ { \ .flags = 0, \ } + +#define ti_local_flags(__ti) ((__ti)->status) #else /* !__ASSEMBLY__ */ @@ -97,8 +101,10 @@ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ +#define TIF_RETUSER 23 /* INBAND_TASK_RETUSER is pending */ #define TIF_FORCED_TF 24 /* true if TF in eflags artificially */ #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ +#define TIF_MAYDAY 26 /* emergency trap pending */ #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ @@ -126,7 +132,9 @@ #define _TIF_SLD (1 << TIF_SLD) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) +#define _TIF_RETUSER (1 << TIF_RETUSER) #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) +#define _TIF_MAYDAY (1 << TIF_MAYDAY) #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) @@ -226,6 +234,16 @@ * have to worry about atomic accesses. 
*/ #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ +/* bits 2 and 3 reserved for compat */ +#define TS_OOB 0x0010 /* Thread is running out-of-band */ +#define TS_DOVETAIL 0x0020 /* Dovetail notifier enabled */ +#define TS_OFFSTAGE 0x0040 /* Thread is in-flight to OOB context */ +#define TS_OOBTRAP 0x0080 /* Handling a trap from OOB context */ + +#define _TLF_OOB TS_OOB +#define _TLF_DOVETAIL TS_DOVETAIL +#define _TLF_OFFSTAGE TS_OFFSTAGE +#define _TLF_OOBTRAP TS_OOBTRAP #ifndef __ASSEMBLY__ #ifdef CONFIG_COMPAT diff --git a/kernel/arch/x86/include/asm/tlbflush.h b/kernel/arch/x86/include/asm/tlbflush.h index 8c87a2e..1dfab59 100644 --- a/kernel/arch/x86/include/asm/tlbflush.h +++ b/kernel/arch/x86/include/asm/tlbflush.h @@ -37,9 +37,9 @@ { unsigned long flags; - local_irq_save(flags); + flags = hard_local_irq_save(); cr4_set_bits_irqsoff(mask); - local_irq_restore(flags); + hard_local_irq_restore(flags); } /* Clear in this cpu's CR4. */ @@ -47,9 +47,9 @@ { unsigned long flags; - local_irq_save(flags); + flags = hard_local_irq_save(); cr4_clear_bits_irqsoff(mask); - local_irq_restore(flags); + hard_local_irq_restore(flags); } #ifndef MODULE diff --git a/kernel/arch/x86/include/asm/uaccess.h b/kernel/arch/x86/include/asm/uaccess.h index bb14302..25b7697 100644 --- a/kernel/arch/x86/include/asm/uaccess.h +++ b/kernel/arch/x86/include/asm/uaccess.h @@ -44,7 +44,7 @@ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline bool pagefault_disabled(void); # define WARN_ON_IN_IRQ() \ - WARN_ON_ONCE(!in_task() && !pagefault_disabled()) + WARN_ON_ONCE(running_inband() && !in_task() && !pagefault_disabled()) #else # define WARN_ON_IN_IRQ() #endif diff --git a/kernel/arch/x86/kernel/Makefile b/kernel/arch/x86/kernel/Makefile index c06f3a9..74449e6 100644 --- a/kernel/arch/x86/kernel/Makefile +++ b/kernel/arch/x86/kernel/Makefile @@ -131,6 +131,7 @@ obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o obj-$(CONFIG_JAILHOUSE_GUEST) += jailhouse.o +obj-$(CONFIG_IRQ_PIPELINE) += irq_pipeline.o obj-$(CONFIG_EISA) += eisa.o obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o diff --git a/kernel/arch/x86/kernel/alternative.c b/kernel/arch/x86/kernel/alternative.c index 92f0a97..e0be5d4 100644 --- a/kernel/arch/x86/kernel/alternative.c +++ b/kernel/arch/x86/kernel/alternative.c @@ -8,6 +8,7 @@ #include <linux/list.h> #include <linux/stringify.h> #include <linux/highmem.h> +#include <linux/irq_pipeline.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/memory.h> @@ -366,9 +367,9 @@ if (nnops <= 1) return nnops; - local_irq_save(flags); + flags = hard_local_irq_save(); add_nops(instr + off, nnops); - local_irq_restore(flags); + hard_local_irq_restore(flags); DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i); @@ -1065,9 +1066,9 @@ */ memcpy(addr, opcode, len); } else { - local_irq_save(flags); + flags = hard_local_irq_save(); memcpy(addr, opcode, len); - local_irq_restore(flags); + hard_local_irq_restore(flags); sync_core(); /* @@ -1099,6 +1100,7 @@ temp_mm_state_t temp_state; lockdep_assert_irqs_disabled(); + WARN_ON_ONCE(irq_pipeline_debug() && !hard_irqs_disabled()); /* * Make sure not to be in TLB lazy mode, as otherwise we'll end up @@ -1192,7 +1194,7 @@ */ VM_BUG_ON(!ptep); - local_irq_save(flags); + local_irq_save_full(flags); pte = mk_pte(pages[0], pgprot); set_pte_at(poking_mm, poking_addr, ptep, pte); @@ -1243,7 +1245,7 @@ */ BUG_ON(memcmp(addr, opcode, len)); - local_irq_restore(flags); + local_irq_restore_full(flags); pte_unmap_unlock(ptep, ptl); return addr; } diff 
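The local_irq_save_full()/local_irq_restore_full() conversions above rely on Dovetail's combined-state helpers. A minimal sketch of their assumed semantics, acting on both the virtual in-band mask and the real CPU interrupt flag:

	#define local_irq_save_full(__flags)		\
	do {						\
		local_irq_save(__flags);		\
		hard_local_irq_disable();		\
	} while (0)

	#define local_irq_restore_full(__flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(__flags))		\
			hard_local_irq_enable();		\
		local_irq_restore(__flags);			\
	} while (0)

This matches how timer_irq_works() later in this patch re-disables hard irqs when the saved flags denote a stalled in-band stage.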
--git a/kernel/arch/x86/kernel/apic/apic.c b/kernel/arch/x86/kernel/apic/apic.c index 1c96f24..8984c79 100644 --- a/kernel/arch/x86/kernel/apic/apic.c +++ b/kernel/arch/x86/kernel/apic/apic.c @@ -31,6 +31,7 @@ #include <linux/i8253.h> #include <linux/dmar.h> #include <linux/init.h> +#include <linux/irq.h> #include <linux/cpu.h> #include <linux/dmi.h> #include <linux/smp.h> @@ -272,10 +273,10 @@ { unsigned long flags; - local_irq_save(flags); + flags = hard_local_irq_save(); apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id)); apic_write(APIC_ICR, low); - local_irq_restore(flags); + hard_local_irq_restore(flags); } u64 native_apic_icr_read(void) @@ -331,6 +332,9 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen) { unsigned int lvtt_value, tmp_value; + unsigned long flags; + + flags = hard_cond_local_irq_save(); lvtt_value = LOCAL_TIMER_VECTOR; if (!oneshot) @@ -353,6 +357,8 @@ * According to Intel, MFENCE can do the serialization here. */ asm volatile("mfence" : : : "memory"); + hard_cond_local_irq_restore(flags); + printk_once(KERN_DEBUG "TSC deadline timer enabled\n"); return; } @@ -366,6 +372,8 @@ if (!oneshot) apic_write(APIC_TMICT, clocks / APIC_DIVISOR); + + hard_cond_local_irq_restore(flags); } /* @@ -471,28 +479,34 @@ static int lapic_next_deadline(unsigned long delta, struct clock_event_device *evt) { + unsigned long flags; u64 tsc; /* This MSR is special and need a special fence: */ weak_wrmsr_fence(); + flags = hard_local_irq_save(); tsc = rdtsc(); wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); + hard_local_irq_restore(flags); return 0; } static int lapic_timer_shutdown(struct clock_event_device *evt) { + unsigned long flags; unsigned int v; /* Lapic used as dummy for broadcast ? */ if (evt->features & CLOCK_EVT_FEAT_DUMMY) return 0; + flags = hard_local_irq_save(); v = apic_read(APIC_LVTT); v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR); apic_write(APIC_LVTT, v); apic_write(APIC_TMICT, 0); + hard_local_irq_restore(flags); return 0; } @@ -527,6 +541,32 @@ #endif } +static DEFINE_PER_CPU(struct clock_event_device, lapic_events); + +#ifdef CONFIG_IRQ_PIPELINE + +#define LAPIC_TIMER_IRQ apicm_vector_irq(LOCAL_TIMER_VECTOR) + +static irqreturn_t lapic_oob_handler(int irq, void *dev_id) +{ + struct clock_event_device *evt = this_cpu_ptr(&lapic_events); + + trace_local_timer_entry(LOCAL_TIMER_VECTOR); + clockevents_handle_event(evt); + trace_local_timer_exit(LOCAL_TIMER_VECTOR); + + return IRQ_HANDLED; +} + +static struct irqaction lapic_oob_action = { + .handler = lapic_oob_handler, + .name = "Out-of-band LAPIC timer interrupt", + .flags = IRQF_TIMER | IRQF_PERCPU, +}; + +#else +#define LAPIC_TIMER_IRQ -1 +#endif /* * The local apic timer can be used for any function which is CPU local. 
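 *
 * With CONFIG_IRQ_PIPELINE, the hunks above move the tick to the
 * out-of-band stage: the clockevent gains CLOCK_EVT_FEAT_PIPELINE,
 * its irq becomes LAPIC_TIMER_IRQ, and lapic_oob_handler() relays
 * expiries through clockevents_handle_event() from oob context, with
 * the handler registered at setup time by:
 *
 *	setup_percpu_irq(LAPIC_TIMER_IRQ, &lapic_oob_action);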
@@ -534,8 +574,8 @@ static struct clock_event_device lapic_clockevent = { .name = "lapic", .features = CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP - | CLOCK_EVT_FEAT_DUMMY, + CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP | + CLOCK_EVT_FEAT_PIPELINE | CLOCK_EVT_FEAT_DUMMY, .shift = 32, .set_state_shutdown = lapic_timer_shutdown, .set_state_periodic = lapic_timer_set_periodic, @@ -544,9 +584,8 @@ .set_next_event = lapic_next_event, .broadcast = lapic_timer_broadcast, .rating = 100, - .irq = -1, + .irq = LAPIC_TIMER_IRQ, }; -static DEFINE_PER_CPU(struct clock_event_device, lapic_events); static const struct x86_cpu_id deadline_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */ @@ -1042,6 +1081,9 @@ /* Setup the lapic or request the broadcast */ setup_APIC_timer(); amd_e400_c1e_apic_setup(); +#ifdef CONFIG_IRQ_PIPELINE + setup_percpu_irq(LAPIC_TIMER_IRQ, &lapic_oob_action); +#endif } void setup_secondary_APIC_clock(void) @@ -1092,7 +1134,8 @@ * [ if a single-CPU system runs an SMP kernel then we call the local * interrupt as well. Thus we cannot inline the local irq ... ] */ -DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(LOCAL_TIMER_VECTOR, + sysvec_apic_timer_interrupt) { struct pt_regs *old_regs = set_irq_regs(regs); @@ -1513,7 +1556,7 @@ * per set bit. */ for_each_set_bit(bit, isr->map, APIC_IR_BITS) - ack_APIC_irq(); + __ack_APIC_irq(); return true; } @@ -2131,7 +2174,7 @@ * * Also called from sysvec_spurious_apic_interrupt(). */ -DEFINE_IDTENTRY_IRQ(spurious_interrupt) +DEFINE_IDTENTRY_IRQ_PIPELINED(spurious_interrupt) { u32 v; @@ -2157,7 +2200,7 @@ if (v & (1 << (vector & 0x1f))) { pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n", vector, smp_processor_id()); - ack_APIC_irq(); + __ack_APIC_irq(); } else { pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n", vector, smp_processor_id()); @@ -2166,13 +2209,18 @@ trace_spurious_apic_exit(vector); } -DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(SPURIOUS_APIC_VECTOR, + sysvec_spurious_apic_interrupt) { __spurious_interrupt(regs, SPURIOUS_APIC_VECTOR); } /* * This interrupt should never happen with our APIC/SMP architecture + * + * irq_pipeline: same as spurious_interrupt, would run directly out of + * the IDT, no deferral via the interrupt log which means that only + * the hardware IRQ state is considered for masking. 
*/ DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt) { diff --git a/kernel/arch/x86/kernel/apic/apic_flat_64.c b/kernel/arch/x86/kernel/apic/apic_flat_64.c index 7862b15..d376218 100644 --- a/kernel/arch/x86/kernel/apic/apic_flat_64.c +++ b/kernel/arch/x86/kernel/apic/apic_flat_64.c @@ -52,9 +52,9 @@ { unsigned long flags; - local_irq_save(flags); + flags = hard_local_irq_save(); __default_send_IPI_dest_field(mask, vector, apic->dest_logical); - local_irq_restore(flags); + hard_local_irq_restore(flags); } static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) diff --git a/kernel/arch/x86/kernel/apic/apic_numachip.c b/kernel/arch/x86/kernel/apic/apic_numachip.c index 35edd57..3522a47 100644 --- a/kernel/arch/x86/kernel/apic/apic_numachip.c +++ b/kernel/arch/x86/kernel/apic/apic_numachip.c @@ -103,10 +103,10 @@ if (!((apicid ^ local_apicid) >> NUMACHIP_LAPIC_BITS)) { unsigned long flags; - local_irq_save(flags); + flags = hard_local_irq_save(); __default_send_IPI_dest_field(apicid, vector, APIC_DEST_PHYSICAL); - local_irq_restore(flags); + hard_local_irq_restore(flags); preempt_enable(); return; } diff --git a/kernel/arch/x86/kernel/apic/io_apic.c b/kernel/arch/x86/kernel/apic/io_apic.c index 25b1d5c..47f9d4a 100644 --- a/kernel/arch/x86/kernel/apic/io_apic.c +++ b/kernel/arch/x86/kernel/apic/io_apic.c @@ -78,7 +78,7 @@ #define for_each_irq_pin(entry, head) \ list_for_each_entry(entry, &head, list) -static DEFINE_RAW_SPINLOCK(ioapic_lock); +static DEFINE_HARD_SPINLOCK(ioapic_lock); static DEFINE_MUTEX(ioapic_mutex); static unsigned int ioapic_dynirq_base; static int ioapic_initialized; @@ -1634,7 +1634,7 @@ return 1; local_save_flags(flags); - local_irq_enable(); + local_irq_enable_full(); if (boot_cpu_has(X86_FEATURE_TSC)) delay_with_tsc(); @@ -1642,6 +1642,8 @@ delay_without_tsc(); local_irq_restore(flags); + if (raw_irqs_disabled_flags(flags)) + hard_local_irq_disable(); /* * Expect a few ticks at least, to be sure some possible @@ -1722,14 +1724,56 @@ return false; } +static inline void do_prepare_move(struct irq_data *data) +{ + if (!irqd_irq_masked(data)) + mask_ioapic_irq(data); +} + +#ifdef CONFIG_IRQ_PIPELINE + +static inline void ioapic_finish_move(struct irq_data *data, bool moveit); + +static void ioapic_deferred_irq_move(struct irq_work *work) +{ + struct irq_data *data; + struct irq_desc *desc; + unsigned long flags; + + data = container_of(work, struct irq_data, move_work); + desc = irq_data_to_desc(data); + raw_spin_lock_irqsave(&desc->lock, flags); + do_prepare_move(data); + ioapic_finish_move(data, true); + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +static inline bool __ioapic_prepare_move(struct irq_data *data) +{ + init_irq_work(&data->move_work, ioapic_deferred_irq_move); + irq_work_queue(&data->move_work); + + return false; /* Postpone ioapic_finish_move(). */ +} + +#else /* !CONFIG_IRQ_PIPELINE */ + +static inline bool __ioapic_prepare_move(struct irq_data *data) +{ + do_prepare_move(data); + + return true; +} + +#endif + static inline bool ioapic_prepare_move(struct irq_data *data) { /* If we are moving the IRQ we need to mask it */ - if (unlikely(irqd_is_setaffinity_pending(data))) { - if (!irqd_irq_masked(data)) - mask_ioapic_irq(data); - return true; - } + if (irqd_is_setaffinity_pending(data) && + !irqd_is_setaffinity_blocked(data)) + return __ioapic_prepare_move(data); + return false; } @@ -1828,7 +1872,7 @@ * We must acknowledge the irq before we move it or the acknowledge will * not propagate properly. 
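 *
 * Hence __ack_APIC_irq() below instead of ack_APIC_irq(): per the
 * asm/apic.h change earlier in this patch, the latter turns into a
 * no-op when interrupts are pipelined (the pipeline entry code is
 * assumed to have issued the EOI already), so paths that really must
 * EOI at this spot call the double-underscore variant directly.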
*/ - ack_APIC_irq(); + __ack_APIC_irq(); /* * Tail end of clearing remote IRR bit (either by delivering the EOI @@ -1949,7 +1993,8 @@ .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_get_irqchip_state = ioapic_irq_get_chip_state, .flags = IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_AFFINITY_PRE_STARTUP, + IRQCHIP_AFFINITY_PRE_STARTUP | + IRQCHIP_PIPELINE_SAFE, }; static struct irq_chip ioapic_ir_chip __read_mostly = { @@ -1963,7 +2008,8 @@ .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_get_irqchip_state = ioapic_irq_get_chip_state, .flags = IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_AFFINITY_PRE_STARTUP, + IRQCHIP_AFFINITY_PRE_STARTUP | + IRQCHIP_PIPELINE_SAFE, }; static inline void init_IO_APIC_traps(void) @@ -2010,7 +2056,7 @@ static void ack_lapic_irq(struct irq_data *data) { - ack_APIC_irq(); + __ack_APIC_irq(); } static struct irq_chip lapic_chip __read_mostly = { @@ -2018,6 +2064,7 @@ .irq_mask = mask_lapic_irq, .irq_unmask = unmask_lapic_irq, .irq_ack = ack_lapic_irq, + .flags = IRQCHIP_PIPELINE_SAFE, }; static void lapic_register_intr(int irq) @@ -2135,7 +2182,7 @@ if (!global_clock_event) return; - local_irq_save(flags); + local_irq_save_full(flags); /* * get/set the timer IRQ vector: @@ -2203,7 +2250,7 @@ goto out; } panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC"); - local_irq_disable(); + local_irq_disable_full(); clear_IO_APIC_pin(apic1, pin1); if (!no_pin1) apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " @@ -2227,7 +2274,7 @@ /* * Cleanup, just in case ... */ - local_irq_disable(); + local_irq_disable_full(); legacy_pic->mask(0); clear_IO_APIC_pin(apic2, pin2); apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); @@ -2244,7 +2291,7 @@ apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } - local_irq_disable(); + local_irq_disable_full(); legacy_pic->mask(0); apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); @@ -2263,7 +2310,7 @@ apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); goto out; } - local_irq_disable(); + local_irq_disable_full(); apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); if (apic_is_x2apic_enabled()) apic_printk(APIC_QUIET, KERN_INFO @@ -2272,7 +2319,7 @@ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " "report. Then try booting with the 'noapic' option.\n"); out: - local_irq_restore(flags); + local_irq_restore_full(flags); } /* @@ -3018,13 +3065,13 @@ cfg = irqd_cfg(irq_data); add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); - local_irq_save(flags); + local_irq_save_full(flags); if (info->ioapic.entry) mp_setup_entry(cfg, data, info->ioapic.entry); mp_register_handler(virq, data->trigger); if (virq < nr_legacy_irqs()) legacy_pic->mask(virq); - local_irq_restore(flags); + local_irq_restore_full(flags); apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n", diff --git a/kernel/arch/x86/kernel/apic/ipi.c b/kernel/arch/x86/kernel/apic/ipi.c index 387154e..bd2ffae 100644 --- a/kernel/arch/x86/kernel/apic/ipi.c +++ b/kernel/arch/x86/kernel/apic/ipi.c @@ -117,8 +117,10 @@ * cli/sti. Otherwise we use an even cheaper single atomic write * to the APIC. */ + unsigned long flags; unsigned int cfg; + flags = hard_cond_local_irq_save(); /* * Wait for idle. */ @@ -137,6 +139,8 @@ * Send the IPI. The write to APIC_ICR fires this off. 
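 *
 * The hard_cond_local_irq_save/restore() pair wrapping this helper
 * follows the Dovetail convention assumed throughout this patch: it
 * disables hard irqs only when CONFIG_IRQ_PIPELINE is enabled and
 * degrades to a no-op otherwise, keeping the non-pipelined build's
 * behavior unchanged:
 *
 *	flags = hard_cond_local_irq_save();
 *	... wait for ICR idle, send the IPI ...
 *	hard_cond_local_irq_restore(flags);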
*/ native_apic_mem_write(APIC_ICR, cfg); + + hard_cond_local_irq_restore(flags); } /* @@ -145,8 +149,10 @@ */ void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest) { + unsigned long flags; unsigned long cfg; + flags = hard_cond_local_irq_save(); /* * Wait for idle. */ @@ -170,16 +176,18 @@ * Send the IPI. The write to APIC_ICR fires this off. */ native_apic_mem_write(APIC_ICR, cfg); + + hard_cond_local_irq_restore(flags); } void default_send_IPI_single_phys(int cpu, int vector) { unsigned long flags; - local_irq_save(flags); + flags = hard_local_irq_save(); __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu), vector, APIC_DEST_PHYSICAL); - local_irq_restore(flags); + hard_local_irq_restore(flags); } void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector) @@ -192,12 +200,12 @@ * to an arbitrary mask, so I do a unicast to each CPU instead. * - mbligh */ - local_irq_save(flags); + flags = hard_local_irq_save(); for_each_cpu(query_cpu, mask) { __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), vector, APIC_DEST_PHYSICAL); } - local_irq_restore(flags); + hard_local_irq_restore(flags); } void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, @@ -209,14 +217,14 @@ /* See Hack comment above */ - local_irq_save(flags); + flags = hard_local_irq_save(); for_each_cpu(query_cpu, mask) { if (query_cpu == this_cpu) continue; __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), vector, APIC_DEST_PHYSICAL); } - local_irq_restore(flags); + hard_local_irq_restore(flags); } /* @@ -256,12 +264,12 @@ * should be modified to do 1 message per cluster ID - mbligh */ - local_irq_save(flags); + flags = hard_local_irq_save(); for_each_cpu(query_cpu, mask) __default_send_IPI_dest_field( early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), vector, apic->dest_logical); - local_irq_restore(flags); + hard_local_irq_restore(flags); } void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, @@ -273,7 +281,7 @@ /* See Hack comment above */ - local_irq_save(flags); + flags = hard_local_irq_save(); for_each_cpu(query_cpu, mask) { if (query_cpu == this_cpu) continue; @@ -281,7 +289,7 @@ early_per_cpu(x86_cpu_to_logical_apicid, query_cpu), vector, apic->dest_logical); } - local_irq_restore(flags); + hard_local_irq_restore(flags); } /* @@ -295,10 +303,10 @@ if (!mask) return; - local_irq_save(flags); + flags = hard_local_irq_save(); WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); __default_send_IPI_dest_field(mask, vector, apic->dest_logical); - local_irq_restore(flags); + hard_local_irq_restore(flags); } /* must come after the send_IPI functions above for inlining */ diff --git a/kernel/arch/x86/kernel/apic/msi.c b/kernel/arch/x86/kernel/apic/msi.c index 6bd98a2..69dc36e 100644 --- a/kernel/arch/x86/kernel/apic/msi.c +++ b/kernel/arch/x86/kernel/apic/msi.c @@ -181,7 +181,8 @@ .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_affinity = msi_set_affinity, .flags = IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_AFFINITY_PRE_STARTUP, + IRQCHIP_AFFINITY_PRE_STARTUP | + IRQCHIP_PIPELINE_SAFE, }; int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, @@ -251,7 +252,8 @@ .irq_ack = irq_chip_ack_parent, .irq_retrigger = irq_chip_retrigger_hierarchy, .flags = IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_AFFINITY_PRE_STARTUP, + IRQCHIP_AFFINITY_PRE_STARTUP | + IRQCHIP_PIPELINE_SAFE, }; static struct msi_domain_info pci_msi_ir_domain_info = { @@ -294,7 +296,8 @@ .irq_retrigger = 
irq_chip_retrigger_hierarchy, .irq_write_msi_msg = dmar_msi_write_msg, .flags = IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_AFFINITY_PRE_STARTUP, + IRQCHIP_AFFINITY_PRE_STARTUP | + IRQCHIP_PIPELINE_SAFE, }; static int dmar_msi_init(struct irq_domain *domain, @@ -386,7 +389,8 @@ .irq_set_affinity = msi_domain_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_write_msi_msg = hpet_msi_write_msg, - .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP | + IRQCHIP_PIPELINE_SAFE, }; static int hpet_msi_init(struct irq_domain *domain, diff --git a/kernel/arch/x86/kernel/apic/vector.c b/kernel/arch/x86/kernel/apic/vector.c index bd557e9..1cb7ab4 100644 --- a/kernel/arch/x86/kernel/apic/vector.c +++ b/kernel/arch/x86/kernel/apic/vector.c @@ -39,7 +39,7 @@ struct irq_domain *x86_vector_domain; EXPORT_SYMBOL_GPL(x86_vector_domain); -static DEFINE_RAW_SPINLOCK(vector_lock); +static DEFINE_HARD_SPINLOCK(vector_lock); static cpumask_var_t vector_searchmask; static struct irq_chip lapic_controller; static struct irq_matrix *vector_matrix; @@ -757,6 +757,10 @@ { int isairq = vector - ISA_IRQ_VECTOR(0); + /* Copy the cleanup vector if irqs are pipelined. */ + if (IS_ENABLED(CONFIG_IRQ_PIPELINE) && + vector == IRQ_MOVE_CLEANUP_VECTOR) + return irq_to_desc(IRQ_MOVE_CLEANUP_VECTOR); /* 1:1 mapping */ /* Check whether the irq is in the legacy space */ if (isairq < 0 || isairq >= nr_legacy_irqs()) return VECTOR_UNUSED; @@ -791,15 +795,19 @@ void lapic_offline(void) { - lock_vector_lock(); + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); irq_matrix_offline(vector_matrix); - unlock_vector_lock(); + raw_spin_unlock_irqrestore(&vector_lock, flags); } static int apic_set_affinity(struct irq_data *irqd, const struct cpumask *dest, bool force) { int err; + + WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled()); if (WARN_ON_ONCE(!irqd_is_activated(irqd))) return -EIO; @@ -830,10 +838,44 @@ return 1; } -void apic_ack_irq(struct irq_data *irqd) +#if defined(CONFIG_IRQ_PIPELINE) && \ + defined(CONFIG_GENERIC_PENDING_IRQ) + +static void apic_deferred_irq_move(struct irq_work *work) +{ + struct irq_data *irqd; + struct irq_desc *desc; + unsigned long flags; + + irqd = container_of(work, struct irq_data, move_work); + desc = irq_data_to_desc(irqd); + raw_spin_lock_irqsave(&desc->lock, flags); + __irq_move_irq(irqd); + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +static inline void apic_move_irq(struct irq_data *irqd) +{ + if (irqd_is_setaffinity_pending(irqd) && + !irqd_is_setaffinity_blocked(irqd)) { + init_irq_work(&irqd->move_work, apic_deferred_irq_move); + irq_work_queue(&irqd->move_work); + } +} + +#else + +static inline void apic_move_irq(struct irq_data *irqd) { irq_move_irq(irqd); - ack_APIC_irq(); +} + +#endif + +void apic_ack_irq(struct irq_data *irqd) +{ + apic_move_irq(irqd); + __ack_APIC_irq(); } void apic_ack_edge(struct irq_data *irqd) @@ -876,15 +918,17 @@ apicd->move_in_progress = 0; } -DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(IRQ_MOVE_CLEANUP_VECTOR, + sysvec_irq_move_cleanup) { struct hlist_head *clhead = this_cpu_ptr(&cleanup_list); struct apic_chip_data *apicd; struct hlist_node *tmp; + unsigned long flags; ack_APIC_irq(); /* Prevent vectors vanishing under us */ - raw_spin_lock(&vector_lock); + raw_spin_lock_irqsave(&vector_lock, flags); hlist_for_each_entry_safe(apicd, tmp, clhead, clist) { unsigned int irr, vector = apicd->prev_vector; @@ -906,14 
+950,15 @@ free_moved_vector(apicd); } - raw_spin_unlock(&vector_lock); + raw_spin_unlock_irqrestore(&vector_lock, flags); } static void __send_cleanup_vector(struct apic_chip_data *apicd) { + unsigned long flags; unsigned int cpu; - raw_spin_lock(&vector_lock); + raw_spin_lock_irqsave(&vector_lock, flags); apicd->move_in_progress = 0; cpu = apicd->prev_cpu; if (cpu_online(cpu)) { @@ -922,7 +967,7 @@ } else { apicd->prev_vector = 0; } - raw_spin_unlock(&vector_lock); + raw_spin_unlock_irqrestore(&vector_lock, flags); } void send_cleanup_vector(struct irq_cfg *cfg) @@ -960,6 +1005,8 @@ struct apic_chip_data *apicd; struct irq_data *irqd; unsigned int vector; + + WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled()); /* * The function is called for all descriptors regardless of which @@ -1051,9 +1098,10 @@ int lapic_can_unplug_cpu(void) { unsigned int rsvd, avl, tomove, cpu = smp_processor_id(); + unsigned long flags; int ret = 0; - raw_spin_lock(&vector_lock); + raw_spin_lock_irqsave(&vector_lock, flags); tomove = irq_matrix_allocated(vector_matrix); avl = irq_matrix_available(vector_matrix, true); if (avl < tomove) { @@ -1068,7 +1116,7 @@ rsvd, avl); } out: - raw_spin_unlock(&vector_lock); + raw_spin_unlock_irqrestore(&vector_lock, flags); return ret; } #endif /* HOTPLUG_CPU */ diff --git a/kernel/arch/x86/kernel/apic/x2apic_cluster.c b/kernel/arch/x86/kernel/apic/x2apic_cluster.c index 7eec3c1..52fdf80 100644 --- a/kernel/arch/x86/kernel/apic/x2apic_cluster.c +++ b/kernel/arch/x86/kernel/apic/x2apic_cluster.c @@ -44,7 +44,7 @@ /* x2apic MSRs are special and need a special fence: */ weak_wrmsr_fence(); - local_irq_save(flags); + flags = hard_local_irq_save(); tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask); cpumask_copy(tmpmsk, mask); @@ -68,7 +68,7 @@ cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask); } - local_irq_restore(flags); + hard_local_irq_restore(flags); } static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) diff --git a/kernel/arch/x86/kernel/apic/x2apic_phys.c b/kernel/arch/x86/kernel/apic/x2apic_phys.c index 032a00e..72ebc33 100644 --- a/kernel/arch/x86/kernel/apic/x2apic_phys.c +++ b/kernel/arch/x86/kernel/apic/x2apic_phys.c @@ -58,7 +58,7 @@ /* x2apic MSRs are special and need a special fence: */ weak_wrmsr_fence(); - local_irq_save(flags); + flags = hard_local_irq_save(); this_cpu = smp_processor_id(); for_each_cpu(query_cpu, mask) { @@ -67,7 +67,7 @@ __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), vector, APIC_DEST_PHYSICAL); } - local_irq_restore(flags); + hard_local_irq_restore(flags); } static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) diff --git a/kernel/arch/x86/kernel/asm-offsets.c b/kernel/arch/x86/kernel/asm-offsets.c index 70b7154..09c539f 100644 --- a/kernel/arch/x86/kernel/asm-offsets.c +++ b/kernel/arch/x86/kernel/asm-offsets.c @@ -38,6 +38,9 @@ #endif BLANK(); +#ifdef CONFIG_IRQ_PIPELINE + DEFINE(OOB_stage_mask, STAGE_MASK); +#endif OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); BLANK(); diff --git a/kernel/arch/x86/kernel/cpu/acrn.c b/kernel/arch/x86/kernel/cpu/acrn.c index 0b2c039..7f0694b 100644 --- a/kernel/arch/x86/kernel/cpu/acrn.c +++ b/kernel/arch/x86/kernel/cpu/acrn.c @@ -35,7 +35,8 @@ static void (*acrn_intr_handler)(void); -DEFINE_IDTENTRY_SYSVEC(sysvec_acrn_hv_callback) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR, + sysvec_acrn_hv_callback) { struct pt_regs *old_regs = set_irq_regs(regs); diff --git a/kernel/arch/x86/kernel/cpu/mce/amd.c b/kernel/arch/x86/kernel/cpu/mce/amd.c 
index 09f7c65..d158d83 100644 --- a/kernel/arch/x86/kernel/cpu/mce/amd.c +++ b/kernel/arch/x86/kernel/cpu/mce/amd.c @@ -921,13 +921,18 @@ mce_log(&m); } +/* + * irq_pipeline: Deferred error events have NMI semantics wrt + * pipelining; they can and should be handled immediately out of the + * IDT. + */ DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error) { trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); inc_irq_stat(irq_deferred_error_count); deferred_error_int_vector(); trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR); - ack_APIC_irq(); + __ack_APIC_irq(); } /* diff --git a/kernel/arch/x86/kernel/cpu/mce/core.c b/kernel/arch/x86/kernel/cpu/mce/core.c index 5cf1a02..e18dae3 100644 --- a/kernel/arch/x86/kernel/cpu/mce/core.c +++ b/kernel/arch/x86/kernel/cpu/mce/core.c @@ -1473,7 +1473,9 @@ /* If this triggers there is no way to recover. Die hard. */ BUG_ON(!on_thread_stack() || !user_mode(regs)); + hard_local_irq_enable(); queue_task_work(&m, msg, kill_it); + hard_local_irq_disable(); } else { /* diff --git a/kernel/arch/x86/kernel/cpu/mce/therm_throt.c b/kernel/arch/x86/kernel/cpu/mce/therm_throt.c index a7cd2d2..115dd0b 100644 --- a/kernel/arch/x86/kernel/cpu/mce/therm_throt.c +++ b/kernel/arch/x86/kernel/cpu/mce/therm_throt.c @@ -614,13 +614,17 @@ static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; +/* + * irq_pipeline: MCE events have NMI semantics wrt pipelining; they can + * and should be handled immediately out of the IDT. + */ DEFINE_IDTENTRY_SYSVEC(sysvec_thermal) { trace_thermal_apic_entry(THERMAL_APIC_VECTOR); inc_irq_stat(irq_thermal_count); smp_thermal_vector(); trace_thermal_apic_exit(THERMAL_APIC_VECTOR); - ack_APIC_irq(); + __ack_APIC_irq(); } /* Thermal monitoring depends on APIC, ACPI and clock modulation */ diff --git a/kernel/arch/x86/kernel/cpu/mce/threshold.c b/kernel/arch/x86/kernel/cpu/mce/threshold.c index 6a059a0..a2515dc 100644 --- a/kernel/arch/x86/kernel/cpu/mce/threshold.c +++ b/kernel/arch/x86/kernel/cpu/mce/threshold.c @@ -27,5 +27,5 @@ inc_irq_stat(irq_threshold_count); mce_threshold_vector(); trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR); - ack_APIC_irq(); + __ack_APIC_irq(); } diff --git a/kernel/arch/x86/kernel/cpu/mshyperv.c b/kernel/arch/x86/kernel/cpu/mshyperv.c index 021cd06..ffa7ff0 100644 --- a/kernel/arch/x86/kernel/cpu/mshyperv.c +++ b/kernel/arch/x86/kernel/cpu/mshyperv.c @@ -41,7 +41,8 @@ static void (*hv_kexec_handler)(void); static void (*hv_crash_handler)(struct pt_regs *regs); -DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR, + sysvec_hyperv_callback) { struct pt_regs *old_regs = set_irq_regs(regs); @@ -77,7 +78,8 @@ * Routines to do per-architecture handling of stimer0 * interrupts when in Direct Mode */ -DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_STIMER0_VECTOR, + sysvec_hyperv_stimer0) { struct pt_regs *old_regs = set_irq_regs(regs); diff --git a/kernel/arch/x86/kernel/cpu/mtrr/generic.c b/kernel/arch/x86/kernel/cpu/mtrr/generic.c index a29997e..fc15869 100644 --- a/kernel/arch/x86/kernel/cpu/mtrr/generic.c +++ b/kernel/arch/x86/kernel/cpu/mtrr/generic.c @@ -450,13 +450,13 @@ { unsigned long flags; - local_irq_save(flags); + flags = hard_local_irq_save(); prepare_set(); pat_init(); post_set(); - local_irq_restore(flags); + hard_local_irq_restore(flags); } /* Grab all of the MTRR state for this CPU into *state */ @@ -797,7 +797,7 @@ unsigned long mask, count; unsigned long flags; - local_irq_save(flags); +
flags = hard_local_irq_save(); prepare_set(); /* Actually set the state */ @@ -807,7 +807,7 @@ pat_init(); post_set(); - local_irq_restore(flags); + hard_local_irq_restore(flags); /* Use the atomic bitops to update the global mask */ for (count = 0; count < sizeof(mask) * 8; ++count) { @@ -836,7 +836,7 @@ vr = &mtrr_state.var_ranges[reg]; - local_irq_save(flags); + flags = hard_local_irq_save(); prepare_set(); if (size == 0) { @@ -857,7 +857,7 @@ } post_set(); - local_irq_restore(flags); + hard_local_irq_restore(flags); } int generic_validate_add_page(unsigned long base, unsigned long size, diff --git a/kernel/arch/x86/kernel/dumpstack.c b/kernel/arch/x86/kernel/dumpstack.c index 97aa900..0fe58ed 100644 --- a/kernel/arch/x86/kernel/dumpstack.c +++ b/kernel/arch/x86/kernel/dumpstack.c @@ -7,6 +7,7 @@ #include <linux/uaccess.h> #include <linux/utsname.h> #include <linux/hardirq.h> +#include <linux/irq_pipeline.h> #include <linux/kdebug.h> #include <linux/module.h> #include <linux/ptrace.h> @@ -335,7 +336,7 @@ oops_enter(); /* racy, but better than risking deadlock. */ - raw_local_irq_save(flags); + flags = hard_local_irq_save(); cpu = smp_processor_id(); if (!arch_spin_trylock(&die_lock)) { if (cpu == die_owner) @@ -365,7 +366,7 @@ if (!die_nest_count) /* Nest count reaches zero, release the lock. */ arch_spin_unlock(&die_lock); - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); oops_exit(); /* Executive summary in case the oops scrolled away */ @@ -394,6 +395,8 @@ { const char *pr = ""; + irq_pipeline_oops(); + /* Save the regs of the first oops for the executive summary later. */ if (!die_counter) exec_summary_regs = *regs; @@ -402,13 +405,14 @@ pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT"; printk(KERN_DEFAULT - "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter, + "%s: %04lx [#%d]%s%s%s%s%s%s\n", str, err & 0xffff, ++die_counter, pr, IS_ENABLED(CONFIG_SMP) ? " SMP" : "", debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "", IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ? - (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : ""); + (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "", + irqs_pipelined() ? " IRQ_PIPELINE" : ""); } NOKPROBE_SYMBOL(__die_header); diff --git a/kernel/arch/x86/kernel/fpu/core.c b/kernel/arch/x86/kernel/fpu/core.c index 571220a..49e2853 100644 --- a/kernel/arch/x86/kernel/fpu/core.c +++ b/kernel/arch/x86/kernel/fpu/core.c @@ -15,6 +15,7 @@ #include <linux/hardirq.h> #include <linux/pkeys.h> +#include <linux/cpuhotplug.h> #define CREATE_TRACE_POINTS #include <asm/trace/fpu.h> @@ -76,9 +77,10 @@ */ bool irq_fpu_usable(void) { - return !in_interrupt() || - interrupted_user_mode() || - interrupted_kernel_fpu_idle(); + return running_inband() && + (!in_interrupt() || + interrupted_user_mode() || + interrupted_kernel_fpu_idle()); } EXPORT_SYMBOL(irq_fpu_usable); @@ -123,10 +125,14 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask) { + unsigned long flags; + preempt_disable(); WARN_ON_FPU(!irq_fpu_usable()); WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); + + flags = hard_cond_local_irq_save(); this_cpu_write(in_kernel_fpu, true); @@ -139,6 +145,7 @@ */ copy_fpregs_to_fpstate(¤t->thread.fpu); } + __cpu_invalidate_fpregs_state(); /* Put sane initial values into the control registers. 
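+ * + * irq_pipeline: the hard_cond_local_irq bracket added above + * keeps an out-of-band preemption from observing this FPU + * save in a half-done state; __cpu_invalidate_fpregs_state() + * then forces the task's fpregs to be reloaded later on.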
*/ @@ -147,6 +154,8 @@ if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU)) asm volatile ("fninit"); + + hard_cond_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask); @@ -166,9 +175,11 @@ */ void fpu__save(struct fpu *fpu) { + unsigned long flags; + WARN_ON_FPU(fpu != ¤t->thread.fpu); - fpregs_lock(); + flags = fpregs_lock(); trace_x86_fpu_before_save(fpu); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { @@ -178,7 +189,7 @@ } trace_x86_fpu_after_save(fpu); - fpregs_unlock(); + fpregs_unlock(flags); } /* @@ -214,6 +225,7 @@ { struct fpu *dst_fpu = &dst->thread.fpu; struct fpu *src_fpu = &src->thread.fpu; + unsigned long flags; dst_fpu->last_cpu = -1; @@ -236,14 +248,14 @@ * ( The function 'fails' in the FNSAVE case, which destroys * register contents so we have to load them back. ) */ - fpregs_lock(); + flags = fpregs_lock(); if (test_thread_flag(TIF_NEED_FPU_LOAD)) memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size); else if (!copy_fpregs_to_fpstate(dst_fpu)) copy_kernel_to_fpregs(&dst_fpu->state); - fpregs_unlock(); + fpregs_unlock(flags); set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD); @@ -321,7 +333,9 @@ */ void fpu__drop(struct fpu *fpu) { - preempt_disable(); + unsigned long flags; + + flags = hard_preempt_disable(); if (fpu == ¤t->thread.fpu) { /* Ignore delayed exceptions from user space */ @@ -333,7 +347,7 @@ trace_x86_fpu_dropped(fpu); - preempt_enable(); + hard_preempt_enable(flags); } /* @@ -361,15 +375,19 @@ */ static void fpu__clear(struct fpu *fpu, bool user_only) { + unsigned long flags; + WARN_ON_FPU(fpu != ¤t->thread.fpu); if (!static_cpu_has(X86_FEATURE_FPU)) { + flags = hard_cond_local_irq_save(); fpu__drop(fpu); fpu__initialize(fpu); + hard_cond_local_irq_restore(flags); return; } - fpregs_lock(); + flags = fpregs_lock(); if (user_only) { if (!fpregs_state_valid(fpu, smp_processor_id()) && @@ -382,7 +400,7 @@ } fpregs_mark_activate(); - fpregs_unlock(); + fpregs_unlock(flags); } void fpu__clear_user_states(struct fpu *fpu) @@ -400,10 +418,14 @@ */ void switch_fpu_return(void) { + unsigned long flags; + if (!static_cpu_has(X86_FEATURE_FPU)) return; + flags = hard_cond_local_irq_save(); __fpregs_load_activate(); + hard_cond_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(switch_fpu_return); @@ -503,3 +525,70 @@ */ return 0; } + +#ifdef CONFIG_DOVETAIL + +/* + * Holds the in-kernel fpu state when preempted by a task running on + * the out-of-band stage. 
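+ * + * A single buffer per CPU is enough: at most one in-band + * kernel_fpu_begin() section can be preempted by the oob + * stage on a given CPU at a time (see the suspend/resume + * pairing below).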
+ */ +static DEFINE_PER_CPU(struct fpu *, in_kernel_fpstate); + +static int fpu__init_kernel_fpstate(unsigned int cpu) +{ + struct fpu *fpu; + + fpu = kzalloc(sizeof(*fpu) + fpu_kernel_xstate_size, GFP_KERNEL); + if (fpu == NULL) + return -ENOMEM; + + this_cpu_write(in_kernel_fpstate, fpu); + fpstate_init(&fpu->state); + + return 0; +} + +static int fpu__drop_kernel_fpstate(unsigned int cpu) +{ + struct fpu *fpu = this_cpu_read(in_kernel_fpstate); + + kfree(fpu); + + return 0; +} + +void fpu__suspend_inband(void) +{ + struct fpu *kfpu = this_cpu_read(in_kernel_fpstate); + struct task_struct *tsk = current; + + if (kernel_fpu_disabled()) { + copy_fpregs_to_fpstate(kfpu); + __cpu_invalidate_fpregs_state(); + oob_fpu_set_preempt(&tsk->thread.fpu); + } +} + +void fpu__resume_inband(void) +{ + struct fpu *kfpu = this_cpu_read(in_kernel_fpstate); + struct task_struct *tsk = current; + + if (oob_fpu_preempted(&tsk->thread.fpu)) { + copy_kernel_to_fpregs(&kfpu->state); + __cpu_invalidate_fpregs_state(); + oob_fpu_clear_preempt(&tsk->thread.fpu); + } else if (!(tsk->flags & PF_KTHREAD) && + test_thread_flag(TIF_NEED_FPU_LOAD)) + switch_fpu_return(); +} + +static void __init fpu__init_dovetail(void) +{ + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "platform/x86/dovetail:online", + fpu__init_kernel_fpstate, fpu__drop_kernel_fpstate); +} +core_initcall(fpu__init_dovetail); + +#endif diff --git a/kernel/arch/x86/kernel/fpu/signal.c b/kernel/arch/x86/kernel/fpu/signal.c index b7b92cd..20d04a3 100644 --- a/kernel/arch/x86/kernel/fpu/signal.c +++ b/kernel/arch/x86/kernel/fpu/signal.c @@ -61,11 +61,12 @@ struct xregs_state *xsave = &tsk->thread.fpu.state.xsave; struct user_i387_ia32_struct env; struct _fpstate_32 __user *fp = buf; + unsigned long flags; - fpregs_lock(); + flags = fpregs_lock(); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) copy_fxregs_to_kernel(&tsk->thread.fpu); - fpregs_unlock(); + fpregs_unlock(flags); convert_from_fxsr(&env, tsk); @@ -165,6 +166,7 @@ { struct task_struct *tsk = current; int ia32_fxstate = (buf != buf_fx); + unsigned long flags; int ret; ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) || @@ -186,14 +188,14 @@ * userland's stack frame which will likely succeed. If it does not, * resolve the fault in the user memory and try again. */ - fpregs_lock(); + flags = fpregs_lock(); if (test_thread_flag(TIF_NEED_FPU_LOAD)) __fpregs_load_activate(); pagefault_disable(); ret = copy_fpregs_to_sigframe(buf_fx); pagefault_enable(); - fpregs_unlock(); + fpregs_unlock(flags); if (ret) { if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size)) @@ -286,6 +288,7 @@ struct fpu *fpu = &tsk->thread.fpu; struct user_i387_ia32_struct env; u64 user_xfeatures = 0; + unsigned long flags; int fx_only = 0; int ret = 0; @@ -337,7 +340,7 @@ * going through the kernel buffer with the enabled pagefault * handler. */ - fpregs_lock(); + flags = fpregs_lock(); pagefault_disable(); ret = copy_user_to_fpregs_zeroing(buf_fx, user_xfeatures, fx_only); pagefault_enable(); @@ -360,7 +363,7 @@ copy_kernel_to_xregs(&fpu->state.xsave, xfeatures_mask_supervisor()); fpregs_mark_activate(); - fpregs_unlock(); + fpregs_unlock(flags); return 0; } @@ -382,7 +385,7 @@ if (test_thread_flag(TIF_NEED_FPU_LOAD)) __cpu_invalidate_fpregs_state(); - fpregs_unlock(); + fpregs_unlock(flags); } else { /* * For 32-bit frames with fxstate, copy the fxstate so it can @@ -400,7 +403,7 @@ * to be loaded again on return to userland (overriding last_cpu avoids * the optimisation). 
*/ - fpregs_lock(); + flags = fpregs_lock(); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { @@ -413,7 +416,7 @@ set_thread_flag(TIF_NEED_FPU_LOAD); } __fpu_invalidate_fpregs_state(fpu); - fpregs_unlock(); + fpregs_unlock(flags); if (use_xsave() && !fx_only) { u64 init_bv = xfeatures_mask_user() & ~user_xfeatures; @@ -425,7 +428,7 @@ sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures, fx_only); - fpregs_lock(); + flags = fpregs_lock(); if (unlikely(init_bv)) copy_kernel_to_xregs(&init_fpstate.xsave, init_bv); @@ -446,7 +449,7 @@ sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures, fx_only); - fpregs_lock(); + flags = fpregs_lock(); if (use_xsave()) { u64 init_bv; @@ -460,14 +463,14 @@ if (ret) goto out; - fpregs_lock(); + flags = fpregs_lock(); ret = copy_kernel_to_fregs_err(&fpu->state.fsave); } if (!ret) fpregs_mark_activate(); else fpregs_deactivate(fpu); - fpregs_unlock(); + fpregs_unlock(flags); out: if (ret) diff --git a/kernel/arch/x86/kernel/hpet.c b/kernel/arch/x86/kernel/hpet.c index 574df24..f44da6c 100644 --- a/kernel/arch/x86/kernel/hpet.c +++ b/kernel/arch/x86/kernel/hpet.c @@ -406,7 +406,7 @@ evt->set_next_event = hpet_clkevt_set_next_event; evt->set_state_shutdown = hpet_clkevt_set_state_shutdown; - evt->features = CLOCK_EVT_FEAT_ONESHOT; + evt->features = CLOCK_EVT_FEAT_ONESHOT|CLOCK_EVT_FEAT_PIPELINE; if (hc->boot_cfg & HPET_TN_PERIODIC) { evt->features |= CLOCK_EVT_FEAT_PERIODIC; evt->set_state_periodic = hpet_clkevt_set_state_periodic; @@ -519,7 +519,7 @@ return IRQ_HANDLED; } - evt->event_handler(evt); + clockevents_handle_event(evt); return IRQ_HANDLED; } @@ -702,7 +702,7 @@ if (arch_spin_is_locked(&old.lock)) goto contended; - local_irq_save(flags); + flags = hard_local_irq_save(); if (arch_spin_trylock(&hpet.lock)) { new.value = hpet_readl(HPET_COUNTER); /* @@ -710,10 +710,10 @@ */ WRITE_ONCE(hpet.value, new.value); arch_spin_unlock(&hpet.lock); - local_irq_restore(flags); + hard_local_irq_restore(flags); return (u64)new.value; } - local_irq_restore(flags); + hard_local_irq_restore(flags); contended: /* diff --git a/kernel/arch/x86/kernel/i8259.c b/kernel/arch/x86/kernel/i8259.c index 282b4ee..6abdbd0 100644 --- a/kernel/arch/x86/kernel/i8259.c +++ b/kernel/arch/x86/kernel/i8259.c @@ -33,7 +33,7 @@ static void init_8259A(int auto_eoi); static int i8259A_auto_eoi; -DEFINE_RAW_SPINLOCK(i8259A_lock); +DEFINE_HARD_SPINLOCK(i8259A_lock); /* * 8259A PIC functions to handle ISA devices: @@ -227,6 +227,7 @@ .irq_disable = disable_8259A_irq, .irq_unmask = enable_8259A_irq, .irq_mask_ack = mask_and_ack_8259A, + .flags = IRQCHIP_PIPELINE_SAFE, }; static char irq_trigger[2]; diff --git a/kernel/arch/x86/kernel/idt.c b/kernel/arch/x86/kernel/idt.c index ee1a283..fb4333d 100644 --- a/kernel/arch/x86/kernel/idt.c +++ b/kernel/arch/x86/kernel/idt.c @@ -117,6 +117,10 @@ INTG(CALL_FUNCTION_SINGLE_VECTOR, asm_sysvec_call_function_single), INTG(IRQ_MOVE_CLEANUP_VECTOR, asm_sysvec_irq_move_cleanup), INTG(REBOOT_VECTOR, asm_sysvec_reboot), +#ifdef CONFIG_IRQ_PIPELINE + INTG(RESCHEDULE_OOB_VECTOR, asm_sysvec_reschedule_oob_ipi), + INTG(TIMER_OOB_VECTOR, asm_sysvec_timer_oob_ipi), +#endif #endif #ifdef CONFIG_X86_THERMAL_VECTOR diff --git a/kernel/arch/x86/kernel/irq.c b/kernel/arch/x86/kernel/irq.c index ce904c8..753e4d6 100644 --- a/kernel/arch/x86/kernel/irq.c +++ b/kernel/arch/x86/kernel/irq.c @@ -4,6 +4,7 @@ */ #include <linux/cpu.h> #include <linux/interrupt.h> +#include <linux/irq_pipeline.h> #include <linux/kernel_stat.h> #include <linux/of.h> #include 
<linux/seq_file.h> @@ -48,7 +49,7 @@ * completely. * But only ack when the APIC is enabled -AK */ - ack_APIC_irq(); + __ack_APIC_irq(); } #define irq_stats(x) (&per_cpu(irq_stat, x)) @@ -235,8 +236,11 @@ /* * common_interrupt() handles all normal device IRQ's (the special SMP * cross-CPU interrupts have their own entry points). + * + * Compiled out if CONFIG_IRQ_PIPELINE is enabled, replaced by + * arch_handle_irq(). */ -DEFINE_IDTENTRY_IRQ(common_interrupt) +DEFINE_IDTENTRY_IRQ_PIPELINED(common_interrupt) { struct pt_regs *old_regs = set_irq_regs(regs); struct irq_desc *desc; @@ -268,7 +272,8 @@ /* * Handler for X86_PLATFORM_IPI_VECTOR. */ -DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(X86_PLATFORM_IPI_VECTOR, + sysvec_x86_platform_ipi) { struct pt_regs *old_regs = set_irq_regs(regs); @@ -300,7 +305,8 @@ /* * Handler for POSTED_INTERRUPT_VECTOR. */ -DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi) +DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(POSTED_INTR_VECTOR, + sysvec_kvm_posted_intr_ipi) { ack_APIC_irq(); inc_irq_stat(kvm_posted_intr_ipis); @@ -309,7 +315,8 @@ /* * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. */ -DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_WAKEUP_VECTOR, + sysvec_kvm_posted_intr_wakeup_ipi) { ack_APIC_irq(); inc_irq_stat(kvm_posted_intr_wakeup_ipis); @@ -319,7 +326,8 @@ /* * Handler for POSTED_INTERRUPT_NESTED_VECTOR. */ -DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi) +DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(POSTED_INTR_NESTED_VECTOR, + sysvec_kvm_posted_intr_nested_ipi) { ack_APIC_irq(); inc_irq_stat(kvm_posted_intr_nested_ipis); diff --git a/kernel/arch/x86/kernel/irq_pipeline.c b/kernel/arch/x86/kernel/irq_pipeline.c new file mode 100644 index 0000000..725a326 --- /dev/null +++ b/kernel/arch/x86/kernel/irq_pipeline.c @@ -0,0 +1,387 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum <rpm@xenomai.org>. + */ +#include <linux/kernel.h> +#include <linux/smp.h> +#include <linux/irq.h> +#include <linux/irq_pipeline.h> +#include <asm/irqdomain.h> +#include <asm/apic.h> +#include <asm/traps.h> +#include <asm/irq_work.h> +#include <asm/mshyperv.h> +#include <asm/idtentry.h> + +static struct irq_domain *sipic_domain; + +static void sipic_irq_noop(struct irq_data *data) { } + +static unsigned int sipic_irq_noop_ret(struct irq_data *data) +{ + return 0; +} + +static struct irq_chip sipic_chip = { + .name = "SIPIC", + .irq_startup = sipic_irq_noop_ret, + .irq_shutdown = sipic_irq_noop, + .irq_enable = sipic_irq_noop, + .irq_disable = sipic_irq_noop, + .flags = IRQCHIP_PIPELINE_SAFE | IRQCHIP_SKIP_SET_WAKE, +}; + +void handle_apic_irq(struct irq_desc *desc) +{ + if (WARN_ON_ONCE(irq_pipeline_debug() && !on_pipeline_entry())) + return; + + /* + * MCE events are non-maskable therefore their in-band + * handlers have to be oob-compatible by construction. Those + * handlers run immediately out of the IDT for this reason as + * well. We won't see them here since they are not routed via + * arch_handle_irq() -> generic_pipeline_irq(). + * + * All we need to do at this stage is to acknowledge other + * APIC events, then pipeline the corresponding interrupt from + * our synthetic controller chip (SIPIC). 
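+ * + * handle_oob_irq() either runs an out-of-band handler right + * away or logs the event for replay to the in-band stage via + * arch_do_IRQ_pipelined() (see the flow graph in + * do_sysvec_inband() below).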
+ */ + __ack_APIC_irq(); + + handle_oob_irq(desc); +} + +void irq_send_oob_ipi(unsigned int ipi, + const struct cpumask *cpumask) +{ + apic->send_IPI_mask_allbutself(cpumask, apicm_irq_vector(ipi)); +} +EXPORT_SYMBOL_GPL(irq_send_oob_ipi); + +static void do_sysvec_inband(struct irq_desc *desc) +{ + unsigned int irq = irq_desc_get_irq(desc); + struct pt_regs *regs = get_irq_regs(); + int vector = apicm_irq_vector(irq); + + /* + * This code only sees pipelined sysvec events tagged with + * DEFINE_IDTENTRY_SYSVEC_PIPELINED: + * + * arch_handle_irq(irq) + * generic_pipeline_irq(irq) + * handle_apic_irq(irq) + * handle_oob_irq(irq) + * [...irq_post_inband...] + * + * arch_do_IRQ_pipelined(desc) + * <switch_to_irqstack> + * | + * v + * do_sysvec_inband(desc) + * + * System vectors which are still tagged as + * DEFINE_IDTENTRY_SYSVEC/DEFINE_IDTENTRY_SYSVEC_SIMPLE are + * directly dispatched out of the IDT, assuming their handler + * is oob-safe (like NMI handlers) therefore never reach this + * in-band stage handler. + */ + + switch (vector) { +#ifdef CONFIG_SMP + case RESCHEDULE_VECTOR: + __sysvec_reschedule_ipi(regs); + break; + case CALL_FUNCTION_VECTOR: + __sysvec_call_function(regs); + break; + case CALL_FUNCTION_SINGLE_VECTOR: + __sysvec_call_function_single(regs); + break; + case REBOOT_VECTOR: + __sysvec_reboot(regs); + break; +#endif + case X86_PLATFORM_IPI_VECTOR: + __sysvec_x86_platform_ipi(regs); + break; + case IRQ_WORK_VECTOR: + __sysvec_irq_work(regs); + break; +#ifdef CONFIG_HAVE_KVM + case POSTED_INTR_VECTOR: + __sysvec_kvm_posted_intr_ipi(regs); + break; + case POSTED_INTR_WAKEUP_VECTOR: + __sysvec_kvm_posted_intr_wakeup_ipi(regs); + break; + case POSTED_INTR_NESTED_VECTOR: + __sysvec_kvm_posted_intr_nested_ipi(regs); + break; +#endif +#ifdef CONFIG_HYPERV + case HYPERVISOR_CALLBACK_VECTOR: + __sysvec_hyperv_callback(regs); + break; + case HYPERV_REENLIGHTENMENT_VECTOR: + __sysvec_hyperv_reenlightenment(regs); + break; + case HYPERV_STIMER0_VECTOR: + __sysvec_hyperv_stimer0(regs); + break; +#endif +#ifdef CONFIG_ACRN_GUEST + case HYPERVISOR_CALLBACK_VECTOR: + __sysvec_acrn_hv_callback(regs); + break; +#endif +#ifdef CONFIG_XEN_PVHVM + case HYPERVISOR_CALLBACK_VECTOR: + __sysvec_xen_hvm_callback(regs); + break; +#endif + case LOCAL_TIMER_VECTOR: + __sysvec_apic_timer_interrupt(regs); + break; + default: + printk_once(KERN_ERR "irq_pipeline: unexpected event" + " on vector #%.2x (irq=%u)", vector, irq); + } +} + +static irqentry_state_t pipeline_enter_rcu(void) +{ + irqentry_state_t state = { + .exit_rcu = false, + .stage_info = IRQENTRY_INBAND_UNSTALLED, + }; + + if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) { + rcu_irq_enter(); + state.exit_rcu = true; + } else { + rcu_irq_enter_check_tick(); + } + + return state; +} + +static void pipeline_exit_rcu(irqentry_state_t state) +{ + if (state.exit_rcu) + rcu_irq_exit(); +} + +void arch_do_IRQ_pipelined(struct irq_desc *desc) +{ + struct pt_regs *regs = raw_cpu_ptr(&irq_pipeline.tick_regs); + struct pt_regs *old_regs = set_irq_regs(regs); + irqentry_state_t state; + + /* Emulate a kernel entry. 
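+ * IRQs delivered from the pipeline did not go through the + * regular idtentry prologue, so the RCU and irq_enter + * accounting is replayed here before dispatching the in-band + * handler.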
*/ + state = pipeline_enter_rcu(); + irq_enter_rcu(); + + if (desc->irq_data.domain == sipic_domain) + run_irq_on_irqstack_cond(do_sysvec_inband, desc, regs); + else + run_irq_on_irqstack_cond(desc->handle_irq, desc, regs); + + irq_exit_rcu(); + pipeline_exit_rcu(state); + + set_irq_regs(old_regs); +} + +void arch_handle_irq(struct pt_regs *regs, u8 vector, bool irq_movable) +{ + struct irq_desc *desc; + unsigned int irq; + + if (vector >= FIRST_SYSTEM_VECTOR) { + irq = apicm_vector_irq(vector); + } else { + desc = __this_cpu_read(vector_irq[vector]); + if (unlikely(IS_ERR_OR_NULL(desc))) { + __ack_APIC_irq(); + + if (desc == VECTOR_UNUSED) { + pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n", + __func__, smp_processor_id(), + vector); + } else { + __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); + } + return; + } + if (irqd_is_setaffinity_pending(&desc->irq_data)) { + raw_spin_lock(&desc->lock); + if (irq_movable) + irqd_clr_move_blocked(&desc->irq_data); + else + irqd_set_move_blocked(&desc->irq_data); + raw_spin_unlock(&desc->lock); + } + irq = irq_desc_get_irq(desc); + } + + generic_pipeline_irq(irq, regs); +} + +noinstr void arch_pipeline_entry(struct pt_regs *regs, u8 vector) +{ + struct irq_stage_data *prevd; + irqentry_state_t state; + + /* + * The tricky one: we distinguish the following cases: + * + * [1] entry from oob context, either kernel or user code was + * preempted by the IRQ, the in-band (virtual) interrupt state + * is 'undefined' (could be either stalled/unstalled, it is + * not relevant). + * + * [2] entry from in-band context while the stage is stalled, + * which means that some kernel code was preempted by the IRQ + * since in-band user code cannot run with interrupts + * (virtually) disabled. + * + * [3] entry from in-band context while the stage is + * unstalled: the common case for IRQ entry. Kernel or user + * code may have been preempted, we handle the event + * identically. + * + * [1] and [2] are processed almost the same way, except for + * one key aspect: the potential stage demotion of the + * preempted task which originally entered [1] on the oob + * stage, then left it for the in-band stage as a result of + * handling the IRQ (such demotion normally happens during + * handle_irq_pipelined_finish() if required). In this + * particular case, we want to run the common IRQ epilogue + * code before returning to user mode, so that all pending + * in-band work (_TIF_WORK_*) is carried out for the task + * which is about to exit kernel mode. + * + * If the task runs in-band at the exit point and a user mode + * context was preempted, then case [2] is excluded by + * definition so we know for sure that we just observed a + * stage demotion, therefore we have to run the work loop by + * calling irqentry_exit_to_user_mode(). + */ + if (unlikely(running_oob() || irqs_disabled())) { + instrumentation_begin(); + prevd = handle_irq_pipelined_prepare(regs); + arch_handle_irq(regs, vector, false); + kvm_set_cpu_l1tf_flush_l1d(); + handle_irq_pipelined_finish(prevd, regs); + if (running_inband() && user_mode(regs)) { + stall_inband_nocheck(); + irqentry_exit_to_user_mode(regs); + } + instrumentation_end(); + return; + } + + /* In-band on entry, accepting interrupts. */ + state = irqentry_enter(regs); + instrumentation_begin(); + /* Prep for handling, switching oob. */ + prevd = handle_irq_pipelined_prepare(regs); + arch_handle_irq(regs, vector, true); + kvm_set_cpu_l1tf_flush_l1d(); + /* irqentry_enter() stalled the in-band stage. 
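+ * Unstall it across the epilogue so pending in-band work can + * be synchronized, then stall it again to match what + * irqentry_exit() expects on the way out.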
*/ + trace_hardirqs_on(); + unstall_inband_nocheck(); + handle_irq_pipelined_finish(prevd, regs); + stall_inband_nocheck(); + trace_hardirqs_off(); + instrumentation_end(); + irqentry_exit(regs, state); +} + +static int sipic_irq_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_percpu_devid(irq); + irq_set_chip_and_handler(irq, &sipic_chip, handle_apic_irq); + + return 0; +} + +static struct irq_domain_ops sipic_domain_ops = { + .map = sipic_irq_map, +}; + +static void create_x86_apic_domain(void) +{ + sipic_domain = irq_domain_add_simple(NULL, NR_APIC_VECTORS, + FIRST_SYSTEM_IRQ, + &sipic_domain_ops, NULL); +} + +#ifdef CONFIG_SMP + +DEFINE_IDTENTRY_SYSVEC_PIPELINED(RESCHEDULE_OOB_VECTOR, + sysvec_reschedule_oob_ipi) +{ /* In-band handler is unused. */ } + +DEFINE_IDTENTRY_SYSVEC_PIPELINED(TIMER_OOB_VECTOR, + sysvec_timer_oob_ipi) +{ /* In-band handler is unused. */ } + +void handle_irq_move_cleanup(struct irq_desc *desc) +{ + if (on_pipeline_entry()) { + /* 1. on receipt from hardware. */ + __ack_APIC_irq(); + handle_oob_irq(desc); + } else { + /* 2. in-band delivery. */ + __sysvec_irq_move_cleanup(NULL); + } +} + +static void smp_setup(void) +{ + int irq; + + /* + * The IRQ cleanup event must be pipelined to the inband + * stage, so we need a valid IRQ descriptor for it. Since we + * still are in the early boot stage on CPU0, we ask for a 1:1 + * mapping between the vector number and IRQ number, to make + * things easier for us later on. + */ + irq = irq_alloc_desc_at(IRQ_MOVE_CLEANUP_VECTOR, 0); + WARN_ON(IRQ_MOVE_CLEANUP_VECTOR != irq); + /* + * Set up the vector_irq[] mapping array for the boot CPU, + * other CPUs will copy this entry when their APIC is going + * online (see lapic_online()). + */ + per_cpu(vector_irq, 0)[irq] = irq_to_desc(irq); + + irq_set_chip_and_handler(irq, &dummy_irq_chip, + handle_irq_move_cleanup); +} + +#else + +static void smp_setup(void) { } + +#endif + +void __init arch_irq_pipeline_init(void) +{ + /* + * Create an IRQ domain for mapping APIC system interrupts + * (in-band and out-of-band), with fixed sirq numbers starting + * from FIRST_SYSTEM_IRQ. Upon receipt of a system interrupt, + * the corresponding sirq is injected into the pipeline. 
+ */ + create_x86_apic_domain(); + + smp_setup(); +} diff --git a/kernel/arch/x86/kernel/irq_work.c b/kernel/arch/x86/kernel/irq_work.c index 890d477..f2c8d14 100644 --- a/kernel/arch/x86/kernel/irq_work.c +++ b/kernel/arch/x86/kernel/irq_work.c @@ -14,7 +14,8 @@ #include <linux/interrupt.h> #ifdef CONFIG_X86_LOCAL_APIC -DEFINE_IDTENTRY_SYSVEC(sysvec_irq_work) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(IRQ_WORK_VECTOR, + sysvec_irq_work) { ack_APIC_irq(); trace_irq_work_entry(IRQ_WORK_VECTOR); diff --git a/kernel/arch/x86/kernel/kvm.c b/kernel/arch/x86/kernel/kvm.c index fe9babe..6988375 100644 --- a/kernel/arch/x86/kernel/kvm.c +++ b/kernel/arch/x86/kernel/kvm.c @@ -255,12 +255,15 @@ { u32 flags = kvm_read_and_reset_apf_flags(); irqentry_state_t state; + unsigned long irqflags; if (!flags) return false; state = irqentry_enter(regs); + oob_trap_notify(X86_TRAP_PF, regs); instrumentation_begin(); + irqflags = hard_cond_local_irq_save(); /* * If the host managed to inject an async #PF into an interrupt @@ -279,7 +282,9 @@ WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags); } + hard_cond_local_irq_restore(irqflags); instrumentation_end(); + oob_trap_unwind(X86_TRAP_PF, regs); irqentry_exit(regs, state); return true; } @@ -478,6 +483,9 @@ static void kvm_guest_cpu_offline(bool shutdown) { + unsigned long flags; + + flags = hard_local_irq_save(); kvm_disable_steal_time(); if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) wrmsrl(MSR_KVM_PV_EOI_EN, 0); @@ -485,15 +493,16 @@ if (!shutdown) apf_task_wake_all(); kvmclock_disable(); + hard_local_irq_restore(flags); } static int kvm_cpu_online(unsigned int cpu) { unsigned long flags; - local_irq_save(flags); + local_irq_save_full(flags); kvm_guest_cpu_init(); - local_irq_restore(flags); + local_irq_restore_full(flags); return 0; } @@ -907,7 +916,7 @@ if (in_nmi()) return; - local_irq_save(flags); + flags = hard_local_irq_save(); if (READ_ONCE(*ptr) != val) goto out; @@ -923,7 +932,7 @@ safe_halt(); out: - local_irq_restore(flags); + hard_local_irq_restore(flags); } #ifdef CONFIG_X86_32 diff --git a/kernel/arch/x86/kernel/nmi.c b/kernel/arch/x86/kernel/nmi.c index 2ef961c..bf0766f 100644 --- a/kernel/arch/x86/kernel/nmi.c +++ b/kernel/arch/x86/kernel/nmi.c @@ -473,6 +473,10 @@ static DEFINE_PER_CPU(unsigned long, nmi_cr2); static DEFINE_PER_CPU(unsigned long, nmi_dr7); +/* + * IRQ pipeline: fixing up the virtual IRQ state makes no sense on + * NMI. + */ DEFINE_IDTENTRY_RAW(exc_nmi) { irqentry_state_t irq_state; diff --git a/kernel/arch/x86/kernel/process.c b/kernel/arch/x86/kernel/process.c index 5e17c39..a78266b 100644 --- a/kernel/arch/x86/kernel/process.c +++ b/kernel/arch/x86/kernel/process.c @@ -598,9 +598,9 @@ unsigned long flags; /* Forced update. Make sure all relevant TIF flags are different */ - local_irq_save(flags); + flags = hard_local_irq_save(); __speculation_ctrl_update(~tif, tif); - local_irq_restore(flags); + hard_local_irq_restore(flags); } /* Called from seccomp/prctl update */ @@ -700,6 +700,9 @@ /* * We use this if we don't have any better idle routine.. + * + * IRQ pipeline: safe_halt() returns with hard irqs on, caller does + * not need to force enable. 
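+ * (This is why the idle paths below only add + * hard_cond_local_irq_enable() on the branches which do not + * go through __sti_mwait() or safe_halt().)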
*/ void __cpuidle default_idle(void) { @@ -722,7 +725,7 @@ void stop_this_cpu(void *dummy) { - local_irq_disable(); + hard_local_irq_disable(); /* * Remove this CPU: */ @@ -822,11 +825,14 @@ } __monitor((void *)¤t_thread_info()->flags, 0, 0); - if (!need_resched()) + if (!need_resched()) { __sti_mwait(0, 0); - else + } else { + hard_cond_local_irq_enable(); raw_local_irq_enable(); + } } else { + hard_cond_local_irq_enable(); raw_local_irq_enable(); } __current_clr_polling(); diff --git a/kernel/arch/x86/kernel/process_64.c b/kernel/arch/x86/kernel/process_64.c index ad3f82a..65d6171 100644 --- a/kernel/arch/x86/kernel/process_64.c +++ b/kernel/arch/x86/kernel/process_64.c @@ -272,9 +272,9 @@ unsigned long flags; /* Interrupts need to be off for FSGSBASE */ - local_irq_save(flags); + local_irq_save_full(flags); save_fsgs(current); - local_irq_restore(flags); + local_irq_restore_full(flags); } #if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL_GPL(current_save_fsgs); @@ -410,9 +410,9 @@ if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; - local_irq_save(flags); + local_irq_save_full(flags); gsbase = __rdgsbase_inactive(); - local_irq_restore(flags); + local_irq_restore_full(flags); } else { rdmsrl(MSR_KERNEL_GS_BASE, gsbase); } @@ -425,9 +425,9 @@ if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; - local_irq_save(flags); + local_irq_save_full(flags); __wrgsbase_inactive(gsbase); - local_irq_restore(flags); + local_irq_restore_full(flags); } else { wrmsrl(MSR_KERNEL_GS_BASE, gsbase); } @@ -537,8 +537,17 @@ struct thread_struct *next = &next_p->thread; int cpu = smp_processor_id(); + /* + * Dovetail: Switching context on the out-of-band stage is + * legit, and we may have preempted an in-band (soft)irq + * handler earlier. Since oob handlers never switch stack, + * make sure to restrict the following test to in-band + * callers. + */ WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && - this_cpu_read(irq_count) != -1); + running_inband() && this_cpu_read(irq_count) != -1); + + WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled()); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_prepare(prev_p, cpu); @@ -719,6 +728,7 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) { + unsigned long flags; int ret = 0; switch (option) { @@ -726,7 +736,7 @@ if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; - preempt_disable(); + flags = hard_preempt_disable(); /* * ARCH_SET_GS has always overwritten the index * and the base. Zero is the most sensible value @@ -747,7 +757,7 @@ task->thread.gsindex = 0; x86_gsbase_write_task(task, arg2); } - preempt_enable(); + hard_preempt_enable(flags); break; } case ARCH_SET_FS: { @@ -758,7 +768,7 @@ if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; - preempt_disable(); + flags = hard_preempt_disable(); /* * Set the selector to 0 for the same reason * as %gs above. @@ -776,7 +786,7 @@ task->thread.fsindex = 0; x86_fsbase_write_task(task, arg2); } - preempt_enable(); + hard_preempt_enable(flags); break; } case ARCH_GET_FS: { diff --git a/kernel/arch/x86/kernel/smp.c b/kernel/arch/x86/kernel/smp.c index eff4ce3..c4684db 100644 --- a/kernel/arch/x86/kernel/smp.c +++ b/kernel/arch/x86/kernel/smp.c @@ -131,7 +131,7 @@ /* * this function calls the 'stop' function on all other CPUs in the system. 
*/ -DEFINE_IDTENTRY_SYSVEC(sysvec_reboot) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(REBOOT_VECTOR, sysvec_reboot) { ack_APIC_irq(); cpu_emergency_vmxoff(); @@ -212,17 +212,18 @@ udelay(1); } - local_irq_save(flags); + flags = hard_local_irq_save(); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); - local_irq_restore(flags); + hard_local_irq_restore(flags); } /* * Reschedule call back. KVM uses this interrupt to force a cpu out of * guest mode. */ -DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi) +DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(RESCHEDULE_VECTOR, + sysvec_reschedule_ipi) { ack_APIC_irq(); trace_reschedule_entry(RESCHEDULE_VECTOR); @@ -231,7 +232,8 @@ trace_reschedule_exit(RESCHEDULE_VECTOR); } -DEFINE_IDTENTRY_SYSVEC(sysvec_call_function) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_VECTOR, + sysvec_call_function) { ack_APIC_irq(); trace_call_function_entry(CALL_FUNCTION_VECTOR); @@ -240,7 +242,8 @@ trace_call_function_exit(CALL_FUNCTION_VECTOR); } -DEFINE_IDTENTRY_SYSVEC(sysvec_call_function_single) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_SINGLE_VECTOR, + sysvec_call_function_single) { ack_APIC_irq(); trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR); diff --git a/kernel/arch/x86/kernel/smpboot.c b/kernel/arch/x86/kernel/smpboot.c index e8e5515..e9a7b3c 100644 --- a/kernel/arch/x86/kernel/smpboot.c +++ b/kernel/arch/x86/kernel/smpboot.c @@ -258,7 +258,7 @@ x86_platform.nmi_init(); /* enable local interrupts */ - local_irq_enable(); + local_irq_enable_full(); x86_cpuinit.setup_percpu_clockev(); @@ -1133,7 +1133,6 @@ { int apicid = apic->cpu_present_to_apicid(cpu); int cpu0_nmi_registered = 0; - unsigned long flags; int err, ret = 0; lockdep_assert_irqs_enabled(); @@ -1184,9 +1183,9 @@ * Check TSC synchronization with the AP (keep irqs disabled * while doing so): */ - local_irq_save(flags); + local_irq_disable_full(); check_tsc_sync_source(cpu); - local_irq_restore(flags); + local_irq_enable_full(); while (!cpu_online(cpu)) { cpu_relax(); @@ -1654,7 +1653,7 @@ /* * With physical CPU hotplug, we should halt the cpu */ - local_irq_disable(); + local_irq_disable_full(); } /** diff --git a/kernel/arch/x86/kernel/time.c b/kernel/arch/x86/kernel/time.c index e42faa7..874fd2e 100644 --- a/kernel/arch/x86/kernel/time.c +++ b/kernel/arch/x86/kernel/time.c @@ -54,7 +54,7 @@ */ static irqreturn_t timer_interrupt(int irq, void *dev_id) { - global_clock_event->event_handler(global_clock_event); + clockevents_handle_event(global_clock_event); return IRQ_HANDLED; } diff --git a/kernel/arch/x86/kernel/traps.c b/kernel/arch/x86/kernel/traps.c index 2a39a2d..8ac9772 100644 --- a/kernel/arch/x86/kernel/traps.c +++ b/kernel/arch/x86/kernel/traps.c @@ -74,14 +74,22 @@ static inline void cond_local_irq_enable(struct pt_regs *regs) { - if (regs->flags & X86_EFLAGS_IF) - local_irq_enable(); + if (regs->flags & X86_EFLAGS_IF) { + if (running_inband()) + local_irq_enable_full(); + else + hard_local_irq_enable(); + } } static inline void cond_local_irq_disable(struct pt_regs *regs) { - if (regs->flags & X86_EFLAGS_IF) - local_irq_disable(); + if (regs->flags & X86_EFLAGS_IF) { + if (running_inband()) + local_irq_disable_full(); + else + hard_local_irq_disable(); + } } __always_inline int is_valid_bugaddr(unsigned long addr) @@ -148,6 +156,39 @@ } } +static __always_inline +bool mark_trap_entry(int trapnr, struct pt_regs *regs) +{ + oob_trap_notify(trapnr, regs); + + if (likely(running_inband())) { + hard_cond_local_irq_enable(); + return true; + } + + return false; +} + 
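+/* + * Canonical bracket applied to the trap handlers below: + * + * if (!mark_trap_entry(trapnr, regs)) + * return; <- the oob stage owns the trap + * ... in-band handling, hard irqs on ... + * mark_trap_exit(trapnr, regs); + * + * The _raw variants skip the hard irq flip, for entries which + * must leave the CPU interrupt state untouched (debug, + * breakpoint and other NMI-like traps). + */ +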
+static __always_inline +void mark_trap_exit(int trapnr, struct pt_regs *regs) +{ + oob_trap_unwind(trapnr, regs); + hard_cond_local_irq_disable(); +} + +static __always_inline +bool mark_trap_entry_raw(int trapnr, struct pt_regs *regs) +{ + oob_trap_notify(trapnr, regs); + return running_inband(); +} + +static __always_inline +void mark_trap_exit_raw(int trapnr, struct pt_regs *regs) +{ + oob_trap_unwind(trapnr, regs); +} + static void do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, long error_code, int sicode, void __user *addr) @@ -171,12 +212,17 @@ { RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); + if (!mark_trap_entry(trapnr, regs)) + return; + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != NOTIFY_STOP) { cond_local_irq_enable(regs); do_trap(trapnr, signr, str, regs, error_code, sicode, addr); cond_local_irq_disable(regs); } + + mark_trap_exit(trapnr, regs); } /* @@ -230,14 +276,22 @@ * Since we're emulating a CALL with exceptions, restore the interrupt * state to what it was at the exception site. */ - if (regs->flags & X86_EFLAGS_IF) - raw_local_irq_enable(); + if (regs->flags & X86_EFLAGS_IF) { + if (running_oob()) + hard_local_irq_enable(); + else + local_irq_enable_full(); + } if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) { regs->ip += LEN_UD2; handled = true; } - if (regs->flags & X86_EFLAGS_IF) - raw_local_irq_disable(); + if (regs->flags & X86_EFLAGS_IF) { + if (running_oob()) + hard_local_irq_disable(); + else + local_irq_disable_full(); + } instrumentation_end(); return handled; @@ -251,15 +305,26 @@ * We use UD2 as a short encoding for 'CALL __WARN', as such * handle it before exception entry to avoid recursive WARN * in case exception entry is the one triggering WARNs. + * + * dovetail: handle_bug() may run oob, so we do not downgrade + * in-band upon a failed __WARN assertion since it might have + * tripped in a section of code which would not be happy to + * switch stage. However, anything else should be notified to + * the core, because the kernel execution might be about to + * stop, so we'd need to switch in-band to get any output + * before this happens. */ if (!user_mode(regs) && handle_bug(regs)) return; - state = irqentry_enter(regs); - instrumentation_begin(); - handle_invalid_op(regs); - instrumentation_end(); - irqentry_exit(regs, state); + if (mark_trap_entry_raw(X86_TRAP_UD, regs)) { + state = irqentry_enter(regs); + instrumentation_begin(); + handle_invalid_op(regs); + instrumentation_end(); + irqentry_exit(regs, state); + mark_trap_exit_raw(X86_TRAP_UD, regs); + } } DEFINE_IDTENTRY(exc_coproc_segment_overrun) @@ -290,8 +355,11 @@ { char *str = "alignment check"; - if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP) + if (!mark_trap_entry(X86_TRAP_AC, regs)) return; + + if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP) + goto mark_exit; if (!user_mode(regs)) die("Split lock detected\n", regs, error_code); @@ -306,6 +374,9 @@ out: local_irq_disable(); + +mark_exit: + mark_trap_exit(X86_TRAP_AC, regs); } #ifdef CONFIG_VMAP_STACK @@ -341,6 +412,9 @@ * * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs * to be read before doing anything else. + * + * Dovetail: do not even ask the companion core to try restoring the + * in-band stage on double-fault, this would be a lost cause. 
*/ DEFINE_IDTENTRY_DF(exc_double_fault) { @@ -465,9 +539,12 @@ DEFINE_IDTENTRY(exc_bounds) { + if (!mark_trap_entry(X86_TRAP_BR, regs)) + return; + if (notify_die(DIE_TRAP, "bounds", regs, 0, X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP) - return; + goto out; cond_local_irq_enable(regs); if (!user_mode(regs)) @@ -476,6 +553,8 @@ do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL); cond_local_irq_disable(regs); +out: + mark_trap_exit(X86_TRAP_BR, regs); } enum kernel_gp_hint { @@ -570,9 +649,9 @@ } if (v8086_mode(regs)) { - local_irq_enable(); + local_irq_enable_full(); handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); - local_irq_disable(); + local_irq_disable_full(); return; } @@ -585,9 +664,12 @@ tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; + if (!mark_trap_entry(X86_TRAP_GP, regs)) + goto exit; + show_signal(tsk, SIGSEGV, "", desc, regs, error_code); force_sig(SIGSEGV); - goto exit; + goto mark_exit; } if (fixup_exception(regs, X86_TRAP_GP, error_code, 0)) @@ -605,9 +687,12 @@ kprobe_fault_handler(regs, X86_TRAP_GP)) goto exit; + if (!mark_trap_entry(X86_TRAP_GP, regs)) + goto exit; + ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV); if (ret == NOTIFY_STOP) - goto exit; + goto mark_exit; if (error_code) snprintf(desc, sizeof(desc), "segment-related " GPFSTR); @@ -629,6 +714,8 @@ die_addr(desc, regs, error_code, gp_addr); +mark_exit: + mark_trap_exit(X86_TRAP_GP, regs); exit: cond_local_irq_disable(regs); } @@ -673,6 +760,9 @@ if (poke_int3_handler(regs)) return; + if (!mark_trap_entry_raw(X86_TRAP_BP, regs)) + return; + /* * irqentry_enter_from_user_mode() uses static_branch_{,un}likely() * and therefore can trigger INT3, hence poke_int3_handler() must @@ -695,6 +785,8 @@ instrumentation_end(); irqentry_nmi_exit(regs, irq_state); } + + mark_trap_exit_raw(X86_TRAP_BP, regs); } #ifdef CONFIG_X86_64 @@ -999,7 +1091,7 @@ goto out; /* It's safe to allow irq's after DR6 has been saved */ - local_irq_enable(); + local_irq_enable_full(); if (v8086_mode(regs)) { handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB); @@ -1012,7 +1104,7 @@ send_sigtrap(regs, 0, get_si_code(dr6)); out_irq: - local_irq_disable(); + local_irq_disable_full(); out: instrumentation_end(); irqentry_exit_to_user_mode(regs); @@ -1022,13 +1114,19 @@ /* IST stack entry */ DEFINE_IDTENTRY_DEBUG(exc_debug) { - exc_debug_kernel(regs, debug_read_clear_dr6()); + if (mark_trap_entry_raw(X86_TRAP_DB, regs)) { + exc_debug_kernel(regs, debug_read_clear_dr6()); + mark_trap_exit_raw(X86_TRAP_DB, regs); + } } /* User entry, runs on regular task stack */ DEFINE_IDTENTRY_DEBUG_USER(exc_debug) { - exc_debug_user(regs, debug_read_clear_dr6()); + if (mark_trap_entry_raw(X86_TRAP_DB, regs)) { + exc_debug_user(regs, debug_read_clear_dr6()); + mark_trap_exit_raw(X86_TRAP_DB, regs); + } } #else /* 32 bit does not have separate entry points. 
*/ @@ -1062,13 +1160,16 @@ if (fixup_exception(regs, trapnr, 0, 0)) goto exit; + if (!mark_trap_entry(trapnr, regs)) + goto exit; + task->thread.error_code = 0; task->thread.trap_nr = trapnr; if (notify_die(DIE_TRAP, str, regs, 0, trapnr, SIGFPE) != NOTIFY_STOP) die(str, regs, 0); - goto exit; + goto mark_exit; } /* @@ -1084,8 +1185,13 @@ if (!si_code) goto exit; + if (!mark_trap_entry(trapnr, regs)) + goto exit; + force_sig_fault(SIGFPE, si_code, (void __user *)uprobe_get_trap_addr(regs)); +mark_exit: + mark_trap_exit(trapnr, regs); exit: cond_local_irq_disable(regs); } @@ -1158,7 +1264,10 @@ * to kill the task than getting stuck in a never-ending * loop of #NM faults. */ - die("unexpected #NM exception", regs, 0); + if (mark_trap_entry(X86_TRAP_NM, regs)) { + die("unexpected #NM exception", regs, 0); + mark_trap_exit(X86_TRAP_NM, regs); + } } } diff --git a/kernel/arch/x86/kernel/tsc.c b/kernel/arch/x86/kernel/tsc.c index f9f1b45..2b59b30 100644 --- a/kernel/arch/x86/kernel/tsc.c +++ b/kernel/arch/x86/kernel/tsc.c @@ -131,7 +131,10 @@ { unsigned long long ns_now; struct cyc2ns_data data; + unsigned long flags; struct cyc2ns *c2n; + + flags = hard_cond_local_irq_save(); ns_now = cycles_2_ns(tsc_now); @@ -163,6 +166,8 @@ c2n->data[0] = data; raw_write_seqcount_latch(&c2n->seq); c2n->data[1] = data; + + hard_cond_local_irq_restore(flags); } static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now) @@ -759,11 +764,11 @@ * calibration, which will take at least 50ms, and * read the end value. */ - local_irq_save(flags); + flags = hard_local_irq_save(); tsc1 = tsc_read_refs(&ref1, hpet); tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin); tsc2 = tsc_read_refs(&ref2, hpet); - local_irq_restore(flags); + hard_local_irq_restore(flags); /* Pick the lowest PIT TSC calibration so far */ tsc_pit_min = min(tsc_pit_min, tsc_pit_khz); @@ -872,9 +877,9 @@ if (!fast_calibrate) fast_calibrate = cpu_khz_from_msr(); if (!fast_calibrate) { - local_irq_save(flags); + flags = hard_local_irq_save(); fast_calibrate = quick_pit_calibrate(); - local_irq_restore(flags); + hard_local_irq_restore(flags); } return fast_calibrate; } @@ -942,7 +947,7 @@ if (!sched_clock_stable()) return; - local_irq_save(flags); + flags = hard_local_irq_save(); /* * We're coming out of suspend, there's no concurrency yet; don't @@ -960,7 +965,7 @@ per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset; } - local_irq_restore(flags); + hard_local_irq_restore(flags); } #ifdef CONFIG_CPU_FREQ @@ -1411,6 +1416,8 @@ if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; + clocksource_tsc.vdso_type = CLOCKSOURCE_VDSO_ARCHITECTED; + /* * When TSC frequency is known (retrieved via MSR or CPUID), we skip * the refined calibration and directly register it as a clocksource. diff --git a/kernel/arch/x86/kernel/tsc_sync.c b/kernel/arch/x86/kernel/tsc_sync.c index 9236600..883c0df 100644 --- a/kernel/arch/x86/kernel/tsc_sync.c +++ b/kernel/arch/x86/kernel/tsc_sync.c @@ -367,6 +367,8 @@ atomic_set(&test_runs, 1); else atomic_set(&test_runs, 3); + + hard_cond_local_irq_disable(); retry: /* * Wait for the target to start or to skip the test: @@ -448,6 +450,8 @@ if (unsynchronized_tsc()) return; + hard_cond_local_irq_disable(); + /* * Store, verify and sanitize the TSC adjust register. If * successful skip the test. 
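The emulate.c changes below apply the same flags-threading discipline introduced for fpregs_lock()/fpregs_unlock() in the fpu/core.c hunks earlier in this patch: the lock helper now returns the hard irq state, and every caller carries it to the matching unlock. A minimal sketch of the converted calling pattern, assuming the signatures added above:

	unsigned long flags;

	/*
	 * Assumed to hard-disable irqs under pipelining and return the
	 * previous state; plain fpregs_lock() semantics otherwise.
	 */
	flags = fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_return();	/* make the task's fpregs live */
	/* ... access the FPU registers safely here ... */
	fpregs_unlock(flags);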
diff --git a/kernel/arch/x86/kvm/emulate.c b/kernel/arch/x86/kvm/emulate.c index 63efccc..4301a49 100644 --- a/kernel/arch/x86/kvm/emulate.c +++ b/kernel/arch/x86/kvm/emulate.c @@ -1125,23 +1125,27 @@ } } -static void emulator_get_fpu(void) +static unsigned long emulator_get_fpu(void) { - fpregs_lock(); + unsigned long flags = fpregs_lock(); fpregs_assert_state_consistent(); if (test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_return(); + + return flags; } -static void emulator_put_fpu(void) +static void emulator_put_fpu(unsigned long flags) { - fpregs_unlock(); + fpregs_unlock(flags); } static void read_sse_reg(sse128_t *data, int reg) { - emulator_get_fpu(); + unsigned long flags; + + flags = emulator_get_fpu(); switch (reg) { case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; @@ -1163,12 +1167,14 @@ #endif default: BUG(); } - emulator_put_fpu(); + emulator_put_fpu(flags); } static void write_sse_reg(sse128_t *data, int reg) { - emulator_get_fpu(); + unsigned long flags; + + flags = emulator_get_fpu(); switch (reg) { case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; @@ -1190,12 +1196,14 @@ #endif default: BUG(); } - emulator_put_fpu(); + emulator_put_fpu(flags); } static void read_mmx_reg(u64 *data, int reg) { - emulator_get_fpu(); + unsigned long flags; + + flags = emulator_get_fpu(); switch (reg) { case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; @@ -1207,12 +1215,14 @@ case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; default: BUG(); } - emulator_put_fpu(); + emulator_put_fpu(flags); } static void write_mmx_reg(u64 *data, int reg) { - emulator_get_fpu(); + unsigned long flags; + + flags = emulator_get_fpu(); switch (reg) { case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; @@ -1224,30 +1234,33 @@ case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; default: BUG(); } - emulator_put_fpu(); + emulator_put_fpu(flags); } static int em_fninit(struct x86_emulate_ctxt *ctxt) { + unsigned long flags; + if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); - emulator_get_fpu(); + flags = emulator_get_fpu(); asm volatile("fninit"); - emulator_put_fpu(); + emulator_put_fpu(flags); return X86EMUL_CONTINUE; } static int em_fnstcw(struct x86_emulate_ctxt *ctxt) { + unsigned long flags; u16 fcw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); - emulator_get_fpu(); + flags = emulator_get_fpu(); asm volatile("fnstcw %0": "+m"(fcw)); - emulator_put_fpu(); + emulator_put_fpu(flags); ctxt->dst.val = fcw; @@ -1256,14 +1269,15 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt) { + unsigned long flags; u16 fsw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); - emulator_get_fpu(); + flags = emulator_get_fpu(); asm volatile("fnstsw %0": "+m"(fsw)); - emulator_put_fpu(); + emulator_put_fpu(flags); ctxt->dst.val = fsw; @@ -4182,17 +4196,18 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt) { struct fxregs_state fx_state; + unsigned long flags; int rc; rc = check_fxsr(ctxt); if (rc != X86EMUL_CONTINUE) return rc; - emulator_get_fpu(); + flags = emulator_get_fpu(); rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); - emulator_put_fpu(); + emulator_put_fpu(flags); if (rc != X86EMUL_CONTINUE) return rc; @@ -4224,6 +4239,7 @@ static int em_fxrstor(struct x86_emulate_ctxt 
*ctxt) { struct fxregs_state fx_state; + unsigned long flags; int rc; size_t size; @@ -4236,7 +4252,7 @@ if (rc != X86EMUL_CONTINUE) return rc; - emulator_get_fpu(); + flags = emulator_get_fpu(); if (size < __fxstate_size(16)) { rc = fxregs_fixup(&fx_state, size); @@ -4253,7 +4269,7 @@ rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state)); out: - emulator_put_fpu(); + emulator_put_fpu(flags); return rc; } @@ -5498,11 +5514,12 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) { + unsigned long flags; int rc; - emulator_get_fpu(); + flags = emulator_get_fpu(); rc = asm_safe("fwait"); - emulator_put_fpu(); + emulator_put_fpu(flags); if (unlikely(rc != X86EMUL_CONTINUE)) return emulate_exception(ctxt, MF_VECTOR, 0, false); diff --git a/kernel/arch/x86/kvm/vmx/vmx.c b/kernel/arch/x86/kvm/vmx/vmx.c index af6742d..4554a63 100644 --- a/kernel/arch/x86/kvm/vmx/vmx.c +++ b/kernel/arch/x86/kvm/vmx/vmx.c @@ -752,14 +752,15 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx, struct vmx_uret_msr *msr, u64 data) { + unsigned long flags; int ret = 0; u64 old_msr_data = msr->data; msr->data = data; if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) { - preempt_disable(); + flags = hard_preempt_disable(); ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask); - preempt_enable(); + hard_preempt_enable(flags); if (ret) msr->data = old_msr_data; } @@ -1383,19 +1384,23 @@ #ifdef CONFIG_X86_64 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) { - preempt_disable(); + unsigned long flags; + + flags = hard_preempt_disable(); if (vmx->guest_state_loaded) rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); - preempt_enable(); + hard_preempt_enable(flags); return vmx->msr_guest_kernel_gs_base; } static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) { - preempt_disable(); + unsigned long flags; + + flags = hard_preempt_disable(); if (vmx->guest_state_loaded) wrmsrl(MSR_KERNEL_GS_BASE, data); - preempt_enable(); + hard_preempt_enable(flags); vmx->msr_guest_kernel_gs_base = data; } #endif @@ -1795,6 +1800,7 @@ */ static void setup_msrs(struct vcpu_vmx *vmx) { + hard_cond_local_irq_disable(); vmx->guest_uret_msrs_loaded = false; vmx->nr_active_uret_msrs = 0; #ifdef CONFIG_X86_64 @@ -1815,6 +1821,7 @@ vmx_setup_uret_msr(vmx, MSR_TSC_AUX); vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL); + hard_cond_local_irq_enable(); if (cpu_has_vmx_msr_bitmap()) vmx_update_msr_bitmap(&vmx->vcpu); @@ -2050,6 +2057,7 @@ u32 msr_index = msr_info->index; u64 data = msr_info->data; u32 index; + unsigned long flags; switch (msr_index) { case MSR_EFER: @@ -2289,11 +2297,22 @@ default: find_uret_msr: + /* + * Guest MSRs may be activated independently from + * vcpu_run(): rely on the notifier for restoring them + * upon preemption by the companion core, right before + * the current CPU switches to out-of-band scheduling + * (see dovetail_context_switch()). 
+ */ msr = vmx_find_uret_msr(vmx, msr_index); - if (msr) + if (msr) { + flags = hard_cond_local_irq_save(); + inband_enter_guest(vcpu); ret = vmx_set_guest_uret_msr(vmx, msr, data); - else + hard_cond_local_irq_restore(flags); + } else { ret = kvm_set_msr_common(vcpu, msr_info); + } } /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ @@ -7056,7 +7075,9 @@ vmx_vcpu_load(vcpu, cpu); vcpu->cpu = cpu; init_vmcs(vmx); + hard_cond_local_irq_disable(); vmx_vcpu_put(vcpu); + hard_cond_local_irq_enable(); put_cpu(); if (cpu_need_virtualize_apic_accesses(vcpu)) { err = alloc_apic_access_page(vcpu->kvm); diff --git a/kernel/arch/x86/kvm/x86.c b/kernel/arch/x86/kvm/x86.c index 23d7c56..7928751 100644 --- a/kernel/arch/x86/kvm/x86.c +++ b/kernel/arch/x86/kvm/x86.c @@ -178,6 +178,7 @@ struct kvm_user_return_msrs { struct user_return_notifier urn; bool registered; + bool dirty; struct kvm_user_return_msr_values { u64 host; u64 curr; @@ -295,12 +296,29 @@ vcpu->arch.apf.gfns[i] = ~0; } +static void __kvm_on_user_return(struct kvm_user_return_msrs *msrs) +{ + struct kvm_user_return_msr_values *values; + unsigned slot; + + if (!msrs->dirty) + return; + + for (slot = 0; slot < user_return_msrs_global.nr; ++slot) { + values = &msrs->values[slot]; + if (values->host != values->curr) { + wrmsrl(user_return_msrs_global.msrs[slot], values->host); + values->curr = values->host; + } + } + + msrs->dirty = false; +} + static void kvm_on_user_return(struct user_return_notifier *urn) { - unsigned slot; struct kvm_user_return_msrs *msrs = container_of(urn, struct kvm_user_return_msrs, urn); - struct kvm_user_return_msr_values *values; unsigned long flags; /* @@ -313,13 +331,10 @@ user_return_notifier_unregister(urn); } local_irq_restore(flags); - for (slot = 0; slot < user_return_msrs_global.nr; ++slot) { - values = &msrs->values[slot]; - if (values->host != values->curr) { - wrmsrl(user_return_msrs_global.msrs[slot], values->host); - values->curr = values->host; - } - } + flags = hard_cond_local_irq_save(); + __kvm_on_user_return(msrs); + hard_cond_local_irq_restore(flags); + inband_exit_guest(); } int kvm_probe_user_return_msr(u32 msr) @@ -374,6 +389,7 @@ if (err) return 1; + msrs->dirty = true; msrs->values[slot].curr = value; if (!msrs->registered) { msrs->urn.on_user_return = kvm_on_user_return; @@ -4072,10 +4088,22 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { + struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs); + unsigned long flags; int idx; if (vcpu->preempted) vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); + + flags = hard_cond_local_irq_save(); + /* + * Skip steal time accounting from the out-of-band stage since + * this is oob-unsafe. We leave it to the next call from the + * inband stage. + */ + if (running_oob()) + goto skip_steal_time_update; + /* * Disable page faults because we're in atomic context here. @@ -4094,6 +4122,7 @@ kvm_steal_time_set_preempted(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); pagefault_enable(); +skip_steal_time_update: kvm_x86_ops.vcpu_put(vcpu); vcpu->arch.last_host_tsc = rdtsc(); /* @@ -4102,7 +4131,40 @@ * guest. do_debug expects dr6 to be cleared after it runs, do the same. */ set_debugreg(0, 6); + + inband_set_vcpu_release_state(vcpu, false); + if (!msrs->dirty) + inband_exit_guest(); + + hard_cond_local_irq_restore(flags); } + +#ifdef CONFIG_DOVETAIL +/* hard irqs off. 
*/ +void kvm_handle_oob_switch(struct kvm_oob_notifier *nfy) +{ + struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs); + struct kvm_vcpu *vcpu; + + vcpu = container_of(nfy, struct kvm_vcpu, oob_notifier); + /* + * If user_return MSRs were still active when leaving + * kvm_arch_vcpu_put(), inband_exit_guest() was not invoked, + * so we might get called later on before kvm_on_user_return() + * had a chance to run, if a switch to out-of-band scheduling + * sneaks in in the meantime. Prevent kvm_arch_vcpu_put() + * from running twice in such a case by checking ->put_vcpu + * from the notifier block. + */ + if (nfy->put_vcpu) + kvm_arch_vcpu_put(vcpu); + + __kvm_on_user_return(msrs); + inband_exit_guest(); +} +#else +#define kvm_handle_oob_switch NULL +#endif static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) @@ -9142,6 +9204,10 @@ } preempt_disable(); + local_irq_disable_full(); + + inband_enter_guest(vcpu); + inband_set_vcpu_release_state(vcpu, true); kvm_x86_ops.prepare_guest_switch(vcpu); @@ -9150,7 +9216,6 @@ * IPI are then delayed after guest entry, which ensures that they * result in virtual interrupt delivery. */ - local_irq_disable(); vcpu->mode = IN_GUEST_MODE; srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); @@ -9179,7 +9244,7 @@ if (kvm_vcpu_exit_request(vcpu)) { vcpu->mode = OUTSIDE_GUEST_MODE; smp_wmb(); - local_irq_enable(); + local_irq_enable_full(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = 1; @@ -9251,9 +9316,9 @@ * stat.exits increment will do nicely. */ kvm_before_interrupt(vcpu); - local_irq_enable(); + local_irq_enable_full(); ++vcpu->stat.exits; - local_irq_disable(); + local_irq_disable_full(); kvm_after_interrupt(vcpu); /* @@ -9273,7 +9338,7 @@ } } - local_irq_enable(); + local_irq_enable_full(); preempt_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); @@ -9487,7 +9552,9 @@ /* Swap (qemu) user FPU context for the guest FPU context. */ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) { - fpregs_lock(); + unsigned long flags; + + flags = fpregs_lock(); kvm_save_current_fpu(vcpu->arch.user_fpu); @@ -9496,7 +9563,7 @@ ~XFEATURE_MASK_PKRU); fpregs_mark_activate(); - fpregs_unlock(); + fpregs_unlock(flags); trace_kvm_fpu(1); } @@ -9504,14 +9571,16 @@ /* When vcpu_run ends, restore user space FPU context. 
*/ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) { - fpregs_lock(); + unsigned long flags; + + flags = fpregs_lock(); kvm_save_current_fpu(vcpu->arch.guest_fpu); copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state); fpregs_mark_activate(); - fpregs_unlock(); + fpregs_unlock(flags); ++vcpu->stat.fpu_reload; trace_kvm_fpu(0); @@ -10189,6 +10258,7 @@ if (r) goto free_guest_fpu; + inband_init_vcpu(vcpu, kvm_handle_oob_switch); vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; kvm_vcpu_mtrr_init(vcpu); diff --git a/kernel/arch/x86/lib/usercopy.c b/kernel/arch/x86/lib/usercopy.c index 3f435d7..1cdb806 100644 --- a/kernel/arch/x86/lib/usercopy.c +++ b/kernel/arch/x86/lib/usercopy.c @@ -18,7 +18,7 @@ { unsigned long ret; - if (__range_not_ok(from, n, TASK_SIZE)) + if (running_oob() || __range_not_ok(from, n, TASK_SIZE)) return n; if (!nmi_uaccess_okay()) diff --git a/kernel/arch/x86/mm/fault.c b/kernel/arch/x86/mm/fault.c index e9afbf8..a4d3b18 100644 --- a/kernel/arch/x86/mm/fault.c +++ b/kernel/arch/x86/mm/fault.c @@ -19,6 +19,7 @@ #include <linux/uaccess.h> /* faulthandler_disabled() */ #include <linux/efi.h> /* efi_recover_from_page_fault()*/ #include <linux/mm_types.h> +#include <linux/irqstage.h> #include <asm/cpufeature.h> /* boot_cpu_has, ... */ #include <asm/traps.h> /* dotraplinkage, ... */ @@ -656,7 +657,7 @@ * the below recursive fault logic only apply to a faults from * task context. */ - if (in_interrupt()) + if (running_oob() || in_interrupt()) return; /* @@ -666,10 +667,12 @@ * faulting through the emulate_vsyscall() logic. */ if (current->thread.sig_on_uaccess_err && signal) { + oob_trap_notify(X86_TRAP_PF, regs); set_signal_archinfo(address, error_code); /* XXX: hwpoison faults will set the wrong code. */ force_sig_fault(signal, si_code, (void __user *)address); + oob_trap_unwind(X86_TRAP_PF, regs); } /* @@ -677,6 +680,12 @@ */ return; } + + /* + * Do not bother unwinding the notification context on + * CPU/firmware/kernel bug. 
+ */ + oob_trap_notify(X86_TRAP_PF, regs); #ifdef CONFIG_VMAP_STACK /* @@ -796,6 +805,55 @@ return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR); } +#ifdef CONFIG_IRQ_PIPELINE + +static inline void cond_reenable_irqs_user(void) +{ + hard_local_irq_enable(); + + if (running_inband()) + local_irq_enable(); +} + +static inline void cond_reenable_irqs_kernel(irqentry_state_t state, + struct pt_regs *regs) +{ + if (regs->flags & X86_EFLAGS_IF) { + hard_local_irq_enable(); + if (state.stage_info == IRQENTRY_INBAND_UNSTALLED) + local_irq_enable(); + } +} + +static inline void cond_disable_irqs(void) +{ + hard_local_irq_disable(); + + if (running_inband()) + local_irq_disable(); +} + +#else /* !CONFIG_IRQ_PIPELINE */ + +static inline void cond_reenable_irqs_user(void) +{ + local_irq_enable(); +} + +static inline void cond_reenable_irqs_kernel(irqentry_state_t state, + struct pt_regs *regs) +{ + if (regs->flags & X86_EFLAGS_IF) + local_irq_enable(); +} + +static inline void cond_disable_irqs(void) +{ + local_irq_disable(); +} + +#endif /* !CONFIG_IRQ_PIPELINE */ + static void __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, unsigned long address, u32 pkey, int si_code) @@ -807,7 +865,7 @@ /* * It's possible to have interrupts off here: */ - local_irq_enable(); + cond_reenable_irqs_user(); /* * Valid to do another page fault here because this one came @@ -818,6 +876,12 @@ if (is_errata100(regs, address)) return; + + oob_trap_notify(X86_TRAP_PF, regs); + if (!running_inband()) { + local_irq_disable_full(); + return; + } /* * To avoid leaking information about the kernel page table @@ -837,7 +901,9 @@ force_sig_fault(SIGSEGV, si_code, (void __user *)address); - local_irq_disable(); + local_irq_disable_full(); + + oob_trap_unwind(X86_TRAP_PF, regs); return; } @@ -1225,7 +1291,8 @@ static inline void do_user_addr_fault(struct pt_regs *regs, unsigned long hw_error_code, - unsigned long address) + unsigned long address, + irqentry_state_t state) { struct vm_area_struct *vma = NULL; struct task_struct *tsk; @@ -1266,7 +1333,7 @@ * If we're in an interrupt, have no user context or are running * in a region with pagefaults disabled then we must not take the fault */ - if (unlikely(faulthandler_disabled() || !mm)) { + if (unlikely(running_inband() && (faulthandler_disabled() || !mm))) { bad_area_nosemaphore(regs, hw_error_code, address); return; } @@ -1279,12 +1346,22 @@ * potential system fault or CPU buglet: */ if (user_mode(regs)) { - local_irq_enable(); + cond_reenable_irqs_user(); flags |= FAULT_FLAG_USER; } else { - if (regs->flags & X86_EFLAGS_IF) - local_irq_enable(); + cond_reenable_irqs_kernel(state, regs); } + + /* + * At this point, we would have to stop running + * out-of-band. Tell the companion core about the page fault + * event, so that it might switch current to in-band mode if + * need be. If it does not, then we may assume that it would + * also handle the fixups. + */ + oob_trap_notify(X86_TRAP_PF, regs); + if (!running_inband()) + return; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); @@ -1307,7 +1384,7 @@ */ if (is_vsyscall_vaddr(address)) { if (emulate_vsyscall(hw_error_code, regs, address)) - return; + goto out; } #endif @@ -1340,7 +1417,7 @@ * which we do not expect faults. 
*/ bad_area_nosemaphore(regs, hw_error_code, address); - return; + goto out; } retry: mmap_read_lock(mm); @@ -1357,17 +1434,17 @@ vma = find_vma(mm, address); if (unlikely(!vma)) { bad_area(regs, hw_error_code, address); - return; + goto out; } if (likely(vma->vm_start <= address)) goto good_area; if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { bad_area(regs, hw_error_code, address); - return; + goto out; } if (unlikely(expand_stack(vma, address))) { bad_area(regs, hw_error_code, address); - return; + goto out; } /* @@ -1377,7 +1454,7 @@ good_area: if (unlikely(access_error(hw_error_code, vma))) { bad_area_access_error(regs, hw_error_code, address, vma); - return; + goto out; } /* @@ -1400,7 +1477,7 @@ if (!user_mode(regs)) no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR); - return; + goto out; } /* @@ -1426,10 +1503,12 @@ done: if (unlikely(fault & VM_FAULT_ERROR)) { mm_fault_error(regs, hw_error_code, address, fault); - return; + goto out; } check_v8086_mode(regs, address, tsk); +out: + oob_trap_unwind(X86_TRAP_PF, regs); } NOKPROBE_SYMBOL(do_user_addr_fault); @@ -1448,7 +1527,8 @@ static __always_inline void handle_page_fault(struct pt_regs *regs, unsigned long error_code, - unsigned long address) + unsigned long address, + irqentry_state_t state) { trace_page_fault_entries(regs, error_code, address); @@ -1459,7 +1539,7 @@ if (unlikely(fault_in_kernel_space(address))) { do_kern_addr_fault(regs, error_code, address); } else { - do_user_addr_fault(regs, error_code, address); + do_user_addr_fault(regs, error_code, address, state); /* * User address page fault handling might have reenabled * interrupts. Fixing up all potential exit points of @@ -1467,7 +1547,7 @@ * doable w/o creating an unholy mess or turning the code * upside down. */ - local_irq_disable(); + cond_disable_irqs(); } } @@ -1515,8 +1595,46 @@ state = irqentry_enter(regs); instrumentation_begin(); - handle_page_fault(regs, error_code, address); + handle_page_fault(regs, error_code, address, state); instrumentation_end(); irqentry_exit(regs, state); } + +#ifdef CONFIG_DOVETAIL + +void arch_advertise_page_mapping(unsigned long start, unsigned long end) +{ + unsigned long next, addr = start; + pgd_t *pgd, *pgd_ref; + struct page *page; + + /* + * APEI may create temporary mappings in interrupt context - + * nothing we can or need to propagate globally.
+ */ + if (in_interrupt()) + return; + + if (!(start >= VMALLOC_START && start < VMALLOC_END)) + return; + + do { + next = pgd_addr_end(addr, end); + pgd_ref = pgd_offset_k(addr); + if (pgd_none(*pgd_ref)) + continue; + spin_lock(&pgd_lock); + list_for_each_entry(page, &pgd_list, lru) { + pgd = page_address(page) + pgd_index(addr); + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + } + spin_unlock(&pgd_lock); + addr = next; + } while (addr != end); + + arch_flush_lazy_mmu_mode(); +} + +#endif diff --git a/kernel/arch/x86/mm/tlb.c b/kernel/arch/x86/mm/tlb.c index 569ac1d..b720da2 100644 --- a/kernel/arch/x86/mm/tlb.c +++ b/kernel/arch/x86/mm/tlb.c @@ -5,6 +5,7 @@ #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> +#include <linux/irq_pipeline.h> #include <linux/export.h> #include <linux/cpu.h> #include <linux/debugfs.h> @@ -309,10 +310,12 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { - unsigned long flags; + unsigned long flags, _flags; local_irq_save(flags); + protect_inband_mm(_flags); switch_mm_irqs_off(prev, next, tsk); + unprotect_inband_mm(_flags); local_irq_restore(flags); } @@ -440,7 +443,9 @@ */ /* We don't want flush_tlb_func_* to run concurrently with us. */ - if (IS_ENABLED(CONFIG_PROVE_LOCKING)) + if (IS_ENABLED(CONFIG_DOVETAIL)) + WARN_ON_ONCE(!hard_irqs_disabled()); + else if (IS_ENABLED(CONFIG_PROVE_LOCKING)) WARN_ON_ONCE(!irqs_disabled()); /* @@ -666,15 +671,24 @@ * wants us to catch up to. */ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); - u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); - u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen); - u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen); + u32 loaded_mm_asid; + u64 mm_tlb_gen; + u64 local_tlb_gen; + unsigned long flags; /* This code cannot presently handle being reentered. */ VM_WARN_ON(!irqs_disabled()); - if (unlikely(loaded_mm == &init_mm)) + protect_inband_mm(flags); + + loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); + mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen); + local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen); + + if (unlikely(loaded_mm == &init_mm)) { + unprotect_inband_mm(flags); return; + } VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != loaded_mm->context.ctx_id); @@ -690,6 +704,7 @@ * IPIs to lazy TLB mode CPUs. */ switch_mm_irqs_off(NULL, &init_mm, NULL); + unprotect_inband_mm(flags); return; } @@ -700,12 +715,15 @@ * be handled can catch us all the way up, leaving no work for * the second flush. */ + unprotect_inband_mm(flags); trace_tlb_flush(reason, 0); return; } WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen); WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen); + + unprotect_inband_mm(flags); /* * If we get to this point, we know that our TLB is out of date. @@ -1063,7 +1081,7 @@ * from interrupts. (Use the raw variant because this code can * be called from deep inside debugging code.) 
*/ - raw_local_irq_save(flags); + flags = hard_local_irq_save(); cr4 = this_cpu_read(cpu_tlbstate.cr4); /* toggle PGE */ @@ -1071,7 +1089,7 @@ /* write old PGE again and flush TLBs */ native_write_cr4(cr4); - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); } /* @@ -1079,6 +1097,8 @@ */ STATIC_NOPV void native_flush_tlb_local(void) { + unsigned long flags; + /* * Preemption or interrupts must be disabled to protect the access * to the per CPU variable and to prevent being preempted between @@ -1086,10 +1106,14 @@ */ WARN_ON_ONCE(preemptible()); + flags = hard_cond_local_irq_save(); + invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid)); /* If current->mm == NULL then the read_cr3() "borrows" an mm */ native_write_cr3(__native_read_cr3()); + + hard_cond_local_irq_restore(flags); } void flush_tlb_local(void) @@ -1165,6 +1189,16 @@ VM_WARN_ON_ONCE(!loaded_mm); /* + * There would be no way for the companion core to switch an + * out-of-band task back in-band in order to handle an access + * fault over NMI safely. Tell the caller that uaccess from + * NMI is NOT ok if the preempted task was running + * out-of-band. + */ + if (running_oob()) + return false; + + /* * The condition we want to check is * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, * if we're running in a VM with shadow paging, and nmi_uaccess_okay() diff --git a/kernel/arch/x86/xen/Kconfig b/kernel/arch/x86/xen/Kconfig index 218acbd..a02524c 100644 --- a/kernel/arch/x86/xen/Kconfig +++ b/kernel/arch/x86/xen/Kconfig @@ -5,7 +5,7 @@ config XEN bool "Xen guest support" - depends on PARAVIRT + depends on PARAVIRT && !IRQ_PIPELINE select PARAVIRT_CLOCK select X86_HV_CALLBACK_VECTOR depends on X86_64 || (X86_32 && X86_PAE) diff --git a/kernel/arch/x86/xen/enlighten_hvm.c b/kernel/arch/x86/xen/enlighten_hvm.c index ec50b74..8617927 100644 --- a/kernel/arch/x86/xen/enlighten_hvm.c +++ b/kernel/arch/x86/xen/enlighten_hvm.c @@ -120,7 +120,8 @@ this_cpu_write(xen_vcpu_id, smp_processor_id()); } -DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback) +DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR, + sysvec_xen_hvm_callback) { struct pt_regs *old_regs = set_irq_regs(regs); diff --git a/kernel/drivers/Makefile b/kernel/drivers/Makefile index 21cb556..bb031d3 100644 --- a/kernel/drivers/Makefile +++ b/kernel/drivers/Makefile @@ -193,3 +193,5 @@ obj-$(CONFIG_RK_HEADSET) += headset_observe/ obj-$(CONFIG_RK_NAND) += rk_nand/ obj-$(CONFIG_ROCKCHIP_RKNPU) += rknpu/ + +obj-$(CONFIG_XENOMAI) += xenomai/ diff --git a/kernel/drivers/base/regmap/internal.h b/kernel/drivers/base/regmap/internal.h index 0097696..ec9bea3 100644 --- a/kernel/drivers/base/regmap/internal.h +++ b/kernel/drivers/base/regmap/internal.h @@ -50,7 +50,10 @@ union { struct mutex mutex; struct { - spinlock_t spinlock; + union { + spinlock_t spinlock; + hard_spinlock_t oob_lock; + }; unsigned long spinlock_flags; }; }; diff --git a/kernel/drivers/base/regmap/regmap-irq.c b/kernel/drivers/base/regmap/regmap-irq.c index 4466f8b..ea1fc2f 100644 --- a/kernel/drivers/base/regmap/regmap-irq.c +++ b/kernel/drivers/base/regmap/regmap-irq.c @@ -331,6 +331,7 @@ .irq_enable = regmap_irq_enable, .irq_set_type = regmap_irq_set_type, .irq_set_wake = regmap_irq_set_wake, + .flags = IRQCHIP_PIPELINE_SAFE, }; static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, diff --git a/kernel/drivers/base/regmap/regmap.c b/kernel/drivers/base/regmap/regmap.c index 55a30af..f586293 100644 --- a/kernel/drivers/base/regmap/regmap.c +++ 
b/kernel/drivers/base/regmap/regmap.c @@ -14,6 +14,7 @@ #include <linux/property.h> #include <linux/rbtree.h> #include <linux/sched.h> +#include <linux/dovetail.h> #include <linux/delay.h> #include <linux/log2.h> #include <linux/hwspinlock.h> @@ -523,6 +524,23 @@ spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); } +static void regmap_lock_oob(void *__map) +__acquires(&map->oob_lock) +{ + struct regmap *map = __map; + unsigned long flags; + + raw_spin_lock_irqsave(&map->oob_lock, flags); + map->spinlock_flags = flags; +} + +static void regmap_unlock_oob(void *__map) +__releases(&map->oob_lock) +{ + struct regmap *map = __map; + raw_spin_unlock_irqrestore(&map->oob_lock, map->spinlock_flags); +} + static void dev_get_regmap_release(struct device *dev, void *res) { /* @@ -761,18 +779,29 @@ } else { if ((bus && bus->fast_io) || config->fast_io) { - spin_lock_init(&map->spinlock); - map->lock = regmap_lock_spinlock; - map->unlock = regmap_unlock_spinlock; - lockdep_set_class_and_name(&map->spinlock, - lock_key, lock_name); - } else { + if (dovetailing() && config->oob_io) { + raw_spin_lock_init(&map->oob_lock); + map->lock = regmap_lock_oob; + map->unlock = regmap_unlock_oob; + lockdep_set_class_and_name(&map->oob_lock, + lock_key, lock_name); + } else { + spin_lock_init(&map->spinlock); + map->lock = regmap_lock_spinlock; + map->unlock = regmap_unlock_spinlock; + lockdep_set_class_and_name(&map->spinlock, + lock_key, lock_name); + } + } else if (!config->oob_io) { mutex_init(&map->mutex); map->lock = regmap_lock_mutex; map->unlock = regmap_unlock_mutex; map->can_sleep = true; lockdep_set_class_and_name(&map->mutex, lock_key, lock_name); + } else { + ret = -ENXIO; + goto err_name; } map->lock_arg = map; } diff --git a/kernel/drivers/clocksource/Kconfig b/kernel/drivers/clocksource/Kconfig index 99c6b44..ae971d3 100644 --- a/kernel/drivers/clocksource/Kconfig +++ b/kernel/drivers/clocksource/Kconfig @@ -25,6 +25,7 @@ config OMAP_DM_TIMER bool select TIMER_OF + imply GENERIC_CLOCKSOURCE_VDSO config CLKBLD_I8253 def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK @@ -58,6 +59,8 @@ config DW_APB_TIMER bool "DW APB timer driver" if COMPILE_TEST + select CLKSRC_MMIO + imply GENERIC_CLOCKSOURCE_VDSO help Enables the support for the dw_apb timer. @@ -395,6 +398,7 @@ config ARM_GLOBAL_TIMER bool "Support for the ARM global timer" if COMPILE_TEST select TIMER_OF if OF + imply GENERIC_CLOCKSOURCE_VDSO depends on ARM help This option enables support for the ARM global timer unit. @@ -444,6 +448,7 @@ config CLKSRC_EXYNOS_MCT bool "Exynos multi core timer driver" if COMPILE_TEST depends on ARM || ARM64 + imply GENERIC_CLOCKSOURCE_VDSO help Support for Multi Core Timer controller on Exynos SoCs. @@ -620,7 +625,7 @@ config CLKSRC_IMX_GPT bool "Clocksource using i.MX GPT" if COMPILE_TEST depends on (ARM || ARM64) && CLKDEV_LOOKUP - select CLKSRC_MMIO + imply GENERIC_CLOCKSOURCE_VDSO config CLKSRC_IMX_TPM bool "Clocksource using i.MX TPM" if COMPILE_TEST @@ -642,7 +647,7 @@ bool "Low power clocksource found in the LPC" if COMPILE_TEST select TIMER_OF if OF depends on HAS_IOMEM - select CLKSRC_MMIO + imply GENERIC_CLOCKSOURCE_VDSO help Enable this option to use the Low Power controller timer as clocksource. 
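To make the regmap locking changes above concrete, an illustrative client configuration (not part of the patch; the foo_ prefix is a placeholder): an out-of-band capable map must also claim fast_io so the hard oob_lock path is selected, while a sleeping, mutex-based map requesting oob_io is now rejected with -ENXIO.

#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.fast_io	= true,	/* spinlock-class locking, no sleeping */
	.oob_io		= true,	/* new flag: picks regmap_lock_oob() above */
};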
diff --git a/kernel/drivers/clocksource/arm_arch_timer.c b/kernel/drivers/clocksource/arm_arch_timer.c index f488176..fb4fb16 100644 --- a/kernel/drivers/clocksource/arm_arch_timer.c +++ b/kernel/drivers/clocksource/arm_arch_timer.c @@ -21,6 +21,7 @@ #include <linux/of_address.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/dovetail.h> #include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/acpi.h> @@ -644,7 +645,7 @@ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { ctrl |= ARCH_TIMER_CTRL_IT_MASK; arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt); - evt->event_handler(evt); + clockevents_handle_event(evt); return IRQ_HANDLED; } @@ -753,7 +754,7 @@ static void __arch_timer_setup(unsigned type, struct clock_event_device *clk) { - clk->features = CLOCK_EVT_FEAT_ONESHOT; + clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE; if (type == ARCH_TIMER_TYPE_CP15) { typeof(clk->set_next_event) sne; @@ -864,6 +865,9 @@ else cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; + if (IS_ENABLED(CONFIG_GENERIC_CLOCKSOURCE_VDSO)) + cntkctl |= ARCH_TIMER_USR_PT_ACCESS_EN; + arch_timer_set_cntkctl(cntkctl); } @@ -897,6 +901,7 @@ enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags); if (arch_timer_has_nonsecure_ppi()) { + clk->irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]; flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI], flags); @@ -1015,6 +1020,8 @@ arch_timer_read_counter = rd; clocksource_counter.vdso_clock_mode = vdso_default; + if (vdso_default != VDSO_CLOCKMODE_NONE) + clocksource_counter.vdso_type = CLOCKSOURCE_VDSO_ARCHITECTED; } else { arch_timer_read_counter = arch_counter_get_cntvct_mem; } diff --git a/kernel/drivers/clocksource/arm_global_timer.c b/kernel/drivers/clocksource/arm_global_timer.c index 88b2d38..3273fc6 100644 --- a/kernel/drivers/clocksource/arm_global_timer.c +++ b/kernel/drivers/clocksource/arm_global_timer.c @@ -153,11 +153,11 @@ * the Global Timer flag _after_ having incremented * the Comparator register value to a higher value. 
*/ - if (clockevent_state_oneshot(evt)) + if (clockevent_is_oob(evt) || clockevent_state_oneshot(evt)) gt_compare_set(ULONG_MAX, 0); writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS); - evt->event_handler(evt); + clockevents_handle_event(evt); return IRQ_HANDLED; } @@ -168,7 +168,7 @@ clk->name = "arm_global_timer"; clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_PERCPU; + CLOCK_EVT_FEAT_PERCPU | CLOCK_EVT_FEAT_PIPELINE; clk->set_state_shutdown = gt_clockevent_shutdown; clk->set_state_periodic = gt_clockevent_set_periodic; clk->set_state_oneshot = gt_clockevent_shutdown; @@ -192,11 +192,6 @@ return 0; } -static u64 gt_clocksource_read(struct clocksource *cs) -{ - return gt_counter_read(); -} - static void gt_resume(struct clocksource *cs) { unsigned long ctrl; @@ -207,13 +202,15 @@ writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL); } -static struct clocksource gt_clocksource = { - .name = "arm_global_timer", - .rating = 300, - .read = gt_clocksource_read, - .mask = CLOCKSOURCE_MASK(64), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, - .resume = gt_resume, +static struct clocksource_user_mmio gt_clocksource = { + .mmio.clksrc = { + .name = "arm_global_timer", + .rating = 300, + .read = clocksource_dual_mmio_readl_up, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .resume = gt_resume, + }, }; #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK @@ -240,6 +237,8 @@ static int __init gt_clocksource_init(void) { + struct clocksource_mmio_regs mmr; + writel(0, gt_base + GT_CONTROL); writel(0, gt_base + GT_COUNTER0); writel(0, gt_base + GT_COUNTER1); @@ -249,7 +248,13 @@ #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate); #endif - return clocksource_register_hz(&gt_clocksource, gt_clk_rate); + mmr.reg_upper = gt_base + GT_COUNTER1; + mmr.reg_lower = gt_base + GT_COUNTER0; + mmr.bits_upper = 32; + mmr.bits_lower = 32; + mmr.revmap = NULL; + + return clocksource_user_mmio_init(&gt_clocksource, &mmr, gt_clk_rate); } static int __init global_timer_of_register(struct device_node *np) @@ -299,8 +304,8 @@ goto out_clk; } - err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt, - "gt", gt_evt); + err = __request_percpu_irq(gt_ppi, gt_clockevent_interrupt, + IRQF_TIMER, "gt", gt_evt); if (err) { pr_warn("global-timer: can't register interrupt %d (%d)\n", gt_ppi, err); diff --git a/kernel/drivers/clocksource/bcm2835_timer.c b/kernel/drivers/clocksource/bcm2835_timer.c index 1592650..687e9f2 100644 --- a/kernel/drivers/clocksource/bcm2835_timer.c +++ b/kernel/drivers/clocksource/bcm2835_timer.c @@ -53,18 +53,25 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id) { struct bcm2835_timer *timer = dev_id; - void (*event_handler)(struct clock_event_device *); + if (readl_relaxed(timer->control) & timer->match_mask) { writel_relaxed(timer->match_mask, timer->control); - event_handler = READ_ONCE(timer->evt.event_handler); - if (event_handler) - event_handler(&timer->evt); + clockevents_handle_event(&timer->evt); return IRQ_HANDLED; } else { return IRQ_NONE; } } + +static struct clocksource_user_mmio clocksource_bcm2835 = { + .mmio.clksrc = { + .rating = 300, + .read = clocksource_mmio_readl_up, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }, +}; static int __init bcm2835_timer_init(struct device_node *node) { @@ -72,6 +79,7 @@ u32 freq; int irq, ret; struct bcm2835_timer *timer; + struct clocksource_mmio_regs mmr; base = of_iomap(node, 0); if
(!base) { @@ -88,8 +96,13 @@ system_clock = base + REG_COUNTER_LO; sched_clock_register(bcm2835_sched_read, 32, freq); - clocksource_mmio_init(base + REG_COUNTER_LO, node->name, - freq, 300, 32, clocksource_mmio_readl_up); + mmr.reg_lower = base + REG_COUNTER_LO; + mmr.bits_lower = 32; + mmr.reg_upper = 0; + mmr.bits_upper = 0; + mmr.revmap = NULL; + clocksource_bcm2835.mmio.clksrc.name = node->name; + clocksource_user_mmio_init(&clocksource_bcm2835, &mmr, freq); irq = irq_of_parse_and_map(node, DEFAULT_TIMER); if (irq <= 0) { @@ -109,7 +122,7 @@ timer->match_mask = BIT(DEFAULT_TIMER); timer->evt.name = node->name; timer->evt.rating = 300; - timer->evt.features = CLOCK_EVT_FEAT_ONESHOT; + timer->evt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE; timer->evt.set_next_event = bcm2835_time_set_next_event; timer->evt.cpumask = cpumask_of(0); diff --git a/kernel/drivers/clocksource/clksrc_st_lpc.c b/kernel/drivers/clocksource/clksrc_st_lpc.c index 419a886..b30b814 100644 --- a/kernel/drivers/clocksource/clksrc_st_lpc.c +++ b/kernel/drivers/clocksource/clksrc_st_lpc.c @@ -51,7 +51,7 @@ sched_clock_register(st_clksrc_sched_clock_read, 32, rate); - ret = clocksource_mmio_init(ddata.base + LPC_LPT_LSB_OFF, + ret = clocksource_user_single_mmio_init(ddata.base + LPC_LPT_LSB_OFF, "clksrc-st-lpc", rate, 300, 32, clocksource_mmio_readl_up); if (ret) { diff --git a/kernel/drivers/clocksource/dw_apb_timer.c b/kernel/drivers/clocksource/dw_apb_timer.c index f5f24a9..a974b9d 100644 --- a/kernel/drivers/clocksource/dw_apb_timer.c +++ b/kernel/drivers/clocksource/dw_apb_timer.c @@ -43,7 +43,7 @@ static inline struct dw_apb_clocksource * clocksource_to_dw_apb_clocksource(struct clocksource *cs) { - return container_of(cs, struct dw_apb_clocksource, cs); + return container_of(cs, struct dw_apb_clocksource, ummio.mmio.clksrc); } static inline u32 apbt_readl(struct dw_apb_timer *timer, unsigned long offs) @@ -343,18 +343,6 @@ dw_apb_clocksource_read(dw_cs); } -static u64 __apbt_read_clocksource(struct clocksource *cs) -{ - u32 current_count; - struct dw_apb_clocksource *dw_cs = - clocksource_to_dw_apb_clocksource(cs); - - current_count = apbt_readl_relaxed(&dw_cs->timer, - APBTMR_N_CURRENT_VALUE); - - return (u64)~current_count; -} - static void apbt_restart_clocksource(struct clocksource *cs) { struct dw_apb_clocksource *dw_cs = @@ -376,7 +364,7 @@ * dw_apb_clocksource_register() as the next step. */ struct dw_apb_clocksource * -dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, +__init dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, unsigned long freq) { struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL); @@ -386,12 +374,12 @@ dw_cs->timer.base = base; dw_cs->timer.freq = freq; - dw_cs->cs.name = name; - dw_cs->cs.rating = rating; - dw_cs->cs.read = __apbt_read_clocksource; - dw_cs->cs.mask = CLOCKSOURCE_MASK(32); - dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS; - dw_cs->cs.resume = apbt_restart_clocksource; + dw_cs->ummio.mmio.clksrc.name = name; + dw_cs->ummio.mmio.clksrc.rating = rating; + dw_cs->ummio.mmio.clksrc.read = clocksource_mmio_readl_down; + dw_cs->ummio.mmio.clksrc.mask = CLOCKSOURCE_MASK(32); + dw_cs->ummio.mmio.clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + dw_cs->ummio.mmio.clksrc.resume = apbt_restart_clocksource; return dw_cs; } @@ -401,9 +389,17 @@ * * @dw_cs: The clocksource to register. 
*/ -void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs) +void __init dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs) { - clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq); + struct clocksource_mmio_regs mmr; + + mmr.reg_lower = dw_cs->timer.base + APBTMR_N_CURRENT_VALUE; + mmr.bits_lower = 32; + mmr.reg_upper = 0; + mmr.bits_upper = 0; + mmr.revmap = NULL; + + clocksource_user_mmio_init(&dw_cs->ummio, &mmr, dw_cs->timer.freq); } /** diff --git a/kernel/drivers/clocksource/exynos_mct.c b/kernel/drivers/clocksource/exynos_mct.c index df194b0..243adda 100644 --- a/kernel/drivers/clocksource/exynos_mct.c +++ b/kernel/drivers/clocksource/exynos_mct.c @@ -194,23 +194,20 @@ return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L); } -static u64 exynos4_frc_read(struct clocksource *cs) -{ - return exynos4_read_count_32(); -} - static void exynos4_frc_resume(struct clocksource *cs) { exynos4_mct_frc_start(); } -static struct clocksource mct_frc = { - .name = "mct-frc", - .rating = 450, /* use value higher than ARM arch timer */ - .read = exynos4_frc_read, - .mask = CLOCKSOURCE_MASK(32), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, - .resume = exynos4_frc_resume, +static struct clocksource_user_mmio mct_frc = { + .mmio.clksrc = { + .name = "mct-frc", + .rating = 450, /* use value higher than ARM arch timer */ + .read = clocksource_mmio_readl_up, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .resume = exynos4_frc_resume, + }, }; static u64 notrace exynos4_read_sched_clock(void) @@ -231,6 +228,8 @@ static int __init exynos4_clocksource_init(void) { + struct clocksource_mmio_regs mmr; + exynos4_mct_frc_start(); #if defined(CONFIG_ARM) @@ -239,8 +238,13 @@ register_current_timer_delay(&exynos4_delay_timer); #endif - if (clocksource_register_hz(&mct_frc, clk_rate)) - panic("%s: can't register clocksource\n", mct_frc.name); + mmr.reg_upper = NULL; + mmr.reg_lower = reg_base + EXYNOS4_MCT_G_CNT_L; + mmr.bits_upper = 0; + mmr.bits_lower = 32; + mmr.revmap = NULL; + if (clocksource_user_mmio_init(&mct_frc, &mmr, clk_rate)) + panic("%s: can't register clocksource\n", mct_frc.mmio.clksrc.name); sched_clock_register(exynos4_read_sched_clock, 32, clk_rate); @@ -308,7 +312,8 @@ static struct clock_event_device mct_comp_device = { .name = "mct-comp", .features = CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_ONESHOT, + CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PIPELINE, .rating = 250, .set_next_event = exynos4_comp_set_next_event, .set_state_periodic = mct_set_state_periodic, @@ -324,7 +329,7 @@ exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT); - evt->event_handler(evt); + clockevents_handle_event(evt); return IRQ_HANDLED; } @@ -335,7 +340,7 @@ clockevents_config_and_register(&mct_comp_device, clk_rate, 0xf, 0xffffffff); if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr, - IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq", + IRQF_TIMER | IRQF_IRQPOLL | IRQF_OOB, "mct_comp_irq", &mct_comp_device)) pr_err("%s: request_irq() failed\n", "mct_comp_irq"); @@ -434,7 +439,7 @@ exynos4_mct_tick_clear(mevt); - evt->event_handler(evt); + clockevents_handle_event(evt); return IRQ_HANDLED; } @@ -456,7 +461,8 @@ evt->set_state_oneshot = set_state_shutdown; evt->set_state_oneshot_stopped = set_state_shutdown; evt->tick_resume = set_state_shutdown; - evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | \ + CLOCK_EVT_FEAT_PIPELINE; evt->rating = 500; /* use value higher than ARM arch timer */ 
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); @@ -541,9 +547,9 @@ if (mct_int_type == MCT_INT_PPI) { - err = request_percpu_irq(mct_irqs[MCT_L0_IRQ], - exynos4_mct_tick_isr, "MCT", - &percpu_mct_tick); + err = __request_percpu_irq(mct_irqs[MCT_L0_IRQ], + exynos4_mct_tick_isr, IRQF_TIMER, + "MCT", &percpu_mct_tick); WARN(err, "MCT: can't request IRQ %d (%d)\n", mct_irqs[MCT_L0_IRQ], err); } else { diff --git a/kernel/drivers/clocksource/mmio.c b/kernel/drivers/clocksource/mmio.c index 826dcc4..163a50a 100644 --- a/kernel/drivers/clocksource/mmio.c +++ b/kernel/drivers/clocksource/mmio.c @@ -6,11 +6,30 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/device.h> -struct clocksource_mmio { - void __iomem *reg; - struct clocksource clksrc; +struct clocksource_user_mapping { + struct mm_struct *mm; + struct clocksource_user_mmio *ucs; + void *regs; + struct hlist_node link; + atomic_t refs; }; + +static struct class *user_mmio_class; +static dev_t user_mmio_devt; + +static DEFINE_SPINLOCK(user_clksrcs_lock); +static unsigned int user_clksrcs_count; +static LIST_HEAD(user_clksrcs); static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c) { @@ -38,6 +57,53 @@ return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; } +static inline struct clocksource_user_mmio * +to_mmio_ucs(struct clocksource *c) +{ + return container_of(c, struct clocksource_user_mmio, mmio.clksrc); +} + +u64 clocksource_dual_mmio_readl_up(struct clocksource *c) +{ + struct clocksource_user_mmio *ucs = to_mmio_ucs(c); + u32 upper, old_upper, lower; + + upper = readl_relaxed(ucs->reg_upper); + do { + old_upper = upper; + lower = readl_relaxed(ucs->mmio.reg); + upper = readl_relaxed(ucs->reg_upper); + } while (upper != old_upper); + + return (((u64)upper) << ucs->bits_lower) | lower; +} + +u64 clocksource_dual_mmio_readw_up(struct clocksource *c) +{ + struct clocksource_user_mmio *ucs = to_mmio_ucs(c); + u16 upper, old_upper, lower; + + upper = readw_relaxed(ucs->reg_upper); + do { + old_upper = upper; + lower = readw_relaxed(ucs->mmio.reg); + upper = readw_relaxed(ucs->reg_upper); + } while (upper != old_upper); + + return (((u64)upper) << ucs->bits_lower) | lower; +} + +static void mmio_base_init(const char *name,int rating, unsigned int bits, + u64 (*read)(struct clocksource *), + struct clocksource *cs) +{ + cs->name = name; + cs->rating = rating; + cs->read = read; + cs->mask = CLOCKSOURCE_MASK(bits); + cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; +} + /** * clocksource_mmio_init - Initialize a simple mmio based clocksource * @base: Virtual address of the clock readout register @@ -52,6 +118,7 @@ u64 (*read)(struct clocksource *)) { struct clocksource_mmio *cs; + int err; if (bits > 64 || bits < 16) return -EINVAL; @@ -61,12 +128,428 @@ return -ENOMEM; cs->reg = base; - cs->clksrc.name = name; - cs->clksrc.rating = rating; - cs->clksrc.read = read; - cs->clksrc.mask = CLOCKSOURCE_MASK(bits); - cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + mmio_base_init(name, rating, bits, read, &cs->clksrc); - return clocksource_register_hz(&cs->clksrc, hz); + err = clocksource_register_hz(&cs->clksrc, hz); + if (err < 0) { + kfree(cs); + return err; + } + + return err; } -EXPORT_SYMBOL_GPL(clocksource_mmio_init); + +static void mmio_ucs_vmopen(struct 
vm_area_struct *vma) +{ + struct clocksource_user_mapping *mapping, *clone; + struct clocksource_user_mmio *ucs; + unsigned long h_key; + + mapping = vma->vm_private_data; + + if (mapping->mm == vma->vm_mm) { + atomic_inc(&mapping->refs); + } else if (mapping->mm) { + /* + * We must be duplicating the original mm upon fork(), + * clone the parent ucs mapping struct then rehash it + * on the child mm key. If we cannot get memory for + * this, mitigate the issue for users by preventing a + * stale parent mm from being matched later on by a + * process which reused its mm_struct (h_key is based + * on this struct address). + */ + clone = kmalloc(sizeof(*mapping), GFP_KERNEL); + if (clone == NULL) { + pr_alert("out-of-memory for UCS mapping!\n"); + atomic_inc(&mapping->refs); + mapping->mm = NULL; + return; + } + ucs = mapping->ucs; + clone->mm = vma->vm_mm; + clone->ucs = ucs; + clone->regs = mapping->regs; + atomic_set(&clone->refs, 1); + vma->vm_private_data = clone; + h_key = (unsigned long)vma->vm_mm / sizeof(*vma->vm_mm); + spin_lock(&ucs->lock); + hash_add(ucs->mappings, &clone->link, h_key); + spin_unlock(&ucs->lock); + } +} + +static void mmio_ucs_vmclose(struct vm_area_struct *vma) +{ + struct clocksource_user_mapping *mapping; + + mapping = vma->vm_private_data; + + if (atomic_dec_and_test(&mapping->refs)) { + spin_lock(&mapping->ucs->lock); + hash_del(&mapping->link); + spin_unlock(&mapping->ucs->lock); + kfree(mapping); + } +} + +static const struct vm_operations_struct mmio_ucs_vmops = { + .open = mmio_ucs_vmopen, + .close = mmio_ucs_vmclose, +}; + +static int mmio_ucs_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long addr, upper_pfn, lower_pfn; + struct clocksource_user_mapping *mapping, *tmp; + struct clocksource_user_mmio *ucs; + unsigned int bits_upper; + unsigned long h_key; + pgprot_t prot; + size_t pages; + int err; + + pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + if (pages > 2) + return -EINVAL; + + vma->vm_private_data = NULL; + + ucs = file->private_data; + upper_pfn = ucs->phys_upper >> PAGE_SHIFT; + lower_pfn = ucs->phys_lower >> PAGE_SHIFT; + bits_upper = fls(ucs->mmio.clksrc.mask) - ucs->bits_lower; + if (pages == 2 && (!bits_upper || upper_pfn == lower_pfn)) + return -EINVAL; + + mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) + return -ENOSPC; + + mapping->mm = vma->vm_mm; + mapping->ucs = ucs; + mapping->regs = (void *)vma->vm_start; + atomic_set(&mapping->refs, 1); + + vma->vm_private_data = mapping; + vma->vm_ops = &mmio_ucs_vmops; + prot = pgprot_noncached(vma->vm_page_prot); + addr = vma->vm_start; + + err = remap_pfn_range(vma, addr, lower_pfn, PAGE_SIZE, prot); + if (err < 0) + goto fail; + + if (pages > 1) { + addr += PAGE_SIZE; + err = remap_pfn_range(vma, addr, upper_pfn, PAGE_SIZE, prot); + if (err < 0) + goto fail; + } + + h_key = (unsigned long)vma->vm_mm / sizeof(*vma->vm_mm); + + spin_lock(&ucs->lock); + hash_for_each_possible(ucs->mappings, tmp, link, h_key) { + if (tmp->mm == vma->vm_mm) { + spin_unlock(&ucs->lock); + err = -EBUSY; + goto fail; + } + } + hash_add(ucs->mappings, &mapping->link, h_key); + spin_unlock(&ucs->lock); + + return 0; +fail: + kfree(mapping); + + return err; +} + +static long +mmio_ucs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct clocksource_user_mapping *mapping; + struct clksrc_user_mmio_info __user *u; + unsigned long upper_pfn, lower_pfn; + struct clksrc_user_mmio_info info; + struct clocksource_user_mmio *ucs; + unsigned int bits_upper; + void 
__user *map_base; + unsigned long h_key; + size_t size; + + u = (struct clksrc_user_mmio_info __user *)arg; + + switch (cmd) { + case CLKSRC_USER_MMIO_MAP: + break; + default: + return -ENOTTY; + } + + h_key = (unsigned long)current->mm / sizeof(*current->mm); + + ucs = file->private_data; + upper_pfn = ucs->phys_upper >> PAGE_SHIFT; + lower_pfn = ucs->phys_lower >> PAGE_SHIFT; + bits_upper = fls(ucs->mmio.clksrc.mask) - ucs->bits_lower; + size = PAGE_SIZE; + if (bits_upper && upper_pfn != lower_pfn) + size += PAGE_SIZE; + + do { + spin_lock(&ucs->lock); + hash_for_each_possible(ucs->mappings, mapping, link, h_key) { + if (mapping->mm == current->mm) { + spin_unlock(&ucs->lock); + map_base = mapping->regs; + goto found; + } + } + spin_unlock(&ucs->lock); + + map_base = (void *) + vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0); + } while (IS_ERR(map_base) && PTR_ERR(map_base) == -EBUSY); + + if (IS_ERR(map_base)) + return PTR_ERR(map_base); + +found: + info.type = ucs->type; + info.reg_lower = map_base + offset_in_page(ucs->phys_lower); + info.mask_lower = ucs->mmio.clksrc.mask; + info.bits_lower = ucs->bits_lower; + info.reg_upper = NULL; + if (ucs->phys_upper) + info.reg_upper = map_base + (size - PAGE_SIZE) + + offset_in_page(ucs->phys_upper); + info.mask_upper = ucs->mask_upper; + + return copy_to_user(u, &info, sizeof(*u)); +} + +static int mmio_ucs_open(struct inode *inode, struct file *file) +{ + struct clocksource_user_mmio *ucs; + + if (file->f_mode & FMODE_WRITE) + return -EINVAL; + + ucs = container_of(inode->i_cdev, typeof(*ucs), cdev); + file->private_data = ucs; + + return 0; +} + +static const struct file_operations mmio_ucs_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = mmio_ucs_ioctl, + .open = mmio_ucs_open, + .mmap = mmio_ucs_mmap, +}; + +static int __init +ucs_create_cdev(struct class *class, struct clocksource_user_mmio *ucs) +{ + int err; + + ucs->dev = device_create(class, NULL, + MKDEV(MAJOR(user_mmio_devt), ucs->id), + ucs, "ucs/%d", ucs->id); + if (IS_ERR(ucs->dev)) + return PTR_ERR(ucs->dev); + + spin_lock_init(&ucs->lock); + hash_init(ucs->mappings); + + cdev_init(&ucs->cdev, &mmio_ucs_fops); + ucs->cdev.kobj.parent = &ucs->dev->kobj; + + err = cdev_add(&ucs->cdev, ucs->dev->devt, 1); + if (err < 0) + goto err_device_destroy; + + return 0; + +err_device_destroy: + device_destroy(class, MKDEV(MAJOR(user_mmio_devt), ucs->id)); + return err; +} + +static unsigned long default_revmap(void *virt) +{ + struct vm_struct *vm; + + vm = find_vm_area(virt); + if (!vm) + return 0; + + return vm->phys_addr + (virt - vm->addr); +} + +int __init clocksource_user_mmio_init(struct clocksource_user_mmio *ucs, + const struct clocksource_mmio_regs *regs, + unsigned long hz) +{ + static u64 (*user_types[CLKSRC_MMIO_TYPE_NR])(struct clocksource *) = { + [CLKSRC_MMIO_L_UP] = clocksource_mmio_readl_up, + [CLKSRC_MMIO_L_DOWN] = clocksource_mmio_readl_down, + [CLKSRC_DMMIO_L_UP] = clocksource_dual_mmio_readl_up, + [CLKSRC_MMIO_W_UP] = clocksource_mmio_readw_up, + [CLKSRC_MMIO_W_DOWN] = clocksource_mmio_readw_down, + [CLKSRC_DMMIO_W_UP] = clocksource_dual_mmio_readw_up, + }; + const char *name = ucs->mmio.clksrc.name; + unsigned long phys_upper = 0, phys_lower; + enum clksrc_user_mmio_type type; + unsigned long (*revmap)(void *); + int err; + + if (regs->bits_lower > 32 || regs->bits_lower < 16 || + regs->bits_upper > 32) + return -EINVAL; + + for (type = 0; type < ARRAY_SIZE(user_types); type++) + if (ucs->mmio.clksrc.read == user_types[type]) + break; + + if (type == 
ARRAY_SIZE(user_types)) + return -EINVAL; + + if (!(ucs->mmio.clksrc.flags & CLOCK_SOURCE_IS_CONTINUOUS)) + return -EINVAL; + + revmap = regs->revmap; + if (!revmap) + revmap = default_revmap; + + phys_lower = revmap(regs->reg_lower); + if (!phys_lower) + return -EINVAL; + + if (regs->bits_upper) { + phys_upper = revmap(regs->reg_upper); + if (!phys_upper) + return -EINVAL; + } + + ucs->mmio.reg = regs->reg_lower; + ucs->type = type; + ucs->bits_lower = regs->bits_lower; + ucs->reg_upper = regs->reg_upper; + ucs->mask_lower = CLOCKSOURCE_MASK(regs->bits_lower); + ucs->mask_upper = CLOCKSOURCE_MASK(regs->bits_upper); + ucs->phys_lower = phys_lower; + ucs->phys_upper = phys_upper; + spin_lock_init(&ucs->lock); + + err = clocksource_register_hz(&ucs->mmio.clksrc, hz); + if (err < 0) + return err; + + spin_lock(&user_clksrcs_lock); + + ucs->id = user_clksrcs_count++; + if (ucs->id < CLKSRC_USER_MMIO_MAX) + list_add_tail(&ucs->link, &user_clksrcs); + + spin_unlock(&user_clksrcs_lock); + + if (ucs->id >= CLKSRC_USER_MMIO_MAX) { + pr_warn("%s: Too many clocksources\n", name); + err = -EAGAIN; + goto fail; + } + + ucs->mmio.clksrc.vdso_type = CLOCKSOURCE_VDSO_MMIO + ucs->id; + + if (user_mmio_class) { + err = ucs_create_cdev(user_mmio_class, ucs); + if (err < 0) { + pr_warn("%s: Failed to add character device\n", name); + goto fail; + } + } + + return 0; + +fail: + clocksource_unregister(&ucs->mmio.clksrc); + + return err; +} + +int __init clocksource_user_single_mmio_init( + void __iomem *base, const char *name, + unsigned long hz, int rating, unsigned int bits, + u64 (*read)(struct clocksource *)) +{ + struct clocksource_user_mmio *ucs; + struct clocksource_mmio_regs regs; + int ret; + + ucs = kzalloc(sizeof(*ucs), GFP_KERNEL); + if (!ucs) + return -ENOMEM; + + mmio_base_init(name, rating, bits, read, &ucs->mmio.clksrc); + regs.reg_lower = base; + regs.reg_upper = NULL; + regs.bits_lower = bits; + regs.bits_upper = 0; + regs.revmap = NULL; + + ret = clocksource_user_mmio_init(ucs, &regs, hz); + if (ret) + kfree(ucs); + + return ret; +} + +static int __init mmio_clksrc_chr_dev_init(void) +{ + struct clocksource_user_mmio *ucs; + struct class *class; + int err; + + class = class_create(THIS_MODULE, "mmio_ucs"); + if (IS_ERR(class)) { + pr_err("couldn't create user mmio clocksources class\n"); + return PTR_ERR(class); + } + + err = alloc_chrdev_region(&user_mmio_devt, 0, CLKSRC_USER_MMIO_MAX, + "mmio_ucs"); + if (err < 0) { + pr_err("failed to allocate user mmio clocksources character devices region\n"); + goto err_class_destroy; + } + + /* + * Calling list_for_each_entry is safe here: clocksources are always + * added to the list tail, never removed.
+ */ + spin_lock(&user_clksrcs_lock); + list_for_each_entry(ucs, &user_clksrcs, link) { + spin_unlock(&user_clksrcs_lock); + + err = ucs_create_cdev(class, ucs); + if (err < 0) + pr_err("%s: Failed to add character device\n", + ucs->mmio.clksrc.name); + + spin_lock(&user_clksrcs_lock); + } + user_mmio_class = class; + spin_unlock(&user_clksrcs_lock); + + return 0; + +err_class_destroy: + class_destroy(class); + return err; +} +device_initcall(mmio_clksrc_chr_dev_init); diff --git a/kernel/drivers/clocksource/timer-imx-gpt.c b/kernel/drivers/clocksource/timer-imx-gpt.c index 7b2c70f..5c46458 100644 --- a/kernel/drivers/clocksource/timer-imx-gpt.c +++ b/kernel/drivers/clocksource/timer-imx-gpt.c @@ -163,8 +163,8 @@ sched_clock_reg = reg; sched_clock_register(mxc_read_sched_clock, 32, c); - return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32, - clocksource_mmio_readl_up); + return clocksource_user_single_mmio_init(reg, "mxc_timer1", c, 200, 32, + clocksource_mmio_readl_up); } /* clock event */ @@ -264,7 +264,7 @@ imxtm->gpt->gpt_irq_acknowledge(imxtm); - ced->event_handler(ced); + clockevents_handle_event(ced); return IRQ_HANDLED; } @@ -274,7 +274,7 @@ struct clock_event_device *ced = &imxtm->ced; ced->name = "mxc_timer1"; - ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ; + ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PIPELINE; ced->set_state_shutdown = mxc_shutdown; ced->set_state_oneshot = mxc_set_oneshot; ced->tick_resume = mxc_shutdown; diff --git a/kernel/drivers/clocksource/timer-sun4i.c b/kernel/drivers/clocksource/timer-sun4i.c index 0ba8155..43886c3 100644 --- a/kernel/drivers/clocksource/timer-sun4i.c +++ b/kernel/drivers/clocksource/timer-sun4i.c @@ -19,6 +19,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqreturn.h> +#include <linux/dovetail.h> #include <linux/sched_clock.h> #include <linux/of.h> #include <linux/of_address.h> @@ -135,7 +136,7 @@ struct timer_of *to = to_timer_of(evt); sun4i_timer_clear_interrupt(timer_of_base(to)); - evt->event_handler(evt); + clockevents_handle_event(evt); return IRQ_HANDLED; } @@ -146,7 +147,7 @@ .clkevt = { .name = "sun4i_tick", .rating = 350, - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE, .set_state_shutdown = sun4i_clkevt_shutdown, .set_state_periodic = sun4i_clkevt_set_periodic, .set_state_oneshot = sun4i_clkevt_set_oneshot, diff --git a/kernel/drivers/clocksource/timer-ti-dm-systimer.c b/kernel/drivers/clocksource/timer-ti-dm-systimer.c index 2737407..345569d 100644 --- a/kernel/drivers/clocksource/timer-ti-dm-systimer.c +++ b/kernel/drivers/clocksource/timer-ti-dm-systimer.c @@ -57,7 +57,7 @@ }; struct dmtimer_clocksource { - struct clocksource dev; + struct clocksource_user_mmio mmio; struct dmtimer_systimer t; unsigned int loadval; }; @@ -437,7 +437,7 @@ struct dmtimer_systimer *t = &clkevt->t; writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat); - clkevt->dev.event_handler(&clkevt->dev); + clockevents_handle_event(&clkevt->dev); return IRQ_HANDLED; } @@ -548,7 +548,7 @@ * We mostly use cpuidle_coupled with ARM local timers for runtime, * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here. 
*/ - dev->features = features; + dev->features = features | CLOCK_EVT_FEAT_PIPELINE; dev->rating = rating; dev->set_next_event = dmtimer_set_next_event; dev->set_state_shutdown = dmtimer_clockevent_shutdown; @@ -706,15 +706,7 @@ static struct dmtimer_clocksource * to_dmtimer_clocksource(struct clocksource *cs) { - return container_of(cs, struct dmtimer_clocksource, dev); -} - -static u64 dmtimer_clocksource_read_cycles(struct clocksource *cs) -{ - struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs); - struct dmtimer_systimer *t = &clksrc->t; - - return (u64)readl_relaxed(t->base + t->counter); + return container_of(cs, struct dmtimer_clocksource, mmio.mmio.clksrc); } static void __iomem *dmtimer_sched_clock_counter; @@ -753,6 +745,7 @@ static int __init dmtimer_clocksource_init(struct device_node *np) { struct dmtimer_clocksource *clksrc; + struct clocksource_mmio_regs mmr; struct dmtimer_systimer *t; struct clocksource *dev; int error; @@ -761,7 +754,7 @@ if (!clksrc) return -ENOMEM; - dev = &clksrc->dev; + dev = &clksrc->mmio.mmio.clksrc; t = &clksrc->t; error = dmtimer_systimer_setup(np, t); @@ -770,7 +763,7 @@ dev->name = "dmtimer"; dev->rating = 300; - dev->read = dmtimer_clocksource_read_cycles; + dev->read = clocksource_mmio_readl_up; dev->mask = CLOCKSOURCE_MASK(32); dev->flags = CLOCK_SOURCE_IS_CONTINUOUS; @@ -793,7 +786,13 @@ sched_clock_register(dmtimer_read_sched_clock, 32, t->rate); } - if (clocksource_register_hz(dev, t->rate)) + mmr.reg_lower = t->base + t->counter; + mmr.bits_lower = 32; + mmr.reg_upper = 0; + mmr.bits_upper = 0; + mmr.revmap = NULL; + + if (clocksource_user_mmio_init(&clksrc->mmio, &mmr, t->rate)) pr_err("Could not register clocksource %pOF\n", np); return 0; diff --git a/kernel/drivers/cpuidle/cpuidle.c b/kernel/drivers/cpuidle/cpuidle.c index ab77a36..9c6004d 100644 --- a/kernel/drivers/cpuidle/cpuidle.c +++ b/kernel/drivers/cpuidle/cpuidle.c @@ -17,6 +17,7 @@ #include <linux/pm_qos.h> #include <linux/cpu.h> #include <linux/cpuidle.h> +#include <linux/irq_pipeline.h> #include <linux/ktime.h> #include <linux/hrtimer.h> #include <linux/module.h> @@ -219,6 +220,22 @@ broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP); /* + * A companion core running on the oob stage of the IRQ + * pipeline may deny switching to a deeper C-state. If so, + * call the default idle routine instead. If the core cannot + * bear with the latency induced by the default idling + * operation, then CPUIDLE is not usable and should be + * disabled at build time. The in-band stage is currently + * stalled, hard irqs are on. irq_cpuidle_enter() leaves us + * stalled but returns with hard irqs off so that no event may + * sneak in until we actually go idle. + */ + if (!irq_cpuidle_enter(dev, target_state)) { + default_idle_call(); + return -EBUSY; + } + + /* * Tell the time framework to switch to a broadcast timer because our * local timer will be shut down. If a local timer is used from another * CPU as a broadcast timer, this call may fail if it is not available.
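Stepping back to the user-mappable clocksource support added to mmio.c further up, here is a hedged user-space sketch of consuming one of the new "ucs" character devices. The uapi header location is an assumption, error handling is trimmed, and the direct read matches the single-register CLKSRC_MMIO_L_UP layout; dual-register (CLKSRC_DMMIO_*) types would replay the upper/lower/upper retry loop shown in clocksource_dual_mmio_readl_up().

#include <stdio.h>
#include <inttypes.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/clksrc_user_mmio.h>	/* assumed home of CLKSRC_USER_MMIO_MAP */

int main(void)
{
	struct clksrc_user_mmio_info info;
	int fd;

	fd = open("/dev/ucs/0", O_RDONLY);	/* device_create("ucs/%d", ...) above */
	if (fd < 0 || ioctl(fd, CLKSRC_USER_MMIO_MAP, &info))
		return 1;

	/* the ioctl maps the counter page(s) read-only into our mm */
	printf("raw counter: %" PRIx64 "\n",
	       (uint64_t)(*(volatile uint32_t *)info.reg_lower & info.mask_lower));
	return 0;
}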
@@ -247,6 +264,7 @@ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) rcu_idle_enter(); entered_state = target_state->enter(dev, drv, index); + hard_cond_local_irq_enable(); if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) rcu_idle_exit(); start_critical_timings(); diff --git a/kernel/drivers/cpuidle/poll_state.c b/kernel/drivers/cpuidle/poll_state.c index f7e8361..1245138 100644 --- a/kernel/drivers/cpuidle/poll_state.c +++ b/kernel/drivers/cpuidle/poll_state.c @@ -17,7 +17,7 @@ dev->poll_time_limit = false; - local_irq_enable(); + local_irq_enable_full(); if (!current_set_polling_and_test()) { unsigned int loop_count = 0; u64 limit; diff --git a/kernel/drivers/dma/Kconfig b/kernel/drivers/dma/Kconfig index 0801334..821fe3d 100644 --- a/kernel/drivers/dma/Kconfig +++ b/kernel/drivers/dma/Kconfig @@ -47,6 +47,10 @@ config DMA_VIRTUAL_CHANNELS tristate +config DMA_VIRTUAL_CHANNELS_OOB + def_bool n + depends on DMA_VIRTUAL_CHANNELS && DOVETAIL + config DMA_ACPI def_bool y depends on ACPI @@ -137,6 +141,13 @@ depends on ARCH_BCM2835 select DMA_ENGINE select DMA_VIRTUAL_CHANNELS + +config DMA_BCM2835_OOB + bool "Out-of-band support for BCM2835 DMA" + depends on DMA_BCM2835 && DOVETAIL + select DMA_VIRTUAL_CHANNELS_OOB + help + Enable out-of-band requests to BCM2835 DMA. config DMA_JZ4780 tristate "JZ4780 DMA support" @@ -275,6 +286,13 @@ Support the i.MX SDMA engine. This engine is integrated into Freescale i.MX25/31/35/51/53/6 chips. +config IMX_SDMA_OOB + bool "Out-of-band support for i.MX SDMA" + depends on IMX_SDMA && DOVETAIL + select DMA_VIRTUAL_CHANNELS_OOB + help + Enable out-of-band requests to i.MX SDMA. + config INTEL_IDMA64 tristate "Intel integrated DMA 64-bit support" select DMA_ENGINE diff --git a/kernel/drivers/dma/bcm2835-dma.c b/kernel/drivers/dma/bcm2835-dma.c index 630dfbb..6161f76 100644 --- a/kernel/drivers/dma/bcm2835-dma.c +++ b/kernel/drivers/dma/bcm2835-dma.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/io.h> #include <linux/spinlock.h> +#include <linux/irqstage.h> #include <linux/of.h> #include <linux/of_dma.h> @@ -435,10 +436,20 @@ writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); } +static inline void bcm2835_dma_enable_channel(struct bcm2835_chan *c) +{ + writel(c->desc->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); + writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); +} + +static inline bool bcm2835_dma_oob_capable(void) +{ + return IS_ENABLED(CONFIG_DMA_BCM2835_OOB); +} + static void bcm2835_dma_start_desc(struct bcm2835_chan *c) { struct virt_dma_desc *vd = vchan_next_desc(&c->vc); - struct bcm2835_desc *d; if (!vd) { c->desc = NULL; @@ -447,10 +458,41 @@ list_del(&vd->node); - c->desc = d = to_bcm2835_dma_desc(&vd->tx); + c->desc = to_bcm2835_dma_desc(&vd->tx); + if (!bcm2835_dma_oob_capable() || !vchan_oob_pulsed(vd)) + bcm2835_dma_enable_channel(c); +} - writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); - writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); +static bool do_channel(struct bcm2835_chan *c, struct bcm2835_desc *d) +{ + struct dmaengine_desc_callback cb; + + if (running_oob()) { + if (!vchan_oob_handled(&d->vd)) + return false; + dmaengine_desc_get_callback(&d->vd.tx, &cb); + if (dmaengine_desc_callback_valid(&cb)) { + vchan_unlock(&c->vc); + dmaengine_desc_callback_invoke(&cb, NULL); + vchan_lock(&c->vc); + } + return true; + } + + if (d->cyclic) { + /* call the cyclic callback */ + vchan_cyclic_callback(&d->vd); + } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { + 
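+ /* BCM2835_DMA_ADDR reads back zero once the last control block has been consumed: the chain is done, so retire the descriptor and arm the next queued one. */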
vchan_cookie_complete(&c->desc->vd); + bcm2835_dma_start_desc(c); + } + + return true; +} + +static inline bool is_base_irq_handler(void) +{ + return !bcm2835_dma_oob_capable() || running_oob(); } static irqreturn_t bcm2835_dma_callback(int irq, void *data) @@ -460,7 +502,7 @@ unsigned long flags; /* check the shared interrupt */ - if (c->irq_flags & IRQF_SHARED) { + if (is_base_irq_handler() && c->irq_flags & IRQF_SHARED) { /* check if the interrupt is enabled */ flags = readl(c->chan_base + BCM2835_DMA_CS); /* if not set then we are not the reason for the irq */ @@ -468,7 +510,8 @@ return IRQ_NONE; } - spin_lock_irqsave(&c->vc.lock, flags); + /* CAUTION: If running in-band, hard irqs are on. */ + vchan_lock_irqsave(&c->vc, flags); /* * Clear the INT flag to receive further interrupts. Keep the channel @@ -477,22 +520,27 @@ * if this IRQ handler is threaded.) If the channel is finished, it * will remain idle despite the ACTIVE flag being set. */ - writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, - c->chan_base + BCM2835_DMA_CS); + if (is_base_irq_handler()) + writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, + c->chan_base + BCM2835_DMA_CS); d = c->desc; + if (!d) + goto out; - if (d) { - if (d->cyclic) { - /* call the cyclic callback */ - vchan_cyclic_callback(&d->vd); - } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { - vchan_cookie_complete(&c->desc->vd); - bcm2835_dma_start_desc(c); - } + if (bcm2835_dma_oob_capable() && running_oob()) { + /* + * If we cannot process this from the out-of-band + * stage, schedule a callback from in-band context. + */ + if (!do_channel(c, d)) + irq_post_inband(irq); + } else { + do_channel(c, d); } - spin_unlock_irqrestore(&c->vc.lock, flags); +out: + vchan_unlock_irqrestore(&c->vc, flags); return IRQ_HANDLED; } @@ -571,7 +619,7 @@ if (ret == DMA_COMPLETE || !txstate) return ret; - spin_lock_irqsave(&c->vc.lock, flags); + vchan_lock_irqsave(&c->vc, flags); vd = vchan_find_desc(&c->vc, cookie); if (vd) { txstate->residue = @@ -592,7 +640,7 @@ txstate->residue = 0; } - spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_unlock_irqrestore(&c->vc, flags); return ret; } @@ -602,12 +650,35 @@ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); unsigned long flags; - spin_lock_irqsave(&c->vc.lock, flags); + vchan_lock_irqsave(&c->vc, flags); if (vchan_issue_pending(&c->vc) && !c->desc) bcm2835_dma_start_desc(c); - spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_unlock_irqrestore(&c->vc, flags); } + +#ifdef CONFIG_DMA_BCM2835_OOB +static int bcm2835_dma_pulse_oob(struct dma_chan *chan) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + unsigned long flags; + int ret = -EIO; + + vchan_lock_irqsave(&c->vc, flags); + if (c->desc && vchan_oob_pulsed(&c->desc->vd)) { + bcm2835_dma_enable_channel(c); + ret = 0; + } + vchan_unlock_irqrestore(&c->vc, flags); + + return ret; +} +#else +static int bcm2835_dma_pulse_oob(struct dma_chan *chan) +{ + return -ENOTSUPP; +} +#endif static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, @@ -649,6 +720,15 @@ u32 info = BCM2835_DMA_WAIT_RESP; u32 extra = BCM2835_DMA_INT_EN; size_t frames; + + if (!bcm2835_dma_oob_capable()) { + if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) { + dev_err(chan->device->dev, + "%s: out-of-band slave transfers disabled\n", + __func__); + return NULL; + } + } if (!is_slave_direction(direction)) { dev_err(chan->device->dev, @@ -715,7 +795,21 @@ return NULL; } - if (flags & DMA_PREP_INTERRUPT) + if (!bcm2835_dma_oob_capable()) { + 
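+ /* Out-of-band support is compiled out, so no DMA_OOB_* request flag can be honored; reject such transfers up front instead of silently completing them in-band. */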
if (flags & DMA_OOB_INTERRUPT) { + dev_err(chan->device->dev, + "%s: out-of-band cyclic transfers disabled\n", + __func__); + return NULL; + } + } else if (flags & DMA_OOB_PULSE) { + dev_err(chan->device->dev, + "%s: no pulse mode with out-of-band cyclic transfers\n", + __func__); + return NULL; + } + + if (flags & (DMA_PREP_INTERRUPT|DMA_OOB_INTERRUPT)) extra |= BCM2835_DMA_INT_EN; else period_len = buf_len; @@ -791,7 +885,7 @@ unsigned long flags; LIST_HEAD(head); - spin_lock_irqsave(&c->vc.lock, flags); + vchan_lock_irqsave(&c->vc, flags); /* stop DMA activity */ if (c->desc) { @@ -801,7 +895,7 @@ } vchan_get_all_descriptors(&c->vc, &head); - spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_unlock_irqrestore(&c->vc, flags); vchan_dma_desc_free_list(&c->vc, &head); return 0; @@ -912,11 +1006,13 @@ dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask); dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + dma_cap_set(DMA_OOB, od->ddev.cap_mask); dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask); od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; od->ddev.device_tx_status = bcm2835_dma_tx_status; od->ddev.device_issue_pending = bcm2835_dma_issue_pending; + od->ddev.device_pulse_oob = bcm2835_dma_pulse_oob; od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg; od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy; @@ -982,10 +1078,10 @@ continue; /* check if there are other channels that also use this irq */ - irq_flags = 0; + irq_flags = IS_ENABLED(CONFIG_DMA_BCM2835_OOB) ? IRQF_OOB : 0; for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++) if ((i != j) && (irq[j] == irq[i])) { - irq_flags = IRQF_SHARED; + irq_flags |= IRQF_SHARED; break; } diff --git a/kernel/drivers/dma/dmaengine.c b/kernel/drivers/dma/dmaengine.c index af3ee28..e79a94d 100644 --- a/kernel/drivers/dma/dmaengine.c +++ b/kernel/drivers/dma/dmaengine.c @@ -578,7 +578,8 @@ /* check if the channel supports slave transactions */ if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || - test_bit(DMA_CYCLIC, device->cap_mask.bits))) + test_bit(DMA_CYCLIC, device->cap_mask.bits) || + test_bit(DMA_OOB, device->cap_mask.bits))) return -ENXIO; /* @@ -1209,6 +1210,13 @@ return -EIO; } + if (dma_has_cap(DMA_OOB, device->cap_mask) && !device->device_pulse_oob) { + dev_err(device->dev, + "Device claims capability %s, but pulse handler is not defined\n", + "DMA_OOB"); + return -EIO; + } + if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) { dev_err(device->dev, "Device claims capability %s, but op is not defined\n", diff --git a/kernel/drivers/dma/imx-sdma.c b/kernel/drivers/dma/imx-sdma.c index 2283dcd..3648d3c 100644 --- a/kernel/drivers/dma/imx-sdma.c +++ b/kernel/drivers/dma/imx-sdma.c @@ -444,6 +444,10 @@ struct sdma_buffer_descriptor *bd0; /* clock ratio for AHB:SDMA core. 
1:1 is 1, 2:1 is 0*/ bool clk_ratio; +#ifdef CONFIG_IMX_SDMA_OOB + hard_spinlock_t oob_lock; + u32 pending_stat; +#endif }; static int sdma_config_write(struct dma_chan *chan, @@ -748,6 +752,11 @@ return container_of(t, struct sdma_desc, vd.tx); } +static inline bool sdma_oob_capable(void) +{ + return IS_ENABLED(CONFIG_IMX_SDMA_OOB); +} + static void sdma_start_desc(struct sdma_channel *sdmac) { struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc); @@ -765,7 +774,8 @@ sdma->channel_control[channel].base_bd_ptr = desc->bd_phys; sdma->channel_control[channel].current_bd_ptr = desc->bd_phys; - sdma_enable_channel(sdma, sdmac->channel); + if (!sdma_oob_capable() || !vchan_oob_pulsed(vd)) + sdma_enable_channel(sdma, sdmac->channel); } static void sdma_update_channel_loop(struct sdma_channel *sdmac) @@ -809,9 +819,9 @@ * SDMA transaction status by the time the client tasklet is * executed. */ - spin_unlock(&sdmac->vc.lock); + vchan_unlock(&sdmac->vc); dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL); - spin_lock(&sdmac->vc.lock); + vchan_lock(&sdmac->vc); if (error) sdmac->status = old_status; @@ -821,20 +831,21 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *data) { struct sdma_channel *sdmac = (struct sdma_channel *) data; + struct sdma_desc *desc = sdmac->desc; struct sdma_buffer_descriptor *bd; int i, error = 0; - sdmac->desc->chn_real_count = 0; + desc->chn_real_count = 0; /* * non loop mode. Iterate over all descriptors, collect * errors and call callback function */ - for (i = 0; i < sdmac->desc->num_bd; i++) { - bd = &sdmac->desc->bd[i]; + for (i = 0; i < desc->num_bd; i++) { + bd = &desc->bd[i]; if (bd->mode.status & (BD_DONE | BD_RROR)) error = -EIO; - sdmac->desc->chn_real_count += bd->mode.count; + desc->chn_real_count += bd->mode.count; } if (error) @@ -843,36 +854,83 @@ sdmac->status = DMA_COMPLETE; } -static irqreturn_t sdma_int_handler(int irq, void *dev_id) +static unsigned long sdma_do_channels(struct sdma_engine *sdma, + unsigned long stat) { - struct sdma_engine *sdma = dev_id; - unsigned long stat; + unsigned long mask = stat; - stat = readl_relaxed(sdma->regs + SDMA_H_INTR); - writel_relaxed(stat, sdma->regs + SDMA_H_INTR); - /* channel 0 is special and not handled here, see run_channel0() */ - stat &= ~1; - - while (stat) { - int channel = fls(stat) - 1; + while (mask) { + int channel = fls(mask) - 1; struct sdma_channel *sdmac = &sdma->channel[channel]; struct sdma_desc *desc; - spin_lock(&sdmac->vc.lock); + vchan_lock(&sdmac->vc); desc = sdmac->desc; if (desc) { + if (running_oob() && !vchan_oob_handled(&desc->vd)) + goto next; if (sdmac->flags & IMX_DMA_SG_LOOP) { sdma_update_channel_loop(sdmac); } else { mxc_sdma_handle_channel_normal(sdmac); + if (running_oob()) { + vchan_unlock(&sdmac->vc); + dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL); + __clear_bit(channel, &stat); + goto next_unlocked; + } vchan_cookie_complete(&desc->vd); sdma_start_desc(sdmac); } } - - spin_unlock(&sdmac->vc.lock); __clear_bit(channel, &stat); + next: + vchan_unlock(&sdmac->vc); + next_unlocked: + __clear_bit(channel, &mask); } + + return stat; +} + +static irqreturn_t sdma_int_handler(int irq, void *dev_id) +{ + struct sdma_engine *sdma = dev_id; + unsigned long stat, flags __maybe_unused; + +#ifdef CONFIG_IMX_SDMA_OOB + if (running_oob()) { + stat = readl_relaxed(sdma->regs + SDMA_H_INTR); + writel_relaxed(stat, sdma->regs + SDMA_H_INTR); + /* + * Locking is only to guard against IRQ migration with + * a delayed in-band event running from a remote CPU + * 
after some IRQ routing changed the affinity of the + * out-of-band handler in the meantime. + */ + stat = sdma_do_channels(sdma, stat & ~1); + if (stat) { + raw_spin_lock(&sdma->oob_lock); + sdma->pending_stat |= stat; + raw_spin_unlock(&sdma->oob_lock); + /* Call us back from in-band context. */ + irq_post_inband(irq); + } + return IRQ_HANDLED; + } + + /* In-band IRQ context: stalled, but hard irqs are on. */ + raw_spin_lock_irqsave(&sdma->oob_lock, flags); + stat = sdma->pending_stat; + sdma->pending_stat = 0; + raw_spin_unlock_irqrestore(&sdma->oob_lock, flags); + sdma_do_channels(sdma, stat); +#else + stat = readl_relaxed(sdma->regs + SDMA_H_INTR); + writel_relaxed(stat, sdma->regs + SDMA_H_INTR); + /* channel 0 is special and not handled here, see run_channel0() */ + sdma_do_channels(sdma, stat & ~1); +#endif return IRQ_HANDLED; } @@ -1060,9 +1118,9 @@ */ usleep_range(1000, 2000); - spin_lock_irqsave(&sdmac->vc.lock, flags); + vchan_lock_irqsave(&sdmac->vc, flags); vchan_get_all_descriptors(&sdmac->vc, &head); - spin_unlock_irqrestore(&sdmac->vc.lock, flags); + vchan_unlock_irqrestore(&sdmac->vc, flags); vchan_dma_desc_free_list(&sdmac->vc, &head); } @@ -1071,17 +1129,18 @@ struct sdma_channel *sdmac = to_sdma_chan(chan); unsigned long flags; - spin_lock_irqsave(&sdmac->vc.lock, flags); + vchan_lock_irqsave(&sdmac->vc, flags); sdma_disable_channel(chan); if (sdmac->desc) { vchan_terminate_vdesc(&sdmac->desc->vd); sdmac->desc = NULL; + vchan_unlock_irqrestore(&sdmac->vc, flags); schedule_work(&sdmac->terminate_worker); + } else { + vchan_unlock_irqrestore(&sdmac->vc, flags); } - - spin_unlock_irqrestore(&sdmac->vc.lock, flags); return 0; } @@ -1441,6 +1500,15 @@ struct scatterlist *sg; struct sdma_desc *desc; + if (!sdma_oob_capable()) { + if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) { + dev_err(sdma->dev, + "%s: out-of-band slave transfers disabled\n", + __func__); + return NULL; + } + } + sdma_config_write(chan, &sdmac->slave_config, direction); desc = sdma_transfer_init(sdmac, direction, sg_len); @@ -1492,7 +1560,8 @@ if (i + 1 == sg_len) { param |= BD_INTR; - param |= BD_LAST; + if (!sdma_oob_capable() || !(flags & DMA_OOB_PULSE)) + param |= BD_LAST; param &= ~BD_CONT; } @@ -1526,6 +1595,20 @@ struct sdma_desc *desc; dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); + + if (!sdma_oob_capable()) { + if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) { + dev_err(sdma->dev, + "%s: out-of-band cyclic transfers disabled\n", + __func__); + return NULL; + } + } else if (flags & DMA_OOB_PULSE) { + dev_err(chan->device->dev, + "%s: no pulse mode with out-of-band cyclic transfers\n", + __func__); + return NULL; + } sdma_config_write(chan, &sdmac->slave_config, direction); @@ -1649,7 +1732,7 @@ if (ret == DMA_COMPLETE || !txstate) return ret; - spin_lock_irqsave(&sdmac->vc.lock, flags); + vchan_lock_irqsave(&sdmac->vc, flags); vd = vchan_find_desc(&sdmac->vc, cookie); if (vd) @@ -1667,7 +1750,7 @@ residue = 0; } - spin_unlock_irqrestore(&sdmac->vc.lock, flags); + vchan_unlock_irqrestore(&sdmac->vc, flags); dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, residue); @@ -1680,11 +1763,38 @@ struct sdma_channel *sdmac = to_sdma_chan(chan); unsigned long flags; - spin_lock_irqsave(&sdmac->vc.lock, flags); + vchan_lock_irqsave(&sdmac->vc, flags); if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc) sdma_start_desc(sdmac); - spin_unlock_irqrestore(&sdmac->vc.lock, flags); + vchan_unlock_irqrestore(&sdmac->vc, flags); } + +#ifdef CONFIG_IMX_SDMA_OOB +static int 
sdma_pulse_oob(struct dma_chan *chan) +{ + struct sdma_channel *sdmac = to_sdma_chan(chan); + struct sdma_desc *desc = sdmac->desc; + unsigned long flags; + int n, ret = -EIO; + + vchan_lock_irqsave(&sdmac->vc, flags); + if (desc && vchan_oob_pulsed(&desc->vd)) { + for (n = 0; n < desc->num_bd - 1; n++) + desc->bd[n].mode.status |= BD_DONE; + desc->bd[n].mode.status |= BD_DONE|BD_WRAP; + sdma_enable_channel(sdmac->sdma, sdmac->channel); + ret = 0; + } + vchan_unlock_irqrestore(&sdmac->vc, flags); + + return ret; +} +#else +static int sdma_pulse_oob(struct dma_chan *chan) +{ + return -ENOTSUPP; +} +#endif #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 @@ -1920,6 +2030,9 @@ clk_disable(sdma->clk_ipg); clk_disable(sdma->clk_ahb); +#ifdef CONFIG_IMX_SDMA_OOB + raw_spin_lock_init(&sdma->oob_lock); +#endif return 0; err_dma_alloc: @@ -2035,8 +2148,9 @@ if (ret) goto err_clk; - ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", - sdma); + ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, + IS_ENABLED(CONFIG_IMX_SDMA_OOB) ? IRQF_OOB : 0, + "sdma", sdma); if (ret) goto err_irq; @@ -2055,6 +2169,7 @@ dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); + dma_cap_set(DMA_OOB, sdma->dma_device.cap_mask); dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask); INIT_LIST_HEAD(&sdma->dma_device.channels); @@ -2106,6 +2221,7 @@ sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; sdma->dma_device.device_issue_pending = sdma_issue_pending; + sdma->dma_device.device_pulse_oob = sdma_pulse_oob; sdma->dma_device.copy_align = 2; dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); @@ -2160,6 +2276,16 @@ } } + /* + * Keep the clocks enabled at any time if we plan to use the + * DMA from out-of-band context, bumping their refcount to + * keep them on until sdma_remove() is called eventually. 
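+ * Out-of-band handlers must not reenter the clock framework, whose locks are not oob-safe and whose prepare path may sleep, so re-enabling a gated clock on demand is not an option there.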
+ */ + if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) { + clk_enable(sdma->clk_ipg); + clk_enable(sdma->clk_ahb); + } + return 0; err_register: @@ -2178,6 +2304,11 @@ struct sdma_engine *sdma = platform_get_drvdata(pdev); int i; + if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) { + clk_disable(sdma->clk_ahb); + clk_disable(sdma->clk_ipg); + } + devm_free_irq(&pdev->dev, sdma->irq, sdma); dma_async_device_unregister(&sdma->dma_device); kfree(sdma->script_addrs); diff --git a/kernel/drivers/dma/virt-dma.c b/kernel/drivers/dma/virt-dma.c index a6f4265..89e0116 100644 --- a/kernel/drivers/dma/virt-dma.c +++ b/kernel/drivers/dma/virt-dma.c @@ -23,11 +23,11 @@ unsigned long flags; dma_cookie_t cookie; - spin_lock_irqsave(&vc->lock, flags); + vchan_lock_irqsave(vc, flags); cookie = dma_cookie_assign(tx); list_move_tail(&vd->node, &vc->desc_submitted); - spin_unlock_irqrestore(&vc->lock, flags); + vchan_unlock_irqrestore(vc, flags); dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", vc, vd, cookie); @@ -52,9 +52,9 @@ struct virt_dma_desc *vd = to_virt_desc(tx); unsigned long flags; - spin_lock_irqsave(&vc->lock, flags); + vchan_lock_irqsave(vc, flags); list_del(&vd->node); - spin_unlock_irqrestore(&vc->lock, flags); + vchan_unlock_irqrestore(vc, flags); dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n", vc, vd, vd->tx.cookie); @@ -87,7 +87,7 @@ struct dmaengine_desc_callback cb; LIST_HEAD(head); - spin_lock_irq(&vc->lock); + vchan_lock_irq(vc); list_splice_tail_init(&vc->desc_completed, &head); vd = vc->cyclic; if (vd) { @@ -96,7 +96,7 @@ } else { memset(&cb, 0, sizeof(cb)); } - spin_unlock_irq(&vc->lock); + vchan_unlock_irq(vc); dmaengine_desc_callback_invoke(&cb, &vd->tx_result); @@ -120,11 +120,119 @@ } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); +#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB + +static void inband_init_chan_lock(struct virt_dma_chan *vc) +{ + spin_lock_init(&vc->lock); +} + +static void inband_lock_chan(struct virt_dma_chan *vc) +{ + spin_lock(&vc->lock); +} + +static void inband_unlock_chan(struct virt_dma_chan *vc) +{ + spin_unlock(&vc->lock); +} + +static void inband_lock_irq_chan(struct virt_dma_chan *vc) +{ + spin_lock_irq(&vc->lock); +} + +static void inband_unlock_irq_chan(struct virt_dma_chan *vc) +{ + spin_unlock_irq(&vc->lock); +} + +static unsigned long inband_lock_irqsave_chan(struct virt_dma_chan *vc) +{ + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); + + return flags; +} + +static void inband_unlock_irqrestore_chan(struct virt_dma_chan *vc, + unsigned long flags) +{ + spin_unlock_irqrestore(&vc->lock, flags); +} + +static struct virt_dma_lockops inband_lock_ops = { + .init = inband_init_chan_lock, + .lock = inband_lock_chan, + .unlock = inband_unlock_chan, + .lock_irq = inband_lock_irq_chan, + .unlock_irq = inband_unlock_irq_chan, + .lock_irqsave = inband_lock_irqsave_chan, + .unlock_irqrestore = inband_unlock_irqrestore_chan, +}; + +static void oob_init_chan_lock(struct virt_dma_chan *vc) +{ + raw_spin_lock_init(&vc->oob_lock); +} + +static void oob_lock_chan(struct virt_dma_chan *vc) +{ + raw_spin_lock(&vc->oob_lock); +} + +static void oob_unlock_chan(struct virt_dma_chan *vc) +{ + raw_spin_unlock(&vc->oob_lock); +} + +static void oob_lock_irq_chan(struct virt_dma_chan *vc) +{ + raw_spin_lock_irq(&vc->oob_lock); +} + +static void oob_unlock_irq_chan(struct virt_dma_chan *vc) +{ + raw_spin_unlock_irq(&vc->oob_lock); +} + +static unsigned long oob_lock_irqsave_chan(struct virt_dma_chan *vc) +{ + unsigned long flags; + + 
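+ /* A hard spinlock spins with CPU interrupts physically disabled, which is what makes this lock variant safe to acquire from the out-of-band stage. */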
raw_spin_lock_irqsave(&vc->oob_lock, flags); + + return flags; +} + +static void oob_unlock_irqrestore_chan(struct virt_dma_chan *vc, + unsigned long flags) +{ + raw_spin_unlock_irqrestore(&vc->oob_lock, flags); +} + +static struct virt_dma_lockops oob_lock_ops = { + .init = oob_init_chan_lock, + .lock = oob_lock_chan, + .unlock = oob_unlock_chan, + .lock_irq = oob_lock_irq_chan, + .unlock_irq = oob_unlock_irq_chan, + .lock_irqsave = oob_lock_irqsave_chan, + .unlock_irqrestore = oob_unlock_irqrestore_chan, +}; + +#endif + void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) { dma_cookie_init(&vc->chan); - spin_lock_init(&vc->lock); +#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB + vc->lock_ops = test_bit(DMA_OOB, dmadev->cap_mask.bits) ? + &oob_lock_ops : &inband_lock_ops; +#endif + vchan_lock_init(vc); INIT_LIST_HEAD(&vc->desc_allocated); INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); diff --git a/kernel/drivers/dma/virt-dma.h b/kernel/drivers/dma/virt-dma.h index e9f5250..5e01bc8 100644 --- a/kernel/drivers/dma/virt-dma.h +++ b/kernel/drivers/dma/virt-dma.h @@ -19,12 +19,22 @@ struct list_head node; }; +struct virt_dma_lockops; + struct virt_dma_chan { struct dma_chan chan; struct tasklet_struct task; void (*desc_free)(struct virt_dma_desc *); +#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB + struct virt_dma_lockops *lock_ops; + union { + spinlock_t lock; + hard_spinlock_t oob_lock; + }; +#else spinlock_t lock; +#endif /* protected by vc.lock */ struct list_head desc_allocated; @@ -40,6 +50,107 @@ { return container_of(chan, struct virt_dma_chan, chan); } + +#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB + +struct virt_dma_lockops { + void (*init)(struct virt_dma_chan *vc); + void (*lock)(struct virt_dma_chan *vc); + void (*unlock)(struct virt_dma_chan *vc); + void (*lock_irq)(struct virt_dma_chan *vc); + void (*unlock_irq)(struct virt_dma_chan *vc); + unsigned long (*lock_irqsave)(struct virt_dma_chan *vc); + void (*unlock_irqrestore)(struct virt_dma_chan *vc, + unsigned long flags); +}; + +static inline void vchan_lock_init(struct virt_dma_chan *vc) +{ + vc->lock_ops->init(vc); +} + +static inline void vchan_lock(struct virt_dma_chan *vc) +{ + vc->lock_ops->lock(vc); +} + +static inline void vchan_unlock(struct virt_dma_chan *vc) +{ + vc->lock_ops->unlock(vc); +} + +static inline void vchan_lock_irq(struct virt_dma_chan *vc) +{ + vc->lock_ops->lock_irq(vc); +} + +static inline void vchan_unlock_irq(struct virt_dma_chan *vc) +{ + vc->lock_ops->unlock_irq(vc); +} + +static inline +unsigned long __vchan_lock_irqsave(struct virt_dma_chan *vc) +{ + return vc->lock_ops->lock_irqsave(vc); +} + +#define vchan_lock_irqsave(__vc, __flags) \ + do { \ + (__flags) = __vchan_lock_irqsave(__vc); \ + } while (0) + +static inline +void vchan_unlock_irqrestore(struct virt_dma_chan *vc, + unsigned long flags) +{ + vc->lock_ops->unlock_irqrestore(vc, flags); +} + +static inline bool vchan_oob_handled(struct virt_dma_desc *vd) +{ + return !!(vd->tx.flags & DMA_OOB_INTERRUPT); +} + +static inline bool vchan_oob_pulsed(struct virt_dma_desc *vd) +{ + return !!(vd->tx.flags & DMA_OOB_PULSE); +} + +#else + +#define vchan_lock_init(__vc) \ + spin_lock_init(&(__vc)->lock) + +#define vchan_lock(__vc) \ + spin_lock(&(__vc)->lock) + +#define vchan_unlock(__vc) \ + spin_unlock(&(__vc)->lock) + +#define vchan_lock_irq(__vc) \ + spin_lock_irq(&(__vc)->lock) + +#define vchan_unlock_irq(__vc) \ + spin_unlock_irq(&(__vc)->lock) + +#define vchan_lock_irqsave(__vc, __flags) \ + 
spin_lock_irqsave(&(__vc)->lock, __flags) + +#define vchan_unlock_irqrestore(__vc, __flags) \ + spin_unlock_irqrestore(&(__vc)->lock, __flags) + +static inline bool vchan_oob_handled(struct virt_dma_desc *vd) +{ + return false; +} + +static inline bool vchan_oob_pulsed(struct virt_dma_desc *vd) +{ + return false; +} + +#endif /* !CONFIG_DMA_VIRTUAL_CHANNELS_OOB */ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); @@ -66,9 +177,9 @@ vd->tx_result.result = DMA_TRANS_NOERROR; vd->tx_result.residue = 0; - spin_lock_irqsave(&vc->lock, flags); + vchan_lock_irqsave(vc, flags); list_add_tail(&vd->node, &vc->desc_allocated); - spin_unlock_irqrestore(&vc->lock, flags); + vchan_unlock_irqrestore(vc, flags); return &vd->tx; } @@ -116,9 +227,9 @@ if (dmaengine_desc_test_reuse(&vd->tx)) { unsigned long flags; - spin_lock_irqsave(&vc->lock, flags); + vchan_lock_irqsave(vc, flags); list_add(&vd->node, &vc->desc_allocated); - spin_unlock_irqrestore(&vc->lock, flags); + vchan_unlock_irqrestore(vc, flags); } else { vc->desc_free(vd); } @@ -190,11 +301,11 @@ unsigned long flags; LIST_HEAD(head); - spin_lock_irqsave(&vc->lock, flags); + vchan_lock_irqsave(vc, flags); vchan_get_all_descriptors(vc, &head); list_for_each_entry(vd, &head, node) dmaengine_desc_clear_reuse(&vd->tx); - spin_unlock_irqrestore(&vc->lock, flags); + vchan_unlock_irqrestore(vc, flags); vchan_dma_desc_free_list(vc, &head); } @@ -215,11 +326,11 @@ tasklet_kill(&vc->task); - spin_lock_irqsave(&vc->lock, flags); + vchan_lock_irqsave(vc, flags); list_splice_tail_init(&vc->desc_terminated, &head); - spin_unlock_irqrestore(&vc->lock, flags); + vchan_unlock_irqrestore(vc, flags); vchan_dma_desc_free_list(vc, &head); } diff --git a/kernel/drivers/gpio/gpio-davinci.c b/kernel/drivers/gpio/gpio-davinci.c index 6f21385..b7ae10d 100644 --- a/kernel/drivers/gpio/gpio-davinci.c +++ b/kernel/drivers/gpio/gpio-davinci.c @@ -326,7 +326,7 @@ .irq_enable = gpio_irq_enable, .irq_disable = gpio_irq_disable, .irq_set_type = gpio_irq_type, - .flags = IRQCHIP_SET_TYPE_MASKED, + .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_PIPELINE_SAFE, }; static void gpio_irq_handler(struct irq_desc *desc) diff --git a/kernel/drivers/gpio/gpio-mxc.c b/kernel/drivers/gpio/gpio-mxc.c index ba6ed2a..96d523f 100644 --- a/kernel/drivers/gpio/gpio-mxc.c +++ b/kernel/drivers/gpio/gpio-mxc.c @@ -361,7 +361,8 @@ ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_set_irq_type; ct->chip.irq_set_wake = gpio_set_wake_irq; - ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND; + ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE; ct->regs.ack = GPIO_ISR; ct->regs.mask = GPIO_IMR; diff --git a/kernel/drivers/gpio/gpio-omap.c b/kernel/drivers/gpio/gpio-omap.c index a7e8ed5..3316893 100644 --- a/kernel/drivers/gpio/gpio-omap.c +++ b/kernel/drivers/gpio/gpio-omap.c @@ -55,7 +55,7 @@ u32 saved_datain; u32 level_mask; u32 toggle_mask; - raw_spinlock_t lock; + hard_spinlock_t lock; raw_spinlock_t wa_lock; struct gpio_chip chip; struct clk *dbck; @@ -1058,7 +1058,7 @@ ret = devm_request_irq(bank->chip.parent, bank->irq, omap_gpio_irq_handler, - 0, dev_name(bank->chip.parent), bank); + IRQF_OOB, dev_name(bank->chip.parent), bank); if (ret) gpiochip_remove(&bank->chip); @@ -1406,7 +1406,7 @@ irqc->irq_bus_lock = omap_gpio_irq_bus_lock, irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock, irqc->name = 
dev_name(&pdev->dev); - irqc->flags = IRQCHIP_MASK_ON_SUSPEND; + irqc->flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE; irqc->parent_device = dev; bank->irq = platform_get_irq(pdev, 0); diff --git a/kernel/drivers/gpio/gpio-pca953x.c b/kernel/drivers/gpio/gpio-pca953x.c index 3ad1a9e..cad58d0 100644 --- a/kernel/drivers/gpio/gpio-pca953x.c +++ b/kernel/drivers/gpio/gpio-pca953x.c @@ -855,6 +855,7 @@ irq_chip->irq_bus_sync_unlock = pca953x_irq_bus_sync_unlock; irq_chip->irq_set_type = pca953x_irq_set_type; irq_chip->irq_shutdown = pca953x_irq_shutdown; + irq_chip->flags |= IRQCHIP_PIPELINE_SAFE; girq = &chip->gpio_chip.irq; girq->chip = irq_chip; diff --git a/kernel/drivers/gpio/gpio-pl061.c b/kernel/drivers/gpio/gpio-pl061.c index f1b53dd..c890b9c 100644 --- a/kernel/drivers/gpio/gpio-pl061.c +++ b/kernel/drivers/gpio/gpio-pl061.c @@ -48,7 +48,7 @@ #endif struct pl061 { - raw_spinlock_t lock; + hard_spinlock_t lock; void __iomem *base; struct gpio_chip gc; @@ -321,6 +321,7 @@ pl061->irq_chip.irq_unmask = pl061_irq_unmask; pl061->irq_chip.irq_set_type = pl061_irq_type; pl061->irq_chip.irq_set_wake = pl061_irq_set_wake; + pl061->irq_chip.flags = IRQCHIP_PIPELINE_SAFE; writeb(0, pl061->base + GPIOIE); /* disable irqs */ irq = adev->irq[0]; diff --git a/kernel/drivers/gpio/gpio-xilinx.c b/kernel/drivers/gpio/gpio-xilinx.c index 67f9f82..33f03ac 100644 --- a/kernel/drivers/gpio/gpio-xilinx.c +++ b/kernel/drivers/gpio/gpio-xilinx.c @@ -45,7 +45,7 @@ unsigned int gpio_width[2]; u32 gpio_state[2]; u32 gpio_dir[2]; - spinlock_t gpio_lock[2]; + hard_spinlock_t gpio_lock[2]; }; static inline int xgpio_index(struct xgpio_instance *chip, int gpio) @@ -110,7 +110,7 @@ int index = xgpio_index(chip, gpio); int offset = xgpio_offset(chip, gpio); - spin_lock_irqsave(&chip->gpio_lock[index], flags); + raw_spin_lock_irqsave(&chip->gpio_lock[index], flags); /* Write to GPIO signal and set its direction to output */ if (val) @@ -121,7 +121,7 @@ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + xgpio_regoffset(chip, gpio), chip->gpio_state[index]); - spin_unlock_irqrestore(&chip->gpio_lock[index], flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags); } /** @@ -141,7 +141,7 @@ int index = xgpio_index(chip, 0); int offset, i; - spin_lock_irqsave(&chip->gpio_lock[index], flags); + raw_spin_lock_irqsave(&chip->gpio_lock[index], flags); /* Write to GPIO signals */ for (i = 0; i < gc->ngpio; i++) { @@ -152,9 +152,9 @@ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]); - spin_unlock_irqrestore(&chip->gpio_lock[index], flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags); index = xgpio_index(chip, i); - spin_lock_irqsave(&chip->gpio_lock[index], flags); + raw_spin_lock_irqsave(&chip->gpio_lock[index], flags); } if (__test_and_clear_bit(i, mask)) { offset = xgpio_offset(chip, i); @@ -168,7 +168,7 @@ xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]); - spin_unlock_irqrestore(&chip->gpio_lock[index], flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags); } /** @@ -187,14 +187,14 @@ int index = xgpio_index(chip, gpio); int offset = xgpio_offset(chip, gpio); - spin_lock_irqsave(&chip->gpio_lock[index], flags); + raw_spin_lock_irqsave(&chip->gpio_lock[index], flags); /* Set the GPIO bit in shadow register and set direction as input */ chip->gpio_dir[index] |= BIT(offset); xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET + xgpio_regoffset(chip, gpio), chip->gpio_dir[index]); 
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags); return 0; } @@ -218,7 +218,7 @@ int index = xgpio_index(chip, gpio); int offset = xgpio_offset(chip, gpio); - spin_lock_irqsave(&chip->gpio_lock[index], flags); + raw_spin_lock_irqsave(&chip->gpio_lock[index], flags); /* Write state of GPIO signal */ if (val) @@ -233,7 +233,7 @@ xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET + xgpio_regoffset(chip, gpio), chip->gpio_dir[index]); - spin_unlock_irqrestore(&chip->gpio_lock[index], flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags); return 0; } @@ -291,7 +291,7 @@ if (of_property_read_u32(np, "xlnx,gpio-width", &chip->gpio_width[0])) chip->gpio_width[0] = 32; - spin_lock_init(&chip->gpio_lock[0]); + raw_spin_lock_init(&chip->gpio_lock[0]); if (of_property_read_u32(np, "xlnx,is-dual", &is_dual)) is_dual = 0; @@ -314,7 +314,7 @@ &chip->gpio_width[1])) chip->gpio_width[1] = 32; - spin_lock_init(&chip->gpio_lock[1]); + raw_spin_lock_init(&chip->gpio_lock[1]); } chip->gc.base = -1; diff --git a/kernel/drivers/gpio/gpio-zynq.c b/kernel/drivers/gpio/gpio-zynq.c index c288a75..28c0280 100644 --- a/kernel/drivers/gpio/gpio-zynq.c +++ b/kernel/drivers/gpio/gpio-zynq.c @@ -601,7 +601,7 @@ .irq_request_resources = zynq_gpio_irq_reqres, .irq_release_resources = zynq_gpio_irq_relres, .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | - IRQCHIP_MASK_ON_SUSPEND, + IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE, }; static struct irq_chip zynq_gpio_edge_irqchip = { @@ -614,7 +614,7 @@ .irq_set_wake = zynq_gpio_set_wake, .irq_request_resources = zynq_gpio_irq_reqres, .irq_release_resources = zynq_gpio_irq_relres, - .flags = IRQCHIP_MASK_ON_SUSPEND, + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE, }; static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio, diff --git a/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c index 96f3908..e82dda7 100644 --- a/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c +++ b/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c @@ -94,6 +94,7 @@ .name = "dpu_mdss", .irq_mask = dpu_mdss_irq_mask, .irq_unmask = dpu_mdss_irq_unmask, + .flags = IRQCHIP_PIPELINE_SAFE, }; static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key; diff --git a/kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c index 09bd46a..781d701 100644 --- a/kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c +++ b/kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c @@ -91,6 +91,7 @@ .name = "mdss", .irq_mask = mdss_hw_mask_irq, .irq_unmask = mdss_hw_unmask_irq, + .flags = IRQCHIP_PIPELINE_SAFE, }; static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq, @@ -254,7 +255,7 @@ } ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0), - mdss_irq, 0, "mdss_isr", mdp5_mdss); + mdss_irq, IRQF_OOB, "mdss_isr", mdp5_mdss); if (ret) { DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret); goto fail_irq; diff --git a/kernel/drivers/gpu/ipu-v3/ipu-common.c b/kernel/drivers/gpu/ipu-v3/ipu-common.c index d166ee2..6f4db39 100644 --- a/kernel/drivers/gpu/ipu-v3/ipu-common.c +++ b/kernel/drivers/gpu/ipu-v3/ipu-common.c @@ -1238,6 +1238,7 @@ ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; + ct->chip.flags = IRQCHIP_PIPELINE_SAFE; ct->regs.ack = IPU_INT_STAT(i / 32); ct->regs.mask = IPU_INT_CTRL(i / 32); } diff --git 
a/kernel/drivers/iio/industrialio-trigger.c b/kernel/drivers/iio/industrialio-trigger.c index 6bcc562..e9172bb 100644 --- a/kernel/drivers/iio/industrialio-trigger.c +++ b/kernel/drivers/iio/industrialio-trigger.c @@ -544,6 +544,7 @@ trig->subirq_chip.name = trig->name; trig->subirq_chip.irq_mask = &iio_trig_subirqmask; trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask; + trig->subirq_chip.flags = IRQCHIP_PIPELINE_SAFE; for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { irq_set_chip(trig->subirq_base + i, &trig->subirq_chip); irq_set_handler(trig->subirq_base + i, &handle_simple_irq); diff --git a/kernel/drivers/irqchip/exynos-combiner.c b/kernel/drivers/irqchip/exynos-combiner.c index 0b85d9a..504bdd0 100644 --- a/kernel/drivers/irqchip/exynos-combiner.c +++ b/kernel/drivers/irqchip/exynos-combiner.c @@ -24,7 +24,7 @@ #define IRQ_IN_COMBINER 8 -static DEFINE_SPINLOCK(irq_controller_lock); +static DEFINE_HARD_SPINLOCK(irq_controller_lock); struct combiner_chip_data { unsigned int hwirq_offset; @@ -71,9 +71,9 @@ chained_irq_enter(chip, desc); - spin_lock(&irq_controller_lock); + raw_spin_lock(&irq_controller_lock); status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS); - spin_unlock(&irq_controller_lock); + raw_spin_unlock(&irq_controller_lock); status &= chip_data->irq_mask; if (status == 0) @@ -113,6 +113,7 @@ #ifdef CONFIG_SMP .irq_set_affinity = combiner_set_affinity, #endif + .flags = IRQCHIP_PIPELINE_SAFE, }; static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data, diff --git a/kernel/drivers/irqchip/irq-bcm2835.c b/kernel/drivers/irqchip/irq-bcm2835.c index a1e004a..bbf1cdb 100644 --- a/kernel/drivers/irqchip/irq-bcm2835.c +++ b/kernel/drivers/irqchip/irq-bcm2835.c @@ -102,7 +102,8 @@ static struct irq_chip armctrl_chip = { .name = "ARMCTRL-level", .irq_mask = armctrl_mask_irq, - .irq_unmask = armctrl_unmask_irq + .irq_unmask = armctrl_unmask_irq, + .flags = IRQCHIP_PIPELINE_SAFE, }; static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr, diff --git a/kernel/drivers/irqchip/irq-bcm2836.c b/kernel/drivers/irqchip/irq-bcm2836.c index cbc7c74..89b47fc 100644 --- a/kernel/drivers/irqchip/irq-bcm2836.c +++ b/kernel/drivers/irqchip/irq-bcm2836.c @@ -58,6 +58,7 @@ .name = "bcm2836-timer", .irq_mask = bcm2836_arm_irqchip_mask_timer_irq, .irq_unmask = bcm2836_arm_irqchip_unmask_timer_irq, + .flags = IRQCHIP_PIPELINE_SAFE, }; static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d) @@ -74,6 +75,7 @@ .name = "bcm2836-pmu", .irq_mask = bcm2836_arm_irqchip_mask_pmu_irq, .irq_unmask = bcm2836_arm_irqchip_unmask_pmu_irq, + .flags = IRQCHIP_PIPELINE_SAFE, }; static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d) @@ -88,6 +90,7 @@ .name = "bcm2836-gpu", .irq_mask = bcm2836_arm_irqchip_mask_gpu_irq, .irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq, + .flags = IRQCHIP_PIPELINE_SAFE, }; static void bcm2836_arm_irqchip_dummy_op(struct irq_data *d) diff --git a/kernel/drivers/irqchip/irq-gic-v2m.c b/kernel/drivers/irqchip/irq-gic-v2m.c index fbec07d..c3f19ef 100644 --- a/kernel/drivers/irqchip/irq-gic-v2m.c +++ b/kernel/drivers/irqchip/irq-gic-v2m.c @@ -89,6 +89,7 @@ .irq_unmask = gicv2m_unmask_msi_irq, .irq_eoi = irq_chip_eoi_parent, .irq_write_msi_msg = pci_msi_domain_write_msg, + .flags = IRQCHIP_PIPELINE_SAFE, }; static struct msi_domain_info gicv2m_msi_domain_info = { @@ -130,6 +131,7 @@ .irq_eoi = irq_chip_eoi_parent, .irq_set_affinity = irq_chip_set_affinity_parent, .irq_compose_msi_msg = gicv2m_compose_msi_msg, + .flags 
= IRQCHIP_PIPELINE_SAFE, }; static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, @@ -252,6 +254,7 @@ static struct irq_chip gicv2m_pmsi_irq_chip = { .name = "pMSI", + .flags = IRQCHIP_PIPELINE_SAFE, }; static struct msi_domain_ops gicv2m_pmsi_ops = { diff --git a/kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c index 634263d..0b4b81a 100644 --- a/kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c +++ b/kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c @@ -22,7 +22,8 @@ .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_eoi = irq_chip_eoi_parent, - .irq_set_affinity = msi_domain_set_affinity + .irq_set_affinity = msi_domain_set_affinity, + .flags = IRQCHIP_PIPELINE_SAFE, }; static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain, diff --git a/kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c index 87711e0..a148d0d 100644 --- a/kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -29,6 +29,7 @@ .irq_mask = its_mask_msi_irq, .irq_eoi = irq_chip_eoi_parent, .irq_write_msi_msg = pci_msi_domain_write_msg, + .flags = IRQCHIP_PIPELINE_SAFE, }; static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data) diff --git a/kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c index daa6d50..ae29443 100644 --- a/kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -12,6 +12,7 @@ static struct irq_chip its_pmsi_irq_chip = { .name = "ITS-pMSI", + .flags = IRQCHIP_PIPELINE_SAFE, }; static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev, diff --git a/kernel/drivers/irqchip/irq-gic-v3-mbi.c b/kernel/drivers/irqchip/irq-gic-v3-mbi.c index e81e89a..b213cd7 100644 --- a/kernel/drivers/irqchip/irq-gic-v3-mbi.c +++ b/kernel/drivers/irqchip/irq-gic-v3-mbi.c @@ -216,7 +216,7 @@ .name = "pMSI", .irq_set_type = irq_chip_set_type_parent, .irq_compose_msi_msg = mbi_compose_mbi_msg, - .flags = IRQCHIP_SUPPORTS_LEVEL_MSI, + .flags = IRQCHIP_SUPPORTS_LEVEL_MSI | IRQCHIP_PIPELINE_SAFE, }; static struct msi_domain_ops mbi_pmsi_ops = { diff --git a/kernel/drivers/irqchip/irq-gic-v3.c b/kernel/drivers/irqchip/irq-gic-v3.c index 6db1dbc..128a69f 100644 --- a/kernel/drivers/irqchip/irq-gic-v3.c +++ b/kernel/drivers/irqchip/irq-gic-v3.c @@ -1356,7 +1356,8 @@ .ipi_send_mask = gic_ipi_send_mask, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_MASK_ON_SUSPEND, + IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_PIPELINE_SAFE, }; static struct irq_chip gic_eoimode1_chip = { @@ -1375,7 +1376,8 @@ .ipi_send_mask = gic_ipi_send_mask, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_MASK_ON_SUSPEND, + IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_PIPELINE_SAFE, }; static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, diff --git a/kernel/drivers/irqchip/irq-gic.c b/kernel/drivers/irqchip/irq-gic.c index cd3f72b..4768411 100644 --- a/kernel/drivers/irqchip/irq-gic.c +++ b/kernel/drivers/irqchip/irq-gic.c @@ -91,7 +91,7 @@ #ifdef CONFIG_BL_SWITCHER -static DEFINE_RAW_SPINLOCK(cpu_map_lock); +static DEFINE_HARD_SPINLOCK(cpu_map_lock); #define gic_lock_irqsave(f) \ raw_spin_lock_irqsave(&cpu_map_lock, (f)) @@ -449,7 +449,8 @@ .irq_set_irqchip_state = gic_irq_set_irqchip_state, .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | - IRQCHIP_MASK_ON_SUSPEND, + 
IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_PIPELINE_SAFE, }; void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) diff --git a/kernel/drivers/irqchip/irq-imx-irqsteer.c b/kernel/drivers/irqchip/irq-imx-irqsteer.c index 1edf769..54a576a 100644 --- a/kernel/drivers/irqchip/irq-imx-irqsteer.c +++ b/kernel/drivers/irqchip/irq-imx-irqsteer.c @@ -29,7 +29,7 @@ struct clk *ipg_clk; int irq[CHAN_MAX_OUTPUT_INT]; int irq_count; - raw_spinlock_t lock; + hard_spinlock_t lock; int reg_num; int channel; struct irq_domain *domain; @@ -74,6 +74,7 @@ .name = "irqsteer", .irq_mask = imx_irqsteer_irq_mask, .irq_unmask = imx_irqsteer_irq_unmask, + .flags = IRQCHIP_PIPELINE_SAFE, }; static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq, diff --git a/kernel/drivers/irqchip/irq-omap-intc.c b/kernel/drivers/irqchip/irq-omap-intc.c index d360a6e..a864744 100644 --- a/kernel/drivers/irqchip/irq-omap-intc.c +++ b/kernel/drivers/irqchip/irq-omap-intc.c @@ -211,7 +211,7 @@ ct->chip.irq_mask = irq_gc_mask_disable_reg; ct->chip.irq_unmask = irq_gc_unmask_enable_reg; - ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE; + ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE; ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i; ct->regs.disable = INTC_MIR_SET0 + 32 * i; diff --git a/kernel/drivers/irqchip/irq-sun4i.c b/kernel/drivers/irqchip/irq-sun4i.c index fb78d66..4f5c42b 100644 --- a/kernel/drivers/irqchip/irq-sun4i.c +++ b/kernel/drivers/irqchip/irq-sun4i.c @@ -87,7 +87,7 @@ .irq_eoi = sun4i_irq_ack, .irq_mask = sun4i_irq_mask, .irq_unmask = sun4i_irq_unmask, - .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED, + .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | IRQCHIP_PIPELINE_SAFE, }; static int sun4i_irq_map(struct irq_domain *d, unsigned int virq, diff --git a/kernel/drivers/irqchip/irq-sunxi-nmi.c b/kernel/drivers/irqchip/irq-sunxi-nmi.c index a412b5d..ada16f4 100644 --- a/kernel/drivers/irqchip/irq-sunxi-nmi.c +++ b/kernel/drivers/irqchip/irq-sunxi-nmi.c @@ -200,7 +200,9 @@ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit; gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type; - gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED; + gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | + IRQCHIP_EOI_IF_HANDLED | + IRQCHIP_PIPELINE_SAFE; gc->chip_types[0].regs.ack = reg_offs->pend; gc->chip_types[0].regs.mask = reg_offs->enable; gc->chip_types[0].regs.type = reg_offs->ctrl; diff --git a/kernel/drivers/irqchip/irq-ti-sci-inta.c b/kernel/drivers/irqchip/irq-ti-sci-inta.c index 532d0ae..eec751b 100644 --- a/kernel/drivers/irqchip/irq-ti-sci-inta.c +++ b/kernel/drivers/irqchip/irq-ti-sci-inta.c @@ -262,6 +262,7 @@ list_add_tail(&vint_desc->list, &inta->vint_list); irq_set_chained_handler_and_data(vint_desc->parent_virq, ti_sci_inta_irq_handler, vint_desc); + irq_switch_oob(vint_desc->parent_virq, true); return vint_desc; free_vint_desc: @@ -543,6 +544,7 @@ .irq_set_affinity = ti_sci_inta_set_affinity, .irq_request_resources = ti_sci_inta_request_resources, .irq_release_resources = ti_sci_inta_release_resources, + .flags = IRQCHIP_PIPELINE_SAFE, }; /** diff --git a/kernel/drivers/memory/omap-gpmc.c b/kernel/drivers/memory/omap-gpmc.c index f80c2ea..ebcda04 100644 --- a/kernel/drivers/memory/omap-gpmc.c +++ b/kernel/drivers/memory/omap-gpmc.c @@ -1405,6 +1405,7 @@ gpmc->irq_chip.irq_mask = gpmc_irq_mask; gpmc->irq_chip.irq_unmask = gpmc_irq_unmask; gpmc->irq_chip.irq_set_type = gpmc_irq_set_type; + 
gpmc->irq_chip.flags = IRQCHIP_PIPELINE_SAFE; gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node, gpmc->nirqs, @@ -1415,7 +1416,7 @@ return -ENODEV; } - rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc); + rc = request_irq(gpmc->irq, gpmc_handle_irq, IRQF_OOB, "gpmc", gpmc); if (rc) { dev_err(gpmc->dev, "failed to request irq %d: %d\n", gpmc->irq, rc); diff --git a/kernel/drivers/mfd/tps65217.c b/kernel/drivers/mfd/tps65217.c index 2d9c282..7987fea 100644 --- a/kernel/drivers/mfd/tps65217.c +++ b/kernel/drivers/mfd/tps65217.c @@ -84,6 +84,7 @@ .irq_bus_sync_unlock = tps65217_irq_sync_unlock, .irq_enable = tps65217_irq_enable, .irq_disable = tps65217_irq_disable, + .flags = IRQCHIP_PIPELINE_SAFE, }; static struct mfd_cell tps65217s[] = { diff --git a/kernel/drivers/misc/Makefile b/kernel/drivers/misc/Makefile index eb485c3..d2adfbd 100644 --- a/kernel/drivers/misc/Makefile +++ b/kernel/drivers/misc/Makefile @@ -67,3 +67,4 @@ obj-$(CONFIG_KHADAS_MCU) += khadas-mcu.o obj-y += nkio/ obj-y += nkmcu/ +obj-y += atemsys-main/ diff --git a/kernel/drivers/misc/atemsys-main/COPYING b/kernel/drivers/misc/atemsys-main/COPYING new file mode 100644 index 0000000..d159169 --- /dev/null +++ b/kernel/drivers/misc/atemsys-main/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. 
If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. 
(Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. 
Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. 
The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/kernel/drivers/misc/atemsys-main/Makefile b/kernel/drivers/misc/atemsys-main/Makefile new file mode 100644 index 0000000..28e2cc9 --- /dev/null +++ b/kernel/drivers/misc/atemsys-main/Makefile @@ -0,0 +1,34 @@ +# atemsys.ko: Provides usermode access to: +# +# - PCI configuration space +# - Device IO memory +# - Contiguous DMA memory +# - Single device interrupt +# +# Copyright (c) 2009 - 2018 acontis technologies GmbH, Ravensburg, Germany <info@acontis.com> +# All rights reserved. +# +# Author: K. Olbrich <k.olbrich@acontis.com> +# +# To compile and load the atemsys driver +# +# make modules +# [ -c /dev/atemsys ] || sudo mknod /dev/atemsys c 101 0 +# sudo insmod atemsys.ko + +CONFIG_MODULE_SIG=n + +KERNELDIR ?= /lib/modules/$(shell uname -r)/build + +obj-m += atemsys.o + +all: modules + +modules: + $(MAKE) -C $(KERNELDIR) M=$(shell pwd) modules + +modules_install: + $(MAKE) -C $(KERNELDIR) M=$(shell pwd) modules_install + +clean: + $(MAKE) -C $(KERNELDIR) M=$(shell pwd) modules clean diff --git a/kernel/drivers/misc/atemsys-main/atemsys.c b/kernel/drivers/misc/atemsys-main/atemsys.c new file mode 100644 index 0000000..31dc919 --- /dev/null +++ b/kernel/drivers/misc/atemsys-main/atemsys.c @@ -0,0 +1,4885 @@ +/*----------------------------------------------------------------------------- + * atemsys.c + * Copyright (c) 2009 - 2020 acontis technologies GmbH, Ravensburg, Germany + * All rights reserved. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Responsible  Paul Bussmann
+ * Description  Provides usermode access to:
+ *   - PCI configuration space
+ *   - Device IO memory
+ *   - Contiguous DMA memory
+ *   - Single device interrupt
+ *
+ *
+ * The driver should be used in the following way:
+ *
+ * - Make sure this driver's device node is present, i.e. call "mknod /dev/atemsys c 101 0".
+ *
+ * - open()
+ *   Open driver (there can be more than one file descriptor active in parallel).
+ *
+ * - close()
+ *   Close driver. Free resources, if any were allocated.
+ *
+ * - ioctl(ATEMSYS_IOCTL_PCI_FIND_DEVICE)
+ *   Scan for PCI devices.
+ *   Input:  VendorID, DeviceID, InstanceNo
+ *   Output: BusNo, DevNo, FuncNo
+ *
+ * - ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE)
+ *   Configures a PCI device. This ioctl pins the given PCI device to the current file descriptor.
+ *   Input:  BusNo, DevNo, FuncNo
+ *   Output: Physical IO base address, IO area length, IRQ number
+ *   The device must be released explicitly in order to configure the next device. The ioctl fails
+ *   with errno EBUSY if the device is in use by another device driver.
+ *
+ * - ioctl(ATEMSYS_IOCTL_PCI_RELEASE_DEVICE)
+ *   Release the PCI device and free the resources assigned to it (interrupt, DMA memory, ...).
+ *
+ * - mmap(0, dwSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, fd, 0);
+ *   Allocates and maps DMA memory of size dwSize. Note that the last parameter (offset) must be 0.
+ *   Input:  Length in bytes
+ *   Output: Pointer to the allocated memory and DMA physical address. On success this address is
+ *           written into the first 4 bytes of the allocated memory.
+ *
+ * - mmap(0, IOphysSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, fd, IOphysAddr);
+ *   Maps IO memory of size IOphysSize.
+ *   PCI device:
+ *     First call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE). The IOphysAddr and IOphysSize
+ *     parameters must correspond with the base IO address and size returned by
+ *     ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE), or the ioctl will fail.
+ *   Non-PCI device:
+ *     Don't call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE) beforehand; just pass
+ *     IOphysAddr and IOphysSize. No checks are done.
+ *   Input:  Physical IO base address, IO area length in bytes
+ *   Output: Pointer to the mapped IO memory.
+ *   The user should call munmap() if the requested DMA memory is not needed anymore. In any case
+ *   the allocated / mapped memory is released / unmapped when the module is unloaded.
+ *
+ * - ioctl(ATEMSYS_IOCTL_INT_CONNECT)
+ *   Connect an ISR to the device's interrupt.
+ *   If the parameter is USE_PCI_INT, the IRQ is taken from the selected PCI device,
+ *   so in this case you have to call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE) first, or it will fail.
+ *   Input:  IRQ number or USE_PCI_INT
+ *   Output: none
+ *   The device interrupt is active if this ioctl succeeds. The caller should do a read() on the file
+ *   descriptor. The read call unblocks when an interrupt is received. Once the read has unblocked, the
+ *   interrupt is disabled on the (A)PIC and the caller must acknowledge the interrupt on the device
+ *   (write to the mmapped IO register). When the next read() is executed, the interrupt is enabled
+ *   again on the (A)PIC. A missing interrupt acknowledge will therefore hold the INT line active, and
+ *   interrupt thrashing will happen (the ISR is called again, read() unblocks, ...).
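+ *
+ *   A minimal usermode sketch of this connect/read/acknowledge cycle (illustrative
+ *   only, error handling omitted; the ATEMSYS_IOCTL_* requests and ATEMSYS_USE_PCI_INT
+ *   come from atemsys.h, and the acknowledge step is device specific):
+ *
+ *     int fd = open("/dev/atemsys", O_RDWR);
+ *     ioctl(fd, ATEMSYS_IOCTL_INT_CONNECT, ATEMSYS_USE_PCI_INT);
+ *     for (;;) {
+ *         __u32 dwCount = 0;
+ *         read(fd, &dwCount, sizeof(dwCount));   // blocks until an interrupt arrives
+ *         ... acknowledge the interrupt in the device's mmapped registers here ...
+ *     }
+ *     ioctl(fd, ATEMSYS_IOCTL_INT_DISCONNECT, 0);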
+ * Note that ioctl(ATEMSYS_IOCTL_INT_CONNECT) will fail with errno EPERM if the interrupt line is shared.
+ *   PCI device:
+ *     The ioctl will try to use Message Signaled Interrupts (MSI) if they are supported
+ *     by the PCI device. By definition, interrupts are never shared with MSI, and MSI is
+ *     mandatory for PCI Express :).
+ *
+ * - ioctl(ATEMSYS_IOCTL_INT_DISCONNECT)
+ *   Disconnect from the device's interrupt.
+ *
+ * - ioctl(ATEMSYS_IOCTL_INT_INFO)
+ *   Query the interrupt number in use.
+ *
+ * - read()
+ *   See ioctl(ATEMSYS_IOCTL_INT_CONNECT).
+ *
+ *
+ * For the list of changes see atemsys.h
+ *
+ *----------------------------------------------------------------------------*/
+
+#define ATEMSYS_C
+
+#include <linux/module.h>
+#include "atemsys.h"
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#if !(defined NO_IRQ) && (defined __aarch64__)
+#define NO_IRQ ((unsigned int)(-1))
+#endif
+
+#if (defined CONFIG_XENO_COBALT)
+#include <rtdm/driver.h>
+#else
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,00))
+#include <linux/sched/signal.h>
+#endif
+#include <linux/irq.h>
+#include <linux/list.h>
+#if (defined CONFIG_OF)
+#include <linux/of_device.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0))
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+
+#include <asm/current.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
+#include <linux/dma-direct.h>
+#endif
+
+#if (defined CONFIG_DTC)
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#endif /* CONFIG_DTC */
+#endif /* CONFIG_XENO_COBALT */
+
+#if ((defined CONFIG_OF) \
+    && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) /* not tested */))
+#define INCLUDE_ATEMSYS_DT_DRIVER 1
+#include <linux/etherdevice.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <asm/param.h>
+#include <linux/of_gpio.h>
+#include <linux/reset.h>
+#endif
+#if ((defined CONFIG_PCI) \
+    && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0) /* not tested */))
+#define INCLUDE_ATEMSYS_PCI_DRIVER 1
+#include <linux/aer.h>
+#endif
+
+#if !(defined HAVE_IRQ_TO_DESC) && !(defined CONFIG_HAVE_DOVETAIL) && !(defined CONFIG_IRQ_PIPELINE)
+  #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,1))
+  #define INCLUDE_IRQ_TO_DESC
+  #endif
+#else
+  #if HAVE_IRQ_TO_DESC
+  #define INCLUDE_IRQ_TO_DESC
+  #endif
+#endif
+
+/* legacy support */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0))
+#define wait_queue_entry_t wait_queue_t
+#endif
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+
+/* define this if IO memory should also be mapped into the kernel (for debugging only) */
+#undef DEBUG_IOREMAP
+
+MODULE_AUTHOR("acontis technologies GmbH <info@acontis.com>");
+MODULE_DESCRIPTION("Generic usermode PCI driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATEMSYS_VERSION_STR);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+#error "At least kernel version 2.6.18 is needed to compile!"
+#endif + +static char* AllowedPciDevices = "PCI_ANY_ID"; +module_param(AllowedPciDevices, charp, 0000); +MODULE_PARM_DESC(AllowedPciDevices, "Bind only pci devices in semicolon separated list e.g. AllowedPciDevices=\"0000:01:00.0\", empty string will turn off atemsys_pci driver."); + +/* Workaround for older kernels */ +/* from 'linux/kern_levels.h' */ +/* integer equivalents of KERN_<LEVEL> */ +#ifndef LOGLEVEL_ERR +#define LOGLEVEL_ERR 3 /* error conditions */ +#endif +#ifndef LOGLEVEL_WARNING +#define LOGLEVEL_WARNING 4 /* warning conditions */ +#endif +#ifndef LOGLEVEL_INFO +#define LOGLEVEL_INFO 6 /* informational */ +#endif +#ifndef LOGLEVEL_DEBUG +#define LOGLEVEL_DEBUG 7 /* debug-level messages */ +#endif + +static int loglevel = LOGLEVEL_INFO; +module_param(loglevel, int, 0); +MODULE_PARM_DESC(loglevel, "Set log level default LOGLEVEL_INFO, see /include/linux/kern_levels.h"); + +#if (defined CONFIG_XENO_COBALT) +#define PRINTK(prio, str, ...) rtdm_printk(prio ATEMSYS_DEVICE_NAME ": " str, ##__VA_ARGS__) +#else +#define PRINTK(prio, str, ...) printk(prio ATEMSYS_DEVICE_NAME ": " str, ##__VA_ARGS__) +#endif /* CONFIG_XENO_COBALT */ + +#define ERR(str, ...) (LOGLEVEL_ERR <= loglevel)? PRINTK(KERN_ERR, str, ##__VA_ARGS__) :0 +#define WRN(str, ...) (LOGLEVEL_WARNING <= loglevel)? PRINTK(KERN_WARNING, str, ##__VA_ARGS__) :0 +#define INF(str, ...) (LOGLEVEL_INFO <= loglevel)? PRINTK(KERN_INFO, str, ##__VA_ARGS__) :0 +#define DBG(str, ...) (LOGLEVEL_DEBUG <= loglevel)? PRINTK(KERN_INFO, str, ##__VA_ARGS__) :0 + + +#ifndef PAGE_UP +#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) +#endif +#ifndef PAGE_DOWN +#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1))) +#endif + +/* Comments: for kernel 2.6.18 add DMA_BIT_MASK*/ +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) +#endif + +#ifndef HAVE_ACCESS_OK_TYPE +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) +#define HAVE_ACCESS_OK_TYPE 0 +#else +#define HAVE_ACCESS_OK_TYPE 1 +#endif +#endif + +#if HAVE_ACCESS_OK_TYPE +#define ACCESS_OK(type, addr, size) access_ok(type, addr, size) +#else +#define ACCESS_OK(type, addr, size) access_ok(addr, size) +#endif + +#if ((defined CONFIG_OF) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0)) && !(defined CONFIG_XENO_COBALT)) + #define OF_DMA_CONFIGURE(dev, of_node) of_dma_configure(dev, of_node, true) +#elif ((defined CONFIG_OF) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) && !(defined CONFIG_XENO_COBALT)) + #define OF_DMA_CONFIGURE(dev, of_node) of_dma_configure(dev, of_node) +#elif ((defined CONFIG_OF) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) && !(defined CONFIG_XENO_COBALT)) + #define OF_DMA_CONFIGURE(dev, of_node) of_dma_configure(dev) +#else + #define OF_DMA_CONFIGURE(dev, of_node) +#endif + +typedef struct _ATEMSYS_T_IRQ_DESC +{ + u32 irq; + atomic_t count; + atomic_t totalCount; +#if (defined CONFIG_XENO_COBALT) + rtdm_irq_t irq_handle; + rtdm_event_t irq_event; +#else + atomic_t irqStatus; + wait_queue_head_t q; +#endif /* CONFIG_XENO_COBALT */ +#if (defined INCLUDE_IRQ_TO_DESC) + bool irq_is_level; +#endif +} ATEMSYS_T_IRQ_DESC; + +struct _ATEMSYS_T_PCI_DRV_DESC_PRIVATE; +struct _ATEMSYS_T_DRV_DESC_PRIVATE; +typedef struct _ATEMSYS_T_DEVICE_DESC +{ + struct list_head list; +#if (defined CONFIG_PCI) + struct pci_dev* pPcidev; + #if (defined INCLUDE_ATEMSYS_PCI_DRIVER) + struct _ATEMSYS_T_PCI_DRV_DESC_PRIVATE* pPciDrvDesc; + #endif +#endif + struct platform_device* pPlatformDev; + #if (defined INCLUDE_ATEMSYS_DT_DRIVER) + struct _ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDesc; + #endif + + ATEMSYS_T_IRQ_DESC irqDesc; + + /* supported features */ + bool bSupport64BitDma; +} ATEMSYS_T_DEVICE_DESC; + +typedef struct _ATEMSYS_T_MMAP_DESC +{ + struct list_head list; + ATEMSYS_T_DEVICE_DESC* pDevDesc; + dma_addr_t dmaAddr; + void* pVirtAddr; + size_t len; +} ATEMSYS_T_MMAP_DESC; + +#if (defined CONFIG_OF) +#define ATEMSYS_DT_DRIVER_NAME "atemsys" +/* udev auto-loading support via DTB */ +static const struct of_device_id atemsys_ids[] = { + { .compatible = ATEMSYS_DT_DRIVER_NAME }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, atemsys_ids); +#endif /* CONFIG_OF */ + + +#define ATEMSYS_MAX_NUMBER_DRV_INSTANCES 10 + +#if (defined INCLUDE_ATEMSYS_PCI_DRIVER) +typedef struct _ATEMSYS_T_PCI_DRV_DESC_PRIVATE +{ + struct pci_dev* pPciDev; + + int nPciDomain; + int nPciBus; + int nPciDev; + int nPciFun; + + unsigned short wVendorId; + unsigned short wDevice; + unsigned short wRevision; + unsigned short wSubsystem_vendor; + unsigned short wSubsystem_device; + + ATEMSYS_T_PCI_MEMBAR aBars[ATEMSYS_PCI_MAXBAR]; + int nBarCnt; + + ATEMSYS_T_DEVICE_DESC* pDevDesc; + unsigned int dwIndex; +} ATEMSYS_T_PCI_DRV_DESC_PRIVATE; + +static ATEMSYS_T_PCI_DRV_DESC_PRIVATE* S_apPciDrvDescPrivate[ATEMSYS_MAX_NUMBER_DRV_INSTANCES]; +#endif + +#if (defined INCLUDE_ATEMSYS_DT_DRIVER) +#define ATEMSYS_MAX_NUMBER_OF_CLOCKS 10 + +typedef struct +{ + void __iomem* pbyBase; + __u64 qwPhys; + __u32 dwSize; +} ATEMSYS_T_IOMEM; + +typedef struct _ATEMSYS_T_DRV_DESC_PRIVATE +{ + int nDev_id; + struct net_device* netdev; + struct platform_device* pPDev; + struct device_node* pDevNode; + + /* storage and identification */ + ATEMSYS_T_MAC_INFO MacInfo; + + /* powermanagement */ + struct reset_control* pResetCtl; + + /* clocks */ + const char* 
clk_ids[ATEMSYS_MAX_NUMBER_OF_CLOCKS]; + struct clk* clks[ATEMSYS_MAX_NUMBER_OF_CLOCKS]; + int nCountClk; + + /* PHY */ + ATEMSYS_T_PHY_INFO PhyInfo; + phy_interface_t PhyInterface; + struct device_node* pPhyNode; + struct device_node* pMdioNode; + struct device_node* pMdioDevNode; /* node for own mdio bus */ + struct phy_device* pPhyDev; + struct regulator* pPhyRegulator; + struct task_struct* etx_thread_StartPhy; + struct task_struct* etx_thread_StopPhy; + + /* PHY reset*/ + int nPhyResetGpioPin; + bool bPhyResetGpioActiveHigh; + int nPhyResetDuration; + int nPhyResetPostDelay; + + /* mdio */ + ATEMSYS_T_MDIO_ORDER MdioOrder; + struct mii_bus* pMdioBus; + struct mutex mdio_order_mutex; + struct mutex mdio_mutex; + wait_queue_head_t mdio_wait_queue; + int mdio_wait_queue_cnt; + +#ifdef CONFIG_TI_K3_UDMA + /* Ti CPSWG Channel, Flow & Ring */ +#define ATEMSYS_UDMA_CHANNELS 10 + void* apvTxChan[ATEMSYS_UDMA_CHANNELS]; + int anTxIrq[ATEMSYS_UDMA_CHANNELS]; + void* apvRxChan[ATEMSYS_UDMA_CHANNELS]; + int anRxIrq[ATEMSYS_UDMA_CHANNELS]; +#endif /*#ifdef CONFIG_TI_K3_UDMA*/ + +#define IOMEMLIST_LENGTH 20 + ATEMSYS_T_IOMEM oIoMemList[IOMEMLIST_LENGTH]; + + /* frame descriptor of the EcMaster connection */ + ATEMSYS_T_DEVICE_DESC* pDevDesc; + +} ATEMSYS_T_DRV_DESC_PRIVATE; + +static ATEMSYS_T_DRV_DESC_PRIVATE* S_apDrvDescPrivate[ATEMSYS_MAX_NUMBER_DRV_INSTANCES]; + +static int StartPhyThread(void* pvData); +static int StopPhyThread(void* pvData); +static int CleanUpEthernetDriverOnRelease(ATEMSYS_T_DEVICE_DESC* pDevDesc); +static int GetMacInfoIoctl(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam); +static int PhyStartStopIoctl( unsigned long ioctlParam); +static int GetMdioOrderIoctl(unsigned long ioctlParam); +static int ReturnMdioOrderIoctl(unsigned long ioctlParam); +static int GetPhyInfoIoctl(unsigned long ioctlParam); +static int PhyResetIoctl(unsigned long ioctlParam); +static int ResetPhyViaGpio(ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate); +static int EthernetDriverRemove(struct platform_device* pPDev); +static int EthernetDriverProbe(struct platform_device* pPDev); + +#if (defined CONFIG_XENO_COBALT) +static int StartPhy(struct platform_device* pPDev); +static int StopPhy(struct platform_device* pPDev); +typedef struct _ATEMSYS_T_WORKER_THREAD_DESC +{ + struct task_struct* etx_thread; + int (* pfNextTask)(void*); + void* pNextTaskData; + struct mutex WorkerTask_mutex; + bool bWorkerTaskShutdown; + bool bWorkerTaskRunning; +} ATEMSYS_T_WORKER_THREAD_DESC; +static ATEMSYS_T_WORKER_THREAD_DESC S_oAtemsysWorkerThreadDesc; + +static int AtemsysWorkerThread(void* data) +{ + void* pWorkerTaskData = NULL; + int (* pfWorkerTask)(void*); + pfWorkerTask = NULL; + + S_oAtemsysWorkerThreadDesc.bWorkerTaskRunning = true; + + for (;;) + { + mutex_lock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + if (S_oAtemsysWorkerThreadDesc.bWorkerTaskShutdown) + { + mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + break; + } + pfWorkerTask = S_oAtemsysWorkerThreadDesc.pfNextTask; + pWorkerTaskData = S_oAtemsysWorkerThreadDesc.pNextTaskData; + S_oAtemsysWorkerThreadDesc.pfNextTask = NULL; + S_oAtemsysWorkerThreadDesc.pNextTaskData = NULL; + mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + + if ((NULL != pfWorkerTask) && (NULL != pWorkerTaskData)) + { + pfWorkerTask(pWorkerTaskData); + } + msleep(100); + } + + S_oAtemsysWorkerThreadDesc.bWorkerTaskRunning = false; + + return 0; +} +#endif /* #if (defined CONFIG_XENO_COBALT) */ + +#endif /* INCLUDE_ATEMSYS_DT_DRIVER */ + + 
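+/*
+ * Usermode counterpart of the DMA mmap protocol served by the handlers below
+ * (illustrative sketch only, error handling omitted; per the description at the
+ * top of this file, offset 0 requests a DMA allocation and the driver writes the
+ * DMA address into the first 4 bytes of the new mapping):
+ *
+ *     unsigned char* pMem = mmap(0, dwSize, PROT_READ | PROT_WRITE,
+ *                                MAP_SHARED | MAP_LOCKED, fd, 0);
+ *     __u32 dwDmaAddr = 0;
+ *     memcpy(&dwDmaAddr, pMem, sizeof(dwDmaAddr));  // DMA address for the device
+ *     ... program the device with dwDmaAddr, use pMem from usermode ...
+ *     munmap(pMem, dwSize);                         // ends up in dev_munmap() below
+ */
+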
+static void dev_munmap(struct vm_area_struct* vma); + +#if (defined CONFIG_XENO_COBALT) + static int dev_interrupt_handler(rtdm_irq_t* irq_handle); +#else + static irqreturn_t dev_interrupt_handler(int nIrq, void* pParam); +#endif /* CONFIG_XENO_COBALT */ + +static struct vm_operations_struct mmap_vmop = +{ + .close = dev_munmap, +}; + +static DEFINE_MUTEX(S_mtx); +static ATEMSYS_T_DEVICE_DESC S_DevNode; +static struct class* S_pDevClass; +static struct device* S_pDev; +static struct platform_device* S_pPlatformDev = NULL; + +#if !(defined CONFIG_XENO_COBALT) +static void dev_enable_irq(ATEMSYS_T_IRQ_DESC* pIrqDesc) +{ + /* enable/disable level type interrupts, not edge type interrupts */ +#if (defined INCLUDE_IRQ_TO_DESC) + if (pIrqDesc->irq_is_level) +#endif + { + atomic_inc(&pIrqDesc->irqStatus); + enable_irq(pIrqDesc->irq); + } +} + +static void dev_disable_irq(ATEMSYS_T_IRQ_DESC* pIrqDesc) +{ + /* enable/disable level type interrupts, not edge type interrupts */ +#if (defined INCLUDE_IRQ_TO_DESC) + if (!pIrqDesc->irq_is_level) return; +#endif + + if (atomic_read(&pIrqDesc->irqStatus) > 0) + { + disable_irq_nosync(pIrqDesc->irq); + atomic_dec(&pIrqDesc->irqStatus); + } +} + +static int dev_irq_disabled(ATEMSYS_T_IRQ_DESC* pIrqDesc) +{ + /* only level type interrupts get disabled */ +#if (defined INCLUDE_IRQ_TO_DESC) + if (!pIrqDesc->irq_is_level) return 0; +#endif + + if (atomic_read(&pIrqDesc->irqStatus) == 0) + { + return 1; + } + return 0; +} +#endif /* !CONFIG_XENO_COBALT */ + +#if (!defined __arm__) && (!defined __aarch64__) +static void* dev_dma_alloc(u32 dwLen, dma_addr_t* pDmaAddr) +{ + unsigned long virtAddr; + unsigned long tmpAddr; + u32 tmpSize; + + virtAddr = __get_free_pages(GFP_KERNEL | GFP_DMA, get_order(dwLen)); + if (! virtAddr) + { + ERR("mmap: __get_free_pages failed\n"); + return NULL; + } + + tmpAddr = virtAddr; + tmpSize = dwLen; + + while (tmpSize > 0) + { + SetPageReserved( virt_to_page(tmpAddr) ); + tmpAddr += PAGE_SIZE; + tmpSize -= PAGE_SIZE; + } + + *pDmaAddr = virt_to_phys((void*) virtAddr); + + return (void*) virtAddr; +} + +static void dev_dma_free(u32 dwLen, void* virtAddr) +{ + unsigned long tmpAddr = (unsigned long) virtAddr; + u32 tmpSize = dwLen; + + while (tmpSize > 0) + { + ClearPageReserved( virt_to_page(tmpAddr) ); + tmpAddr += PAGE_SIZE; + tmpSize -= PAGE_SIZE; + } + + free_pages((unsigned long) virtAddr, get_order(dwLen)); +} +#endif /* !__arm__ */ + +static void dev_munmap(struct vm_area_struct* vma) +{ + ATEMSYS_T_MMAP_DESC* pMmapDesc = (ATEMSYS_T_MMAP_DESC*) vma->vm_private_data; + + INF("dev_munmap: 0x%px -> 0x%px (%d)\n", + (void*) pMmapDesc->pVirtAddr, (void*)(unsigned long)pMmapDesc->dmaAddr, (int) pMmapDesc->len); + if (0 == pMmapDesc->dmaAddr) { INF("dev_munmap: 0 == pMmapDesc->dmaAddr!\n"); return; } + if (NULL == pMmapDesc->pVirtAddr) { INF("dev_munmap: NULL == pMmapDesc->pVirtAddr!\n"); return; } + + /* free DMA memory */ +#if (defined CONFIG_PCI) + if (pMmapDesc->pDevDesc->pPcidev == NULL) +#endif + { +#if (defined __arm__) || (defined __aarch64__) + dmam_free_coherent(&pMmapDesc->pDevDesc->pPlatformDev->dev, pMmapDesc->len, pMmapDesc->pVirtAddr, pMmapDesc->dmaAddr); +#else + dev_dma_free(pMmapDesc->len, pMmapDesc->pVirtAddr); +#endif + } +#if (defined CONFIG_PCI) + else + { +#if ((defined __aarch64__) \ + || (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) \ + || ((defined __arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))) \ + || ((defined __amd64__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))) ) + 
dma_free_coherent(&pMmapDesc->pDevDesc->pPcidev->dev, pMmapDesc->len, pMmapDesc->pVirtAddr, pMmapDesc->dmaAddr); +#else + pci_free_consistent(pMmapDesc->pDevDesc->pPcidev, pMmapDesc->len, pMmapDesc->pVirtAddr, pMmapDesc->dmaAddr); +#endif /* __aarch64__ */ + } +#endif /* CONFIG_PCI */ + kfree(pMmapDesc); +} + +#if (defined CONFIG_PCI) +/* + * Lookup PCI device + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) +struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) +{ + struct pci_dev* dev = NULL; + + for_each_pci_dev(dev) { + if (pci_domain_nr(dev->bus) == 0 && + (dev->bus->number == bus && dev->devfn == devfn)) + return dev; + } + return dev; +} +#endif + +static int dev_pci_select_device(ATEMSYS_T_DEVICE_DESC* pDevDesc, ATEMSYS_T_PCI_SELECT_DESC* pPciDesc, size_t size) +{ + int nRetVal = -EFAULT; + s32 nPciBus, nPciDev, nPciFun, nPciDomain; + + switch (size) + { + case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00): + { + ATEMSYS_T_PCI_SELECT_DESC_v1_0_00 oPciDesc_v1_0_00; + nRetVal = copy_from_user(&oPciDesc_v1_0_00, (ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)pPciDesc, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)); + if (0 != nRetVal) + { + ERR("dev_pci_select_device failed: %d\n", nRetVal); + goto Exit; + } + nPciBus = oPciDesc_v1_0_00.nPciBus; + nPciDev = oPciDesc_v1_0_00.nPciDev; + nPciFun = oPciDesc_v1_0_00.nPciFun; + nPciDomain = 0; + } break; + case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05): + { + ATEMSYS_T_PCI_SELECT_DESC_v1_3_05 oPciDesc_v1_3_05; + nRetVal = copy_from_user(&oPciDesc_v1_3_05, (ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)pPciDesc, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)); + if (0 != nRetVal) + { + ERR("dev_pci_select_device failed: %d\n", nRetVal); + goto Exit; + } + nPciBus = oPciDesc_v1_3_05.nPciBus; + nPciDev = oPciDesc_v1_3_05.nPciDev; + nPciFun = oPciDesc_v1_3_05.nPciFun; + nPciDomain = oPciDesc_v1_3_05.nPciDomain; + } break; + case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12): + { + ATEMSYS_T_PCI_SELECT_DESC_v1_4_12 oPciDesc_v1_4_12; + nRetVal = copy_from_user(&oPciDesc_v1_4_12, (ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)pPciDesc, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)); + if (0 != nRetVal) + { + ERR("dev_pci_select_device failed: %d\n", nRetVal); + goto Exit; + } + nPciBus = oPciDesc_v1_4_12.nPciBus; + nPciDev = oPciDesc_v1_4_12.nPciDev; + nPciFun = oPciDesc_v1_4_12.nPciFun; + nPciDomain = oPciDesc_v1_4_12.nPciDomain; + } break; + default: + { + nRetVal = -EFAULT; + ERR("pci_conf: EFAULT\n"); + goto Exit; + } + } + + /* Lookup for pci_dev object */ + pDevDesc->pPcidev = NULL; +#if (defined INCLUDE_ATEMSYS_PCI_DRIVER) + pDevDesc->pPciDrvDesc = NULL; + { + unsigned int i = 0; + + for (i = 0; i < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; i++) + { + ATEMSYS_T_PCI_DRV_DESC_PRIVATE* pDrvInstance = S_apPciDrvDescPrivate[i]; + if ( (pDrvInstance != NULL) + && (pDrvInstance->nPciDomain == nPciDomain) + && (pDrvInstance->nPciBus == nPciBus) + && (pDrvInstance->nPciDev == nPciDev) + && (pDrvInstance->nPciFun == nPciFun)) + { + if (pDrvInstance->pDevDesc != NULL) + { + ERR("dev_pci_select_device: device \"%s\" in use by another instance?\n", pci_name(pDrvInstance->pPciDev)); + nRetVal = -EBUSY; + goto Exit; + } + pDevDesc->pPcidev = pDrvInstance->pPciDev; + pDevDesc->pPciDrvDesc = pDrvInstance; + pDrvInstance->pDevDesc = pDevDesc; + INF("pci_select: from pci driver %04x:%02x:%02x.%x\n", (u32)nPciDomain, (u32)nPciBus, (u32)nPciDev, (u32)nPciFun); + break; + } + } + } + if (pDevDesc->pPcidev == NULL) +#endif + { + pDevDesc->pPcidev = pci_get_domain_bus_and_slot(nPciDomain, nPciBus, 
PCI_DEVFN(nPciDev, nPciFun)); + INF("pci_select: %04x:%02x:%02x.%x\n", (u32)nPciDomain, (u32)nPciBus, (u32)nPciDev, (u32)nPciFun); + } + if (pDevDesc->pPcidev == NULL) + { + WRN("pci_select: PCI-Device %04x:%02x:%02x.%x not found\n", + (unsigned) nPciDomain, (unsigned) nPciBus, (unsigned) nPciDev, (unsigned) nPciFun); + goto Exit; + } + + nRetVal = DRIVER_SUCCESS; + +Exit: + return nRetVal; +} + +static int DefaultPciSettings(struct pci_dev* pPciDev) +{ + int nRetVal = -EIO; + int nRes = -EIO; + + /* Turn on Memory-Write-Invalidate if it is supported by the device*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + pci_set_mwi(pPciDev); +#else + pci_try_set_mwi(pPciDev); +#endif + + /* remove wrong dma_coherent bit on ARM systems */ +#if ((defined __aarch64__) || (defined __arm__)) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) + #if (defined CONFIG_PHYS_ADDR_T_64BIT) + if (is_device_dma_coherent(&pPciDev->dev)) + { + pPciDev->dev.archdata.dma_coherent = false; + INF("%s: DefaultPciSettings: Clear device.archdata dma_coherent bit!\n", pci_name(pPciDev)); + } + #endif + #else + #if ((defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL))) + if (0 != pPciDev->dev.dma_coherent) + { + pPciDev->dev.dma_coherent = 0; + INF("%s: DefaultPciSettings: Clear device dma_coherent bit!\n", pci_name(pPciDev)); + } + #endif + #endif +#endif + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) || !(defined __aarch64__)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,55)) + nRes = dma_set_coherent_mask(&pPciDev->dev, DMA_BIT_MASK(32)); +#else + nRes = dma_set_mask_and_coherent(&pPciDev->dev, DMA_BIT_MASK(32)); +#endif + if (nRes) +#endif + { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,55)) + nRes = dma_set_coherent_mask(&pPciDev->dev, DMA_BIT_MASK(64)); +#else + nRes = dma_set_mask_and_coherent(&pPciDev->dev, DMA_BIT_MASK(64)); +#endif + if (nRes) + { + ERR("%s: DefaultPciSettings: dma_set_mask_and_coherent failed\n", pci_name(pPciDev)); + nRetVal = nRes; + goto Exit; + } + } + pci_set_master(pPciDev); + + /* Try to enable MSI (Message Signaled Interrupts). MSI's are non shared, so we can + * use interrupt mode, also if we have a non exclusive interrupt line with legacy + * interrupts. 
+ */ + if (pci_enable_msi(pPciDev)) + { + INF("%s: DefaultPciSettings: legacy INT configured\n", pci_name(pPciDev)); + } + else + { + INF("%s: DefaultPciSettings: MSI configured\n", pci_name(pPciDev)); + } + + nRetVal = 0; + +Exit: + return nRetVal; +} + +/* + * See also kernel/Documentation/PCI/pci.txt for the recommended PCI initialization sequence + */ +static int ioctl_pci_configure_device(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam, size_t size) +{ + int nRetVal = -EIO; + int nRc; + int i; + unsigned long ioBase; + s32 nBar = 0; + u32 dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,0,0); + ATEMSYS_T_PCI_SELECT_DESC_v1_4_12 oPciDesc; + memset(&oPciDesc, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)); + switch (size) + { + case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00): + { + dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,0,0); + } break; + case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05): + { + dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,3,5); + } break; + case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12): + { + dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,4,12); + } break; + default: + { + nRetVal = -EIO; + ERR("pci_conf: Invalid parameter\n"); + goto Exit; + } + } + + if (pDevDesc->pPcidev != NULL) + { + WRN("pci_conf: error call ioctl(ATEMSYS_IOCTL_PCI_RELEASE_DEVICE) first\n"); + goto Exit; + } + if (dev_pci_select_device(pDevDesc, (ATEMSYS_T_PCI_SELECT_DESC*)ioctlParam, size) != DRIVER_SUCCESS) + { + goto Exit; + } + +#if (defined INCLUDE_ATEMSYS_PCI_DRIVER) + if (NULL != pDevDesc->pPciDrvDesc) + { + for (i = 0; i < pDevDesc->pPciDrvDesc->nBarCnt ; i++) + { + if ((EC_ATEMSYSVERSION(1,4,12) != dwAtemsysApiVersion) && (pDevDesc->pPciDrvDesc->aBars[i].qwIOMem > 0xFFFFFFFF)) + { + ERR("pci_conf: 64-Bit IO address not supported\n"); + INF("pci_conf: Update LinkLayer for 64-Bit IO address support!\n"); + nRetVal = -ENODEV; + goto Exit; + } + + oPciDesc.aBar[i].qwIOMem = pDevDesc->pPciDrvDesc->aBars[i].qwIOMem; + oPciDesc.aBar[i].dwIOLen = pDevDesc->pPciDrvDesc->aBars[i].dwIOLen; + + } + + oPciDesc.nBarCnt = pDevDesc->pPciDrvDesc->nBarCnt; + oPciDesc.dwIrq = (u32)pDevDesc->pPcidev->irq; + } + else +#endif + { + /* enable device */ + nRc = pci_enable_device(pDevDesc->pPcidev); + if (nRc < 0) + { + ERR("pci_conf: pci_enable_device failed\n"); + pDevDesc->pPcidev = NULL; + goto Exit; + } + + /* Check if IO-memory is in use by another driver */ + nRc = pci_request_regions(pDevDesc->pPcidev, ATEMSYS_DEVICE_NAME); + if (nRc < 0) + { + ERR("pci_conf: device \"%s\" in use by another driver?\n", pci_name(pDevDesc->pPcidev)); + pDevDesc->pPcidev = NULL; + nRetVal = -EBUSY; + goto Exit; + } + + /* find the memory BAR */ + for (i = 0; i < ATEMSYS_PCI_MAXBAR ; i++) + { + if (pci_resource_flags(pDevDesc->pPcidev, i) & IORESOURCE_MEM) + { + /* IO area address */ + ioBase = pci_resource_start(pDevDesc->pPcidev, i); + + if ((EC_ATEMSYSVERSION(1,4,12) != dwAtemsysApiVersion) && (ioBase > 0xFFFFFFFF)) + { + ERR("pci_conf: 64-Bit IO address not supported\n"); + pci_release_regions(pDevDesc->pPcidev); + pDevDesc->pPcidev = NULL; + nRetVal = -ENODEV; + goto Exit; + } + + /* IO area length */ + oPciDesc.aBar[nBar].dwIOLen = pci_resource_len(pDevDesc->pPcidev, i); + oPciDesc.aBar[nBar].qwIOMem = ioBase; + + nBar++; + } + } + + nRc = DefaultPciSettings(pDevDesc->pPcidev); + if (nRc) + { + pci_release_regions(pDevDesc->pPcidev); + pDevDesc->pPcidev = NULL; + goto Exit; + } + + /* number of memory BARs */ + /* assigned IRQ */ + oPciDesc.nBarCnt = nBar; + oPciDesc.dwIrq = pDevDesc->pPcidev->irq; + } + +#if defined(__arm__) 
&& 0 + /* + * This is required for TI's TMDXEVM8168 (Cortex A8) eval board + * \sa TI "DM81xx AM38xx PCI Express Root Complex Driver User Guide" + * "DM81xx RC supports maximum remote read request size (MRRQS) as 256 bytes" + */ + pcie_set_readrq(pDevDesc->pPcidev, 256); +#endif + + switch (dwAtemsysApiVersion) + { + case EC_ATEMSYSVERSION(1,0,0): + { + ATEMSYS_T_PCI_SELECT_DESC_v1_0_00 oPciDesc_v1_0_00; + memset(&oPciDesc_v1_0_00, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)); + if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00))) + { + nRetVal = -EFAULT; + ERR("pci_conf: EFAULT\n"); + goto Exit; + } + oPciDesc_v1_0_00.nBarCnt = oPciDesc.nBarCnt; + oPciDesc_v1_0_00.dwIrq = oPciDesc.dwIrq; + for (i = 0; i < oPciDesc_v1_0_00.nBarCnt ; i++) + { + oPciDesc_v1_0_00.aBar[i].dwIOLen = oPciDesc.aBar[i].dwIOLen; + oPciDesc_v1_0_00.aBar[i].dwIOMem = (u32)oPciDesc.aBar[i].qwIOMem; + } + nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, &oPciDesc_v1_0_00, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)); + if (0 != nRetVal) + { + ERR("ioctl_pci_configure_device failed: %d\n", nRetVal); + goto Exit; + } + } break; + case EC_ATEMSYSVERSION(1,3,5): + { + ATEMSYS_T_PCI_SELECT_DESC_v1_3_05 oPciDesc_v1_3_05; + memset(&oPciDesc_v1_3_05, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)); + if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05))) + { + nRetVal = -EFAULT; + ERR("pci_conf: EFAULT\n"); + goto Exit; + } + oPciDesc_v1_3_05.nBarCnt = oPciDesc.nBarCnt; + oPciDesc_v1_3_05.dwIrq = oPciDesc.dwIrq; + for (i = 0; i < oPciDesc_v1_3_05.nBarCnt ; i++) + { + oPciDesc_v1_3_05.aBar[i].dwIOLen = oPciDesc.aBar[i].dwIOLen; + oPciDesc_v1_3_05.aBar[i].dwIOMem = (u32)oPciDesc.aBar[i].qwIOMem; + } + nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, &oPciDesc_v1_3_05, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)); + if (0 != nRetVal) + { + ERR("ioctl_pci_configure_device failed: %d\n", nRetVal); + goto Exit; + } + } break; + case EC_ATEMSYSVERSION(1,4,12): + { + if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12))) + { + nRetVal = -EFAULT; + ERR("pci_conf: EFAULT\n"); + goto Exit; + } + nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, &oPciDesc, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)); + if (0 != nRetVal) + { + ERR("ioctl_pci_configure_device failed: %d\n", nRetVal); + goto Exit; + } + } break; + default: + { + nRetVal = -EFAULT; + goto Exit; + } + } + + nRetVal = 0; + +Exit: + return nRetVal; +} + +static int ioctl_pci_finddevice(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam, size_t size) +{ + int nRetVal = -EIO; + struct pci_dev* pPciDev = NULL; + s32 nVendor, nDevice, nInstance, nInstanceId; + u32 dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,0,0); + ATEMSYS_T_PCI_SELECT_DESC_v1_0_00 oPciDesc_v1_0_00; + ATEMSYS_T_PCI_SELECT_DESC_v1_3_05 oPciDesc_v1_3_05; + ATEMSYS_T_PCI_SELECT_DESC_v1_4_12 oPciDesc_v1_4_12; + + + + + switch (size) + { + case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00): + { + dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,0,0); + memset(&oPciDesc_v1_0_00, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)); + if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00))) + { + nRetVal = -EFAULT; + } + nRetVal = copy_from_user(&oPciDesc_v1_0_00, (ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, 
sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)); + if (0 != nRetVal) + { + ERR("ioctl_pci_finddevice failed: %d\n", nRetVal); + goto Exit; + } + nVendor = oPciDesc_v1_0_00.nVendID; + nDevice = oPciDesc_v1_0_00.nDevID; + nInstance = oPciDesc_v1_0_00.nInstance; + } break; + case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05): + { + dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,3,5); + memset(&oPciDesc_v1_3_05, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)); + if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05))) + { + nRetVal = -EFAULT; + } + nRetVal = copy_from_user(&oPciDesc_v1_3_05, (ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)); + if (0 != nRetVal) + { + ERR("ioctl_pci_finddevice failed: %d\n", nRetVal); + goto Exit; + } + nVendor = oPciDesc_v1_3_05.nVendID; + nDevice = oPciDesc_v1_3_05.nDevID; + nInstance = oPciDesc_v1_3_05.nInstance; + } break; + case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12): + { + dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,4,12); + memset(&oPciDesc_v1_4_12, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)); + if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12))) + { + nRetVal = -EFAULT; + } + nRetVal = copy_from_user(&oPciDesc_v1_4_12, (ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)); + if (0 != nRetVal) + { + ERR("ioctl_pci_finddevice failed: %d\n", nRetVal); + goto Exit; + } + nVendor = oPciDesc_v1_4_12.nVendID; + nDevice = oPciDesc_v1_4_12.nDevID; + nInstance = oPciDesc_v1_4_12.nInstance; + } break; + default: + { + nRetVal = -EIO; + ERR("pci_conf: Invalid parameter\n"); + goto Exit; + } + } + + if (-EFAULT == nRetVal) + { + ERR("pci_find: EFAULT\n"); + nRetVal = -EFAULT; + goto Exit; + } + + INF("pci_find: ven 0x%x dev 0x%x nInstance %d\n", nVendor, nDevice, nInstance); + + for (nInstanceId = 0; nInstanceId <= nInstance; nInstanceId++ ) + { + pPciDev = pci_get_device (nVendor, nDevice, pPciDev); + } + + if (pPciDev == NULL) + { + WRN("pci_find: device 0x%x:0x%x:%d not found\n", nVendor, nDevice, nInstance); + nRetVal = -ENODEV; + goto Exit; + } + + INF("pci_find: found 0x%x:0x%x:%d -> %s\n", + nVendor, nDevice, nInstance, pci_name(pPciDev)); + + switch (dwAtemsysApiVersion) + { + case EC_ATEMSYSVERSION(1,0,0): + { + oPciDesc_v1_0_00.nPciBus = (s32)pPciDev->bus->number; + oPciDesc_v1_0_00.nPciDev = (s32)PCI_SLOT(pPciDev->devfn); + oPciDesc_v1_0_00.nPciFun = (s32)PCI_FUNC(pPciDev->devfn); + + nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, &oPciDesc_v1_0_00, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)); + if (0 != nRetVal) + { + ERR("ioctl_pci_finddevice failed: %d\n", nRetVal); + goto Exit; + } + } break; + case EC_ATEMSYSVERSION(1,3,5): + { + oPciDesc_v1_3_05.nPciDomain = (s32)pci_domain_nr(pPciDev->bus); + oPciDesc_v1_3_05.nPciBus = (s32)pPciDev->bus->number; + oPciDesc_v1_3_05.nPciDev = (s32)PCI_SLOT(pPciDev->devfn); + oPciDesc_v1_3_05.nPciFun = (s32)PCI_FUNC(pPciDev->devfn); + + nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, &oPciDesc_v1_3_05, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)); + if (0 != nRetVal) + { + ERR("ioctl_pci_finddevice failed: %d\n", nRetVal); + goto Exit; + } + } break; + case EC_ATEMSYSVERSION(1,4,12): + { + oPciDesc_v1_4_12.nPciDomain = (s32)pci_domain_nr(pPciDev->bus); + oPciDesc_v1_4_12.nPciBus = (s32)pPciDev->bus->number; + oPciDesc_v1_4_12.nPciDev = (s32)PCI_SLOT(pPciDev->devfn); + 
oPciDesc_v1_4_12.nPciFun = (s32)PCI_FUNC(pPciDev->devfn); + + nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, &oPciDesc_v1_4_12, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)); + if (0 != nRetVal) + { + ERR("ioctl_pci_finddevice failed: %d\n", nRetVal); + goto Exit; + } + } break; + } + + nRetVal = 0; + +Exit: + return nRetVal; +} +#endif /* CONFIG_PCI */ + +#if (defined CONFIG_DTC) +/* + * Lookup Nth (0: first) compatible device tree node with "interrupts" property present. + */ +static struct device_node * atemsys_of_lookup_intnode(const char* compatible, int deviceIdx) +{ + struct device_node* device = NULL; + struct device_node* child = NULL; + struct device_node* tmp = NULL; + int devCnt; + + /* Lookup Nth device tree node */ + devCnt = 0; + for_each_compatible_node(tmp, NULL, compatible) + { + if (devCnt == deviceIdx) + { + device = tmp; + break; + } + ++devCnt; + } + + if (device == NULL) return NULL; + + if (of_get_property(device, "interrupts", NULL)) return device; + + /* i.e. vETSEC has 2 groups. Search them */ + for_each_child_of_node(device, child) + { + if (of_get_property(child, "interrupts", NULL)) return child; + } + + return NULL; +} + +/* + * Map interrupt number taken from the OF Device Tree (\sa .dts file) into + * virtual interrupt number which can be passed to request_irq(). + * The usual (device driver) way is to use the irq_of_parse_and_map() function. + * + * We search all device tree nodes which have the "compatible" property + * equal to compatible. Search until the Nth device is found. Then + * map the Nth interrupt (given by intIdx) with irq_of_parse_and_map(). + */ +static unsigned atemsys_of_map_irq_to_virq(const char* compatible, int deviceIdx, int intIdx) +{ + unsigned virq; + struct device_node* device = NULL; + + /* Lookup Nth device */ + device = atemsys_of_lookup_intnode(compatible, deviceIdx); + if (! 
device) + { + INF("atemsys_of_map_irq_to_virq: device tree node '%s':%d not found.\n", + compatible, deviceIdx); + return NO_IRQ; + } + + virq = irq_of_parse_and_map(device, intIdx); + if (virq == NO_IRQ) + { + ERR("atemsys_of_map_irq_to_virq: irq_of_parse_and_map failed for" + " device tree node '%s':%d, IntIdx %d.\n", + compatible, deviceIdx, intIdx); + } + + return virq; +} +#if (defined INCLUDE_ATEMSYS_DT_DRIVER) +static unsigned int atemsysDtDriver_of_map_irq_to_virq(ATEMSYS_T_DEVICE_DESC* pDevDesc, int nIdx) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + struct device_node* device = NULL; + unsigned int irq; + unsigned int i = 0; + + /* get node from atemsys platform driver list */ + for (i = 0; i < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; i++) + { + + pDrvDescPrivate = S_apDrvDescPrivate[i]; + if (NULL == pDrvDescPrivate) + { + continue; + } + + if (pDrvDescPrivate->pDevDesc == pDevDesc) + { + device = pDrvDescPrivate->pDevNode; + break; + } + } + if ((NULL == device) || (NULL == pDrvDescPrivate)) + { + INF("atemsysDtDriver_of_map_irq_to_virq: Cannot find connected device tree node\n"); + return NO_IRQ; + } + + /* get interrupt from node */ + irq = irq_of_parse_and_map(device, nIdx); + if (NO_IRQ == irq) + { + ERR("atemsysDtDriver_of_map_irq_to_virq: irq_of_parse_and_map failed for" + " device tree node Interrupt index %d\n", + nIdx); + } + + return irq; +} +#endif /* INCLUDE_ATEMSYS_DT_DRIVER) */ +#endif /* CONFIG_DTC */ + +#if (defined INCLUDE_IRQ_TO_DESC) +static bool atemsys_irq_is_level(unsigned int irq_id) +{ + bool irq_is_level = true; + struct irq_data* irq_data = NULL; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,1)) + { + irq_data = irq_get_irq_data(irq_id); + } +#else + { + struct irq_desc* desc; + desc = irq_to_desc(irq_id); + if (desc) + { + irq_data = &desc->irq_data; + } + } +#endif + if (irq_data) + { + irq_is_level = irqd_is_level_type(irq_data); + } + + return irq_is_level; +} +#endif /* INCLUDE_IRQ_TO_DESC */ + +static int ioctl_int_connect(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam) +{ + int nRetVal = -EIO; + int nRc; + ATEMSYS_T_IRQ_DESC* pIrqDesc = NULL; + unsigned int irq = 0; + +#if (defined CONFIG_PCI) + if (ioctlParam == ATEMSYS_USE_PCI_INT) + { + /* Use IRQ number from selected PCI device */ + + if (pDevDesc->pPcidev == NULL) + { + WRN("intcon: error call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE) first\n"); + goto Exit; + } + + irq = pDevDesc->pPcidev->irq; + INF("intcon: Use IRQ (%d) from PCI config\n", irq); + } + else +#endif /* CONFIG_PCI */ + { +#if (defined CONFIG_DTC) + /* The ioctlParam is the Nth compatible device in the OF device tree (0: first, 1: second, ...) 
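+         * Example: ioctlParam == 1 selects the second matching node (e.g. the second
+         *          "fsl,etsec2" instance) and maps its Rx interrupt (index 1).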
+ * TODO "compatible string" and "interrupt index" should be provided by usermode as IOCTL param + */ + if ( /* Use interrupt number at idx 1 (Rx-Interrupt) for TSEC / eTSEC */ + ((irq = atemsys_of_map_irq_to_virq("fsl,etsec2", ioctlParam, 1)) == NO_IRQ) /* PPC, eTSEC */ + && ((irq = atemsys_of_map_irq_to_virq("gianfar", ioctlParam, 1)) == NO_IRQ) /* PPC, eTSEC */ + /* PRU-ICSS for am572x, am335x */ + && ((irq = atemsys_of_map_irq_to_virq("acontis,device", 0, ioctlParam)) == NO_IRQ) + /* Use interrupt number at idx 0 (Catch-All-Interrupt) for GEM */ + && ((irq = atemsys_of_map_irq_to_virq("xlnx,ps7-ethernet-1.00.a", ioctlParam, 0)) == NO_IRQ) /* ARM, Xilinx Zynq */ + ) + { +#if (defined INCLUDE_ATEMSYS_DT_DRIVER) + /* Get Interrupt from binded device tree node */ + if ((irq = atemsysDtDriver_of_map_irq_to_virq(pDevDesc, ioctlParam)) == NO_IRQ) +#endif + { + nRetVal = -EPERM; + goto Exit; + } + } + +#else + /* Use IRQ number passed as ioctl argument */ + irq = ioctlParam; + INF("intcon: Use IRQ (%d) passed by user\n", irq); +#endif + } + + pIrqDesc = &pDevDesc->irqDesc; + if (pIrqDesc->irq) + { + WRN("intcon: error IRQ %u already connected. Call ioctl(ATEMSYS_IOCTL_INT_DISCONNECT) first\n", + (unsigned) pIrqDesc->irq); + goto Exit; + } + + /* Setup some data which is needed during Interrupt handling */ + memset(pIrqDesc, 0, sizeof(ATEMSYS_T_IRQ_DESC)); + atomic_set(&pIrqDesc->count, 0); + atomic_set(&pIrqDesc->totalCount, 0); + +#if (defined CONFIG_XENO_COBALT) + rtdm_event_init(&pIrqDesc->irq_event, 0); + nRc = rtdm_irq_request(&pIrqDesc->irq_handle, irq, dev_interrupt_handler, 0, ATEMSYS_DEVICE_NAME, pDevDesc); + if (nRc) + { + ERR("ioctl_int_connect: rtdm_irq_request() for IRQ %d returned error: %d\n", irq, nRc); + nRetVal = nRc; + goto Exit; + } + nRc = rtdm_irq_enable(&pIrqDesc->irq_handle); + if (nRc) + { + ERR("ioctl_int_connect: rtdm_irq_enable() for IRQ %d returned error: %d\n", irq, nRc); + nRetVal = nRc; + goto Exit; + } +#else + init_waitqueue_head(&pIrqDesc->q); + atomic_set(&pIrqDesc->irqStatus, 1); /* IRQ enabled */ + + /* Setup non shared IRQ */ + nRc = request_irq(irq, dev_interrupt_handler, 0, ATEMSYS_DEVICE_NAME, pDevDesc); + if (nRc) + { + ERR("ioctl_int_connect: request_irq (IRQ %d) failed. 
Err %d\n", irq, nRc); + nRetVal = -EPERM; + goto Exit; + } +#endif /* CONFIG_XENO_COBALT */ + + pIrqDesc->irq = irq; +#if (defined INCLUDE_IRQ_TO_DESC) + pIrqDesc->irq_is_level = atemsys_irq_is_level(irq); +#endif + +#if (defined INCLUDE_IRQ_TO_DESC) + INF("intcon: IRQ %d connected, irq_is_level = %d\n", irq, pIrqDesc->irq_is_level); +#else + INF("intcon: IRQ %d connected\n", irq); +#endif + + nRetVal = 0; +Exit: + return nRetVal; +} + +static int ioctl_intinfo(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam) +{ + int nRetVal = -EIO; +#if (defined CONFIG_XENO_COBALT) + ATEMSYS_T_INT_INFO* pIntInfo = (ATEMSYS_T_INT_INFO*) ioctlParam; + struct rtdm_fd* fd = rtdm_private_to_fd(pDevDesc); + if (rtdm_fd_is_user(fd)) + { + nRetVal = rtdm_safe_copy_to_user(fd, &pIntInfo->dwInterrupt, &pDevDesc->irqDesc.irq, sizeof(__u32)); + if (nRetVal) + { + ERR("ioctl_intinfo failed: %d\n", nRetVal); + goto Exit; + } + } +#else + ATEMSYS_T_INT_INFO oIntInfo; + memset(&oIntInfo, 0, sizeof(ATEMSYS_T_INT_INFO)); + if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_INT_INFO*)ioctlParam, sizeof(ATEMSYS_T_INT_INFO))) + { + ERR("ioctl_intinfo: EFAULT\n"); + nRetVal = -EFAULT; + goto Exit; + } + oIntInfo.dwInterrupt = pDevDesc->irqDesc.irq; + nRetVal = copy_to_user((ATEMSYS_T_INT_INFO*)ioctlParam, &oIntInfo, sizeof(ATEMSYS_T_INT_INFO)); + if (0 != nRetVal) + { + ERR("ioctl_intinfo failed: %d\n", nRetVal); + goto Exit; + } +#endif /* CONFIG_XENO_COBALT */ + +Exit: + return nRetVal; +} + + +static int dev_int_disconnect(ATEMSYS_T_DEVICE_DESC* pDevDesc) +{ + int nRetVal = -EIO; + int nCnt; + ATEMSYS_T_IRQ_DESC* pIrqDesc = &(pDevDesc->irqDesc); + +#if (defined CONFIG_XENO_COBALT) + int nRc; + if (pIrqDesc->irq) + { + nRc = rtdm_irq_disable(&pIrqDesc->irq_handle); + if (nRc) + { + ERR("dev_int_disconnect: rtdm_irq_disable() for IRQ %d returned error: %d\n", (u32) pIrqDesc->irq, nRc); + nRetVal = nRc; + goto Exit; + } + + nRc = rtdm_irq_free(&pIrqDesc->irq_handle); + if (nRc) + { + ERR("dev_int_disconnect: rtdm_irq_free() for IRQ %d returned error: %d\n", (u32) pIrqDesc->irq, nRc); + nRetVal = nRc; + goto Exit; + } + + nCnt = atomic_read(&pIrqDesc->totalCount); + INF("pci_intdcon: IRQ %u disconnected. %d interrupts rcvd\n", (u32) pIrqDesc->irq, nCnt); + + pIrqDesc->irq = 0; + rtdm_event_signal(&pIrqDesc->irq_event); + } +#else + if (pIrqDesc->irq) + { + /* Disable INT line. We can call this, because we only allow exclusive interrupts */ + disable_irq_nosync(pIrqDesc->irq); + + /* Unregister INT routine.This will block until all pending interrupts are handled */ + free_irq(pIrqDesc->irq, pDevDesc); + + nCnt = atomic_read(&pIrqDesc->totalCount); + INF("pci_intdcon: IRQ %u disconnected. 
%d interrupts rcvd\n", (u32) pIrqDesc->irq, nCnt); + + pIrqDesc->irq = 0; + + /* Wakeup sleeping threads -> read() */ + wake_up(&pIrqDesc->q); + } +#endif /* CONFIG_XENO_COBALT */ + nRetVal = 0; + +#if (defined CONFIG_XENO_COBALT) +Exit: +#endif + return nRetVal; +} + +#if (defined INCLUDE_ATEMSYS_DT_DRIVER) +#ifdef CONFIG_TI_K3_UDMA + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)) + #define CPSWG_STRUCT_VERSION_2 1 +#endif + +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/soc/ti/ti_sci_protocol.h> + +/* from */ +struct k3_ring_state { + u32 free; + u32 occ; + u32 windex; + u32 rindex; +#ifdef CPSWG_STRUCT_VERSION_2 + u32 tdown_complete:1; +#endif +}; + +struct k3_ring { + struct k3_ring_rt_regs __iomem *rt; + struct k3_ring_fifo_regs __iomem *fifos; + struct k3_ringacc_proxy_target_regs __iomem *proxy; + dma_addr_t ring_mem_dma; + void *ring_mem_virt; + struct k3_ring_ops *ops; + u32 size; + enum k3_ring_size elm_size; + enum k3_ring_mode mode; + u32 flags; +#define K3_RING_FLAG_BUSY BIT(1) +#define K3_RING_FLAG_SHARED BIT(2) +#ifdef CPSWG_STRUCT_VERSION_2 + #define K3_RING_FLAG_REVERSE BIT(3) +#endif + struct k3_ring_state state; + u32 ring_id; + struct k3_ringacc *parent; + u32 use_count; + int proxy_id; +#ifdef CPSWG_STRUCT_VERSION_2 + struct device *dma_dev; + u32 asel; +#define K3_ADDRESS_ASEL_SHIFT 48 +#endif +}; + +struct k3_udma_glue_common { + struct device *dev; +#ifdef CPSWG_STRUCT_VERSION_2 + struct device chan_dev; +#endif + struct udma_dev *udmax; + const struct udma_tisci_rm *tisci_rm; + struct k3_ringacc *ringacc; + u32 src_thread; + u32 dst_thread; + + u32 hdesc_size; + bool epib; + u32 psdata_size; + u32 swdata_size; + u32 atype; +#ifdef CPSWG_STRUCT_VERSION_2 + struct psil_endpoint_config *ep_config; +#endif +}; + +struct k3_udma_glue_tx_channel { + struct k3_udma_glue_common common; + + struct udma_tchan *udma_tchanx; + int udma_tchan_id; + + struct k3_ring *ringtx; + struct k3_ring *ringtxcq; + + bool psil_paired; + + int virq; + + atomic_t free_pkts; + bool tx_pause_on_err; + bool tx_filt_einfo; + bool tx_filt_pswords; + bool tx_supr_tdpkt; +#ifdef CPSWG_STRUCT_VERSION_2 + int udma_tflow_id; +#endif +}; + + + +struct k3_udma_glue_rx_flow { + struct udma_rflow *udma_rflow; + int udma_rflow_id; + struct k3_ring *ringrx; + struct k3_ring *ringrxfdq; + + int virq; +}; + +struct k3_udma_glue_rx_channel { + struct k3_udma_glue_common common; + + struct udma_rchan *udma_rchanx; + int udma_rchan_id; + bool remote; + + bool psil_paired; + + u32 swdata_size; + int flow_id_base; + + struct k3_udma_glue_rx_flow *flows; + u32 flow_num; + u32 flows_ready; +}; + + +#define AM65_CPSW_NAV_SW_DATA_SIZE 16 +#define AM65_CPSW_MAX_RX_FLOWS 1 + +#include "../drivers/dma/ti/k3-udma.h" + +#include <linux/dma/k3-udma-glue.h> +void cleanup(void *data, dma_addr_t desc_dma) +{ + return; +} + +static int CpswgCmd(void* arg, ATEMSYS_T_CPSWG_CMD* pConfig) +{ + struct k3_udma_glue_tx_channel** ppTxChn = NULL; + struct k3_udma_glue_rx_channel** ppRxChn = NULL; + __u32* pnTxIrq; + __u32* pnRxIrq; + ATEMSYS_T_CPSWG_CMD oConfig; + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */ + int nRetVal = -1; + memset(&oConfig, 0, sizeof(ATEMSYS_T_CPSWG_CMD)); + + if (NULL == pConfig) + { + nRetVal = copy_from_user(&oConfig, (ATEMSYS_T_CPSWG_CMD *)arg, sizeof(ATEMSYS_T_CPSWG_CMD)); + } + else + { + memcpy(&oConfig, pConfig, sizeof(ATEMSYS_T_CPSWG_CMD)); + nRetVal = 0; + } + if (0 != nRetVal) + { + 
ERR("CpswgCmd(): failed: %d\n", nRetVal); + goto Exit; + } + if (oConfig.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES) + { + dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */ + nRetVal = 0; + goto Exit; + } + pDrvDescPrivate = S_apDrvDescPrivate[oConfig.dwIndex]; + if (NULL == pDrvDescPrivate) + { + ERR("CpswgCmd(): cant find instance\n"); + nRetVal = -EBUSY; + goto Exit; + } + + DBG("CpswgCmd(): dwCmd: %d\n", oConfig.dwCmd); + ppTxChn = (struct k3_udma_glue_tx_channel**)&pDrvDescPrivate->apvTxChan[oConfig.dwChannelIdx]; + ppRxChn = (struct k3_udma_glue_rx_channel**)&pDrvDescPrivate->apvRxChan[oConfig.dwChannelIdx]; + pnTxIrq = &pDrvDescPrivate->anTxIrq[oConfig.dwChannelIdx]; + pnRxIrq = &pDrvDescPrivate->anRxIrq[oConfig.dwChannelIdx]; + + + switch (oConfig.dwCmd) + { + case ATEMSYS_CPSWG_CMD_CONFIG_TX: + { + char tx_chn_name[128]; + struct k3_ring_cfg ring_cfg = + { + .elm_size = K3_RINGACC_RING_ELSIZE_8, + .mode = K3_RINGACC_RING_MODE_RING, + .flags = 0 + }; + struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 }; + + tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE; + tx_cfg.tx_cfg = ring_cfg; + tx_cfg.txcq_cfg = ring_cfg; + tx_cfg.tx_cfg.size = oConfig.dwRingSize; + tx_cfg.txcq_cfg.size = oConfig.dwRingSize; + snprintf(tx_chn_name, sizeof(tx_chn_name), "tx%d", 0); + + *ppTxChn = k3_udma_glue_request_tx_chn(&pDrvDescPrivate->pPDev->dev, + tx_chn_name, + &tx_cfg); + if (IS_ERR(*ppTxChn)) + { + ERR("CpswgCmd(): Failed to request tx dma channel %ld\n", PTR_ERR(*ppTxChn)); + *ppTxChn = NULL; + goto Exit; + } + + *pnTxIrq = k3_udma_glue_tx_get_irq(*ppTxChn); + if (*pnTxIrq <= 0) + { + ERR("CpswgCmd(): Failed to get tx dma irq %d\n", *pnTxIrq); + goto Exit; + } + + { + struct k3_udma_glue_tx_channel* pData = (struct k3_udma_glue_tx_channel*)*ppTxChn; + DBG("CpswgCmd(): k3_udma_glue_request_tx_chn(): udma_tchan_id:0x%x, ringtx:0x%x::0x%px, ringtxcq:0x%x::0x%px\n", + pData->udma_tchan_id, + pData->ringtx->ring_id, (unsigned char*)NULL + pData->ringtx->ring_mem_dma, + pData->ringtxcq->ring_id, (unsigned char*)NULL + pData->ringtxcq->ring_mem_dma); + + oConfig.dwChanId = pData->udma_tchan_id; + oConfig.dwRingId = pData->ringtx->ring_id; + oConfig.qwRingDma = pData->ringtx->ring_mem_dma; + oConfig.dwRingSize = pData->ringtx->size; + oConfig.dwRingFdqId = pData->ringtxcq->ring_id; + oConfig.qwRingFdqDma = pData->ringtxcq->ring_mem_dma; + oConfig.dwRingFdqSize = pData->ringtxcq->size; + + nRetVal = copy_to_user((ATEMSYS_T_CPSWG_CMD *)arg, &oConfig, sizeof(ATEMSYS_T_CPSWG_CMD)); + if (0 != nRetVal) + { + ERR("CpswgCmd(): copy_to_user() failed: %d\n", nRetVal); + } + } + } break; + case ATEMSYS_CPSWG_CMD_CONFIG_RX: + { + u32 rx_flow_id_base = -1; + u32 fdqring_id; + + struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 }; + + rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE; + rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS; + rx_cfg.flow_id_base = rx_flow_id_base; + + *ppRxChn = k3_udma_glue_request_rx_chn(&pDrvDescPrivate->pPDev->dev, "rx", &rx_cfg); + if (IS_ERR(*ppRxChn)) { + ERR("CpswgCmd(): Failed to request rx dma channel %ld\n", PTR_ERR(*ppRxChn)); + *ppRxChn = NULL; + goto Exit; + } + + rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(*ppRxChn); + fdqring_id = K3_RINGACC_RING_ID_ANY; + /*for*/ + { + u32 i = 0; + struct k3_ring_cfg rxring_cfg = { + .elm_size = K3_RINGACC_RING_ELSIZE_8, + .mode = K3_RINGACC_RING_MODE_RING, + .flags = 0, + }; + struct k3_ring_cfg fdqring_cfg = { + .elm_size = K3_RINGACC_RING_ELSIZE_8, + .mode = K3_RINGACC_RING_MODE_MESSAGE, + .flags = K3_RINGACC_RING_SHARED, + }; + struct 
k3_udma_glue_rx_flow_cfg rx_flow_cfg = { + .rx_cfg = rxring_cfg, + .rxfdq_cfg = fdqring_cfg, + .ring_rxq_id = K3_RINGACC_RING_ID_ANY, + .src_tag_lo_sel = K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG, + }; + + rx_flow_cfg.ring_rxfdq0_id = fdqring_id; + rx_flow_cfg.rx_cfg.size = oConfig.dwRingSize; + rx_flow_cfg.rxfdq_cfg.size = oConfig.dwRingSize; + + nRetVal = k3_udma_glue_rx_flow_init(*ppRxChn, i, &rx_flow_cfg); + if (nRetVal) { + ERR("CpswgCmd(): Failed to init rx flow%d %d\n", i, nRetVal); + goto Exit; + } + if (!i) + fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(*ppRxChn, i); + + *pnRxIrq = k3_udma_glue_rx_get_irq(*ppRxChn, i); + + if (*pnRxIrq <= 0) { + ERR("CpswgCmd(): Failed to get rx dma irq %d\n", *pnRxIrq); + goto Exit; + } + } + { + struct k3_udma_glue_rx_flow* pData = (struct k3_udma_glue_rx_flow*)(*ppRxChn)->flows; + + DBG("CpswgCmd(): k3_udma_glue_request_rx_chn(): udma_rflow_id:0x%x, rx_flow_id_base:0x%x, ringrx:0x%x::0x%px, ringrxfdq:0x%x::0x%px\n", + pData->udma_rflow_id, rx_flow_id_base, + pData->ringrx->ring_id, (unsigned char*)NULL + pData->ringrx->ring_mem_dma, + pData->ringrxfdq->ring_id, (unsigned char*)NULL + pData->ringrxfdq->ring_mem_dma); + + oConfig.dwChanId = pData->udma_rflow_id; + oConfig.dwRingId = pData->ringrx->ring_id; + oConfig.qwRingDma = pData->ringrx->ring_mem_dma; + oConfig.dwRingSize = pData->ringrx->size; + oConfig.dwRingFdqId = pData->ringrxfdq->ring_id; + oConfig.qwRingFdqDma = pData->ringrxfdq->ring_mem_dma; + oConfig.dwRingFdqSize = pData->ringrxfdq->size; + oConfig.dwFlowIdBase = rx_flow_id_base; + + nRetVal = copy_to_user((ATEMSYS_T_CPSWG_CMD *)arg, &oConfig, sizeof(ATEMSYS_T_CPSWG_CMD)); + if (0 != nRetVal) + { + ERR("CpswgCmd(): copy_to_user() failed: %d\n", nRetVal); + } + } + } break; + case ATEMSYS_CPSWG_CMD_ENABLE_TX: + { + if (NULL == *ppTxChn) + { + nRetVal = -1; + ERR("CpswgCmd(): tx channel not ready %d\n", nRetVal); + goto Exit; + } + nRetVal = k3_udma_glue_enable_tx_chn(*ppTxChn); + if (nRetVal) + { + ERR("CpswgCmd(): k3_udma_glue_enable_tx_chn() failed %d\n", nRetVal); + goto Exit; + } + + } break; + case ATEMSYS_CPSWG_CMD_ENABLE_RX: + { + if (NULL == *ppRxChn) + { + nRetVal = -1; + ERR("CpswgCmd(): rx channel not ready %d\n", nRetVal); + goto Exit; + } + nRetVal = k3_udma_glue_enable_rx_chn(*ppRxChn); + if (nRetVal) { + ERR("CpswgCmd(): k3_udma_glue_enable_rx_chn() failed %d\n", nRetVal); + goto Exit; + } + + } break; + case ATEMSYS_CPSWG_CMD_DISABLE_TX: + { + if (NULL == *ppTxChn) + { + nRetVal = -1; + ERR("CpswgCmd(): tx channel not ready %d\n", nRetVal); + goto Exit; + } + //for (i = 0; i < tx_ch_num; i++) + k3_udma_glue_tdown_tx_chn(*ppTxChn, false); + + //for (i = 0; i < tx_ch_num; i++) + { + k3_udma_glue_reset_tx_chn(*ppTxChn, NULL, cleanup); + k3_udma_glue_disable_tx_chn(*ppTxChn); + } + } break; + case ATEMSYS_CPSWG_CMD_DISABLE_RX: + { + int i = 0; + if (NULL == *ppRxChn) + { + nRetVal = -1; + ERR("CpswgCmd(): rx channel not ready %d\n", nRetVal); + goto Exit; + } + k3_udma_glue_tdown_rx_chn(*ppRxChn, true); + for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++) + k3_udma_glue_reset_rx_chn(*ppRxChn, i, NULL, cleanup, !!i); + + k3_udma_glue_disable_rx_chn(*ppRxChn); + } break; + case ATEMSYS_CPSWG_CMD_RELEASE_TX: + { + if (NULL == *ppTxChn) + { + nRetVal = -1; + ERR("CpswgCmd(): tx channel not ready %d\n", nRetVal); + goto Exit; + } + k3_udma_glue_release_tx_chn(*ppTxChn); + *ppTxChn = NULL; + } break; + case ATEMSYS_CPSWG_CMD_RELEASE_RX: + { + if (NULL == *ppRxChn) + { + nRetVal = -1; + ERR("CpswgCmd(): rx channel not ready %d\n", 
nRetVal); + goto Exit; + } + k3_udma_glue_release_rx_chn(*ppRxChn); + *ppRxChn = NULL; + } break; + } + + + +Exit: + return nRetVal; +} + + + +static void CleanCpswgCmd(ATEMSYS_T_DEVICE_DESC* pDevDesc) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + ATEMSYS_T_CPSWG_CMD oConfig; + unsigned int dwChannelIdx = 0; + unsigned int dwIndex = 0; + if (pDevDesc == NULL) + { + return; + } + for (dwIndex = 0; dwIndex < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; dwIndex++) + { + if ((NULL != S_apDrvDescPrivate[dwIndex]) && pDevDesc == S_apDrvDescPrivate[dwIndex]->pDevDesc) + { + pDrvDescPrivate = S_apDrvDescPrivate[dwIndex]; + break; + } + } + if (pDrvDescPrivate == NULL) + { + return; + } + for (dwChannelIdx = 0; ATEMSYS_UDMA_CHANNELS > dwChannelIdx; dwChannelIdx++) + { + void** ppvTxChn = &pDrvDescPrivate->apvTxChan[dwChannelIdx]; + void** ppvRxChn = &pDrvDescPrivate->apvRxChan[dwChannelIdx]; + + if ((NULL != ppvTxChn) && (NULL != *ppvTxChn)) + { + memset(&oConfig, 0, sizeof(ATEMSYS_T_CPSWG_CMD)); + oConfig.dwIndex = dwIndex; + oConfig.dwChannelIdx = dwChannelIdx; + oConfig.dwCmd = ATEMSYS_CPSWG_CMD_DISABLE_TX; + CpswgCmd(NULL, &oConfig); + oConfig.dwCmd = ATEMSYS_CPSWG_CMD_RELEASE_TX; + CpswgCmd(NULL, &oConfig); + } + if ((NULL != ppvRxChn) && (NULL != *ppvRxChn)) + { + memset(&oConfig, 0, sizeof(ATEMSYS_T_CPSWG_CMD)); + oConfig.dwIndex = dwIndex; + oConfig.dwChannelIdx = dwChannelIdx; + oConfig.dwCmd = ATEMSYS_CPSWG_CMD_DISABLE_RX; + CpswgCmd(NULL, &oConfig); + oConfig.dwCmd = ATEMSYS_CPSWG_CMD_RELEASE_RX; + CpswgCmd(NULL, &oConfig); + } + } +} +#endif /*#ifdef CONFIG_TI_K3_UDMA*/ + + +static int IoMemCmd(void* arg) +{ + ATEMSYS_T_IOMEM_CMD oIoMem; + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + unsigned int dwRetVal = 0; + int nRetVal = -1; + unsigned int dwIndex = 0; + nRetVal = copy_from_user(&oIoMem, (unsigned long long *)arg, sizeof(ATEMSYS_T_IOMEM_CMD)); + if (0 != nRetVal) + { + goto Exit; + } + if (oIoMem.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES) + { + dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */ + nRetVal = 0; + goto Exit; + } + pDrvDescPrivate = S_apDrvDescPrivate[oIoMem.dwIndex]; + if (NULL == pDrvDescPrivate) + { + ERR("IoMemCmd(): can't find instance\n"); + nRetVal = -EBUSY; + goto Exit; + } + + + if (ATEMSYS_IOMEM_CMD_MAP_PERMANENT == oIoMem.dwCmd) + { + for (dwIndex = 0; IOMEMLIST_LENGTH>dwIndex; dwIndex++) + { + if (NULL == pDrvDescPrivate->oIoMemList[dwIndex].pbyBase) + { + break; + } + } + if (IOMEMLIST_LENGTH <= dwIndex) + { + nRetVal = -EFAULT; + goto Exit; + } + pDrvDescPrivate->oIoMemList[dwIndex].pbyBase = devm_ioremap(&pDrvDescPrivate->pPDev->dev, oIoMem.qwPhys, oIoMem.dwSize); + if (NULL == pDrvDescPrivate->oIoMemList[dwIndex].pbyBase ) + { + pDrvDescPrivate->oIoMemList[dwIndex].pbyBase = NULL; + nRetVal = -ENOMEM; + goto Exit; + } + pDrvDescPrivate->oIoMemList[dwIndex].qwPhys = oIoMem.qwPhys; + pDrvDescPrivate->oIoMemList[dwIndex].dwSize = oIoMem.dwSize; + DBG("IoMemCmd(): ATEMSYS_IOMEM_CMD_MAP_PERMANENT Virt:0x%px, Phys:0x%px, Size:0x%08x\n", pDrvDescPrivate->oIoMemList[dwIndex].pbyBase, (unsigned char*)NULL + oIoMem.qwPhys, oIoMem.dwSize); + } + else + { + for (dwIndex = 0; IOMEMLIST_LENGTH>dwIndex; dwIndex++) + { + if (pDrvDescPrivate->oIoMemList[dwIndex].qwPhys == oIoMem.qwPhys) + { + break; + } + } + if (IOMEMLIST_LENGTH == dwIndex) + { + nRetVal = -EFAULT; + goto Exit; + } + + if (ATEMSYS_IOMEM_CMD_UNMAP_PERMANENT == oIoMem.dwCmd) + { + devm_iounmap(&pDrvDescPrivate->pPDev->dev, pDrvDescPrivate->oIoMemList[dwIndex].pbyBase); + 
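/* clear the bookkeeping slot so a later ATEMSYS_IOMEM_CMD_MAP_PERMANENT can reuse it */ + 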
pDrvDescPrivate->oIoMemList[dwIndex].pbyBase = NULL; + pDrvDescPrivate->oIoMemList[dwIndex].qwPhys = 0; + pDrvDescPrivate->oIoMemList[dwIndex].dwSize = 0; + } + else + { + if (ATEMSYS_IOMEM_CMD_WRITE == oIoMem.dwCmd) + { + if (sizeof(unsigned int)/* 4 */ == oIoMem.dwDataSize) + *(unsigned int*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset) = oIoMem.dwData[0]; + else if (sizeof(unsigned long long)/* 8 */ == oIoMem.dwDataSize) + { + *(unsigned long long*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset) = *(unsigned long long*)&oIoMem.dwData[0]; + } + else + { + int i = 0; + for (i = 0; i < oIoMem.dwDataSize; i++) + { + ((unsigned char*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset))[i] = ((unsigned char*)oIoMem.dwData)[i]; + } + } + } + else if (ATEMSYS_IOMEM_CMD_READ == oIoMem.dwCmd) + { + if (sizeof(unsigned int)/* 4 */ == oIoMem.dwDataSize) + oIoMem.dwData[0] = *(unsigned int*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset); + else + { + int i = 0; + for (i = 0; i < oIoMem.dwDataSize; i++) + { + ((unsigned char*)oIoMem.dwData)[i] = ((unsigned char*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset))[i]; + } + } + nRetVal = copy_to_user((unsigned long long *)arg, &oIoMem, sizeof(ATEMSYS_T_IOMEM_CMD)); + if (0 != nRetVal) + { + goto Exit; + } + } + } + } + nRetVal = 0; +Exit: + return nRetVal; +} + +static void CleanIoMemCmd(ATEMSYS_T_DEVICE_DESC* pDevDesc) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + unsigned int dwIndex = 0; + if (pDevDesc == NULL) + { + return; + } + for (dwIndex = 0; dwIndex < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; dwIndex++) + { + pDrvDescPrivate = S_apDrvDescPrivate[dwIndex]; + if (NULL == pDrvDescPrivate) + continue; + if (pDrvDescPrivate->pDevDesc == pDevDesc) + break; + pDrvDescPrivate = NULL; + } + if (NULL == pDrvDescPrivate) + { + return; + } + for (dwIndex = 0; IOMEMLIST_LENGTH>dwIndex; dwIndex++) + { + if (NULL != pDrvDescPrivate->oIoMemList[dwIndex].pbyBase ) + { + devm_iounmap(&pDrvDescPrivate->pPDev->dev, pDrvDescPrivate->oIoMemList[dwIndex].pbyBase); + pDrvDescPrivate->oIoMemList[dwIndex].pbyBase = NULL; + pDrvDescPrivate->oIoMemList[dwIndex].qwPhys = 0; + pDrvDescPrivate->oIoMemList[dwIndex].dwSize = 0; + } + } +} +#endif /*#ifdef INCLUDE_ATEMSYS_DT_DRIVER)*/ + + +#if ((defined CONFIG_SMP) && (LINUX_VERSION_CODE > KERNEL_VERSION(5,14,0))) +static int SetIntCpuAffinityIoctl(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam, size_t size) +{ + int nRetVal = -EIO; + ATEMSYS_T_IRQ_DESC* pIrqDesc = &(pDevDesc->irqDesc); + struct cpumask* pCpuMask = 0; + + if (size > sizeof(struct cpumask)) + { + ERR("SetIntCpuAffinityIoctl: cpu mask length mismatch\n"); + nRetVal = -EINVAL; + goto Exit; + } + + /* prepare cpu affinity mask*/ + pCpuMask = (struct cpumask*)kzalloc(sizeof(struct cpumask), GFP_KERNEL); + if (NULL == pCpuMask) + { + ERR("SetIntCpuAffinityIoctl: no memory\n"); + nRetVal = -ENOMEM; + goto Exit; + } + memset(pCpuMask, 0, sizeof(struct cpumask)>size? 
sizeof(struct cpumask): size); + + nRetVal = copy_from_user(pCpuMask, (struct cpumask *)ioctlParam, size); + if (0 != nRetVal) + { + ERR("SetIntCpuAffinityIoctl failed: %d\n", nRetVal); + goto Exit; + } + + /* set cpu affinity mask */ + if (pIrqDesc->irq) + { + nRetVal = irq_set_affinity(pIrqDesc->irq, pCpuMask); + if (0 != nRetVal) + { + ERR("SetIntCpuAffinityIoctl: irq_set_affinity failed: %d\n", nRetVal); + nRetVal = -EIO; + goto Exit; + } + } + + nRetVal = 0; +Exit: + if (NULL != pCpuMask) + kfree(pCpuMask); + + return nRetVal; +} +#endif /* #if ((defined CONFIG_SMP) && (LINUX_VERSION_CODE > KERNEL_VERSION(5,14,0))) */ + +#if (defined CONFIG_PCI) +static void dev_pci_release(ATEMSYS_T_DEVICE_DESC* pDevDesc) +{ +#if (defined INCLUDE_ATEMSYS_PCI_DRIVER) + if (NULL != pDevDesc->pPciDrvDesc) + { + INF("pci_release: Disconnect from PCI device driver %s \n", pci_name(pDevDesc->pPcidev)); + pDevDesc->pPciDrvDesc->pDevDesc = NULL; +#if !(defined CONFIG_XENO_COBALT) + pDevDesc->pPcidev = NULL; +#endif + pDevDesc->pPciDrvDesc = NULL; + } + else +#endif + + if (pDevDesc->pPcidev) + { + pci_disable_device(pDevDesc->pPcidev); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + /* Make sure bus master DMA is disabled if the DMA buffers are finally released */ + pci_clear_master(pDevDesc->pPcidev); +#endif + pci_release_regions(pDevDesc->pPcidev); + + pci_disable_msi(pDevDesc->pPcidev); + + INF("pci_release: PCI device %s released\n", pci_name(pDevDesc->pPcidev)); + +#if !(defined CONFIG_XENO_COBALT) + pDevDesc->pPcidev = NULL; +#endif + } +} +#endif /* CONFIG_PCI */ + +#if (defined CONFIG_XENO_COBALT) +static int dev_interrupt_handler(rtdm_irq_t* irq_handle) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = rtdm_irq_get_arg(irq_handle, ATEMSYS_T_DEVICE_DESC); + ATEMSYS_T_IRQ_DESC* pIrqDesc = NULL; + + if (pDevDesc != NULL) + { + pIrqDesc = &(pDevDesc->irqDesc); + if (pIrqDesc != NULL) + { + atomic_inc(&pIrqDesc->count); + atomic_inc(&pIrqDesc->totalCount); + rtdm_event_signal(&pIrqDesc->irq_event); + } + } + return RTDM_IRQ_HANDLED; +} +#else +static irqreturn_t dev_interrupt_handler(int nIrq, void* pParam) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) pParam; + ATEMSYS_T_IRQ_DESC* pIrqDesc = &(pDevDesc->irqDesc); + + /* Disable IRQ on (A)PIC to prevent interrupt thrashing if the ISR is left. + * In usermode the IRQ must be acknowledged on the device (IO register). + * The IRQ is enabled again in the read() handler! + * Just disabling the IRQ here doesn't work with shared IRQs 
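+ * (The two counters incremented below feed device_read(): "count" holds interrupts not yet reported to user space, while "totalCount" is only a statistic printed at disconnect time.)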
+ */ + dev_disable_irq(pIrqDesc); + + atomic_inc(&pIrqDesc->count); + atomic_inc(&pIrqDesc->totalCount); + + /* Wakeup sleeping threads -> read() */ + wake_up(&pIrqDesc->q); + + return IRQ_HANDLED; +} +#endif /* CONFIG_XENO_COBALT */ + +/* + * This is called whenever a process attempts to open the device file + */ +#if (defined CONFIG_XENO_COBALT) +static int device_open(struct rtdm_fd* fd, int oflags) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd); + memset(pDevDesc, 0, sizeof(ATEMSYS_T_DEVICE_DESC)); + rtdm_event_init(&pDevDesc->irqDesc.irq_event, 0); + INF("device_open %s\n", rtdm_fd_device(fd)->label); +#else +static int device_open(struct inode* inode, struct file* file) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc; + + INF("device_open(0x%px)\n", file); + + /* create device descriptor */ + pDevDesc = (ATEMSYS_T_DEVICE_DESC*) kzalloc(sizeof(ATEMSYS_T_DEVICE_DESC), GFP_KERNEL); + if (pDevDesc == NULL) + { + return -ENOMEM; + } + + file->private_data = (void*) pDevDesc; + + /* Add descriptor to descriptor list */ + mutex_lock(&S_mtx); + list_add(&pDevDesc->list, &S_DevNode.list); + mutex_unlock(&S_mtx); + try_module_get(THIS_MODULE); +#endif /* CONFIG_XENO_COBALT */ + + /* use module's platform device for memory mapping and allocation */ + pDevDesc->pPlatformDev = S_pPlatformDev; + + return DRIVER_SUCCESS; +} + +#if (defined CONFIG_XENO_COBALT) +static void device_release(struct rtdm_fd* fd) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd); + ATEMSYS_T_IRQ_DESC* pIrqDesc = NULL; +#else +static int device_release(struct inode* inode, struct file* file) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = file->private_data; +#endif /* CONFIG_XENO_COBALT */ + + /* release device descriptor */ + if (pDevDesc != NULL ) + { + INF("device_release, pDevDesc = 0x%px\n", pDevDesc); + + /* Try to tear down interrupts if they are on */ + dev_int_disconnect(pDevDesc); + +#if (defined INCLUDE_ATEMSYS_DT_DRIVER) + CleanIoMemCmd(pDevDesc); + + #ifdef CONFIG_TI_K3_UDMA + CleanCpswgCmd(pDevDesc); + #endif + + CleanUpEthernetDriverOnRelease(pDevDesc); +#endif + +#if (defined CONFIG_PCI) + /* Try to release PCI resources */ + dev_pci_release(pDevDesc); +#endif + +#if (defined CONFIG_XENO_COBALT) + pIrqDesc = &(pDevDesc->irqDesc); + + if (pIrqDesc != NULL ) + { + rtdm_event_clear(&pIrqDesc->irq_event); + rtdm_event_destroy(&pIrqDesc->irq_event); + } + } + return; +#else + /* Remove descriptor from descriptor list */ + mutex_lock(&S_mtx); + + list_del(&pDevDesc->list); + + mutex_unlock(&S_mtx); + + kfree(pDevDesc); + } + + module_put(THIS_MODULE); + + return DRIVER_SUCCESS; +#endif /* CONFIG_XENO_COBALT */ +} + +/* + * This function is called whenever a process which has already opened the + * device file attempts to read from it. + */ + #if (defined CONFIG_XENO_COBALT) +static ssize_t device_read(struct rtdm_fd* fd, void* bufp, size_t len) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd); + ATEMSYS_T_IRQ_DESC* pIrqDesc = NULL; + s32 nPending; + int ret=0; + + if (! pDevDesc) + { + return -EINVAL; + } + + pIrqDesc = &(pDevDesc->irqDesc); + if (! 
pIrqDesc) + { + return -EINVAL; + } + + if (len < sizeof(u32)) + { + return -EINVAL; + } + + if (rtdm_in_rt_context() == false) + { + return -EINVAL; + } + + if (rtdm_fd_is_user(fd) == false) + { + return -EINVAL; + } + + ret = rtdm_event_wait(&pIrqDesc->irq_event); + if (ret) + { + return ret; + } + + nPending = atomic_read(&pIrqDesc->count); + + ret = rtdm_safe_copy_to_user(fd, bufp, &nPending, sizeof(nPending)); + + if (ret) + { + ERR("device_read: rtdm_safe_copy_to_user() returned error: %d\n", ret); + return ret; + } + + atomic_sub(nPending, &pIrqDesc->count); + + return sizeof(nPending); +} +#else +static ssize_t device_read( + struct file* filp, /* see include/linux/fs.h */ + char __user* bufp, /* buffer to be filled with data */ + size_t len, /* length of the buffer */ + loff_t* ppos) +{ + + ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) filp->private_data; + ATEMSYS_T_IRQ_DESC* pIrqDesc = NULL; + s32 nPending; + wait_queue_entry_t wait; + + if (! pDevDesc) + { + return -EINVAL; + } + + pIrqDesc = &(pDevDesc->irqDesc); + + /* DBG("device_read...(0x%px,0x%px,%d)\n", filp, bufp, len); */ + + init_wait(&wait); + + if (len < sizeof(u32)) + { + return -EINVAL; + } + + if (pIrqDesc->irq == 0) /* IRQ already disabled */ + { + return -EINVAL; + } + + nPending = atomic_read(&pIrqDesc->count); + if (nPending == 0) + { + if (dev_irq_disabled(pIrqDesc)) + { + dev_enable_irq(pIrqDesc); + } + if (filp->f_flags & O_NONBLOCK) + { + return -EWOULDBLOCK; + } + } + + while (nPending == 0) + { + prepare_to_wait(&pIrqDesc->q, &wait, TASK_INTERRUPTIBLE); + nPending = atomic_read(&pIrqDesc->count); + if (nPending == 0) + { + schedule(); + } + finish_wait(&pIrqDesc->q, &wait); + if (pIrqDesc->irq == 0) /* IRQ disabled while waiting for IRQ */ + { + return -EINVAL; + } + if (signal_pending(current)) + { + return -ERESTARTSYS; + } + } + + if (copy_to_user(bufp, &nPending, sizeof(nPending))) + { + return -EFAULT; + } + + *ppos += sizeof(nPending); + atomic_sub(nPending, &pIrqDesc->count); + + return sizeof(nPending); +} +#endif /* CONFIG_XENO_COBALT */ + +/* + * character device mmap method + */ +#if (defined CONFIG_XENO_COBALT) +static int device_mmap(struct rtdm_fd* fd, struct vm_area_struct* vma) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd); +#else +static int device_mmap(struct file* filp, struct vm_area_struct* vma) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = filp->private_data; +#endif /* CONFIG_XENO_COBALT */ + + int nRet = -EIO; + u32 dwLen; + void* pVa = NULL; + dma_addr_t dmaAddr; + ATEMSYS_T_MMAP_DESC* pMmapNode; +#if (defined CONFIG_PCI) + int i; + unsigned long ioBase; + u32 dwIOLen, dwPageOffset; +#endif + + DBG("mmap: vm_pgoff 0x%px vm_start = 0x%px vm_end = 0x%px\n", + (void*) vma->vm_pgoff, (void*) vma->vm_start, (void*) vma->vm_end); + + if (pDevDesc == NULL) + { + ERR("mmap: Invalid device descriptor\n"); + goto Exit; + } + + dwLen = PAGE_UP(vma->vm_end - vma->vm_start); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) + vm_flags_set(vma, VM_RESERVED | VM_LOCKED | VM_DONTCOPY); +#else + vma->vm_flags |= VM_RESERVED | VM_LOCKED | VM_DONTCOPY; +#endif + + if (vma->vm_pgoff != 0) + { + /* map device IO memory */ +#if (defined CONFIG_PCI) + if (pDevDesc->pPcidev != NULL) + { + INF("mmap: Doing PCI device sanity check\n"); + + /* sanity check. Make sure that the offset parameter of the mmap() call in userspace + * corresponds with the PCI base IO address. + * Make sure the user doesn't map more IO memory than the device provides. 
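+ * Example with illustrative values: for a BAR at 0xF7D00000 of length 0x20000, ioBase = 0xF7D00000 and dwIOLen = 0x20000, so a request with vm_pgoff == 0xF7D00 is accepted as long as dwLen <= 0x20000.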
+ */ + for (i = 0; i < ATEMSYS_PCI_MAXBAR; i++) + { + if (pci_resource_flags(pDevDesc->pPcidev, i) & IORESOURCE_MEM) + { + /* IO area address */ + ioBase = PAGE_DOWN( pci_resource_start(pDevDesc->pPcidev, i) ); + + dwPageOffset = pci_resource_start(pDevDesc->pPcidev, i) - ioBase; + + /* IO area length */ + dwIOLen = PAGE_UP( pci_resource_len(pDevDesc->pPcidev, i) + dwPageOffset ); + + if ( ((vma->vm_pgoff << PAGE_SHIFT) >= ioBase) + && (((vma->vm_pgoff << PAGE_SHIFT) + dwLen) <= (ioBase + dwIOLen)) + ) + { + /* on systems where the physical address lies in 64-bit space, the high dword is not passed in from user space; + * use the correct address from pci_resource_start */ + resource_size_t res_start = pci_resource_start(pDevDesc->pPcidev, i); + unsigned long pgoff_new = (res_start>>PAGE_SHIFT); + if (pgoff_new != vma->vm_pgoff) + { + INF("mmap: Correcting page offset from 0x%lx to 0x%lx, for Phys address 0x%llx", + vma->vm_pgoff, pgoff_new, (u64)res_start); + vma->vm_pgoff = pgoff_new; + } + + break; + } + } + } + + /* IO bar not found? */ + if (i == ATEMSYS_PCI_MAXBAR) + { + ERR("mmap: Invalid arguments\n"); + nRet = -EINVAL; + goto Exit; + } + } +#endif /* CONFIG_PCI */ + + /* avoid swapping, request IO memory */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) + vm_flags_set(vma, VM_IO); +#else + vma->vm_flags |= VM_IO; +#endif + + /* + * avoid caching (this is at least needed for POWERPC, + * or machine will lock on first IO access) + */ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if ((nRet = remap_pfn_range(vma, + vma->vm_start, + vma->vm_pgoff, + dwLen, + vma->vm_page_prot)) < 0) + { + ERR("mmap: remap_pfn_range failed\n"); + goto Exit; + } + + INF("mmap: mapped IO memory, Phys:0x%llx UVirt:0x%px Size:%u\n", + (u64) (((u64)vma->vm_pgoff) << PAGE_SHIFT), (void*) vma->vm_start, dwLen); + +#if (defined DEBUG_IOREMAP) + { + volatile unsigned char* ioaddr; + unsigned long ioBase = vma->vm_pgoff << PAGE_SHIFT; + INF("try to remap %p\n", (void*)ioBase); + /* DEBUG Map device's IO memory into kernel space pagetables */ + ioaddr = (volatile unsigned char*) ioremap_nocache(ioBase, dwLen); + if (ioaddr == NULL) + { + ERR("ioremap_nocache failed\n"); + goto Exit; + } + INF("io_base %p, *io_base[0]: %08x\n", ioaddr, readl(ioaddr)); + } +#endif /* DEBUG_IOREMAP */ + } + else + { + /* allocate and map DMA memory */ +#if (defined CONFIG_PCI) + if (pDevDesc->pPcidev != NULL) + { +#if ( (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) \ + || (defined __aarch64__) \ + || ((defined __arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))) \ + || ((defined __i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))) \ + || ((defined __amd64__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))) ) + pVa = dma_alloc_coherent(&pDevDesc->pPcidev->dev, dwLen, &dmaAddr, GFP_KERNEL); + if (NULL == pVa) + { + ERR("mmap: dma_alloc_coherent failed\n"); + nRet = -ENOMEM; + goto Exit; + } +#else + pVa = pci_alloc_consistent(pDevDesc->pPcidev, dwLen, &dmaAddr); + if (NULL == pVa) + { + ERR("mmap: pci_alloc_consistent failed\n"); + nRet = -ENOMEM; + goto Exit; + } +#endif + } + else +#endif /* CONFIG_PCI */ + { +#if (defined __arm__) || (defined __aarch64__) + #if (defined CONFIG_OF) + OF_DMA_CONFIGURE(&pDevDesc->pPlatformDev->dev,pDevDesc->pPlatformDev->dev.of_node); + #endif + /* dma_alloc_coherent() is currently not tested on PPC. 
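+ * (On ARM/arm64 the managed variant dmam_alloc_coherent() used below ties the buffer lifetime to the platform device.)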
+ * TODO test this and remove legacy dev_dma_alloc() + */ + pVa = dmam_alloc_coherent(&pDevDesc->pPlatformDev->dev, dwLen, &dmaAddr, GFP_KERNEL); + if (NULL == pVa) + { + ERR("mmap: dmam_alloc_coherent failed\n"); + nRet = -ENOMEM; + goto Exit; + } +#else + pVa = dev_dma_alloc(dwLen, &dmaAddr); + if (NULL == pVa) + { + ERR("mmap: dev_dma_alloc failed\n"); + nRet = -ENOMEM; + goto Exit; + } +#endif + } + + if ((dmaAddr > 0xFFFFFFFF) && !pDevDesc->bSupport64BitDma) + { + ERR("mmap: Can't handle 64-Bit DMA address\n"); + INF("mmap: Update LinkLayer for 64-Bit DMA support!\n"); + nRet = -ENOMEM; + goto ExitAndFree; + } + + /* zero memory for security reasons */ + memset(pVa, 0, dwLen); + + /* Always use noncached DMA memory for ARM. Otherwise cache invalidation/sync + * would be necessary from usermode. + * Can't do that without a kernel call because these ops are privileged. + */ + + /* map the whole physically contiguous area in one piece */ +#if (!(defined ATEMSYS_LEGACY_DMA) && (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0))) || ((defined ATEMSYS_LEGACY_DMA) && (0 != ATEMSYS_LEGACY_DMA)) + { + unsigned int dwDmaPfn = 0; + +#if (defined __arm__) || (defined __aarch64__) + dwDmaPfn = (dmaAddr >> PAGE_SHIFT); + #if (defined CONFIG_PCI) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) + if ((NULL != pDevDesc->pPcidev) && (0 != pDevDesc->pPcidev->dev.dma_pfn_offset)) + { + dwDmaPfn = dwDmaPfn + pDevDesc->pPcidev->dev.dma_pfn_offset; + INF("mmap: remap_pfn_range dma pfn 0x%x, offset pfn 0x%x\n", + dwDmaPfn, (u32)pDevDesc->pPcidev->dev.dma_pfn_offset); + } + #else + if ((NULL != pDevDesc->pPcidev) && (NULL != pDevDesc->pPcidev->dev.dma_range_map)) + { + const struct bus_dma_region* map = pDevDesc->pPcidev->dev.dma_range_map; + unsigned long dma_pfn_offset = ((map->offset) >> PAGE_SHIFT); + dwDmaPfn = dwDmaPfn + dma_pfn_offset; + INF("mmap: remap_pfn_range dma pfn 0x%x, offset pfn 0x%x\n", + dwDmaPfn, (u32)dma_pfn_offset); + } + #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0))*/ + #endif /* (defined CONFIG_PCI) */ +#if (!defined ATEMSYS_DONT_SET_NONCACHED_DMA_PAGEPROTECTIONLFAG) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +#endif +#elif (defined __PPC__) + dwDmaPfn = (dmaAddr >> PAGE_SHIFT); +#else /* x86 / x86_64 */ + dwDmaPfn = virt_to_phys((void*)pVa) >> PAGE_SHIFT; +#endif + nRet = remap_pfn_range(vma, /* user space mapping */ + vma->vm_start, /* User space virtual addr */ + dwDmaPfn, /* physical page frame number */ + dwLen, /* size in bytes */ + vma->vm_page_prot); + if (nRet < 0) + { + ERR("remap_pfn_range failed\n"); + goto ExitAndFree; + } + } +#else /* #if (defined ATEMSYS_LEGACY_DMA) */ + { + struct device* pDmaDev = NULL; + + #if (defined CONFIG_PCI) + if (NULL != pDevDesc->pPcidev) + { + pDmaDev = &pDevDesc->pPcidev->dev; + } + else + #endif /* (defined CONFIG_PCI) */ + if (NULL != pDevDesc->pPlatformDev) + { + pDmaDev = &pDevDesc->pPlatformDev->dev; + } + +#if ((defined __arm__) || (defined __aarch64__)) && (!defined ATEMSYS_DONT_SET_NONCACHED_DMA_PAGEPROTECTIONLFAG) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +#endif + /* for Platform Device */ + nRet = dma_mmap_coherent(pDmaDev, + vma, /* user space mapping */ + pVa, /* kernel virtual address */ + dmaAddr, /* Phys address */ + dwLen); /* size */ + if (nRet < 0) + { + ERR("dma_mmap_coherent failed\n"); + goto ExitAndFree; + } + } +#endif /* #if (defined ATEMSYS_LEGACY_DMA) */ + + /* Write the physical DMA address into the first 4 bytes of allocated memory */ + /* If there is 64 bit DMA support 
write the upper part into the next 4 bytes */ + if (pDevDesc->bSupport64BitDma) + { + ((u32*) pVa)[0] = (u32)((u64)dmaAddr & 0xFFFFFFFF); + ((u32*) pVa)[1] = (u32)(((u64)dmaAddr >> 32) & 0xFFFFFFFF); + } + else + { + *((u32*) pVa) = (u32) dmaAddr; + } + + /* Some housekeeping to be able to cleanup the allocated memory later */ + pMmapNode = kzalloc(sizeof(ATEMSYS_T_MMAP_DESC), GFP_KERNEL); + if (! pMmapNode) + { + ERR("mmap: kzalloc() failed\n"); + nRet = -ENOMEM; + goto ExitAndFree; + } + + pMmapNode->pDevDesc = pDevDesc; + pMmapNode->dmaAddr = dmaAddr; + pMmapNode->pVirtAddr = pVa; + pMmapNode->len = dwLen; + + /* Setup close callback -> deallocates DMA memory if region is unmapped by the system */ + vma->vm_ops = &mmap_vmop; + vma->vm_private_data = pMmapNode; + + INF("mmap: mapped DMA memory, Phys:0x%px KVirt:0x%px UVirt:0x%px Size:%u\n", + (void*)(unsigned long)dmaAddr, (void*)pVa, (void*)vma->vm_start, dwLen); + } + + nRet = 0; + + goto Exit; + +ExitAndFree: + + if (pVa == NULL) goto Exit; + +#if (defined CONFIG_PCI) + if (pDevDesc->pPcidev != NULL) + { +#if ( (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) \ + || (defined __aarch64__) \ + || ((defined __arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))) \ + || ((defined __i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))) \ + || ((defined __amd64__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))) ) + dma_free_coherent(&pDevDesc->pPcidev->dev, dwLen, pVa, dmaAddr); +#else + pci_free_consistent(pDevDesc->pPcidev, dwLen, pVa, dmaAddr); +#endif + } + else +#endif + { +#if (defined __arm__) || (defined __aarch64__) + dmam_free_coherent(&pDevDesc->pPlatformDev->dev, dwLen, pVa, dmaAddr); +#else + dev_dma_free(dwLen, pVa); +#endif + } + +Exit: + return nRet; +} + + +/* + * This function is called whenever a process tries to do an ioctl on our + * device file. + * + * If the ioctl is write or read/write (meaning output is returned to the + * calling process), the ioctl call returns the output of this function. + * + */ +#if (defined CONFIG_XENO_COBALT) +static int atemsys_ioctl(struct rtdm_fd* fd, unsigned int cmd, void __user* user_arg) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd); + unsigned long arg = (unsigned long) user_arg; +#else +static long atemsys_ioctl( + struct file* file, + unsigned int cmd, + unsigned long arg) +{ + ATEMSYS_T_DEVICE_DESC* pDevDesc = file->private_data; +#endif /* CONFIG_XENO_COBALT */ + + int nRetVal = -EFAULT; + + if (pDevDesc == NULL) + { + ERR("ioctl: Invalid device descriptor\n"); + goto Exit; + } + + /* + * Switch according to the ioctl called + */ + switch (cmd) + { +#if (defined CONFIG_PCI) + case ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_0_00: + case ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_05: + case ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_4_12: + { + nRetVal = ioctl_pci_finddevice(pDevDesc, arg, _IOC_SIZE(cmd)); /* size determines version */ + if (0 != nRetVal) + { + /* be quiet. ioctl may fail */ + goto Exit; + } + } break; + case ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_0_00: + case ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_05: + case ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_4_12: + { + nRetVal = ioctl_pci_configure_device(pDevDesc, arg, _IOC_SIZE(cmd)); /* size determines version */ + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_PCI_CONF_DEVICE failed: %d\n", nRetVal); + goto Exit; + } + } break; + + case ATEMSYS_IOCTL_PCI_RELEASE_DEVICE: + { + if (pDevDesc->pPcidev == NULL) + { + DBG("pci_release: No PCI device selected. 
Call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE) first\n"); + goto Exit; + } + /* do nothing */ + /* see device_release() -> dev_pci_release(pDevDesc)*/ + } break; +#endif + case ATEMSYS_IOCTL_INT_CONNECT: + { + nRetVal = ioctl_int_connect(pDevDesc, arg); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_INT_CONNECT failed: %d\n", nRetVal); + goto Exit; + } + } break; + + case ATEMSYS_IOCTL_INT_DISCONNECT: + { + nRetVal = dev_int_disconnect(pDevDesc); + if (0 != nRetVal) + { + /* be quiet. ioctl may fail */ + goto Exit; + } + } break; + + case ATEMSYS_IOCTL_INT_INFO: + { + nRetVal = ioctl_intinfo(pDevDesc, arg); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_INT_INFO failed: %d\n", nRetVal); + goto Exit; + } + } break; + + case ATEMSYS_IOCTL_MOD_GETVERSION: + { + __u32 dwVersion = USE_ATEMSYS_API_VERSION; + +#if (defined CONFIG_XENO_COBALT) + nRetVal = rtdm_safe_copy_to_user(fd, user_arg, &dwVersion, sizeof(__u32)); +#else + nRetVal = put_user(dwVersion, (__u32*)arg); +#endif /* CONFIG_XENO_COBALT */ + + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_MOD_GETVERSION failed: %d\n", nRetVal); + goto Exit; + } + } break; + + case ATEMSYS_IOCTL_MOD_SET_API_VERSION: + { + __u32 dwApiVersion = 0; + +#if (defined CONFIG_XENO_COBALT) + nRetVal = rtdm_safe_copy_from_user(fd, &dwApiVersion, user_arg, sizeof(__u32)); +#else + nRetVal = get_user(dwApiVersion, (__u32*)arg); +#endif + + /* activate supported features */ + if (EC_ATEMSYSVERSION(1,4,15) <= dwApiVersion) + { + pDevDesc->bSupport64BitDma = true; + } + + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_MOD_SETVERSION failed: %d\n", nRetVal); + goto Exit; + } + } break; +#if ((defined CONFIG_SMP) && (LINUX_VERSION_CODE > KERNEL_VERSION(5,14,0))) + case ATEMSYS_IOCTL_INT_SET_CPU_AFFINITY: + { + nRetVal = SetIntCpuAffinityIoctl(pDevDesc, arg, _IOC_SIZE(cmd)); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_INT_SET_CPU_AFFINITY failed: %d\n", nRetVal); + goto Exit; + } + } break; +#endif + +#if (defined INCLUDE_ATEMSYS_DT_DRIVER) + case ATEMSYS_IOCTL_IOMEM_CMD: + { + nRetVal = IoMemCmd((void*)arg); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_IOMEM_CMD failed: 0x%x\n", nRetVal); + goto Exit; + } + } break; + + +#ifdef CONFIG_TI_K3_UDMA + case ATEMSYS_IOCTL_CPSWG_CMD: + { + nRetVal = CpswgCmd((__u32*)arg, NULL); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_CPSWG_CMD failed: 0x%x\n", nRetVal); + goto Exit; + } + } break; +#endif /*#ifdef CONFIG_TI_K3_UDMA*/ + + case ATEMSYS_IOCTL_GET_MAC_INFO: + { + nRetVal = GetMacInfoIoctl(pDevDesc, arg); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_GET_MAC_INFO failed: 0x%x\n", nRetVal); + goto Exit; + } + } break; + case ATEMSYS_IOCTL_PHY_START_STOP: + { + nRetVal = PhyStartStopIoctl(arg); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_PHY_START_STOP failed: %d\n", nRetVal); + goto Exit; + } + } break; + case ATEMSYS_IOCTL_GET_MDIO_ORDER: + { + nRetVal = GetMdioOrderIoctl(arg); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_GET_MDIO_ORDER failed: %d\n", nRetVal); + goto Exit; + } + } break; + case ATEMSYS_IOCTL_RETURN_MDIO_ORDER: + { + nRetVal = ReturnMdioOrderIoctl(arg); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_RETURN_MDIO_ORDER failed: %d\n", nRetVal); + goto Exit; + } + } break; + case ATEMSYS_IOCTL_GET_PHY_INFO: + { + nRetVal = GetPhyInfoIoctl(arg); + if (0 != nRetVal) + { + ERR("ioctl ATEMSYS_IOCTL_GET_PHY_INFO failed: %d\n", nRetVal); + goto Exit; + } + } break; + case ATEMSYS_IOCTL_PHY_RESET: + { + nRetVal = PhyResetIoctl(arg); + if (0 != nRetVal) + { + ERR("ioctl 
ATEMSYS_IOCTL_PHY_RESET failed: %d\n", nRetVal); + goto Exit; + } + } break; +#endif /* INCLUDE_ATEMSYS_DT_DRIVER */ + + default: + { + nRetVal = -EOPNOTSUPP; + goto Exit; + } /* no break */ + } + + nRetVal = DRIVER_SUCCESS; + +Exit: + return nRetVal; +} + +#if (defined CONFIG_COMPAT) && !(defined CONFIG_XENO_COBALT) +/* + * ioctl processing for 32 bit process on 64 bit system + */ +static long atemsys_compat_ioctl( + struct file* file, + unsigned int cmd, + unsigned long arg) +{ + return atemsys_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif /* CONFIG_COMPAT && !CONFIG_XENO_COBALT */ + +/* Module Declarations */ + +/* + * This structure will hold the functions to be called + * when a process does something to the device we + * created. Since a pointer to this structure is kept in + * the devices table, it can't be local to + * module_init. NULL is for unimplemented functions. + */ + +#if (defined CONFIG_XENO_COBALT) +static struct rtdm_driver driver = { + .profile_info = RTDM_PROFILE_INFO(atemsys, RTDM_CLASS_EXPERIMENTAL, MAJOR_NUM, 1), + .device_flags = RTDM_NAMED_DEVICE, + .device_count = 1, + .context_size = sizeof(ATEMSYS_T_DEVICE_DESC), + + .ops = { + .open = device_open, + .close = device_release, + .read_rt = device_read, + .ioctl_rt = atemsys_ioctl, + .ioctl_nrt = atemsys_ioctl, + .mmap = device_mmap, + }, +}; + +static struct rtdm_device device = { + .driver = &driver, + .label = ATEMSYS_DEVICE_NAME, +}; +#else /* !CONFIG_XENO_COBALT */ +struct file_operations Fops = { + .read = device_read, + .unlocked_ioctl = atemsys_ioctl, +#if (defined CONFIG_COMPAT) + .compat_ioctl = atemsys_compat_ioctl, /* ioctl processing for 32 bit process on 64 bit system */ +#endif + .open = device_open, + .mmap = device_mmap, + .release = device_release, /* a.k.a. 
close */ +}; +#endif /* !CONFIG_XENO_COBALT */ + + +#if (defined INCLUDE_ATEMSYS_DT_DRIVER) +static int GetMacInfoIoctl(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam) +{ + ATEMSYS_T_MAC_INFO oInfo; + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */ + int nRetVal = -1; + unsigned int i = 0; + + memset(&oInfo, 0, sizeof(ATEMSYS_T_MAC_INFO)); + nRetVal = copy_from_user(&oInfo, (ATEMSYS_T_MAC_INFO *)ioctlParam, sizeof(ATEMSYS_T_MAC_INFO)); + if (0 != nRetVal) + { + ERR("GetMacInfoIoctl failed: %d\n", nRetVal); + goto Exit; + } + + for (i = 0; i < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; i++) + { + if (NULL == S_apDrvDescPrivate[i]) + { + continue; + } + if ((0 == strcmp(S_apDrvDescPrivate[i]->MacInfo.szIdent, oInfo.szIdent)) && + (S_apDrvDescPrivate[i]->MacInfo.dwInstance == oInfo.dwInstance)) + { + pDrvDescPrivate = S_apDrvDescPrivate[i]; + break; + } + } + + if (NULL != pDrvDescPrivate) + { + if (pDrvDescPrivate->pDevDesc != NULL) + { + ERR("GetMacInfoIoctl: device \"%s\" in use by another instance?\n", pDrvDescPrivate->pPDev->name); + nRetVal = -EBUSY; + goto Exit; + } + + oInfo.qwRegAddr = pDrvDescPrivate->MacInfo.qwRegAddr; + oInfo.dwRegSize = pDrvDescPrivate->MacInfo.dwRegSize; + oInfo.dwStatus = pDrvDescPrivate->MacInfo.dwStatus; + oInfo.ePhyMode = pDrvDescPrivate->MacInfo.ePhyMode; + oInfo.dwIndex = pDrvDescPrivate->MacInfo.dwIndex; + oInfo.bNoMdioBus = pDrvDescPrivate->MacInfo.bNoMdioBus; + oInfo.dwPhyAddr = pDrvDescPrivate->MacInfo.dwPhyAddr; + oInfo.bPhyResetSupported = pDrvDescPrivate->MacInfo.bPhyResetSupported; + + /* save descriptor of the caller for cleanup on device_release */ + pDrvDescPrivate->pDevDesc = pDevDesc; + + /* add the driver's platform device to the caller's device descriptor for memory mapping and allocation */ + pDevDesc->pPlatformDev = pDrvDescPrivate->pPDev; + pDevDesc->pDrvDesc = pDrvDescPrivate; + dwRetVal = 0; /* EC_E_NOERROR */ + } + else + { + dwRetVal = 0x9811000C; /* EC_E_NOTFOUND */ + } + + nRetVal = 0; +Exit: + oInfo.dwErrorCode = dwRetVal; + nRetVal = copy_to_user((ATEMSYS_T_MAC_INFO *)ioctlParam, &oInfo, sizeof(ATEMSYS_T_MAC_INFO)); + if (0 != nRetVal) + { + ERR("GetMacInfoIoctl failed: %d\n", nRetVal); + } + return nRetVal; +} + +static int PhyStartStopIoctl( unsigned long ioctlParam) +{ + ATEMSYS_T_PHY_START_STOP_INFO oPhyStartStopInfo; + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */ + int nRetVal = -1; + memset(&oPhyStartStopInfo, 0, sizeof(ATEMSYS_T_PHY_START_STOP_INFO)); + nRetVal = copy_from_user(&oPhyStartStopInfo, (ATEMSYS_T_PHY_START_STOP_INFO *)ioctlParam, sizeof(ATEMSYS_T_PHY_START_STOP_INFO)); + if (0 != nRetVal) + { + ERR("PhyStartStopIoctl failed: %d\n", nRetVal); + goto Exit; + } + if (oPhyStartStopInfo.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES) + { + dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */ + nRetVal = 0; + goto Exit; + } + pDrvDescPrivate = S_apDrvDescPrivate[oPhyStartStopInfo.dwIndex]; + if (NULL == pDrvDescPrivate) + { + dwRetVal = 0x9811000C; /* EC_E_NOTFOUND*/ + nRetVal = 0; + goto Exit; + } + if (oPhyStartStopInfo.bStart) + { +#if (defined CONFIG_XENO_COBALT) + mutex_lock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + if (NULL == S_oAtemsysWorkerThreadDesc.pfNextTask) + { + S_oAtemsysWorkerThreadDesc.pfNextTask = StartPhyThread; + S_oAtemsysWorkerThreadDesc.pNextTaskData = (void*)pDrvDescPrivate->pPDev; + } + else + { + mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + ERR("PhyStartStopIoctl: 
StartPhy failed! WorkerThread is busy!\n"); + nRetVal = -EAGAIN; + goto Exit; + } + mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); +#else + pDrvDescPrivate->etx_thread_StartPhy = kthread_create(StartPhyThread,(void*)pDrvDescPrivate->pPDev,"StartPhyThread"); + /* kthread_create() returns an ERR_PTR on failure, never NULL */ + if (IS_ERR(pDrvDescPrivate->etx_thread_StartPhy)) + { + ERR("PhyStartStopIoctl: Cannot create kthread for StartPhyThread\n"); + nRetVal = -EAGAIN; + goto Exit; + } + wake_up_process(pDrvDescPrivate->etx_thread_StartPhy); +#endif /*#if (defined CONFIG_XENO_COBALT)*/ + } + else + { +#if (defined CONFIG_XENO_COBALT) + mutex_lock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + if (NULL == S_oAtemsysWorkerThreadDesc.pfNextTask) + { + S_oAtemsysWorkerThreadDesc.pfNextTask = StopPhyThread; + S_oAtemsysWorkerThreadDesc.pNextTaskData = (void*)pDrvDescPrivate->pPDev; + } + else + { + mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + ERR("PhyStartStopIoctl: StopPhy failed! WorkerThread is busy!\n"); + nRetVal = -EAGAIN; + goto Exit; + } + mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); +#else + pDrvDescPrivate->etx_thread_StopPhy = kthread_create(StopPhyThread,(void*)pDrvDescPrivate->pPDev,"StopPhyThread"); + /* kthread_create() returns an ERR_PTR on failure, never NULL */ + if (IS_ERR(pDrvDescPrivate->etx_thread_StopPhy)) + { + ERR("PhyStartStopIoctl: Cannot create kthread for StopPhyThread\n"); + nRetVal = -EAGAIN; + goto Exit; + } + wake_up_process(pDrvDescPrivate->etx_thread_StopPhy); +#endif /* #if (defined CONFIG_XENO_COBALT) */ + } + nRetVal = 0; + dwRetVal = 0; /* EC_E_NOERROR */ +Exit: + oPhyStartStopInfo.dwErrorCode = dwRetVal; + + nRetVal = copy_to_user((ATEMSYS_T_PHY_START_STOP_INFO *)ioctlParam, &oPhyStartStopInfo, sizeof(ATEMSYS_T_PHY_START_STOP_INFO)); + if (0 != nRetVal) + { + ERR("PhyStartStopIoctl failed: %d\n", nRetVal); + } + return nRetVal; +} + + +static int GetMdioOrderIoctl( unsigned long ioctlParam) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + ATEMSYS_T_MDIO_ORDER oOrder; + bool bLocked = false; + unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */ + int nRetVal = -1; + memset(&oOrder, 0, sizeof(ATEMSYS_T_MDIO_ORDER)); + nRetVal = copy_from_user(&oOrder, (ATEMSYS_T_MDIO_ORDER *)ioctlParam, sizeof(ATEMSYS_T_MDIO_ORDER)); + if (0 != nRetVal) + { + ERR("GetMdioOrderIoctl failed: %d\n", nRetVal); + goto Exit; + } + if (oOrder.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES) + { + dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */ + nRetVal = 0; + goto Exit; + } + pDrvDescPrivate = S_apDrvDescPrivate[oOrder.dwIndex]; + if (NULL == pDrvDescPrivate) + { + dwRetVal = 0x9811000C; /* EC_E_NOTFOUND*/ + nRetVal = 0; + goto Exit; + } + + if (mutex_trylock(&pDrvDescPrivate->mdio_order_mutex)) + { + bLocked = true; + if ((pDrvDescPrivate->MdioOrder.bInUse) && (pDrvDescPrivate->MdioOrder.bInUseByIoctl)) + { + oOrder.bInUse = pDrvDescPrivate->MdioOrder.bInUse; + oOrder.bInUseByIoctl = pDrvDescPrivate->MdioOrder.bInUseByIoctl; + oOrder.bWriteOrder = pDrvDescPrivate->MdioOrder.bWriteOrder; + oOrder.wMdioAddr = pDrvDescPrivate->MdioOrder.wMdioAddr; + oOrder.wReg = pDrvDescPrivate->MdioOrder.wReg; + oOrder.wValue = pDrvDescPrivate->MdioOrder.wValue; + } + } + + dwRetVal = 0; /* EC_E_NOERROR*/ + nRetVal = 0; +Exit: + if (bLocked) + { + mutex_unlock(&pDrvDescPrivate->mdio_order_mutex); + } + oOrder.dwErrorCode = dwRetVal; + nRetVal = copy_to_user((ATEMSYS_T_MDIO_ORDER *)ioctlParam, &oOrder, sizeof(ATEMSYS_T_MDIO_ORDER)); + if (0 != nRetVal) + { + ERR("GetMdioOrderIoctl failed: %d\n", nRetVal); + } + return nRetVal; +} + +static int ReturnMdioOrderIoctl( unsigned long 
ioctlParam) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + ATEMSYS_T_MDIO_ORDER oOrder; + unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */ + int nRetVal = -1; + memset(&oOrder, 0, sizeof(ATEMSYS_T_MDIO_ORDER)); + nRetVal = copy_from_user(&oOrder, (ATEMSYS_T_MDIO_ORDER *)ioctlParam, sizeof(ATEMSYS_T_MDIO_ORDER)); + if (0 != nRetVal) + { + ERR("ReturnMdioOrderIoctl failed: %d\n", nRetVal); + goto Exit; + } + + if (oOrder.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES) + { + dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */ + nRetVal = 0; + goto Exit; + } + pDrvDescPrivate = S_apDrvDescPrivate[oOrder.dwIndex]; + if (NULL == pDrvDescPrivate) + { + dwRetVal = 0x9811000C; /* EC_E_NOTFOUND*/ + nRetVal = 0; + goto Exit; + } + + mutex_lock(&pDrvDescPrivate->mdio_order_mutex); + pDrvDescPrivate->MdioOrder.wValue = oOrder.wValue; + pDrvDescPrivate->MdioOrder.bInUseByIoctl = false; + mutex_unlock(&pDrvDescPrivate->mdio_order_mutex); + + /* wake MdioRead or MdioWrite */ + pDrvDescPrivate->mdio_wait_queue_cnt = 1; + wake_up_interruptible(&pDrvDescPrivate->mdio_wait_queue); + + dwRetVal = 0; /* EC_E_NOERROR */ + nRetVal = 0; + +Exit: + oOrder.dwErrorCode = dwRetVal; + nRetVal = copy_to_user((ATEMSYS_T_MDIO_ORDER *)ioctlParam, &oOrder, sizeof(ATEMSYS_T_MDIO_ORDER)); + if (0 != nRetVal) + { + ERR("ReturnMdioOrderIoctl failed: %d\n", nRetVal); + } + return nRetVal; +} + +static int GetPhyInfoIoctl(unsigned long ioctlParam) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + ATEMSYS_T_PHY_INFO oStatus; + unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */ + int nRetVal = -1; + memset(&oStatus, 0, sizeof(ATEMSYS_T_PHY_INFO)); + nRetVal = copy_from_user(&oStatus, (ATEMSYS_T_PHY_INFO *)ioctlParam, sizeof(ATEMSYS_T_PHY_INFO)); + if (0 != nRetVal) + { + ERR("GetPhyInfoIoctl failed: %d\n", nRetVal); + goto Exit; + } + + if (oStatus.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES) + { + dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */ + nRetVal = 0; + goto Exit; + } + pDrvDescPrivate = S_apDrvDescPrivate[oStatus.dwIndex]; + if (NULL == pDrvDescPrivate) + { + dwRetVal = 0x9811000C; /* EC_E_NOTFOUND*/ + nRetVal = 0; + goto Exit; + } + + oStatus.dwLink = pDrvDescPrivate->PhyInfo.dwLink; + oStatus.dwDuplex = pDrvDescPrivate->PhyInfo.dwDuplex; + oStatus.dwSpeed = pDrvDescPrivate->PhyInfo.dwSpeed; + oStatus.bPhyReady = pDrvDescPrivate->PhyInfo.bPhyReady; + + dwRetVal = 0; /* EC_E_NOERROR */ + nRetVal = 0; +Exit: + oStatus.dwErrorCode = dwRetVal; + nRetVal = copy_to_user((ATEMSYS_T_PHY_INFO *)ioctlParam, &oStatus, sizeof(ATEMSYS_T_PHY_INFO)); + if (0 != nRetVal) + { + ERR("GetPhyInfoIoctl failed: %d\n", nRetVal); + } + return nRetVal; +} + +static int PhyResetIoctl(unsigned long ioctlParam) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + unsigned int* pdwIoctlData = (__u32*)ioctlParam; + unsigned int dwIndex = 0; + unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */ + int nRetVal = -1; + int nRes = -1; + + nRes = get_user(dwIndex, pdwIoctlData); + if (0 != nRes) { nRetVal = nRes; goto Exit; } + + if (dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES) + { + dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */ + nRetVal = 0; + goto Exit; + } + pDrvDescPrivate = S_apDrvDescPrivate[dwIndex]; + if (NULL == pDrvDescPrivate) + { + dwRetVal = 0x9811000C; /* EC_E_NOTFOUND */ + nRetVal = 0; + goto Exit; + } + + if 
(!pDrvDescPrivate->MacInfo.bPhyResetSupported) + { + DBG("PhyResetIoctl: PhyReset not supported\n"); + dwRetVal = 0x98110001; /* EC_E_NOTSUPPORTED */ + nRetVal = 0; + goto Exit; + } + + nRes = ResetPhyViaGpio(pDrvDescPrivate); + if (0 != nRes) + { + dwRetVal = 0x98110000; /* EC_E_ERROR */ + nRetVal = 0; + goto Exit; + } + + dwRetVal = 0; /* EC_E_NOERROR */ + nRetVal = 0; +Exit: + put_user(dwRetVal, pdwIoctlData); + + return nRetVal; +} + +static void UpdatePhyInfoByLinuxPhyDriver(struct net_device* ndev) +{ + struct phy_device* phy_dev = ndev->phydev; + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(ndev); + + if (LOGLEVEL_DEBUG <= loglevel) + { + phy_print_status(phy_dev); + } + + pDrvDescPrivate->PhyInfo.dwLink = phy_dev->link; + pDrvDescPrivate->PhyInfo.dwDuplex = phy_dev->duplex; + pDrvDescPrivate->PhyInfo.dwSpeed = phy_dev->speed; + pDrvDescPrivate->PhyInfo.bPhyReady = true; +} + +static int MdioProbe(struct net_device* ndev) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(ndev); + struct phy_device* pPhyDev = NULL; + char mdio_bus_id[MII_BUS_ID_SIZE]; + char phy_name[MII_BUS_ID_SIZE + 3]; + int nPhy_id = 0; + + if (NULL != pDrvDescPrivate->pPhyNode) + { + pPhyDev = of_phy_connect(ndev, pDrvDescPrivate->pPhyNode, + &UpdatePhyInfoByLinuxPhyDriver, 0, + pDrvDescPrivate->PhyInterface); + } + else if (NULL != pDrvDescPrivate->pMdioNode) + { + struct platform_device* mdio; + mdio = of_find_device_by_node(pDrvDescPrivate->pMdioNode); + snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio->name, pDrvDescPrivate->MacInfo.dwPhyAddr); + pPhyDev = phy_connect(ndev, phy_name, &UpdatePhyInfoByLinuxPhyDriver, pDrvDescPrivate->PhyInterface); + } + else if (NULL != pDrvDescPrivate->pMdioBus) + { + int nDev_id = pDrvDescPrivate->nDev_id; + /* check for attached phy */ + for (nPhy_id = 0; (nPhy_id < PHY_MAX_ADDR); nPhy_id++) + { + if (!mdiobus_is_registered_device(pDrvDescPrivate->pMdioBus, nPhy_id)) + { + continue; + } + if (0 != nDev_id--) + { + continue; + } + strlcpy(mdio_bus_id, pDrvDescPrivate->pMdioBus->id, MII_BUS_ID_SIZE); + break; + } + + if (nPhy_id >= PHY_MAX_ADDR) + { + INF("%s: no PHY, assuming direct connection to switch\n", pDrvDescPrivate->pPDev->name); + strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + nPhy_id = 0; + } + + snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, nPhy_id); + pPhyDev = phy_connect(ndev, phy_name, &UpdatePhyInfoByLinuxPhyDriver, pDrvDescPrivate->PhyInterface); + } + + if ((NULL == pPhyDev) || IS_ERR(pPhyDev)) + { + ERR("%s: Could not attach to PHY (pPhyDev %p)\n", pDrvDescPrivate->pPDev->name, pPhyDev); + return -ENODEV; + } + + /* adjust maximal link speed */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) + phy_set_max_speed(pPhyDev, 100); +#else + pPhyDev->supported &= PHY_BASIC_FEATURES; + pPhyDev->advertising = pPhyDev->supported; +#endif + if (LOGLEVEL_INFO <= loglevel) + { + phy_attached_info(pPhyDev); + } + + pDrvDescPrivate->pPhyDev = pPhyDev; + pDrvDescPrivate->PhyInfo.dwLink = 0; + pDrvDescPrivate->PhyInfo.dwDuplex = 0; + pDrvDescPrivate->PhyInfo.dwSpeed = 0; + + return 0; +} + +static int MdioRead(struct mii_bus* pBus, int mii_id, int regnum) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = pBus->priv; + int nRetVal = -1; + int nRes = -1; + + nRes = pm_runtime_get_sync(&pDrvDescPrivate->pPDev->dev); + if (0 > nRes) + { + return nRes; + } + + /* get lock for the Mdio bus only one MdioRead or MdioWrite*/ + mutex_lock(&pDrvDescPrivate->mdio_mutex); + + mutex_lock(&pDrvDescPrivate->mdio_order_mutex); + 
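/* publish the read order: user space fetches it via ATEMSYS_IOCTL_GET_MDIO_ORDER, performs the bus access, and posts the result through ATEMSYS_IOCTL_RETURN_MDIO_ORDER, which wakes the wait below */ + 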
memset(&pDrvDescPrivate->MdioOrder, 0, sizeof(ATEMSYS_T_MDIO_ORDER)); + pDrvDescPrivate->MdioOrder.bInUse = true; + pDrvDescPrivate->MdioOrder.bInUseByIoctl = true; + pDrvDescPrivate->MdioOrder.bWriteOrder = false; + pDrvDescPrivate->MdioOrder.wMdioAddr = (__u16)mii_id; + pDrvDescPrivate->MdioOrder.wReg = (__u16)regnum; + mutex_unlock(&pDrvDescPrivate->mdio_order_mutex); + + /* wait for result */ + wait_event_interruptible(pDrvDescPrivate->mdio_wait_queue, pDrvDescPrivate->mdio_wait_queue_cnt != 0); + pDrvDescPrivate->mdio_wait_queue_cnt = pDrvDescPrivate->mdio_wait_queue_cnt - 1; + + nRetVal = pDrvDescPrivate->MdioOrder.wValue; + + mutex_lock(&pDrvDescPrivate->mdio_order_mutex); + pDrvDescPrivate->MdioOrder.bInUse = false; + pDrvDescPrivate->MdioOrder.bInUseByIoctl = false; + mutex_unlock(&pDrvDescPrivate->mdio_order_mutex); + + pm_runtime_mark_last_busy(&pDrvDescPrivate->pPDev->dev); + pm_runtime_put_autosuspend(&pDrvDescPrivate->pPDev->dev); + + mutex_unlock(&pDrvDescPrivate->mdio_mutex); + + return nRetVal; +} + +static int MdioWrite(struct mii_bus* pBus, int mii_id, int regnum, u16 value) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = pBus->priv; + int nRetVal; + + nRetVal = pm_runtime_get_sync(&pDrvDescPrivate->pPDev->dev); + if (0 > nRetVal) + { + return nRetVal; + } + + /* get lock for the Mdio bus only one MdioRead or MdioWrite*/ + mutex_lock(&pDrvDescPrivate->mdio_mutex); + + mutex_lock(&pDrvDescPrivate->mdio_order_mutex); + memset(&pDrvDescPrivate->MdioOrder, 0, sizeof(ATEMSYS_T_MDIO_ORDER)); + pDrvDescPrivate->MdioOrder.bInUse = true; + pDrvDescPrivate->MdioOrder.bInUseByIoctl = true; + pDrvDescPrivate->MdioOrder.bWriteOrder = true; + pDrvDescPrivate->MdioOrder.wMdioAddr = (__u16)mii_id; + pDrvDescPrivate->MdioOrder.wReg = (__u16)regnum; + pDrvDescPrivate->MdioOrder.wValue = (__u16)value; + mutex_unlock(&pDrvDescPrivate->mdio_order_mutex); + + /* wait for result */ + wait_event_interruptible(pDrvDescPrivate->mdio_wait_queue, pDrvDescPrivate->mdio_wait_queue_cnt != 0); + pDrvDescPrivate->mdio_wait_queue_cnt = pDrvDescPrivate->mdio_wait_queue_cnt - 1; + + nRetVal = 0; + + mutex_lock(&pDrvDescPrivate->mdio_order_mutex); + pDrvDescPrivate->MdioOrder.bInUse = false; + pDrvDescPrivate->MdioOrder.bInUseByIoctl = false; + mutex_unlock(&pDrvDescPrivate->mdio_order_mutex); + + pm_runtime_mark_last_busy(&pDrvDescPrivate->pPDev->dev); + pm_runtime_put_autosuspend(&pDrvDescPrivate->pPDev->dev); + + mutex_unlock(&pDrvDescPrivate->mdio_mutex); + + return nRetVal; +} + +static int MdioInit(struct platform_device* pPDev) +{ + struct net_device* pNDev = platform_get_drvdata(pPDev); + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev); + int nRes = -ENXIO; + + if (pDrvDescPrivate->MacInfo.bNoMdioBus) + { + pDrvDescPrivate->pMdioBus = NULL; + nRes = 0; + goto Exit; + } + + pDrvDescPrivate->pMdioBus = mdiobus_alloc(); + if (NULL == pDrvDescPrivate->pMdioBus) + { + nRes = -ENOMEM; + goto Exit; + } + + pDrvDescPrivate->pMdioBus->name = "atemsys_mdio_bus"; + pDrvDescPrivate->pMdioBus->read = &MdioRead; + pDrvDescPrivate->pMdioBus->write = &MdioWrite; + snprintf(pDrvDescPrivate->pMdioBus->id, MII_BUS_ID_SIZE, "%s-%x", pPDev->name, pDrvDescPrivate->nDev_id + 1); + pDrvDescPrivate->pMdioBus->priv = pDrvDescPrivate; + pDrvDescPrivate->pMdioBus->parent = &pPDev->dev; + + if (NULL != pDrvDescPrivate->pMdioDevNode) + { + nRes = of_mdiobus_register(pDrvDescPrivate->pMdioBus, pDrvDescPrivate->pMdioDevNode); + of_node_put(pDrvDescPrivate->pMdioDevNode); + } + else + { + if (NULL == 
pDrvDescPrivate->pPhyNode) + { + nRes = mdiobus_register(pDrvDescPrivate->pMdioBus); + } + else + { + /* no Mdio sub-node, use main node */ + nRes = of_mdiobus_register(pDrvDescPrivate->pMdioBus, pDrvDescPrivate->pDevNode); + } + } + if (0 != nRes) + { + mdiobus_free(pDrvDescPrivate->pMdioBus); + } + +Exit: + return nRes; +} + + +static int StopPhy(struct platform_device* pPDev) +{ + struct net_device* pNDev = platform_get_drvdata(pPDev); + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev); + + /* phy */ + if (NULL != pDrvDescPrivate->pPhyDev) + { + phy_stop(pDrvDescPrivate->pPhyDev); + phy_disconnect(pDrvDescPrivate->pPhyDev); + pDrvDescPrivate->pPhyDev = NULL; + } + + /* mdio bus */ + if (NULL != pDrvDescPrivate->pMdioBus) + { + mdiobus_unregister(pDrvDescPrivate->pMdioBus); + mdiobus_free(pDrvDescPrivate->pMdioBus); + pDrvDescPrivate->pMdioBus = NULL; + } + + pDrvDescPrivate->PhyInfo.bPhyReady = false; + pDrvDescPrivate->mdio_wait_queue_cnt = 0; + + return 0; +} + +static int StartPhy(struct platform_device* pPDev) +{ + struct net_device* pNDev = platform_get_drvdata(pPDev); + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev); + int nRes = -1; + + if ((NULL != pDrvDescPrivate->pPhyDev) || (NULL != pDrvDescPrivate->pMdioBus)) + { + StopPhy(pPDev); + } + + /* mdio bus */ + nRes = MdioInit(pPDev); + if (0 != nRes) + { + pDrvDescPrivate->pMdioBus = NULL; + } + nRes = MdioProbe(pNDev); + if (0 != nRes) + { + return nRes; + } + /* phy */ + phy_start(pDrvDescPrivate->pPhyDev); + phy_start_aneg(pDrvDescPrivate->pPhyDev); + + return 0; +} + +static int StartPhyThread(void* data) +{ + struct platform_device* pPDev = (struct platform_device*)data; + + StartPhy(pPDev); + + return 0; +} + +static int StopPhyThread(void* data) +{ + struct platform_device* pPDev = (struct platform_device*)data; + + StopPhy(pPDev); + + return 0; +} + +static int StopPhyWithoutIoctlMdioHandling(struct platform_device* pPDev) +{ + struct net_device* pNDev = platform_get_drvdata(pPDev); + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev); + + /* start StopPhy as thread */ +#if (defined CONFIG_XENO_COBALT) + mutex_lock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + if (NULL == S_oAtemsysWorkerThreadDesc.pfNextTask) + { + S_oAtemsysWorkerThreadDesc.pfNextTask = StopPhyThread; + S_oAtemsysWorkerThreadDesc.pNextTaskData = (void*)pDrvDescPrivate->pPDev; + } + else + { + ERR("StopPhyWithoutIoctlMdioHandling failed! WorkerThread is busy!\n"); + mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + return -EAGAIN; + } + mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); +#else + pDrvDescPrivate->etx_thread_StopPhy = kthread_create(StopPhyThread, (void*)pDrvDescPrivate->pPDev, "StopPhyThread"); + if (IS_ERR(pDrvDescPrivate->etx_thread_StopPhy)) + { + /* kthread_create() returns an ERR_PTR() on failure, never NULL */ + ERR("Cannot create kthread for StopPhyThread\n"); + pDrvDescPrivate->etx_thread_StopPhy = NULL; + return -1; + } + wake_up_process(pDrvDescPrivate->etx_thread_StopPhy); +#endif /* #if (defined CONFIG_XENO_COBALT) */ + + /* trigger event to continue MdioRead and MdioWrite */ + /* MdioRead always returns 0 */ + pDrvDescPrivate->mdio_wait_queue_cnt = 1000; // wait will be skipped 1000 times + wake_up_interruptible(&pDrvDescPrivate->mdio_wait_queue); + + return 0; +} + +static struct device_node* findDeviceTreeNode(struct platform_device* pPDev) +{ + int nTimeout; + unsigned int dwRegAddr32; + long long unsigned int qwRegAddr64; + char aBuff[32] = {0}; + struct device_node* pDevNode; + + pDevNode = NULL; + nTimeout = 100; + while (0 < nTimeout) + { + pDevNode = of_find_node_by_name(pDevNode, "ethernet"); + if (NULL == pDevNode) + break; + + of_property_read_u32(pDevNode, "reg", &dwRegAddr32); + of_property_read_u64(pDevNode, "reg", &qwRegAddr64); + + sprintf(aBuff, "%x.ethernet", dwRegAddr32); + if (strcmp(pPDev->name, aBuff) == 0) break; + + sprintf(aBuff, "%x.ethernet", (unsigned int)qwRegAddr64); + if (strcmp(pPDev->name, aBuff) == 0) break; + + nTimeout--; + } + if (0 == nTimeout) + pDevNode = NULL; + + return pDevNode; +} + +static int ResetPhyViaGpio(ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate) +{ + int nRes = 0; + + nRes = devm_gpio_request_one(&pDrvDescPrivate->pPDev->dev, pDrvDescPrivate->nPhyResetGpioPin, + pDrvDescPrivate->bPhyResetGpioActiveHigh ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, + "phy-reset"); + if (nRes) + { + ERR("%s: failed to get atemsys-phy-reset-gpios: %d \n", pDrvDescPrivate->pPDev->name, nRes); + return nRes; + } + + if (pDrvDescPrivate->nPhyResetDuration > 20) + msleep(pDrvDescPrivate->nPhyResetDuration); + else + usleep_range(pDrvDescPrivate->nPhyResetDuration * 1000, pDrvDescPrivate->nPhyResetDuration * 1000 + 1000); + + gpio_set_value_cansleep(pDrvDescPrivate->nPhyResetGpioPin, !pDrvDescPrivate->bPhyResetGpioActiveHigh); + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6,0,0)) + devm_gpio_free(&pDrvDescPrivate->pPDev->dev, pDrvDescPrivate->nPhyResetGpioPin); +#endif + + if (!pDrvDescPrivate->nPhyResetPostDelay) + return 0; + + if (pDrvDescPrivate->nPhyResetPostDelay > 20) + msleep(pDrvDescPrivate->nPhyResetPostDelay); + else + usleep_range(pDrvDescPrivate->nPhyResetPostDelay * 1000, pDrvDescPrivate->nPhyResetPostDelay * 1000 + 1000); + + return 0; +} + +static int EthernetDriverProbe(struct platform_device* pPDev) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + struct net_device* pNDev = NULL; + const struct of_device_id* pOf_id = NULL; + static int nDev_id = 0; + unsigned int dwIndex = 0; + int nRes = 0; + struct device_node* pDevNode = NULL; + + INF("Atemsys: Probe device: %s\n", pPDev->name); + + pDevNode = pPDev->dev.of_node; + if (NULL == pDevNode) + { + struct device_node* pDevNodeNew = NULL; + WRN("%s: Device node empty\n", pPDev->name); + + pDevNodeNew = findDeviceTreeNode(pPDev); + if (NULL == pDevNodeNew) + { + ERR("%s: Device node not found\n", pPDev->name); + return -ENODATA; + } + else + { + pDevNode = pDevNodeNew; + } + } + + /* Init network device */ + pNDev = alloc_etherdev_mqs(sizeof(ATEMSYS_T_DRV_DESC_PRIVATE), 1, 1); /* No TX and RX queues required */ + if (NULL == pNDev) + { + return
-ENOMEM; + } + SET_NETDEV_DEV(pNDev, &pPDev->dev); + + /* setup board info structure */ + pOf_id = of_match_device(atemsys_ids, &pPDev->dev); + if (NULL != pOf_id) + { + pPDev->id_entry = pOf_id->data; + } + + pDrvDescPrivate = netdev_priv(pNDev); + memset(pDrvDescPrivate, 0, sizeof(ATEMSYS_T_DRV_DESC_PRIVATE)); + pDrvDescPrivate->pPDev = pPDev; + pDrvDescPrivate->nDev_id = nDev_id++; + platform_set_drvdata(pPDev, pNDev); + pDrvDescPrivate->netdev = pNDev; + pDrvDescPrivate->pDevNode = pDevNode; + + /* Select default pin state */ + pinctrl_pm_select_default_state(&pPDev->dev); + + /* enable clock */ + pDrvDescPrivate->nCountClk = of_property_count_strings(pDevNode,"clock-names"); + if (0 > pDrvDescPrivate->nCountClk) + { + pDrvDescPrivate->nCountClk = 0; + } + DBG("%s: found %d Clocks\n", pPDev->name , pDrvDescPrivate->nCountClk); + + for (dwIndex = 0; dwIndex < pDrvDescPrivate->nCountClk; dwIndex++) + { + if(!of_property_read_string_index(pDevNode, "clock-names", dwIndex, &pDrvDescPrivate->clk_ids[dwIndex])) + { + pDrvDescPrivate->clks[dwIndex] = devm_clk_get(&pPDev->dev, pDrvDescPrivate->clk_ids[dwIndex]); + if (!IS_ERR(pDrvDescPrivate->clks[dwIndex])) + { + clk_prepare_enable(pDrvDescPrivate->clks[dwIndex]); + DBG("%s: Clock %s enabled\n", pPDev->name, pDrvDescPrivate->clk_ids[dwIndex]); + } + else + { + pDrvDescPrivate->clks[dwIndex] = NULL; + } + } + } + + /* enable PHY regulator*/ + pDrvDescPrivate->pPhyRegulator = devm_regulator_get(&pPDev->dev, "phy"); + if (!IS_ERR(pDrvDescPrivate->pPhyRegulator)) + { + if (regulator_enable(pDrvDescPrivate->pPhyRegulator)) + { + WRN("%s: can't enable PHY regulator!\n", pPDev->name); + } + } + else + { + pDrvDescPrivate->pPhyRegulator = NULL; + } + + /* Device run-time power management */ + pm_runtime_dont_use_autosuspend(&pPDev->dev); + pm_runtime_get_noresume(&pPDev->dev); + pm_runtime_set_active(&pPDev->dev); + pm_runtime_enable(&pPDev->dev); + + /* resets */ + { + struct reset_control* pResetCtl; + const char* szTempString = NULL; + + nRes = of_property_read_string(pDevNode, "reset-names", &szTempString); + pResetCtl = devm_reset_control_get_optional(&pPDev->dev, szTempString); + if (NULL != pResetCtl) + { + nRes = reset_control_assert(pResetCtl); + reset_control_deassert(pResetCtl); + + /* Some reset controllers have only reset callback instead of + * assert + deassert callbacks pair. 
+ */ + if (-ENOTSUPP == nRes) + { + reset_control_reset(pResetCtl); + pDrvDescPrivate->pResetCtl = pResetCtl; + } + } + } + + /* get prepare data for atemsys and print some data to kernel log */ + { + unsigned int dwTemp = 0; + const char* szTempString = NULL; + unsigned int adwTempValues[6]; + + /* get identification */ + nRes = of_property_read_string(pDevNode, "atemsys-Ident", &szTempString); + if ((0 == nRes) && (NULL != szTempString)) + { + INF("%s: atemsys-Ident: %s\n", pPDev->name, szTempString); + memcpy(pDrvDescPrivate->MacInfo.szIdent,szTempString, EC_LINKOS_IDENT_MAX_LEN); + } + else + { + INF("%s: Missing atemsys-Ident in the Device Tree\n", pPDev->name); + } + + /* get instance number */ + nRes = of_property_read_u32(pDevNode, "atemsys-Instance", &dwTemp); + if (0 == nRes) + { + INF("%s: atemsys-Instance: %d\n", pPDev->name , dwTemp); + pDrvDescPrivate->MacInfo.dwInstance = dwTemp; + } + else + { + pDrvDescPrivate->MacInfo.dwInstance = 0; + INF("%s: Missing atemsys-Instance in the Device Tree\n", pPDev->name); + } + + /* status */ + szTempString = NULL; + nRes = of_property_read_string(pDevNode, "status", &szTempString); + if ((0 == nRes) && (NULL != szTempString)) + { + DBG("%s: status: %s\n", pPDev->name , szTempString); + pDrvDescPrivate->MacInfo.dwStatus = (strcmp(szTempString, "okay")==0)? 1:0; + } + + /* interrupt-parent */ + nRes = of_property_read_u32(pDevNode, "interrupt-parent", &dwTemp); + if (0 == nRes) + { + DBG("%s: interrupt-parent: %d\n", pPDev->name , dwTemp); + } + + /* interrupts */ + nRes = of_property_read_u32_array(pDevNode, "interrupts", adwTempValues, 6); + if (0 == nRes) + { + DBG("%s: interrupts: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pPDev->name , + adwTempValues[0], adwTempValues[1], adwTempValues[2], adwTempValues[3], adwTempValues[4], adwTempValues[5]); + } + + /* reg */ +#if (defined __arm__) + nRes = of_property_read_u32_array(pDevNode, "reg", adwTempValues, 2); + if (0 == nRes) + { + DBG("%s: reg: 0x%x 0x%x\n", pPDev->name , adwTempValues[0], adwTempValues[1]); + pDrvDescPrivate->MacInfo.qwRegAddr = adwTempValues[0]; + pDrvDescPrivate->MacInfo.dwRegSize = adwTempValues[1]; + } +#endif + + /* get phy-mode */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0)) + nRes = of_get_phy_mode(pPDev->dev.of_node, &pDrvDescPrivate->PhyInterface); + if ((strcmp(pDrvDescPrivate->MacInfo.szIdent, "CPSWG") == 0) && (0==pDrvDescPrivate->PhyInterface)) + { + struct device_node* pDevNodeNew = pDevNode; + pDevNodeNew = of_get_child_by_name(pDevNodeNew, "ethernet-ports"); + pDevNodeNew = of_get_child_by_name(pDevNodeNew, "port"); + nRes = of_get_phy_mode(pDevNodeNew, &pDrvDescPrivate->PhyInterface); + } +#else + pDrvDescPrivate->PhyInterface = of_get_phy_mode(pPDev->dev.of_node); +#endif + switch (pDrvDescPrivate->PhyInterface) + { + case PHY_INTERFACE_MODE_MII: + { + INF("%s: phy-mode: MII\n", pPDev->name); + pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_MII; + } break; + case PHY_INTERFACE_MODE_RMII: + { + INF("%s: phy-mode: RMII\n", pPDev->name); + pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_RMII; + } break; + case PHY_INTERFACE_MODE_GMII: + { + INF("%s: phy-mode: GMII\n", pPDev->name); + pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_GMII; + } break; + case PHY_INTERFACE_MODE_SGMII: + { + INF("%s: phy-mode: SGMII\n", pPDev->name); + pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_SGMII; + } break; + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII: + { + INF("%s: 
phy-mode: RGMII\n", pPDev->name); + pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_RGMII; + } break; + default: + { + pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_RGMII; + pDrvDescPrivate->PhyInterface = PHY_INTERFACE_MODE_RGMII; + WRN("%s: Missing phy-mode in the Device Tree, using RGMII\n", pPDev->name); + } + } + + /* pinctrl-names */ + szTempString = NULL; + nRes = of_property_read_string(pDevNode, "pinctrl-names", &szTempString); + if ((0 == nRes) && (NULL != szTempString)) + { + DBG("%s: pinctrl-names: %s\n", pPDev->name , szTempString); + } + + /* PHY address*/ + pDrvDescPrivate->MacInfo.dwPhyAddr = PHY_AUTO_ADDR; + pDrvDescPrivate->pPhyNode = of_parse_phandle(pDevNode, "phy-handle", 0); + if ((strcmp(pDrvDescPrivate->MacInfo.szIdent, "CPSWG") == 0) && (NULL == pDrvDescPrivate->pPhyNode)) + { + struct device_node* pDevNodeNew = pDevNode; + pDevNodeNew = of_get_child_by_name(pDevNodeNew, "ethernet-ports"); + pDevNodeNew = of_get_child_by_name(pDevNodeNew, "port"); + pDrvDescPrivate->pPhyNode = of_parse_phandle(pDevNodeNew, "phy-handle", 0); + } + if (NULL != pDrvDescPrivate->pPhyNode) + { + nRes = of_property_read_u32(pDrvDescPrivate->pPhyNode, "reg", &dwTemp); + if (0 == nRes) + { + INF("%s: PHY mdio addr: %d\n", pPDev->name , dwTemp); + pDrvDescPrivate->MacInfo.dwPhyAddr = dwTemp; + } + } + else + { + int nLen; + const __be32* pPhyId; + pPhyId = of_get_property(pDevNode, "phy_id", &nLen); + + if (nLen == (sizeof(__be32) * 2)) + { + pDrvDescPrivate->pMdioNode = of_find_node_by_phandle(be32_to_cpup(pPhyId)); + pDrvDescPrivate->MacInfo.dwPhyAddr = be32_to_cpup(pPhyId+1); + } + else + { + INF("%s: Missing phy-handle in the Device Tree\n", pPDev->name); + } + } + + /* check if mdio node is sub-node and mac has own mdio bus */ + { + pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "mdio"); + if (NULL == pDrvDescPrivate->pMdioDevNode) + pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "mdio0"); + if (NULL == pDrvDescPrivate->pMdioDevNode) + pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "mdio1"); + if (NULL == pDrvDescPrivate->pMdioDevNode) + pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "phy"); + if (NULL == pDrvDescPrivate->pMdioDevNode) + pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "ethernet-phy"); + + if ((NULL == pDrvDescPrivate->pMdioDevNode) && (NULL != pDrvDescPrivate->pPhyNode)) + { + /* check if phy node is subnode and us first sub-node as node for mdio bus */ + struct device_node *pTempNode = of_get_parent(pDrvDescPrivate->pPhyNode); + if ((NULL != pTempNode) && (pTempNode == pDevNode)) + { + pDrvDescPrivate->pMdioDevNode = pDrvDescPrivate->pPhyNode; + } + else if ((NULL != pTempNode) && (of_get_parent(pTempNode) == pDevNode)) + { + pDrvDescPrivate->pMdioDevNode = pTempNode; + } + } + + if (NULL != pDrvDescPrivate->pMdioDevNode) + { + /* mdio bus is owned by current mac instance */ + pDrvDescPrivate->MacInfo.bNoMdioBus = false; + INF("%s: mac has mdio bus.\n", pPDev->name ); + } + else if ((NULL != pDrvDescPrivate->pPhyNode) || (NULL != pDrvDescPrivate->pMdioNode)) + { + /* mdio bus owned by another mac instance */ + pDrvDescPrivate->MacInfo.bNoMdioBus = true; + INF("%s: mac has no mdio bus, uses mdio bus of other instance.\n", pPDev->name); + } + else + { + /* legacy mode: no node for mdio bus in device tree defined */ + pDrvDescPrivate->MacInfo.bNoMdioBus = false; + INF("%s: handle mdio bus without device tree node.\n", pPDev->name ); + } + } + + /* PHY reset data */ + nRes = 
of_property_read_u32(pDevNode, "atemsys-phy-reset-duration", &pDrvDescPrivate->nPhyResetDuration); + if (nRes) pDrvDescPrivate->nPhyResetDuration = 0; + pDrvDescPrivate->nPhyResetGpioPin = of_get_named_gpio(pDevNode, "atemsys-phy-reset-gpios", 0); + nRes = of_property_read_u32(pDevNode, "atemsys-phy-reset-post-delay", &pDrvDescPrivate->nPhyResetPostDelay); + if (nRes) pDrvDescPrivate->nPhyResetPostDelay = 0; + pDrvDescPrivate->bPhyResetGpioActiveHigh = of_property_read_bool(pDevNode, "atemsys-phy-reset-active-high"); + + if ((0 != pDrvDescPrivate->nPhyResetDuration) && (pDrvDescPrivate->nPhyResetGpioPin != -EPROBE_DEFER) + && gpio_is_valid(pDrvDescPrivate->nPhyResetGpioPin)) + { + pDrvDescPrivate->MacInfo.bPhyResetSupported = true; + DBG("%s: PhyReset ready: GpioPin: %d; Duration %d, bActiveHigh %d, post delay %d\n", pPDev->name, + pDrvDescPrivate->nPhyResetGpioPin, pDrvDescPrivate->nPhyResetDuration, + pDrvDescPrivate->bPhyResetGpioActiveHigh, pDrvDescPrivate->nPhyResetPostDelay); + } + } + + /* insert device to array */ + for (dwIndex = 0; dwIndex < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; dwIndex++) + { + if (NULL == S_apDrvDescPrivate[dwIndex]) + { + S_apDrvDescPrivate[dwIndex] = pDrvDescPrivate; + pDrvDescPrivate->MacInfo.dwIndex = dwIndex; + break; + } + } + if (dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES) + { + ERR("%s: Maximum number of instances exceeded!\n", pPDev->name); + return EthernetDriverRemove(pPDev); + } + + /* start drivers of sub-nodes */ + if (strcmp(pDrvDescPrivate->MacInfo.szIdent, "CPSW") == 0 + || strcmp(pDrvDescPrivate->MacInfo.szIdent, "ICSS") == 0) + { + of_platform_populate(pDevNode, NULL, NULL, &pPDev->dev); + DBG("%s: start drivers of sub-nodes.\n", pPDev->name ); + } + if (strcmp(pDrvDescPrivate->MacInfo.szIdent, "CPSWG") == 0) + { + /* in subnode "ethernet-ports" start driver for "port@2" */ + struct device_node* pDevNodeNew = pDevNode; + pDevNodeNew = of_get_child_by_name(pDevNodeNew, "ethernet-ports"); + of_platform_populate(pDevNodeNew, NULL, NULL, &pPDev->dev); + DBG("%s: start drivers of sub-nodes.\n", pPDev->name ); + } + + /* prepare mutex for mdio */ + mutex_init(&pDrvDescPrivate->mdio_mutex); + mutex_init(&pDrvDescPrivate->mdio_order_mutex); + init_waitqueue_head(&pDrvDescPrivate->mdio_wait_queue); + pDrvDescPrivate->mdio_wait_queue_cnt = 0; + + return 0; +} + + +static int EthernetDriverRemove(struct platform_device* pPDev) +{ + struct net_device* pNDev = platform_get_drvdata(pPDev); + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev); + unsigned int i = 0; + + if ((NULL != pDrvDescPrivate->pPhyDev) || (NULL != pDrvDescPrivate->pMdioBus)) + { + ERR("%s: EthernetDriverRemove: PHY driver is still active!\n", pPDev->name); + } + + if (NULL != pDrvDescPrivate->pPhyRegulator) + { + regulator_disable(pDrvDescPrivate->pPhyRegulator); + } + + /* Decrement refcount */ + of_node_put(pDrvDescPrivate->pPhyNode); + + pm_runtime_put(&pPDev->dev); + pm_runtime_disable(&pPDev->dev); + + /* resets */ + if (NULL != pDrvDescPrivate->pResetCtl) + { + reset_control_assert(pDrvDescPrivate->pResetCtl); + } + for (i = 0; i < ATEMSYS_MAX_NUMBER_OF_CLOCKS; i++) + { + if (NULL != pDrvDescPrivate->clk_ids[i]) + { + clk_disable_unprepare(pDrvDescPrivate->clks[i]); + DBG("%s: Clock %s unprepared\n", pPDev->name, pDrvDescPrivate->clk_ids[i]); + } + } + mutex_destroy(&pDrvDescPrivate->mdio_mutex); + mutex_destroy(&pDrvDescPrivate->mdio_order_mutex); + + pinctrl_pm_select_sleep_state(&pPDev->dev); + + free_netdev(pNDev); + + INF("%s: atemsys driver removed: %s Instance 
%d\n", pPDev->name, pDrvDescPrivate->MacInfo.szIdent, pDrvDescPrivate->MacInfo.dwInstance); + + S_apDrvDescPrivate[pDrvDescPrivate->MacInfo.dwIndex] = NULL; + + if (NULL != pDrvDescPrivate->pDevDesc) + { + pDrvDescPrivate->pDevDesc->pPlatformDev = NULL; + pDrvDescPrivate->pDevDesc->pDrvDesc = NULL; + pDrvDescPrivate->pDevDesc = NULL; + } + + return 0; +} + +static int CleanUpEthernetDriverOnRelease(ATEMSYS_T_DEVICE_DESC* pDevDesc) +{ + ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL; + int nRes = -1; + unsigned int i = 0; + + if (pDevDesc == NULL) + { + return 0; + } + + for (i = 0; i < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; i++) + { + + pDrvDescPrivate = S_apDrvDescPrivate[i]; + if (NULL == pDrvDescPrivate) + { + continue; + } + + if (pDrvDescPrivate->pDevDesc == pDevDesc) + { + INF("%s: Cleanup: pDevDesc = 0x%px\n", pDrvDescPrivate->pPDev->name, pDevDesc); + + /* ensure mdio bus and PHY are down */ + if ((NULL != pDrvDescPrivate->pPhyDev) || (NULL != pDrvDescPrivate->pMdioBus)) + { + int timeout = 0; + for (timeout = 50; timeout-- < 0; msleep(100)) + { + nRes = StopPhyWithoutIoctlMdioHandling(pDrvDescPrivate->pPDev); + if (-EAGAIN != nRes) + break; + } + } + /* clean descriptor */ + pDrvDescPrivate->pDevDesc = NULL; + pDevDesc->pPlatformDev = NULL; + pDevDesc->pDrvDesc = NULL; + } + } + + return 0; +} + +static struct platform_device_id mac_devtype[] = { + { + .name = ATEMSYS_DT_DRIVER_NAME, + .driver_data = 0, + }, { + /* sentinel */ + } +}; + + +MODULE_DEVICE_TABLE(platform, mac_devtype); + +static struct platform_driver mac_driver = { + .driver = { + .name = ATEMSYS_DT_DRIVER_NAME, + .of_match_table = atemsys_ids, + }, + .id_table = mac_devtype, + .probe = EthernetDriverProbe, + .remove = EthernetDriverRemove, +}; +#endif /* INCLUDE_ATEMSYS_DT_DRIVER */ + + +#if (defined INCLUDE_ATEMSYS_PCI_DRIVER) +#define ATEMSYS_PCI_DRIVER_NAME "atemsys_pci" +#define PCI_VENDOR_ID_BECKHOFF 0x15EC + +static void PciDriverRemove(struct pci_dev* pPciDev) +{ + ATEMSYS_T_PCI_DRV_DESC_PRIVATE* pPciDrvDescPrivate = (ATEMSYS_T_PCI_DRV_DESC_PRIVATE*)pci_get_drvdata(pPciDev); + + if (NULL != pPciDrvDescPrivate) + { + /* remove references to the device */ + if (NULL != pPciDrvDescPrivate->pDevDesc) + { + pPciDrvDescPrivate->pDevDesc->pPcidev = NULL; + pPciDrvDescPrivate->pDevDesc->pPciDrvDesc = NULL; + pPciDrvDescPrivate->pDevDesc = NULL; + } + S_apPciDrvDescPrivate[pPciDrvDescPrivate->dwIndex] = NULL; + + kfree(pPciDrvDescPrivate); + } + + /* disable device */ + pci_disable_msi(pPciDev); + pci_release_regions(pPciDev); + pci_disable_pcie_error_reporting(pPciDev); + pci_disable_device(pPciDev); + + INF("%s: %s: disconnected\n", pci_name(pPciDev), ATEMSYS_PCI_DRIVER_NAME); +} + +static int PciDriverProbe(struct pci_dev* pPciDev, const struct pci_device_id* id) +{ + ATEMSYS_T_PCI_DRV_DESC_PRIVATE* pPciDrvDescPrivate = NULL; + int nRes = -ENODEV; + int dwIndex = 0; + + /* check if is wanted pci device */ + if ((strcmp(AllowedPciDevices, "PCI_ANY_ID") != 0) && (strstr(AllowedPciDevices, pci_name(pPciDev)) == NULL)) + { + /* don't attach driver */ + DBG("%s: PciDriverProbe: restricted by user parameters!\n", pci_name(pPciDev)); + + return -ENODEV; /* error code doesn't create error message */ + } + + /* setup pci device */ + nRes = pci_enable_device_mem(pPciDev); + if (nRes) + { + ERR("%s: PciDriverProbe: pci_enable_device_mem failed!\n", pci_name(pPciDev)); + goto Exit; + } + + nRes = DefaultPciSettings(pPciDev); + if (nRes) + { + ERR("%s: PciDriverProbe: DefaultPciSettings failed\n", pci_name(pPciDev)); + goto 
Exit; + } + pci_save_state(pPciDev); + pci_enable_pcie_error_reporting(pPciDev); + nRes = pci_request_regions(pPciDev, ATEMSYS_DEVICE_NAME); + if (nRes < 0) + { + ERR("%s: PciDriverProbe: device in use by another driver?\n", pci_name(pPciDev)); + nRes = -EBUSY; + goto Exit; + } + + /* create private desc */ + pPciDrvDescPrivate = (ATEMSYS_T_PCI_DRV_DESC_PRIVATE*)kzalloc(sizeof(ATEMSYS_T_PCI_DRV_DESC_PRIVATE), GFP_KERNEL); + if (NULL == pPciDrvDescPrivate) + { + nRes = -ENOMEM; + goto Exit; + } + pPciDrvDescPrivate->pPciDev = pPciDev; + + /* get Pci Info */ + pPciDrvDescPrivate->wVendorId = pPciDev->vendor; + pPciDrvDescPrivate->wDevice = pPciDev->device; + pPciDrvDescPrivate->wRevision = pPciDev->revision; + pPciDrvDescPrivate->wSubsystem_vendor = pPciDev->subsystem_vendor; + pPciDrvDescPrivate->wSubsystem_device = pPciDev->subsystem_device; + pPciDrvDescPrivate->nPciBus = pPciDev->bus->number; + pPciDrvDescPrivate->nPciDomain = pci_domain_nr(pPciDev->bus); + pPciDrvDescPrivate->nPciDev = PCI_SLOT(pPciDev->devfn); + pPciDrvDescPrivate->nPciFun = PCI_FUNC(pPciDev->devfn); + + INF("%s: %s: connected vendor:0x%04x device:0x%04x rev:0x%02x - sub_vendor:0x%04x sub_device:0x%04x\n", pci_name(pPciDev), ATEMSYS_PCI_DRIVER_NAME, + pPciDev->vendor, pPciDev->device, pPciDev->revision, + pPciDev->subsystem_vendor, pPciDev->subsystem_device); + + /* find the memory BAR */ + { + unsigned long ioBase = 0; + unsigned int dwIOLen = 0; + int i = 0; + int nBar = 0; + + for (i = 0; i < ATEMSYS_PCI_MAXBAR ; i++) + { + if (pci_resource_flags(pPciDev, i) & IORESOURCE_MEM) + { + /* IO area address */ + ioBase = pci_resource_start(pPciDev, i); + pPciDrvDescPrivate->aBars[nBar].qwIOMem = ioBase; + + /* IO area length */ + dwIOLen = pci_resource_len(pPciDev, i); + pPciDrvDescPrivate->aBars[nBar].dwIOLen = dwIOLen; + + nBar++; + } + } + + if (nBar == 0) + { + WRN("%s: PciDriverProbe: No memory BAR found\n", pci_name(pPciDev)); + } + + pPciDrvDescPrivate->nBarCnt = nBar; + } + + /* insert device to array */ + for (dwIndex = 0; dwIndex < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; dwIndex++) + { + if (NULL == S_apPciDrvDescPrivate[dwIndex]) + { + S_apPciDrvDescPrivate[dwIndex] = pPciDrvDescPrivate; + pPciDrvDescPrivate->dwIndex = dwIndex; + break; + } + } + if (ATEMSYS_MAX_NUMBER_DRV_INSTANCES <= dwIndex) + { + ERR("%s: PciDriverProbe: insert device to array failed\n", pci_name(pPciDev)); + nRes = -EBUSY; + goto Exit; + } + + pci_set_drvdata(pPciDev, pPciDrvDescPrivate); + + nRes = 0; /* OK */ +Exit: + if (nRes != 0 /* OK */) + { + if (NULL != pPciDrvDescPrivate) + { + kfree(pPciDrvDescPrivate); + } + } + return nRes; +} + +typedef struct _ATEMSYS_PCI_INFO { +} ATEMSYS_PCI_INFO; + +static const struct _ATEMSYS_PCI_INFO oAtemsysPciInfo = { +}; + + +static const struct pci_device_id pci_devtype[] = { + { + /* all devices of class PCI_CLASS_NETWORK_ETHERNET */ + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00), + .driver_data = (kernel_ulong_t)&oAtemsysPciInfo + }, + { + /* all devices with BECKHOFF vendor id */ + .vendor = PCI_VENDOR_ID_BECKHOFF, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&oAtemsysPciInfo + }, + {} +}; + +MODULE_DEVICE_TABLE(pci, pci_devtype); +static struct pci_driver oPciDriver = { + .name = ATEMSYS_PCI_DRIVER_NAME, + .id_table = pci_devtype, + .probe = PciDriverProbe, + .remove = PciDriverRemove, +}; + +#endif /* (defined 
INCLUDE_ATEMSYS_PCI_DRIVER) */ + + +/* + * Initialize the module - Register the character device + */ +int init_module(void) +{ +#if (defined CONFIG_XENO_COBALT) + + int major = rtdm_dev_register(&device); + if (major < 0) + { + INF("Failed to register %s (err: %d)\n", device.label, major); + return major; + } +#else + + /* Register the character device */ + int major = register_chrdev(MAJOR_NUM, ATEMSYS_DEVICE_NAME, &Fops); + if (major < 0) + { + INF("Failed to register %s (err: %d)\n", + ATEMSYS_DEVICE_NAME, major); + return major; + } +#endif /* CONFIG_XENO_COBALT */ + + /* Register Pci and Platform Driver */ +#if (defined INCLUDE_ATEMSYS_DT_DRIVER) + memset(S_apDrvDescPrivate, 0, ATEMSYS_MAX_NUMBER_DRV_INSTANCES * sizeof(ATEMSYS_T_DRV_DESC_PRIVATE*)); + platform_driver_register(&mac_driver); +#if (defined CONFIG_XENO_COBALT) + memset(&S_oAtemsysWorkerThreadDesc, 0, sizeof(ATEMSYS_T_WORKER_THREAD_DESC)); + mutex_init(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); + S_oAtemsysWorkerThreadDesc.etx_thread = kthread_create(AtemsysWorkerThread, (void*)&S_oAtemsysWorkerThreadDesc, "Atemsys_WorkerThread"); + if (IS_ERR(S_oAtemsysWorkerThreadDesc.etx_thread)) + { + /* kthread_create() returns an ERR_PTR() on failure, never NULL */ + ERR("Cannot create kthread for AtemsysWorkerThread\n"); + S_oAtemsysWorkerThreadDesc.etx_thread = NULL; + } + else + { + wake_up_process(S_oAtemsysWorkerThreadDesc.etx_thread); + } +#endif /*#if (defined CONFIG_XENO_COBALT)*/ +#endif + +#if (defined INCLUDE_ATEMSYS_PCI_DRIVER) + memset(S_apPciDrvDescPrivate, 0, ATEMSYS_MAX_NUMBER_DRV_INSTANCES * sizeof(ATEMSYS_T_PCI_DRV_DESC_PRIVATE*)); + + if (0 == strcmp(AllowedPciDevices, "")) + { + DBG("Atemsys PCI driver not registered\n"); + } + else + { + if (0 != pci_register_driver(&oPciDriver)) + { + INF("Register Atemsys PCI driver failed!\n"); + } + } +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,4,0)) + S_pDevClass = class_create(ATEMSYS_DEVICE_NAME); +#else + S_pDevClass = class_create(THIS_MODULE, ATEMSYS_DEVICE_NAME); +#endif + if (IS_ERR(S_pDevClass)) + { + INF("class_create failed\n"); + return -1; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) + S_pDev = class_device_create(S_pDevClass, NULL, MKDEV(MAJOR_NUM, 0), NULL, ATEMSYS_DEVICE_NAME); +#else + S_pDev = device_create(S_pDevClass, NULL, MKDEV(MAJOR_NUM, 0), NULL, ATEMSYS_DEVICE_NAME); +#endif + +#if (defined __arm__) || (defined __aarch64__) + { + int nRetVal = 0; + S_pPlatformDev = platform_device_alloc("atemsys_PDev", MKDEV(MAJOR_NUM, 0)); + S_pPlatformDev->dev.parent = S_pDev; + + nRetVal = platform_device_add(S_pPlatformDev); + if (nRetVal != 0) { + ERR("platform_device_add failed.
return=%d\n", nRetVal); + } + + #if (defined __arm__) || (defined CONFIG_ZONE_DMA32) + S_pPlatformDev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + if (!S_pPlatformDev->dev.dma_mask) + { + S_pPlatformDev->dev.dma_mask = &S_pPlatformDev->dev.coherent_dma_mask; + } + #endif + } +#else + S_pPlatformDev = NULL; +#endif + + if (IS_ERR(S_pDev)) + { + INF("device_create failed\n"); + return -1; + } + + S_pDev->coherent_dma_mask = DMA_BIT_MASK(32); + if (!S_pDev->dma_mask) + { + S_pDev->dma_mask = &S_pDev->coherent_dma_mask; + } + +#if (defined CONFIG_OF) + OF_DMA_CONFIGURE(S_pDev,S_pDev->of_node); +#endif + + INIT_LIST_HEAD(&S_DevNode.list); + + INF("%s v%s loaded\n", ATEMSYS_DEVICE_NAME, ATEMSYS_VERSION_STR); + return 0; +} + +/* + * Cleanup - unregister the appropriate file from /proc + */ +void cleanup_module(void) +{ + INF("%s v%s unloaded\n", ATEMSYS_DEVICE_NAME, ATEMSYS_VERSION_STR); + + /* Unregister Pci and Platform Driver */ +#if (defined INCLUDE_ATEMSYS_DT_DRIVER) + platform_driver_unregister(&mac_driver); +#if (defined CONFIG_XENO_COBALT) + S_oAtemsysWorkerThreadDesc.bWorkerTaskShutdown = true; + for (;;) + { + if (!S_oAtemsysWorkerThreadDesc.bWorkerTaskRunning) + { + break; + } + + msleep(100); + } + mutex_destroy(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); +#endif /*#if (defined CONFIG_XENO_COBALT)*/ +#endif + +#if (defined INCLUDE_ATEMSYS_PCI_DRIVER) + if (0 != strcmp(AllowedPciDevices, "")) + { + pci_unregister_driver(&oPciDriver); + } +#endif + +#if (defined __arm__) || (defined __aarch64__) + if (NULL != S_pPlatformDev) + { + platform_device_del(S_pPlatformDev); + platform_device_put(S_pPlatformDev); + S_pPlatformDev = NULL; + } +#endif + +#if (defined CONFIG_OF) + device_release_driver(S_pDev); //see device_del() -> bus_remove_device() +#endif + + device_destroy(S_pDevClass, MKDEV(MAJOR_NUM, 0)); + class_destroy(S_pDevClass); + +#if (defined CONFIG_XENO_COBALT) + rtdm_dev_unregister(&device); +#else + unregister_chrdev(MAJOR_NUM, ATEMSYS_DEVICE_NAME); +#endif /* CONFIG_XENO_COBALT */ +} + diff --git a/kernel/drivers/misc/atemsys-main/atemsys.h b/kernel/drivers/misc/atemsys-main/atemsys.h new file mode 100644 index 0000000..d17c370 --- /dev/null +++ b/kernel/drivers/misc/atemsys-main/atemsys.h @@ -0,0 +1,428 @@ +/*----------------------------------------------------------------------------- + * atemsys.h + * Copyright (c) 2009 - 2020 acontis technologies GmbH, Ravensburg, Germany + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Response Paul Bussmann + * Description atemsys.ko headerfile + * Note: This header is also included by userspace! + + * Changes: + * + * V1.0.00 - Inital, PCI/PCIe only. + * V1.1.00 - PowerPC tweaks. + * Support for SoC devices (no PCI, i.e. Freescale eTSEC). + * Support for current linux kernel's (3.0). Removed deprecated code. + * V1.2.00 - 64 bit support. Compat IOCTL's for 32-Bit usermode apps. + * V1.2.01 - request_irq() sometimes failed -> Map irq to virq under powerpc. + * V1.2.02 - Support for current Linux kernel (3.8.0) + * V1.2.03 - Support for current Linux kernel (3.8.13) on armv7l (beaglebone) + * V1.2.04 - Use dma_alloc_coherent for arm, because of DMA memory corruption on + * Xilinx Zynq. + * V1.2.05 - OF Device Tree support for Xilinx Zynq (VIRQ mapping) + * V1.2.06 - Wrong major version. 
+ * V1.2.07 - Tolerate closing, e.g. due to system()-calls. + * V1.2.08 - Add VM_DONTCOPY to prevent crash on system()-calls + * V1.2.09 - Apply second controller name change in dts (standard GEM driver for Xilinx Zynq) to avoid default driver loading. + * V1.2.10 - Removed IO address alignment to support R6040 + * V1.2.11 - Fix lockup in device_read (tLinkOsIst if NIC in interrupt mode) on dev_int_disconnect + * V1.2.12 - Fix underflow in dev_disable_irq() when more than one interrupt is pending because of disable_irq_nosync usage + * V1.2.13 - Fix usage of x64 PCI physical addresses + * V1.2.14 - Changes for use with kernels beginning from 2.6.18 + * V1.2.15 - Add udev auto-loading support via DTB + * V1.2.16 - Add interrupt mode support for Xenomai 3 (Cobalt) + * V1.3.01 - Add IOCTL_MOD_GETVERSION + * V1.3.02 - Add support for kernel >= 4.11.00 + * V1.3.03 - Fix IOCTL_MOD_GETVERSION + * V1.3.04 - Fix interrupt deadlock in Xenomai 2 + * V1.3.05 - Use correct PCI domain + * V1.3.06 - Use rtdm_printk for Cobalt, add check if dev_int_disconnect was successful + * V1.3.07 - Remove IOCTL_PCI_RELEASE_DEVICE warnings due to untracked IOCTL_PCI_CONF_DEVICE + * V1.3.08 - Add support for kernel >= 4.13.00 + * V1.3.09 - Add support for PRU ICSS in Device Tree + * V1.3.10 - Fix compilation on Ubuntu 18.04, Kernel 4.9.90, Xenomai 3.0.6 x64 Cobalt + * V1.3.11 - Add enable access to ARM cycle count register(CCNT) + * V1.3.12 - Add atemsys API version selection + * V1.3.13 - Add ARM64 support + * V1.3.14 - Fix edge type interrupt (enabled if Kernel >= 3.4.1, because exported irq_to_desc needed) + * Fix Xenomai Cobalt interrupt mode + * V1.3.15 - Fix crash while loading kernel module on ARM64 + * Add support for kernel >= 5.0.00 + * V1.3.16 - Handle API changes at kernel >= 4.18.00 + * Fix ARM DMA allocation for PCIe + * V1.4.01 - Register atemsys as Device Tree Ethernet driver "atemsys" + * and use Linux PHY and Mdio-Bus Handling + * V1.4.02 - Device Tree Ethernet driver: improved robustness for unbinding the linux driver + * Fix for kernel >= 5.0.00 with device tree, + * Fix ARM/AARCH64 DMA configuration for PCIe and + * Fix occasional insmod Kernel Oops + * V1.4.03 - Add log level (insmod atemsys loglevel=6) analogous to the kernel log level + * V1.4.04 - Fix Device Tree Ethernet driver robustness + * Add Device Tree Ethernet driver support for ICSS + * V1.4.05 - Add IOMMU/Vt-D support + * V1.4.06 - Fix IOMMU/Vt-D support for ARM + * Fix Mdio-Bus timeout for kernel >= 5.0.00 + * V1.4.07 - Add support for imx8 / FslFec 64bit + * V1.4.08 - Fix Xilinx Ultrascale + * Fix cleanup of Device Tree Ethernet driver + * V1.4.09 - Add atemsys as PCI driver for Intel, Realtek and Beckhoff + * Add memory allocation and mapping on platform / PCI driver device + * Fix PHY driver for FslFec 64Bit + * V1.4.10 - Fix Device Tree Ethernet driver: Mdio/Phy sub-node, test 4.6.x kernel + * Add Device Tree Ethernet driver support for GEM + * Fix PCI driver: force DMA to 32 bit + * V1.4.11 - Fix for kernel >= 5.5.00 with device tree, + * Fix Device Tree Ethernet driver support for DW3504 + * Fix PCI driver: only for kernel >= 4.4.00 + * V1.4.12 - Fix for kernel >= 5.11.00, + * Add support for 64Bit IO Memory of PCI card + * V1.4.13 - Fix for kernel <= 3.16.00, + * Add HAVE_ACCESS_OK_TYPE define to handle non-mainstream API variance + * Connect to interrupt via bound device tree - platform device + * V1.4.14 - Fix for arm/aarch64 kernel >= 5.10.00, + * Add support for 64Bit DMA Memory + * Add support for PCI DMA address translation + * V1.4.15
- Fix API version IO Controls + * V1.4.16 - Fix Xenomai3 on arm, + * Add support for Device Tree Ethernet driver and PCI driver with Xenomai3 + * Fix PCI DMA address translation on arm + * V1.4.17 - Fix dma_set_mask_and_coherent() missing in kernels under 3.12.55 + * V1.4.18 - Remove obsolete ARM cycle count register(CCNT) + * Fix PCI driver do registration for all Ethernet network adapters + * Add modul parameter AllowedPciDevices to adjust PCI driver, AllowedPciDevices="" will turn off PCI driver, + * (insmod atemsys AllowedPciDevices="0000:01:00.0;0000:02:00.0") + * V1.4.19 - Fix Xenomai2 ARMv8 32Bit + * V1.4.20 - Fix support for CMA for kernel > 4.9.00 + * V1.4.21 - Add Device Tree Ethernet driver support for CPSW + * Add Device Tree Ethernet driver phy reset + * Fix Device Tree Ethernet on Xenomai3 + * Add HAVE_IRQ_TO_DESC define to handle non-mainstream API variance + * V1.4.22 - Fix Build Warnings + * Fix kernel config depending irq structures + * Fix kernel version 4.12 to 4.15 for handle of dma_coherent bit + * Add IOMMU support, new mapping to userspace active and tested for kernel > 5.4, + * use old mapping with ATEMSYS_LEGACY_DMA=1 define or + * activate new mapping with ATEMSYS_LEGACY_DMA=0 define for older kernel + * V1.4.23 - Fix PCI bars + * V1.4.24 - Add Device Tree Ethernet driver support for STM32mp135 + * V1.4.25 - Add IOCTL_INT_CPU_AFFINITY + * Add Device Tree Ethernet driver support for RockChip + * V1.4.26 - Fix for arm/aarch64 kernel >= 6.00.00, + * Fix version of_dma_configure + * Add ATEMSYS_IOCTL_IOMEM_CMD for Kernel mode access to protected registers + * Add ATEMSYS_IOCTL_CPSWG_CMD to configure K3_UDMA_CPSWG Channels, Flows and Rings + * V1.4.27 - Fix ATEMSYS_IOCTL_CPSWG_CMD kernel version, + * Add Device Tree Ethernet driver support for CPSWG + * V1.4.28 - Fix for PCIe compatibility with Atemsys before V1.3.5, + * - Fix for Kernel > 6.05.00 + * atemsys is shared across EC-Master V2.7+ + + *----------------------------------------------------------------------------*/ + +#ifndef ATEMSYS_H +#define ATEMSYS_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#ifndef EC_ATEMSYSVERSION +#define EC_ATEMSYSVERSION(a,b,c) (((a)<<2*8)+((b)<<1*8)+((c)<<0*8)) +#endif + +#define ATEMSYS_VERSION_STR "1.4.28" +#define ATEMSYS_VERSION_NUM 1,4,28 +#if (defined ATEMSYS_C) +#define USE_ATEMSYS_API_VERSION EC_ATEMSYSVERSION(1,4,28) +#endif + +/* support selection */ + +#if (USE_ATEMSYS_API_VERSION < EC_ATEMSYSVERSION(1,3,5)) || (!defined USE_ATEMSYS_API_VERSION) +/* till v1.3.04 */ +#define ATEMSYS_T_PCI_SELECT_DESC ATEMSYS_T_PCI_SELECT_DESC_v1_0_00 +#define ATEMSYS_T_PCI_MEMBAR ATEMSYS_T_PCI_MEMBAR_v1_0_00 +#define ATEMSYS_IOCTL_PCI_FIND_DEVICE ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_0_00 +#define ATEMSYS_IOCTL_PCI_CONF_DEVICE ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_0_00 + +#elif (USE_ATEMSYS_API_VERSION < EC_ATEMSYSVERSION(1,4,12)) +/* v1.3.05 till v1.4.11 */ +#define ATEMSYS_T_PCI_SELECT_DESC ATEMSYS_T_PCI_SELECT_DESC_v1_3_05 +#define ATEMSYS_T_PCI_MEMBAR ATEMSYS_T_PCI_MEMBAR_v1_3_05 +#define ATEMSYS_IOCTL_PCI_FIND_DEVICE ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_05 +#define ATEMSYS_IOCTL_PCI_CONF_DEVICE ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_05 + +#else /* v1.4.12 and later */ +#define ATEMSYS_T_PCI_SELECT_DESC ATEMSYS_T_PCI_SELECT_DESC_v1_4_12 +#define ATEMSYS_T_PCI_MEMBAR ATEMSYS_T_PCI_MEMBAR_v1_4_12 +#define ATEMSYS_IOCTL_PCI_FIND_DEVICE ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_4_12 +#define ATEMSYS_IOCTL_PCI_CONF_DEVICE ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_4_12 +#endif + +#define DRIVER_SUCCESS 0 + 
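+/*
+ * Usage sketch (illustrative only, not part of the driver; error handling
+ * shortened): a userspace client opens the device file and queries the
+ * module version before issuing further ioctls. ATEMSYS_FILE_NAME and
+ * ATEMSYS_IOCTL_MOD_GETVERSION are defined below in this header.
+ *
+ *   #include <fcntl.h>
+ *   #include <stdio.h>
+ *   #include <unistd.h>
+ *   #include <sys/ioctl.h>
+ *   #include "atemsys.h"
+ *
+ *   int main(void)
+ *   {
+ *       __u32 dwVersion = 0;
+ *       int fd = open(ATEMSYS_FILE_NAME, O_RDWR);  /* /dev/atemsys or /dev/rtdm/atemsys */
+ *       if (fd < 0)
+ *           return 1;
+ *       if (ioctl(fd, ATEMSYS_IOCTL_MOD_GETVERSION, &dwVersion) == 0)
+ *           printf("atemsys version 0x%08x\n", (unsigned int)dwVersion);
+ *       close(fd);
+ *       return 0;
+ *   }
+ */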
+/* + * The major device number. We can't rely on dynamic + * registration any more, because ioctls need to know + * it. + */ +#define MAJOR_NUM 101 + +#define ATEMSYS_IOCTL_PCI_RELEASE_DEVICE _IO(MAJOR_NUM, 2) +#define ATEMSYS_IOCTL_INT_CONNECT _IOW(MAJOR_NUM, 3, __u32) +#define ATEMSYS_IOCTL_INT_DISCONNECT _IOW(MAJOR_NUM, 4, __u32) +#define ATEMSYS_IOCTL_INT_INFO _IOR(MAJOR_NUM, 5, ATEMSYS_T_INT_INFO) +#define ATEMSYS_IOCTL_MOD_GETVERSION _IOR(MAJOR_NUM, 6, __u32) +#define ATEMSYS_IOCTL_CPU_ENABLE_CYCLE_COUNT _IOW(MAJOR_NUM, 7, __u32) +#define ATEMSYS_IOCTL_GET_MAC_INFO _IOWR(MAJOR_NUM, 8, ATEMSYS_T_MAC_INFO) +#define ATEMSYS_IOCTL_PHY_START_STOP _IOWR(MAJOR_NUM, 9, ATEMSYS_T_PHY_START_STOP_INFO) +#define ATEMSYS_IOCTL_GET_MDIO_ORDER _IOWR(MAJOR_NUM, 10, ATEMSYS_T_MDIO_ORDER) +#define ATEMSYS_IOCTL_RETURN_MDIO_ORDER _IOWR(MAJOR_NUM, 11, ATEMSYS_T_MDIO_ORDER) +#define ATEMSYS_IOCTL_GET_PHY_INFO _IOWR(MAJOR_NUM, 12, ATEMSYS_T_PHY_INFO) +#define ATEMSYS_IOCTL_MOD_SET_API_VERSION _IOR(MAJOR_NUM, 13, __u32) +#define ATEMSYS_IOCTL_PHY_RESET _IOWR(MAJOR_NUM, 14, __u32) +#define ATEMSYS_IOCTL_INT_SET_CPU_AFFINITY _IOWR(MAJOR_NUM, 15, __u32) +#define ATEMSYS_IOCTL_IOMEM_CMD _IOWR(MAJOR_NUM, 16, ATEMSYS_T_IOMEM_CMD) +#define ATEMSYS_IOCTL_CPSWG_CMD _IOWR(MAJOR_NUM, 17, ATEMSYS_T_CPSWG_CMD) + +/* support legacy source code */ +#define IOCTL_PCI_FIND_DEVICE ATEMSYS_IOCTL_PCI_FIND_DEVICE +#define IOCTL_PCI_CONF_DEVICE ATEMSYS_IOCTL_PCI_CONF_DEVICE +#define IOCTL_PCI_RELEASE_DEVICE ATEMSYS_IOCTL_PCI_RELEASE_DEVICE +#define IOCTL_INT_CONNECT ATEMSYS_IOCTL_INT_CONNECT +#define IOCTL_INT_DISCONNECT ATEMSYS_IOCTL_INT_DISCONNECT +#define IOCTL_INT_INFO ATEMSYS_IOCTL_INT_INFO +#define IOCTL_MOD_GETVERSION ATEMSYS_IOCTL_MOD_GETVERSION +#define IOCTL_CPU_ENABLE_CYCLE_COUNT ATEMSYS_IOCTL_CPU_ENABLE_CYCLE_COUNT +#define IOCTL_PCI_FIND_DEVICE_v1_3_04 ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_04 +#define IOCTL_PCI_CONF_DEVICE_v1_3_04 ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_04 +#define USE_PCI_INT ATEMSYS_USE_PCI_INT +#define INT_INFO ATEMSYS_T_INT_INFO +#define PCI_SELECT_DESC ATEMSYS_T_PCI_SELECT_DESC + + +/* + * The name of the device driver + */ +#define ATEMSYS_DEVICE_NAME "atemsys" + +/* CONFIG_XENO_COBALT/CONFIG_XENO_MERCURY defined in xeno_config.h (may not be available when building atemsys.ko) */ +#if (!defined CONFIG_XENO_COBALT) && (!defined CONFIG_XENO_MERCURY) && (defined CONFIG_XENO_VERSION_MAJOR) && (CONFIG_XENO_VERSION_MAJOR >= 3) +#define CONFIG_XENO_COBALT +#endif + +/* + * The name of the device file + */ +#ifdef CONFIG_XENO_COBALT +#define ATEMSYS_FILE_NAME "/dev/rtdm/" ATEMSYS_DEVICE_NAME +#else +#define ATEMSYS_FILE_NAME "/dev/" ATEMSYS_DEVICE_NAME +#endif /* CONFIG_XENO_COBALT */ + +#define ATEMSYS_PCI_MAXBAR (6) +#define ATEMSYS_USE_PCI_INT (0xFFFFFFFF) /* Query the selected PCI device for the assigned IRQ number */ + +typedef struct +{ + __u32 dwInterrupt; +} __attribute__((packed)) ATEMSYS_T_INT_INFO; + + +/* v1_4_12 */ + +#define ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_4_12 _IOWR(MAJOR_NUM, 0, ATEMSYS_T_PCI_SELECT_DESC_v1_4_12) +#define ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_4_12 _IOWR(MAJOR_NUM, 1, ATEMSYS_T_PCI_SELECT_DESC_v1_4_12) + +typedef struct +{ + __u64 qwIOMem; /* [out] IO Memory of PCI card (physical address) */ + __u32 dwIOLen; /* [out] Length of the IO Memory area*/ +} __attribute__((packed)) ATEMSYS_T_PCI_MEMBAR_v1_4_12; + +typedef struct +{ + __s32 nVendID; /* [in] vendor ID */ + __s32 nDevID; /* [in] device ID */ + __s32 nInstance; /* [in] instance to look for (0 is the first instance) */ + 
__s32 nPciBus; /* [in/out] bus */ + __s32 nPciDev; /* [in/out] device */ + __s32 nPciFun; /* [in/out] function */ + __s32 nBarCnt; /* [out] Number of entries in aBar */ + __u32 dwIrq; /* [out] IRQ or USE_PCI_INT */ + ATEMSYS_T_PCI_MEMBAR_v1_4_12 aBar[ATEMSYS_PCI_MAXBAR]; /* [out] IO memory */ + __s32 nPciDomain; /* [in/out] domain */ +} __attribute__((packed)) ATEMSYS_T_PCI_SELECT_DESC_v1_4_12; + + +/* v1_3_05 */ + +#define ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_05 _IOWR(MAJOR_NUM, 0, ATEMSYS_T_PCI_SELECT_DESC_v1_3_05) +#define ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_05 _IOWR(MAJOR_NUM, 1, ATEMSYS_T_PCI_SELECT_DESC_v1_3_05) + +typedef struct +{ + __u32 dwIOMem; /* [out] IO Memory of PCI card (physical address) */ + __u32 dwIOLen; /* [out] Length of the IO Memory area*/ +} __attribute__((packed)) ATEMSYS_T_PCI_MEMBAR_v1_3_05; + +typedef struct +{ + __s32 nVendID; /* [in] vendor ID */ + __s32 nDevID; /* [in] device ID */ + __s32 nInstance; /* [in] instance to look for (0 is the first instance) */ + __s32 nPciBus; /* [in/out] bus */ + __s32 nPciDev; /* [in/out] device */ + __s32 nPciFun; /* [in/out] function */ + __s32 nBarCnt; /* [out] Number of entries in aBar */ + __u32 dwIrq; /* [out] IRQ or USE_PCI_INT */ + ATEMSYS_T_PCI_MEMBAR_v1_3_05 aBar[ATEMSYS_PCI_MAXBAR]; /* [out] IO memory */ + __s32 nPciDomain; /* [in/out] domain */ +} __attribute__((packed)) ATEMSYS_T_PCI_SELECT_DESC_v1_3_05; + + +/* v1_0_00 */ + +#define ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_04 ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_0_00 +#define ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_04 ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_0_00 + +#define ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_0_00 _IOWR(MAJOR_NUM, 0, ATEMSYS_T_PCI_SELECT_DESC_v1_0_00) +#define ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_0_00 _IOWR(MAJOR_NUM, 1, ATEMSYS_T_PCI_SELECT_DESC_v1_0_00) + +typedef struct +{ + __u32 dwIOMem; /* [out] IO Memory of PCI card (physical address) */ + __u32 dwIOLen; /* [out] Length of the IO Memory area*/ +} __attribute__((packed)) ATEMSYS_T_PCI_MEMBAR_v1_0_00; + +typedef struct +{ + __s32 nVendID; /* [in] vendor ID */ + __s32 nDevID; /* [in] device ID */ + __s32 nInstance; /* [in] instance to look for (0 is the first instance) */ + __s32 nPciBus; /* [in/out] bus */ + __s32 nPciDev; /* [in/out] device */ + __s32 nPciFun; /* [in/out] function */ + __s32 nBarCnt; /* [out] Number of entries in aBar */ + __u32 dwIrq; /* [out] IRQ or USE_PCI_INT */ + ATEMSYS_T_PCI_MEMBAR_v1_0_00 aBar[ATEMSYS_PCI_MAXBAR]; /* [out] IO memory */ +} __attribute__((packed)) ATEMSYS_T_PCI_SELECT_DESC_v1_0_00; + +/* must match EC_T_PHYINTERFACE in EcLink.h */ +typedef enum _EC_T_PHYINTERFACE_ATEMSYS +{ + eATEMSYS_PHY_FIXED_LINK = 1 << 0, + eATEMSYS_PHY_MII = 1 << 1, + eATEMSYS_PHY_RMII = 1 << 2, + eATEMSYS_PHY_GMII = 1 << 3, + eATEMSYS_PHY_SGMII = 1 << 4, + eATEMSYS_PHY_RGMII = 1 << 5, + eATEMSYS_PHY_OSDRIVER = 1 << 6, + + /* Borland C++ datatype alignment correction */ + eATEMSYS_PHY_BCppDummy = 0xFFFFFFFF +} ATEMSYS_T_PHYINTERFACE; + + +#define EC_LINKOS_IDENT_MAX_LEN 0x20 /* must match EcLink.h */ +#define PHY_AUTO_ADDR (__u32) -1 /* must match EcPhy.h */ +typedef struct +{ + char szIdent[EC_LINKOS_IDENT_MAX_LEN]; /* [out] Name of Mac e.g. "FslFec" */ + __u32 dwInstance; /* [out] Number of used Mac (in official order!) 
*/ + __u32 dwIndex; /* [in] Index of Mac in atemsys handling */ + __u64 qwRegAddr; /* [in] Hardware register address of mac */ + __u32 dwRegSize; /* [in] Hardware register size of mac */ + __u32 dwStatus; /* [in] Status of mac according to device tree */ + ATEMSYS_T_PHYINTERFACE ePhyMode; /* [in] Phy mac connection mode mii, rmii, rgmii, gmii, sgmii defined in SDK/INC/EcLink.h */ + __u32 bNoMdioBus; /* [in] Mac doesn't need to run its own Mdio Bus */ + __u32 dwPhyAddr; /* [in] Address of PHY on mdio bus */ + __u32 dwErrorCode; /* [in] Error code defined in SDK/INC/EcError.h */ + __u32 bPhyResetSupported; /* [in] Device tree has data for phy reset */ + __u32 dwReserved[15]; +} __attribute__((packed)) ATEMSYS_T_MAC_INFO; + +typedef struct +{ + __u32 dwIndex; /* [out] Index of Mac in atemsys handling */ + __u32 bInUse; /* [in] Descriptor is in use */ + __u32 bInUseByIoctl; /* [in] Descriptor is in use by ATEMSYS_IOCTLs */ + __u32 bWriteOrder; /* [in/out] Mdio operation - write = 1, read = 0 */ + __u16 wMdioAddr; /* [in/out] Current address */ + __u16 wReg; /* [in/out] Current register */ + __u16 wValue; /* [in/out] Current value to read or write */ + __u32 dwTimeoutMsec; /* [in] Timeout in milliseconds */ + __u32 dwErrorCode; /* [in] Error code defined in SDK/INC/EcError.h */ + __u32 dwReserved[4]; +} __attribute__((packed)) ATEMSYS_T_MDIO_ORDER; + +typedef struct +{ + __u32 dwIndex; /* [out] Index of Mac in atemsys handling */ + __u32 dwLink; /* [in] Link defined in /linux/phy.h */ + __u32 dwDuplex; /* [in] Duplex defined in /linux/phy.h (0x00: half, 0x01: full, 0xFF: unknown) */ + __u32 dwSpeed; /* [in] Speed defined in /linux/phy.h */ + __u32 bPhyReady; /* [in] Mdio Bus is currently not active */ + __u32 dwErrorCode; /* [in] Error code defined in SDK/INC/EcError.h */ + __u32 dwReserved[4]; +} __attribute__((packed)) ATEMSYS_T_PHY_INFO; + +typedef struct +{ + __u32 dwIndex; /* [out] Index of Mac in atemsys handling */ + __u32 bStart; /* [out] Start = 1, stop = 0 */ + __u32 dwErrorCode; /* [in] Error code defined in SDK/INC/EcError.h */ + __u32 dwReserved[4]; +} __attribute__((packed)) ATEMSYS_T_PHY_START_STOP_INFO; + + + + +typedef struct +{ + __u32 dwIndex; /* [out] Index of Mac in atemsys handling */ + __u32 dwCmd; /* [out] Id of the command */ +#define ATEMSYS_IOMEM_CMD_MAP_PERMANENT 1 +#define ATEMSYS_IOMEM_CMD_UNMAP_PERMANENT 2 +#define ATEMSYS_IOMEM_CMD_READ 3 +#define ATEMSYS_IOMEM_CMD_WRITE 4 + + __u64 qwPhys; /* [out] physical memory address */ + __u32 dwSize; /* [out] size of the memory area */ + __u32 dwOffset; /* [out] memory offset for read and write command */ + __u32 dwDataSize; /* [out] data size for read and write command */ + __u32 dwData[4]; /* [in/out] data buffer for read and write command */ +} __attribute__((packed)) ATEMSYS_T_IOMEM_CMD; + + +typedef struct +{ + __u32 dwIndex; /* [out] Index of Mac in atemsys handling */ + __u32 dwChannelIdx; /* [out] Index of the internal channel handling */ + __u32 dwCmd; /* [out] Id of the command */ +#define ATEMSYS_CPSWG_CMD_CONFIG_TX 1 +#define ATEMSYS_CPSWG_CMD_CONFIG_RX 2 +#define ATEMSYS_CPSWG_CMD_ENABLE_TX 3 +#define ATEMSYS_CPSWG_CMD_ENABLE_RX 4 +#define ATEMSYS_CPSWG_CMD_DISABLE_TX 5 +#define ATEMSYS_CPSWG_CMD_DISABLE_RX 6 +#define ATEMSYS_CPSWG_CMD_RELEASE_TX 7 +#define ATEMSYS_CPSWG_CMD_RELEASE_RX 8 + + __u64 qwRingDma; /* [in] 1. ring physical memory address */ + __u32 dwRingSize; /* [in/out] 1. ring size / number of elements */ + __u32 dwRingId; /* [in] 1. ring index */ + __u64 qwRingFdqDma; /* [in] 2.
ring physical memory address */ + __u32 dwRingFdqSize; /* [in/out] 2. ring size / number of elements */ + __u32 dwRingFdqId; /* [in] 2. ring index */ + __u32 dwChanId; /* [in] channel id */ + __u32 dwFlowIdBase; /* [in] flow id base */ + __u32 dwReserved[32]; +} __attribute__((packed)) ATEMSYS_T_CPSWG_CMD; + +#endif /* ATEMSYS_H */ + diff --git a/kernel/drivers/misc/eeprom/at24.c b/kernel/drivers/misc/eeprom/at24.c index d02bf9c..09d976a 100644 --- a/kernel/drivers/misc/eeprom/at24.c +++ b/kernel/drivers/misc/eeprom/at24.c @@ -607,6 +607,59 @@ } EXPORT_SYMBOL(at24_mac1_read); +ssize_t at24_mac2_read(unsigned char* mac) +{ + char buf[20]; + char buf_tmp[12]; + ssize_t ret; + if (at24_private == NULL) + { + printk("zcl: at24_mac2_read at24_private==NULL error\n"); + return 0; + } + memset(buf, 0x00, 20); + memset(buf_tmp, 0x00, 12); + ret = at24_read_private(at24_private, buf, 0x20, 6); + if (ret > 0) + { + *mac = buf[0]; + *(mac + 1) = buf[1]; + *(mac + 2) = buf[2]; + *(mac + 3) = buf[3]; + *(mac + 4) = buf[4]; + *(mac + 5) = buf[5]; + } + printk("at24_mac2_read ...............\n"); + return ret; +} +EXPORT_SYMBOL(at24_mac2_read); + +ssize_t at24_mac3_read(unsigned char* mac) +{ + char buf[20]; + char buf_tmp[12]; + ssize_t ret; + if (at24_private == NULL) + { + printk("zcl: at24_mac3_read at24_private==NULL error\n"); + return 0; + } + memset(buf, 0x00, 20); + memset(buf_tmp, 0x00, 12); + ret = at24_read_private(at24_private, buf, 0x30, 6); + if (ret > 0) + { + *mac = buf[0]; + *(mac + 1) = buf[1]; + *(mac + 2) = buf[2]; + *(mac + 3) = buf[3]; + *(mac + 4) = buf[4]; + *(mac + 5) = buf[5]; + } + printk("at24_mac3_read ...............\n"); + return ret; +} +EXPORT_SYMBOL(at24_mac3_read); static int at24_write(void *priv, unsigned int off, void *val, size_t count) { struct at24_data *at24; diff --git a/kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c b/kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c index 84c4d09..d07e7b0 100644 --- a/kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c +++ b/kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c @@ -115,6 +115,8 @@ #define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" #define FIRMWARE_8168FP_4 "rtl_nic/rtl8168fp-4.fw" +static int my_id=1; + /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; @@ -24055,12 +24057,18 @@ free_netdev(dev); } +extern ssize_t at24_mac1_read(unsigned char* mac); +extern ssize_t at24_mac2_read(unsigned char* mac); +extern ssize_t at24_mac3_read(unsigned char* mac); + + static int rtl8168_get_mac_address(struct net_device *dev) { struct rtl8168_private *tp = netdev_priv(dev); int i; u8 mac_addr[MAC_ADDR_LEN]; + unsigned char mac[6] = {0}; /* zero-init: checked below even when no EEPROM read happens */ for (i = 0; i < MAC_ADDR_LEN; i++) mac_addr[i] = RTL_R8(tp, MAC0 + i); @@ -24115,6 +24123,32 @@ } } } + + + if (my_id == 1) + at24_mac1_read(mac); + if (my_id == 2) + at24_mac2_read(mac); + if (my_id == 3) + at24_mac3_read(mac); + if ((mac[0] == 0x68) && (mac[1] == 0xed)) + { + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = mac[i]; + my_id+=1; + netif_err(tp, probe, dev, "Get ether addr from at24 %pM\n", + mac_addr); + } + else { + printk("rtl8168 mac read from eeprom error!!
\n"); + mac_addr[0] = 0x66; + mac_addr[1] = 0xED; + mac_addr[2] = 0xB5; + mac_addr[3] = 0x64; + mac_addr[4] = 0x72; + mac_addr[5] = my_id; + my_id+=1; + } if (!is_valid_ether_addr(mac_addr)) { netif_err(tp, probe, dev, "Invalid ether addr %pM\n", @@ -28739,5 +28773,6 @@ #endif } -module_init(rtl8168_init_module); +//module_init(rtl8168_init_module); +late_initcall(rtl8168_init_module); module_exit(rtl8168_cleanup_module); diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 27310a8..53bba2a 100644 --- a/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -2717,9 +2717,11 @@ { struct rk_priv_data *bsp_priv = priv; struct device *dev = &bsp_priv->pdev->dev; + unsigned char ethaddr[ETH_ALEN * MAX_ETH] = {0}; + int ret, id = bsp_priv->bus_id; int i; -#if 0 +#if 1 if (is_valid_ether_addr(addr)) goto out; @@ -2751,7 +2753,7 @@ } #endif - #if 1 + #if 0 if (at24_mac_read(macaddr) > 0) { printk("ben %s: at24_mac_read Success!! \n", __func__); memcpy(addr, macaddr, 6); @@ -2938,7 +2940,7 @@ .of_match_table = rk_gmac_dwmac_match, }, }; -module_platform_driver1(rk_gmac_dwmac_driver); +module_platform_driver(rk_gmac_dwmac_driver); MODULE_AUTHOR("Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>"); MODULE_DESCRIPTION("Rockchip RK3288 DWMAC specific glue layer"); diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6f1074a..a9dc4fc 100644 --- a/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2353,8 +2353,8 @@ */ static void stmmac_check_ether_addr(struct stmmac_priv *priv) { -// if (!is_valid_ether_addr(priv->dev->dev_addr)) { - if(1) { + if (!is_valid_ether_addr(priv->dev->dev_addr)) { +// if(1) { stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); if (likely(priv->plat->get_eth_addr)) priv->plat->get_eth_addr(priv->plat->bsp_priv, diff --git a/kernel/drivers/pci/controller/dwc/pcie-designware-host.c b/kernel/drivers/pci/controller/dwc/pcie-designware-host.c index 1cb04f4..0b4c19f 100644 --- a/kernel/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/kernel/drivers/pci/controller/dwc/pcie-designware-host.c @@ -44,6 +44,7 @@ .irq_ack = dw_msi_ack_irq, .irq_mask = dw_msi_mask_irq, .irq_unmask = dw_msi_unmask_irq, + .flags = IRQCHIP_PIPELINE_SAFE, }; static struct msi_domain_info dw_pcie_msi_domain_info = { diff --git a/kernel/drivers/pci/controller/pcie-brcmstb.c b/kernel/drivers/pci/controller/pcie-brcmstb.c index 9c3d298..452e2bb 100644 --- a/kernel/drivers/pci/controller/pcie-brcmstb.c +++ b/kernel/drivers/pci/controller/pcie-brcmstb.c @@ -457,6 +457,7 @@ .irq_ack = irq_chip_ack_parent, .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, + .flags = IRQCHIP_PIPELINE_SAFE, }; static struct msi_domain_info brcm_msi_domain_info = { @@ -520,6 +521,7 @@ .irq_compose_msi_msg = brcm_msi_compose_msi_msg, .irq_set_affinity = brcm_msi_set_affinity, .irq_ack = brcm_msi_ack_irq, + .flags = IRQCHIP_PIPELINE_SAFE, }; static int brcm_msi_alloc(struct brcm_msi *msi) diff --git a/kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 6768b2f..e37dd66 100644 --- a/kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -88,7 +88,7 @@ struct pinctrl_desc pctl_desc; struct pinctrl_gpio_range gpio_range; - 
raw_spinlock_t irq_lock[BCM2835_NUM_BANKS]; + hard_spinlock_t irq_lock[BCM2835_NUM_BANKS]; }; /* pins are just named GPIO0..GPIO53 */ @@ -678,7 +678,7 @@ .irq_mask = bcm2835_gpio_irq_disable, .irq_unmask = bcm2835_gpio_irq_enable, .irq_set_wake = bcm2835_gpio_irq_set_wake, - .flags = IRQCHIP_MASK_ON_SUSPEND, + .flags = IRQCHIP_MASK_ON_SUSPEND|IRQCHIP_PIPELINE_SAFE, }; static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev) diff --git a/kernel/drivers/pinctrl/intel/pinctrl-cherryview.c b/kernel/drivers/pinctrl/intel/pinctrl-cherryview.c index 2ed17cd..80ee69a 100644 --- a/kernel/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/kernel/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -562,7 +562,7 @@ * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005), * errata #CHT34, for further information. */ -static DEFINE_RAW_SPINLOCK(chv_lock); +static DEFINE_HARD_SPINLOCK(chv_lock); static u32 chv_pctrl_readl(struct intel_pinctrl *pctrl, unsigned int offset) { @@ -1554,7 +1554,8 @@ pctrl->irqchip.irq_mask = chv_gpio_irq_mask; pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask; pctrl->irqchip.irq_set_type = chv_gpio_irq_type; - pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE; + pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_PIPELINE_SAFE; chip->irq.chip = &pctrl->irqchip; chip->irq.init_hw = chv_gpio_irq_init_hw; diff --git a/kernel/drivers/pinctrl/qcom/pinctrl-msm.c b/kernel/drivers/pinctrl/qcom/pinctrl-msm.c index a3cef80..c9e4452 100644 --- a/kernel/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/kernel/drivers/pinctrl/qcom/pinctrl-msm.c @@ -68,7 +68,7 @@ bool intr_target_use_scm; - raw_spinlock_t lock; + hard_spinlock_t lock; DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO); DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO); @@ -1273,7 +1273,8 @@ pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity; pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED | - IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND; + IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND | + IRQCHIP_PIPELINE_SAFE; np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0); if (np) { diff --git a/kernel/drivers/pinctrl/samsung/pinctrl-exynos.c b/kernel/drivers/pinctrl/samsung/pinctrl-exynos.c index 493079a..7da7f80 100644 --- a/kernel/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/kernel/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -58,13 +58,13 @@ unsigned int mask; unsigned long flags; - spin_lock_irqsave(&bank->slock, flags); + raw_spin_lock_irqsave(&bank->slock, flags); mask = readl(bank->eint_base + reg_mask); mask |= 1 << irqd->hwirq; writel(mask, bank->eint_base + reg_mask); - spin_unlock_irqrestore(&bank->slock, flags); + raw_spin_unlock_irqrestore(&bank->slock, flags); } static void exynos_irq_ack(struct irq_data *irqd) @@ -97,13 +97,13 @@ if (irqd_get_trigger_type(irqd) & IRQ_TYPE_LEVEL_MASK) exynos_irq_ack(irqd); - spin_lock_irqsave(&bank->slock, flags); + raw_spin_lock_irqsave(&bank->slock, flags); mask = readl(bank->eint_base + reg_mask); mask &= ~(1 << irqd->hwirq); writel(mask, bank->eint_base + reg_mask); - spin_unlock_irqrestore(&bank->slock, flags); + raw_spin_unlock_irqrestore(&bank->slock, flags); } static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type) @@ -169,14 +169,14 @@ shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC]; mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; - spin_lock_irqsave(&bank->slock, flags); + raw_spin_lock_irqsave(&bank->slock, flags); con = readl(bank->pctl_base + reg_con); con &= ~(mask << shift); con |= EXYNOS_PIN_FUNC_EINT 
<< shift; writel(con, bank->pctl_base + reg_con); - spin_unlock_irqrestore(&bank->slock, flags); + raw_spin_unlock_irqrestore(&bank->slock, flags); return 0; } @@ -192,14 +192,14 @@ shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC]; mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; - spin_lock_irqsave(&bank->slock, flags); + raw_spin_lock_irqsave(&bank->slock, flags); con = readl(bank->pctl_base + reg_con); con &= ~(mask << shift); con |= EXYNOS_PIN_FUNC_INPUT << shift; writel(con, bank->pctl_base + reg_con); - spin_unlock_irqrestore(&bank->slock, flags); + raw_spin_unlock_irqrestore(&bank->slock, flags); gpiochip_unlock_as_irq(&bank->gpio_chip, irqd->hwirq); } @@ -216,6 +216,7 @@ .irq_set_type = exynos_irq_set_type, .irq_request_resources = exynos_irq_request_resources, .irq_release_resources = exynos_irq_release_resources, + .flags = IRQCHIP_PIPELINE_SAFE, }, .eint_con = EXYNOS_GPIO_ECON_OFFSET, .eint_mask = EXYNOS_GPIO_EMASK_OFFSET, @@ -287,7 +288,7 @@ } ret = devm_request_irq(dev, d->irq, exynos_eint_gpio_irq, - 0, dev_name(dev), d); + IRQF_OOB, dev_name(dev), d); if (ret) { dev_err(dev, "irq request failed\n"); return -ENXIO; @@ -305,6 +306,7 @@ goto err_domains; } bank->irq_chip->chip.name = bank->name; + bank->irq_chip->chip.flags |= IRQCHIP_PIPELINE_SAFE; bank->irq_domain = irq_domain_add_linear(bank->of_node, bank->nr_pins, &exynos_eint_irqd_ops, bank); @@ -408,6 +410,7 @@ .irq_set_wake = exynos_wkup_irq_set_wake, .irq_request_resources = exynos_irq_request_resources, .irq_release_resources = exynos_irq_release_resources, + .flags = IRQCHIP_PIPELINE_SAFE, }, .eint_con = EXYNOS_WKUP_ECON_OFFSET, .eint_mask = EXYNOS_WKUP_EMASK_OFFSET, @@ -428,6 +431,7 @@ .irq_set_wake = exynos_wkup_irq_set_wake, .irq_request_resources = exynos_irq_request_resources, .irq_release_resources = exynos_irq_release_resources, + .flags = IRQCHIP_PIPELINE_SAFE, }, .eint_con = EXYNOS_WKUP_ECON_OFFSET, .eint_mask = EXYNOS_WKUP_EMASK_OFFSET, @@ -447,6 +451,7 @@ .irq_set_wake = exynos_wkup_irq_set_wake, .irq_request_resources = exynos_irq_request_resources, .irq_release_resources = exynos_irq_release_resources, + .flags = IRQCHIP_PIPELINE_SAFE, }, .eint_con = EXYNOS7_WKUP_ECON_OFFSET, .eint_mask = EXYNOS7_WKUP_EMASK_OFFSET, diff --git a/kernel/drivers/pinctrl/samsung/pinctrl-samsung.c b/kernel/drivers/pinctrl/samsung/pinctrl-samsung.c index 56fff83..f88f9f9 100644 --- a/kernel/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/kernel/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -400,14 +400,14 @@ reg += 4; } - spin_lock_irqsave(&bank->slock, flags); + raw_spin_lock_irqsave(&bank->slock, flags); data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]); data &= ~(mask << shift); data |= func->val << shift; writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]); - spin_unlock_irqrestore(&bank->slock, flags); + raw_spin_unlock_irqrestore(&bank->slock, flags); } /* enable a specified pinmux by writing to registers */ @@ -451,7 +451,7 @@ width = type->fld_width[cfg_type]; cfg_reg = type->reg_offset[cfg_type]; - spin_lock_irqsave(&bank->slock, flags); + raw_spin_lock_irqsave(&bank->slock, flags); mask = (1 << width) - 1; shift = pin_offset * width; @@ -468,7 +468,7 @@ *config = PINCFG_PACK(cfg_type, data); } - spin_unlock_irqrestore(&bank->slock, flags); + raw_spin_unlock_irqrestore(&bank->slock, flags); return 0; } @@ -561,9 +561,9 @@ struct samsung_pin_bank *bank = gpiochip_get_data(gc); unsigned long flags; - spin_lock_irqsave(&bank->slock, flags); + raw_spin_lock_irqsave(&bank->slock, flags); 
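+ /* bank->slock is a hard spinlock under Dovetail (see pinctrl-samsung.h): it is held with hard interrupts off, so keep this section short and strictly non-sleeping. */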
samsung_gpio_set_value(gc, offset, value); - spin_unlock_irqrestore(&bank->slock, flags); + raw_spin_unlock_irqrestore(&bank->slock, flags); } /* gpiolib gpio_get callback function */ @@ -626,9 +626,9 @@ unsigned long flags; int ret; - spin_lock_irqsave(&bank->slock, flags); + raw_spin_lock_irqsave(&bank->slock, flags); ret = samsung_gpio_set_direction(gc, offset, true); - spin_unlock_irqrestore(&bank->slock, flags); + raw_spin_unlock_irqrestore(&bank->slock, flags); return ret; } @@ -640,10 +640,10 @@ unsigned long flags; int ret; - spin_lock_irqsave(&bank->slock, flags); + raw_spin_lock_irqsave(&bank->slock, flags); samsung_gpio_set_value(gc, offset, value); ret = samsung_gpio_set_direction(gc, offset, false); - spin_unlock_irqrestore(&bank->slock, flags); + raw_spin_unlock_irqrestore(&bank->slock, flags); return ret; } @@ -1067,7 +1067,7 @@ bank->eint_offset = bdata->eint_offset; bank->name = bdata->name; - spin_lock_init(&bank->slock); + raw_spin_lock_init(&bank->slock); bank->drvdata = d; bank->pin_base = d->nr_pins; d->nr_pins += bank->nr_pins; diff --git a/kernel/drivers/pinctrl/samsung/pinctrl-samsung.h b/kernel/drivers/pinctrl/samsung/pinctrl-samsung.h index 379f34a..59ce47a 100644 --- a/kernel/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/kernel/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -171,7 +171,7 @@ struct gpio_chip gpio_chip; struct pinctrl_gpio_range grange; struct exynos_irq_chip *irq_chip; - spinlock_t slock; + hard_spinlock_t slock; u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers*/ }; diff --git a/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c index e4b41cc..a6170c0 100644 --- a/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -1086,7 +1086,7 @@ .irq_release_resources = sunxi_pinctrl_irq_release_resources, .irq_set_type = sunxi_pinctrl_irq_set_type, .irq_set_wake = sunxi_pinctrl_irq_set_wake, - .flags = IRQCHIP_MASK_ON_SUSPEND, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE, }; static struct irq_chip sunxi_pinctrl_level_irq_chip = { @@ -1104,7 +1104,8 @@ .irq_set_wake = sunxi_pinctrl_irq_set_wake, .flags = IRQCHIP_EOI_THREADED | IRQCHIP_MASK_ON_SUSPEND | - IRQCHIP_EOI_IF_HANDLED, + IRQCHIP_EOI_IF_HANDLED | + IRQCHIP_PIPELINE_SAFE, }; static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d, diff --git a/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h index a32bb5b..a1849aa 100644 --- a/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h @@ -167,7 +167,7 @@ unsigned ngroups; int *irq; unsigned *irq_array; - raw_spinlock_t lock; + hard_spinlock_t lock; struct pinctrl_dev *pctl_dev; unsigned long variant; }; diff --git a/kernel/drivers/soc/qcom/smp2p.c b/kernel/drivers/soc/qcom/smp2p.c index fb76c8b..9eecb34 100644 --- a/kernel/drivers/soc/qcom/smp2p.c +++ b/kernel/drivers/soc/qcom/smp2p.c @@ -281,6 +281,7 @@ .irq_mask = smp2p_mask_irq, .irq_unmask = smp2p_unmask_irq, .irq_set_type = smp2p_set_irq_type, + .flags = IRQCHIP_PIPELINE_SAFE, }; static int smp2p_irq_map(struct irq_domain *d, diff --git a/kernel/drivers/soc/ti/ti_sci_inta_msi.c b/kernel/drivers/soc/ti/ti_sci_inta_msi.c index 0eb9462..21d222b 100644 --- a/kernel/drivers/soc/ti/ti_sci_inta_msi.c +++ b/kernel/drivers/soc/ti/ti_sci_inta_msi.c @@ -42,6 +42,7 @@ chip->irq_unmask = irq_chip_unmask_parent; chip->irq_mask = irq_chip_mask_parent; chip->irq_ack = irq_chip_ack_parent; + chip->flags |= 
IRQCHIP_PIPELINE_SAFE; } struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode, diff --git a/kernel/drivers/spi/Kconfig b/kernel/drivers/spi/Kconfig index 7a2cb27..a853a43 100644 --- a/kernel/drivers/spi/Kconfig +++ b/kernel/drivers/spi/Kconfig @@ -32,6 +32,10 @@ Say "yes" to enable debug messaging (like dev_dbg and pr_debug), sysfs, and debugfs support in SPI controller and protocol drivers. +config SPI_OOB + def_bool n + depends on HAS_DMA && DOVETAIL + # # MASTER side ... talking to discrete SPI slave chips including microcontrollers # @@ -138,6 +142,13 @@ is for the regular SPI controller. Slave mode operation is also not supported. +config SPI_BCM2835_OOB + bool "Out-of-band support for BCM2835 SPI controller" + depends on SPI_BCM2835 && DOVETAIL + select SPI_OOB + help + Enable out-of-band cyclic transfers. + config SPI_BCM2835AUX tristate "BCM2835 SPI auxiliary controller" depends on ((ARCH_BCM2835 || ARCH_BRCMSTB) && GPIOLIB) || COMPILE_TEST diff --git a/kernel/drivers/spi/spi-bcm2835.c b/kernel/drivers/spi/spi-bcm2835.c index bb9d838..4a3dbc0 100644 --- a/kernel/drivers/spi/spi-bcm2835.c +++ b/kernel/drivers/spi/spi-bcm2835.c @@ -1079,17 +1079,10 @@ return 0; } -static int bcm2835_spi_transfer_one(struct spi_controller *ctlr, - struct spi_device *spi, - struct spi_transfer *tfr) +static unsigned long bcm2835_get_clkdiv(struct bcm2835_spi *bs, u32 spi_hz, + u32 *effective_speed_hz) { - struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); - unsigned long spi_hz, cdiv; - unsigned long hz_per_byte, byte_limit; - u32 cs = bs->prepare_cs[spi->chip_select]; - - /* set clock */ - spi_hz = tfr->speed_hz; + unsigned long cdiv; if (spi_hz >= bs->clk_hz / 2) { cdiv = 2; /* clk_hz/2 is the fastest we can go */ @@ -1103,7 +1096,25 @@ } else { cdiv = 0; /* 0 is the slowest we can go */ } - tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536); + + *effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536); + + return cdiv; +} + +static int bcm2835_spi_transfer_one(struct spi_controller *ctlr, + struct spi_device *spi, + struct spi_transfer *tfr) +{ + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); + unsigned long spi_hz, cdiv; + unsigned long hz_per_byte, byte_limit; + u32 cs = bs->prepare_cs[spi->chip_select]; + + /* set clock */ + spi_hz = tfr->speed_hz; + + cdiv = bcm2835_get_clkdiv(bs, spi_hz, &tfr->effective_speed_hz); bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); /* handle all the 3-wire mode */ @@ -1283,6 +1294,68 @@ return 0; } +#ifdef CONFIG_SPI_BCM2835_OOB + +static int bcm2835_spi_prepare_oob_transfer(struct spi_controller *ctlr, + struct spi_oob_transfer *xfer) +{ + /* + * The size of a transfer is limited by DLEN which is 16-bit + * wide, and we don't want to scatter transfers in out-of-band + * mode, so cap the frame size accordingly. + */ + if (xfer->setup.frame_len > 65532) + return -EINVAL; + + return 0; +} + +static void bcm2835_spi_start_oob_transfer(struct spi_controller *ctlr, + struct spi_oob_transfer *xfer) +{ + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); + struct spi_device *spi = xfer->spi; + u32 cs = bs->prepare_cs[spi->chip_select], effective_speed_hz; + unsigned long cdiv; + + /* See bcm2835_spi_prepare_message().
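+ * CS is written up front so that the clock polarity settles before the transfer is armed with TA and DMAEN below, avoiding a spurious clock edge.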
*/ + bcm2835_wr(bs, BCM2835_SPI_CS, cs); + + cdiv = bcm2835_get_clkdiv(bs, xfer->setup.speed_hz, &effective_speed_hz); + xfer->effective_speed_hz = effective_speed_hz; + bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); + bcm2835_wr(bs, BCM2835_SPI_DLEN, xfer->setup.frame_len); + + if (spi->mode & SPI_3WIRE) + cs |= BCM2835_SPI_CS_REN; + bcm2835_wr(bs, BCM2835_SPI_CS, + cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN); +} + +static void bcm2835_spi_pulse_oob_transfer(struct spi_controller *ctlr, + struct spi_oob_transfer *xfer) +{ + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); + + /* Reload DLEN for the next pulse. */ + bcm2835_wr(bs, BCM2835_SPI_DLEN, xfer->setup.frame_len); +} + +static void bcm2835_spi_terminate_oob_transfer(struct spi_controller *ctlr, + struct spi_oob_transfer *xfer) +{ + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); + + bcm2835_spi_reset_hw(bs); +} + +#else +#define bcm2835_spi_prepare_oob_transfer NULL +#define bcm2835_spi_start_oob_transfer NULL +#define bcm2835_spi_pulse_oob_transfer NULL +#define bcm2835_spi_terminate_oob_transfer NULL +#endif + static int bcm2835_spi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -1304,6 +1377,10 @@ ctlr->transfer_one = bcm2835_spi_transfer_one; ctlr->handle_err = bcm2835_spi_handle_err; ctlr->prepare_message = bcm2835_spi_prepare_message; + ctlr->prepare_oob_transfer = bcm2835_spi_prepare_oob_transfer; + ctlr->start_oob_transfer = bcm2835_spi_start_oob_transfer; + ctlr->pulse_oob_transfer = bcm2835_spi_pulse_oob_transfer; + ctlr->terminate_oob_transfer = bcm2835_spi_terminate_oob_transfer; ctlr->dev.of_node = pdev->dev.of_node; bs = spi_controller_get_devdata(ctlr); diff --git a/kernel/drivers/spi/spi.c b/kernel/drivers/spi/spi.c index b1a638d..206e245 100644 --- a/kernel/drivers/spi/spi.c +++ b/kernel/drivers/spi/spi.c @@ -2729,6 +2729,9 @@ spin_lock_init(&ctlr->bus_lock_spinlock); mutex_init(&ctlr->bus_lock_mutex); mutex_init(&ctlr->io_mutex); +#ifdef CONFIG_SPI_OOB + sema_init(&ctlr->bus_oob_lock_sem, 1); +#endif ctlr->bus_lock_flag = 0; init_completion(&ctlr->xfer_completion); if (!ctlr->max_dma_len) @@ -3804,6 +3807,22 @@ * inline functions. 
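+ * + * With CONFIG_SPI_OOB enabled, in-band submitters must also hold the out-of-band bus semaphore; the bus claims below are therefore funneled through get_spi_bus()/put_spi_bus().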
*/ +static void get_spi_bus(struct spi_controller *ctlr) +{ + mutex_lock(&ctlr->bus_lock_mutex); +#ifdef CONFIG_SPI_OOB + down(&ctlr->bus_oob_lock_sem); +#endif +} + +static void put_spi_bus(struct spi_controller *ctlr) +{ +#ifdef CONFIG_SPI_OOB + up(&ctlr->bus_oob_lock_sem); +#endif + mutex_unlock(&ctlr->bus_lock_mutex); +} + static void spi_complete(void *arg) { complete(arg); @@ -3888,9 +3907,9 @@ { int ret; - mutex_lock(&spi->controller->bus_lock_mutex); + get_spi_bus(spi->controller); ret = __spi_sync(spi, message); - mutex_unlock(&spi->controller->bus_lock_mutex); + put_spi_bus(spi->controller); return ret; } @@ -3937,7 +3956,7 @@ { unsigned long flags; - mutex_lock(&ctlr->bus_lock_mutex); + get_spi_bus(ctlr); spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); ctlr->bus_lock_flag = 1; @@ -3966,7 +3985,7 @@ { ctlr->bus_lock_flag = 0; - mutex_unlock(&ctlr->bus_lock_mutex); + put_spi_bus(ctlr); return 0; } @@ -4051,6 +4070,274 @@ } EXPORT_SYMBOL_GPL(spi_write_then_read); +#ifdef CONFIG_SPI_OOB + +static int bus_lock_oob(struct spi_controller *ctlr) +{ + unsigned long flags; + int ret = -EBUSY; + + mutex_lock(&ctlr->bus_lock_mutex); + + spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); + + if (!ctlr->bus_lock_flag && !down_trylock(&ctlr->bus_oob_lock_sem)) { + ctlr->bus_lock_flag = 1; + ret = 0; + } + + spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); + + mutex_unlock(&ctlr->bus_lock_mutex); + + return ret; +} + +static int bus_unlock_oob(struct spi_controller *ctlr) +{ + ctlr->bus_lock_flag = 0; + up(&ctlr->bus_oob_lock_sem); + + return 0; +} + +static int prepare_oob_dma(struct spi_controller *ctlr, + struct spi_oob_transfer *xfer) +{ + struct dma_async_tx_descriptor *desc; + size_t len = xfer->setup.frame_len; + dma_cookie_t cookie; + dma_addr_t addr; + int ret; + + /* TX to second half of I/O buffer. */ + addr = xfer->dma_addr + xfer->aligned_frame_len; + desc = dmaengine_prep_slave_single(ctlr->dma_tx, addr, len, + DMA_MEM_TO_DEV, + DMA_OOB_INTERRUPT|DMA_OOB_PULSE); + if (!desc) + return -EIO; + + xfer->txd = desc; + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) + return ret; + + dma_async_issue_pending(ctlr->dma_tx); + + /* RX to first half of I/O buffer. */ + addr = xfer->dma_addr; + desc = dmaengine_prep_slave_single(ctlr->dma_rx, addr, len, + DMA_DEV_TO_MEM, + DMA_OOB_INTERRUPT|DMA_OOB_PULSE); + if (!desc) { + ret = -EIO; + goto fail_rx; + } + + desc->callback = xfer->setup.xfer_done; + desc->callback_param = xfer; + + xfer->rxd = desc; + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) + goto fail_rx; + + dma_async_issue_pending(ctlr->dma_rx); + + return 0; + +fail_rx: + dmaengine_terminate_sync(ctlr->dma_tx); + + return ret; +} + +static void unprepare_oob_dma(struct spi_controller *ctlr) +{ + dmaengine_terminate_sync(ctlr->dma_rx); + dmaengine_terminate_sync(ctlr->dma_tx); +} + +/* + * A simpler version of __spi_validate() for oob transfers. 
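+ * It defaults bits_per_word and speed_hz from the device, clamps the rate to the controller maximum, rejects rates below its minimum, and fails frame lengths that are not a multiple of the word size; out-of-band frames are fixed-size, so nothing gets split.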
+ */ +static int validate_oob_xfer(struct spi_device *spi, + struct spi_oob_transfer *xfer) +{ + struct spi_controller *ctlr = spi->controller; + struct spi_oob_setup *p = &xfer->setup; + int w_size; + + if (p->frame_len == 0) + return -EINVAL; + + if (!p->bits_per_word) + p->bits_per_word = spi->bits_per_word; + + if (!p->speed_hz) + p->speed_hz = spi->max_speed_hz; + + if (ctlr->max_speed_hz && p->speed_hz > ctlr->max_speed_hz) + p->speed_hz = ctlr->max_speed_hz; + + if (__spi_validate_bits_per_word(ctlr, p->bits_per_word)) + return -EINVAL; + + if (p->bits_per_word <= 8) + w_size = 1; + else if (p->bits_per_word <= 16) + w_size = 2; + else + w_size = 4; + + if (p->frame_len % w_size) + return -EINVAL; + + if (p->speed_hz && ctlr->min_speed_hz && + p->speed_hz < ctlr->min_speed_hz) + return -EINVAL; + + return 0; +} + +int spi_prepare_oob_transfer(struct spi_device *spi, + struct spi_oob_transfer *xfer) +{ + struct spi_controller *ctlr; + dma_addr_t dma_addr; + size_t alen, iolen; + void *iobuf; + int ret; + + /* Controller must support oob transactions. */ + ctlr = spi->controller; + if (!ctlr->prepare_oob_transfer) + return -ENOTSUPP; + + /* Out-of-band transfers require DMA support. */ + if (!ctlr->can_dma) + return -ENODEV; + + ret = validate_oob_xfer(spi, xfer); + if (ret) + return ret; + + alen = L1_CACHE_ALIGN(xfer->setup.frame_len); + /* + * Allocate a single coherent I/O buffer which is twice as + * large as the user specified transfer length, TX data goes + * to the upper half, RX data to the lower half. + */ + iolen = alen * 2; + iobuf = dma_alloc_coherent(ctlr->dev.parent, iolen, + &dma_addr, GFP_KERNEL); + if (iobuf == NULL) + return -ENOMEM; + + xfer->spi = spi; + xfer->dma_addr = dma_addr; + xfer->io_buffer = iobuf; + xfer->aligned_frame_len = alen; + xfer->effective_speed_hz = 0; + + ret = prepare_oob_dma(ctlr, xfer); + if (ret) + goto fail_prep_dma; + + ret = bus_lock_oob(ctlr); + if (ret) + goto fail_bus_lock; + + ret = ctlr->prepare_oob_transfer(ctlr, xfer); + if (ret) + goto fail_prep_xfer; + + return 0; + +fail_prep_xfer: + bus_unlock_oob(ctlr); +fail_bus_lock: + unprepare_oob_dma(ctlr); +fail_prep_dma: + dma_free_coherent(ctlr->dev.parent, iolen, iobuf, dma_addr); + + return ret; +} +EXPORT_SYMBOL_GPL(spi_prepare_oob_transfer); + +void spi_start_oob_transfer(struct spi_oob_transfer *xfer) +{ + struct spi_device *spi = xfer->spi; + struct spi_controller *ctlr = spi->controller; + + ctlr->start_oob_transfer(ctlr, xfer); +} +EXPORT_SYMBOL_GPL(spi_start_oob_transfer); + +int spi_pulse_oob_transfer(struct spi_oob_transfer *xfer) /* oob stage */ +{ + struct spi_device *spi = xfer->spi; + struct spi_controller *ctlr = spi->controller; + int ret; + + if (ctlr->pulse_oob_transfer) + ctlr->pulse_oob_transfer(ctlr, xfer); + + ret = dma_pulse_oob(ctlr->dma_rx); + if (likely(!ret)) + ret = dma_pulse_oob(ctlr->dma_tx); + + return ret; +} +EXPORT_SYMBOL_GPL(spi_pulse_oob_transfer); + +void spi_terminate_oob_transfer(struct spi_oob_transfer *xfer) +{ + struct spi_device *spi = xfer->spi; + struct spi_controller *ctlr = spi->controller; + + if (ctlr->terminate_oob_transfer) + ctlr->terminate_oob_transfer(ctlr, xfer); + + unprepare_oob_dma(ctlr); + bus_unlock_oob(ctlr); + dma_free_coherent(ctlr->dev.parent, xfer->aligned_frame_len * 2, + xfer->io_buffer, xfer->dma_addr); +} +EXPORT_SYMBOL_GPL(spi_terminate_oob_transfer); + +int spi_mmap_oob_transfer(struct vm_area_struct *vma, + struct spi_oob_transfer *xfer) +{ + struct spi_device *spi = xfer->spi; + struct spi_controller *ctlr = 
spi->controller; + size_t len; + int ret; + + /* + * We may have an IOMMU, rely on dma_mmap_coherent() for + * dealing with the nitty-gritty details of mapping a coherent + * buffer. + */ + len = vma->vm_end - vma->vm_start; + if (spi_get_oob_iolen(xfer) <= len) + ret = dma_mmap_coherent(ctlr->dev.parent, + vma, + xfer->io_buffer, + xfer->dma_addr, + len); + else + ret = -EINVAL; + + return ret; +} +EXPORT_SYMBOL_GPL(spi_mmap_oob_transfer); + +#endif /* SPI_OOB */ + /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_OF) diff --git a/kernel/drivers/spmi/spmi-pmic-arb.c b/kernel/drivers/spmi/spmi-pmic-arb.c index e6de2ae..1066934 100644 --- a/kernel/drivers/spmi/spmi-pmic-arb.c +++ b/kernel/drivers/spmi/spmi-pmic-arb.c @@ -145,7 +145,7 @@ void __iomem *cnfg; void __iomem *core; resource_size_t core_size; - raw_spinlock_t lock; + hard_spinlock_t lock; u8 channel; int irq; u8 ee; @@ -684,7 +684,7 @@ .irq_set_type = qpnpint_irq_set_type, .irq_set_wake = qpnpint_irq_set_wake, .irq_get_irqchip_state = qpnpint_get_irqchip_state, - .flags = IRQCHIP_MASK_ON_SUSPEND, + .flags = IRQCHIP_MASK_ON_SUSPEND|IRQCHIP_PIPELINE_SAFE, }; static int qpnpint_irq_domain_translate(struct irq_domain *d, diff --git a/kernel/drivers/tty/serial/8250/8250_core.c b/kernel/drivers/tty/serial/8250/8250_core.c index 00f6dc7..da7ba88 100644 --- a/kernel/drivers/tty/serial/8250/8250_core.c +++ b/kernel/drivers/tty/serial/8250/8250_core.c @@ -675,6 +675,48 @@ return -ENODEV; } +#ifdef CONFIG_RAW_PRINTK + +static void raw_write_char(struct uart_8250_port *up, int c) +{ + unsigned int status, tmout = 10000; + + for (;;) { + status = serial_in(up, UART_LSR); + up->lsr_saved_flags |= status & LSR_SAVE_FLAGS; + if ((status & UART_LSR_THRE) == UART_LSR_THRE) + break; + if (--tmout == 0) + break; + cpu_relax(); + } + serial_port_out(&up->port, UART_TX, c); +} + +static void univ8250_console_write_raw(struct console *co, const char *s, + unsigned int count) +{ + struct uart_8250_port *up = &serial8250_ports[co->index]; + unsigned int ier; + + ier = serial_in(up, UART_IER); + + if (up->capabilities & UART_CAP_UUE) + serial_out(up, UART_IER, UART_IER_UUE); + else + serial_out(up, UART_IER, 0); + + while (count-- > 0) { + if (*s == '\n') + raw_write_char(up, '\r'); + raw_write_char(up, *s++); + } + + serial_out(up, UART_IER, ier); +} + +#endif + static struct console univ8250_console = { .name = "ttyS", .write = univ8250_console_write, @@ -682,6 +724,9 @@ .setup = univ8250_console_setup, .exit = univ8250_console_exit, .match = univ8250_console_match, +#ifdef CONFIG_RAW_PRINTK + .write_raw = univ8250_console_write_raw, +#endif .flags = CON_PRINTBUFFER | CON_ANYTIME, .index = -1, .data = &serial8250_reg, diff --git a/kernel/drivers/tty/serial/amba-pl011.c b/kernel/drivers/tty/serial/amba-pl011.c index 9900ee3..6ea53e1 100644 --- a/kernel/drivers/tty/serial/amba-pl011.c +++ b/kernel/drivers/tty/serial/amba-pl011.c @@ -1887,6 +1887,8 @@ pl011_disable_uart(uap); + if (IS_ENABLED(CONFIG_RAW_PRINTK)) + clk_disable(uap->clk); /* * Shut down the clock producer */ @@ -2194,6 +2196,37 @@ pl011_write(ch, uap, REG_DR); } +#ifdef CONFIG_RAW_PRINTK + +static void +pl011_console_write_raw(struct console *co, const char *s, unsigned int count) +{ + struct uart_amba_port *uap = amba_ports[co->index]; + unsigned int old_cr = 0, new_cr; + + if (!uap->vendor->always_enabled) { + old_cr = pl011_read(uap, REG_CR); + new_cr = old_cr & ~UART011_CR_CTSEN; + new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE; + 
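/* Force the UART on with TX enabled and CTS flow control off, so a raw write can always make progress. */ +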
pl011_write(new_cr, uap, REG_CR); + } + + while (count-- > 0) { + if (*s == '\n') + pl011_console_putchar(&uap->port, '\r'); + pl011_console_putchar(&uap->port, *s++); + } + + while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) + & uap->vendor->fr_busy) + cpu_relax(); + + if (!uap->vendor->always_enabled) + pl011_write(old_cr, uap, REG_CR); +} + +#endif /* CONFIG_RAW_PRINTK */ + static void pl011_console_write(struct console *co, const char *s, unsigned int count) { @@ -2323,6 +2356,9 @@ pl011_console_get_options(uap, &baud, &parity, &bits); } + if (IS_ENABLED(CONFIG_RAW_PRINTK)) + clk_enable(uap->clk); + return uart_set_options(&uap->port, co, baud, parity, bits, flow); } @@ -2393,6 +2429,9 @@ .device = uart_console_device, .setup = pl011_console_setup, .match = pl011_console_match, +#ifdef CONFIG_RAW_PRINTK + .write_raw = pl011_console_write_raw, +#endif .flags = CON_PRINTBUFFER | CON_ANYTIME, .index = -1, .data = &amba_reg, diff --git a/kernel/drivers/tty/serial/imx.c b/kernel/drivers/tty/serial/imx.c index 164597e..fa7d220 100644 --- a/kernel/drivers/tty/serial/imx.c +++ b/kernel/drivers/tty/serial/imx.c @@ -1998,24 +1998,11 @@ imx_uart_writel(sport, ch, URTX0); } -/* - * Interrupts are disabled on entering - */ static void -imx_uart_console_write(struct console *co, const char *s, unsigned int count) +__imx_uart_console_write(struct imx_port *sport, const char *s, unsigned int count) { - struct imx_port *sport = imx_uart_ports[co->index]; struct imx_port_ucrs old_ucr; unsigned int ucr1; - unsigned long flags = 0; - int locked = 1; - - if (sport->port.sysrq) - locked = 0; - else if (oops_in_progress) - locked = spin_trylock_irqsave(&sport->port.lock, flags); - else - spin_lock_irqsave(&sport->port.lock, flags); /* * First, save UCR1/2/3 and then disable interrupts @@ -2041,10 +2028,40 @@ while (!(imx_uart_readl(sport, USR2) & USR2_TXDC)); imx_uart_ucrs_restore(sport, &old_ucr); +} + +/* + * Interrupts are disabled on entering + */ +static void +imx_uart_console_write(struct console *co, const char *s, unsigned int count) +{ + struct imx_port *sport = imx_uart_ports[co->index]; + unsigned long flags; + int locked = 1; + + if (sport->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock_irqsave(&sport->port.lock, flags); + else + spin_lock_irqsave(&sport->port.lock, flags); + + __imx_uart_console_write(sport, s, count); if (locked) spin_unlock_irqrestore(&sport->port.lock, flags); } + +#ifdef CONFIG_RAW_PRINTK +static void +imx_uart_console_write_raw(struct console *co, const char *s, unsigned int count) +{ + struct imx_port *sport = imx_uart_ports[co->index]; + + __imx_uart_console_write(sport, s, count); +} +#endif /* * If the port was already initialised (eg, by a boot loader), @@ -2161,6 +2178,9 @@ static struct console imx_uart_console = { .name = DEV_NAME, .write = imx_uart_console_write, +#ifdef CONFIG_RAW_PRINTK + .write_raw = imx_uart_console_write_raw, +#endif .device = uart_console_device, .setup = imx_uart_console_setup, .flags = CON_PRINTBUFFER, diff --git a/kernel/drivers/tty/serial/samsung_tty.c b/kernel/drivers/tty/serial/samsung_tty.c index 263c332..a36d1f4 100644 --- a/kernel/drivers/tty/serial/samsung_tty.c +++ b/kernel/drivers/tty/serial/samsung_tty.c @@ -2367,6 +2367,10 @@ .flags = CON_PRINTBUFFER, .index = -1, .write = s3c24xx_serial_console_write, +#ifdef CONFIG_RAW_PRINTK + /* The common write handler can run from atomic context.
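+ * It is therefore reused as the ->write_raw handler; no separate polling variant is needed.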
*/ + .write_raw = s3c24xx_serial_console_write, +#endif .setup = s3c24xx_serial_console_setup, .data = &s3c24xx_uart_drv, }; diff --git a/kernel/drivers/tty/serial/st-asc.c b/kernel/drivers/tty/serial/st-asc.c index 97d36f8..1239fd5 100644 --- a/kernel/drivers/tty/serial/st-asc.c +++ b/kernel/drivers/tty/serial/st-asc.c @@ -908,6 +908,29 @@ spin_unlock_irqrestore(&port->lock, flags); } +#ifdef CONFIG_RAW_PRINTK + +static void asc_console_write_raw(struct console *co, + const char *s, unsigned int count) +{ + struct uart_port *port = &asc_ports[co->index].port; + unsigned long timeout = 1000000; + u32 intenable; + + intenable = asc_in(port, ASC_INTEN); + asc_out(port, ASC_INTEN, 0); + (void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */ + + uart_console_write(port, s, count, asc_console_putchar); + + while (timeout-- && !asc_txfifo_is_empty(port)) + cpu_relax(); /* wait shorter */ + + asc_out(port, ASC_INTEN, intenable); +} + +#endif + static int asc_console_setup(struct console *co, char *options) { struct asc_port *ascport; @@ -940,6 +963,9 @@ .name = ASC_SERIAL_NAME, .device = uart_console_device, .write = asc_console_write, +#ifdef CONFIG_RAW_PRINTK + .write_raw = asc_console_write_raw, +#endif .setup = asc_console_setup, .flags = CON_PRINTBUFFER, .index = -1, diff --git a/kernel/drivers/xenomai/Kconfig b/kernel/drivers/xenomai/Kconfig new file mode 120000 index 0000000..1481352 --- /dev/null +++ b/kernel/drivers/xenomai/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/Makefile b/kernel/drivers/xenomai/Makefile new file mode 120000 index 0000000..2865738 --- /dev/null +++ b/kernel/drivers/xenomai/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/Kconfig b/kernel/drivers/xenomai/analogy/Kconfig new file mode 120000 index 0000000..178cf30 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/Makefile b/kernel/drivers/xenomai/analogy/Makefile new file mode 120000 index 0000000..8eb912c --- /dev/null +++ b/kernel/drivers/xenomai/analogy/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/buffer.c b/kernel/drivers/xenomai/analogy/buffer.c new file mode 120000 index 0000000..d9a8285 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/buffer.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/command.c b/kernel/drivers/xenomai/analogy/command.c new file mode 120000 index 0000000..173bc3e --- /dev/null +++ b/kernel/drivers/xenomai/analogy/command.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/device.c b/kernel/drivers/xenomai/analogy/device.c new file mode 120000 index 0000000..f98ec1d --- /dev/null +++ b/kernel/drivers/xenomai/analogy/device.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/driver.c 
b/kernel/drivers/xenomai/analogy/driver.c new file mode 120000 index 0000000..176187a --- /dev/null +++ b/kernel/drivers/xenomai/analogy/driver.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/driver_facilities.c b/kernel/drivers/xenomai/analogy/driver_facilities.c new file mode 120000 index 0000000..f10c5e4 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/driver_facilities.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/instruction.c b/kernel/drivers/xenomai/analogy/instruction.c new file mode 120000 index 0000000..d560444 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/instruction.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/intel/8255.c b/kernel/drivers/xenomai/analogy/intel/8255.c new file mode 120000 index 0000000..f05950a --- /dev/null +++ b/kernel/drivers/xenomai/analogy/intel/8255.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/intel/8255.h b/kernel/drivers/xenomai/analogy/intel/8255.h new file mode 120000 index 0000000..c03d400 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/intel/8255.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/intel/Kconfig b/kernel/drivers/xenomai/analogy/intel/Kconfig new file mode 120000 index 0000000..c475508 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/intel/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/intel/Makefile b/kernel/drivers/xenomai/analogy/intel/Makefile new file mode 120000 index 0000000..3cd46a8 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/intel/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/intel/parport.c b/kernel/drivers/xenomai/analogy/intel/parport.c new file mode 120000 index 0000000..31342d5 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/intel/parport.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/Kconfig b/kernel/drivers/xenomai/analogy/national_instruments/Kconfig new file mode 120000 index 0000000..dd8b00d --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/Makefile b/kernel/drivers/xenomai/analogy/national_instruments/Makefile new file mode 120000 index 0000000..f474ec0 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile \ No newline at end of file diff --git 
a/kernel/drivers/xenomai/analogy/national_instruments/mio_common.c b/kernel/drivers/xenomai/analogy/national_instruments/mio_common.c new file mode 120000 index 0000000..95dd21e --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/mio_common.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/mite.c b/kernel/drivers/xenomai/analogy/national_instruments/mite.c new file mode 120000 index 0000000..f4629c7 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/mite.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/mite.h b/kernel/drivers/xenomai/analogy/national_instruments/mite.h new file mode 120000 index 0000000..23e1b95 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/mite.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_660x.c b/kernel/drivers/xenomai/analogy/national_instruments/ni_660x.c new file mode 120000 index 0000000..70875ba --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_660x.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_670x.c b/kernel/drivers/xenomai/analogy/national_instruments/ni_670x.c new file mode 120000 index 0000000..40c9329 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_670x.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_mio.h b/kernel/drivers/xenomai/analogy/national_instruments/ni_mio.h new file mode 120000 index 0000000..f4d092b --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_mio.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_stc.h b/kernel/drivers/xenomai/analogy/national_instruments/ni_stc.h new file mode 120000 index 0000000..45ba968 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_stc.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_tio.h b/kernel/drivers/xenomai/analogy/national_instruments/ni_tio.h new file mode 120000 index 0000000..68ddf7a --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_tio.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/pcimio.c b/kernel/drivers/xenomai/analogy/national_instruments/pcimio.c new file mode 120000 index 0000000..627dbf7 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/pcimio.c @@ -0,0 +1 @@ 
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/national_instruments/tio_common.c b/kernel/drivers/xenomai/analogy/national_instruments/tio_common.c new file mode 120000 index 0000000..0510312 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/national_instruments/tio_common.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/proc.h b/kernel/drivers/xenomai/analogy/proc.h new file mode 120000 index 0000000..88c837d --- /dev/null +++ b/kernel/drivers/xenomai/analogy/proc.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/rtdm_helpers.c b/kernel/drivers/xenomai/analogy/rtdm_helpers.c new file mode 120000 index 0000000..f78bc88 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/rtdm_helpers.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/rtdm_interface.c b/kernel/drivers/xenomai/analogy/rtdm_interface.c new file mode 120000 index 0000000..7bcf22b --- /dev/null +++ b/kernel/drivers/xenomai/analogy/rtdm_interface.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/sensoray/Kconfig b/kernel/drivers/xenomai/analogy/sensoray/Kconfig new file mode 120000 index 0000000..2131de1 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/sensoray/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/sensoray/Makefile b/kernel/drivers/xenomai/analogy/sensoray/Makefile new file mode 120000 index 0000000..a222ac1 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/sensoray/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/sensoray/s526.c b/kernel/drivers/xenomai/analogy/sensoray/s526.c new file mode 120000 index 0000000..71deae2 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/sensoray/s526.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/subdevice.c b/kernel/drivers/xenomai/analogy/subdevice.c new file mode 120000 index 0000000..8b7fb41 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/subdevice.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/testing/Kconfig b/kernel/drivers/xenomai/analogy/testing/Kconfig new file mode 120000 index 0000000..0e8da90 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/testing/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/testing/Makefile b/kernel/drivers/xenomai/analogy/testing/Makefile new file mode 120000 index 0000000..b6acebc --- /dev/null +++ b/kernel/drivers/xenomai/analogy/testing/Makefile @@ -0,0 
+1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/testing/fake.c b/kernel/drivers/xenomai/analogy/testing/fake.c new file mode 120000 index 0000000..0527926 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/testing/fake.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/testing/loop.c b/kernel/drivers/xenomai/analogy/testing/loop.c new file mode 120000 index 0000000..c25e863 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/testing/loop.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/analogy/transfer.c b/kernel/drivers/xenomai/analogy/transfer.c new file mode 120000 index 0000000..55096e0 --- /dev/null +++ b/kernel/drivers/xenomai/analogy/transfer.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/autotune/Kconfig b/kernel/drivers/xenomai/autotune/Kconfig new file mode 120000 index 0000000..f45b3de --- /dev/null +++ b/kernel/drivers/xenomai/autotune/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/autotune/Makefile b/kernel/drivers/xenomai/autotune/Makefile new file mode 120000 index 0000000..c543ca7 --- /dev/null +++ b/kernel/drivers/xenomai/autotune/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/autotune/autotune.c b/kernel/drivers/xenomai/autotune/autotune.c new file mode 120000 index 0000000..49f359e --- /dev/null +++ b/kernel/drivers/xenomai/autotune/autotune.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/Kconfig b/kernel/drivers/xenomai/can/Kconfig new file mode 120000 index 0000000..40c089f --- /dev/null +++ b/kernel/drivers/xenomai/can/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/Makefile b/kernel/drivers/xenomai/can/Makefile new file mode 120000 index 0000000..7159870 --- /dev/null +++ b/kernel/drivers/xenomai/can/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/mscan/Kconfig b/kernel/drivers/xenomai/can/mscan/Kconfig new file mode 120000 index 0000000..d11e2d5 --- /dev/null +++ b/kernel/drivers/xenomai/can/mscan/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/mscan/Makefile b/kernel/drivers/xenomai/can/mscan/Makefile new file mode 120000 index 0000000..9ecd45e --- /dev/null +++ b/kernel/drivers/xenomai/can/mscan/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan.c b/kernel/drivers/xenomai/can/mscan/rtcan_mscan.c new file mode 120000 index 0000000..faa04b2 
--- /dev/null +++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan.h b/kernel/drivers/xenomai/can/mscan/rtcan_mscan.h new file mode 120000 index 0000000..9dbbc0f --- /dev/null +++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c new file mode 120000 index 0000000..25b90a6 --- /dev/null +++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan_proc.c b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_proc.c new file mode 120000 index 0000000..3f0e38a --- /dev/null +++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_proc.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan_regs.h b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_regs.h new file mode 120000 index 0000000..84efea4 --- /dev/null +++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_regs.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/peak_canfd/Kconfig b/kernel/drivers/xenomai/can/peak_canfd/Kconfig new file mode 120000 index 0000000..15631af --- /dev/null +++ b/kernel/drivers/xenomai/can/peak_canfd/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/peak_canfd/Makefile b/kernel/drivers/xenomai/can/peak_canfd/Makefile new file mode 120000 index 0000000..9f298b5 --- /dev/null +++ b/kernel/drivers/xenomai/can/peak_canfd/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c new file mode 120000 index 0000000..c19dcda --- /dev/null +++ b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h new file mode 120000 index 0000000..ad581ca --- /dev/null +++ b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c new file mode 120000 index 0000000..e390e68 --- /dev/null +++ b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c \ No newline 
at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_dev.c b/kernel/drivers/xenomai/can/rtcan_dev.c new file mode 120000 index 0000000..e89026d --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_dev.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_dev.h b/kernel/drivers/xenomai/can/rtcan_dev.h new file mode 120000 index 0000000..de5bf5a --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_dev.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_flexcan.c b/kernel/drivers/xenomai/can/rtcan_flexcan.c new file mode 120000 index 0000000..3135675 --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_flexcan.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_internal.h b/kernel/drivers/xenomai/can/rtcan_internal.h new file mode 120000 index 0000000..a059d53 --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_internal.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_list.h b/kernel/drivers/xenomai/can/rtcan_list.h new file mode 120000 index 0000000..4390be4 --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_list.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_module.c b/kernel/drivers/xenomai/can/rtcan_module.c new file mode 120000 index 0000000..dc519dd --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_module.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_raw.c b/kernel/drivers/xenomai/can/rtcan_raw.c new file mode 120000 index 0000000..83098b5 --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_raw.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_raw.h b/kernel/drivers/xenomai/can/rtcan_raw.h new file mode 120000 index 0000000..8afed34 --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_raw.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_raw_dev.c b/kernel/drivers/xenomai/can/rtcan_raw_dev.c new file mode 120000 index 0000000..dcbcc40 --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_raw_dev.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_raw_filter.c b/kernel/drivers/xenomai/can/rtcan_raw_filter.c new file mode 120000 index 0000000..1903db0 --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_raw_filter.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/can/rtcan_socket.c b/kernel/drivers/xenomai/can/rtcan_socket.c new file mode 120000 index 0000000..066d572 --- /dev/null +++ b/kernel/drivers/xenomai/can/rtcan_socket.c @@ -0,0 +1 @@ 
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_socket.h b/kernel/drivers/xenomai/can/rtcan_socket.h
new file mode 120000
index 0000000..a0bfef8
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_socket.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_version.h b/kernel/drivers/xenomai/can/rtcan_version.h
new file mode 120000
index 0000000..fb151ac
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_version.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_virt.c b/kernel/drivers/xenomai/can/rtcan_virt.c
new file mode 120000
index 0000000..b8622eb
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_virt.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/Kconfig b/kernel/drivers/xenomai/can/sja1000/Kconfig
new file mode 120000
index 0000000..9a12ec1
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/Makefile b/kernel/drivers/xenomai/can/sja1000/Makefile
new file mode 120000
index 0000000..92a39c3
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_adv_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_adv_pci.c
new file mode 120000
index 0000000..deea61b
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_adv_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_ems_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_ems_pci.c
new file mode 120000
index 0000000..cb5f886
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_ems_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_esd_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_esd_pci.c
new file mode 120000
index 0000000..aa62ac0
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_esd_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_isa.c b/kernel/drivers/xenomai/can/sja1000/rtcan_isa.c
new file mode 120000
index 0000000..0f086f3
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_isa.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c
new file mode 120000
index 0000000..a6ad991
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_mem.c b/kernel/drivers/xenomai/can/sja1000/rtcan_mem.c
new file mode 120000
index 0000000..a717566
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_mem.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_peak_dng.c b/kernel/drivers/xenomai/can/sja1000/rtcan_peak_dng.c
new file mode 120000
index 0000000..8c0cac7
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_peak_dng.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_peak_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_peak_pci.c
new file mode 120000
index 0000000..f39a12f
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_peak_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_plx_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_plx_pci.c
new file mode 120000
index 0000000..acdd065
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_plx_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.c b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.c
new file mode 120000
index 0000000..955104a
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.h b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.h
new file mode 120000
index 0000000..23315ba
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c
new file mode 120000
index 0000000..974d2d0
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h
new file mode 120000
index 0000000..8bd6676
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/Kconfig b/kernel/drivers/xenomai/gpio/Kconfig
new file mode 120000
index 0000000..6555e52
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/Makefile b/kernel/drivers/xenomai/gpio/Makefile
new file mode 120000
index 0000000..80ac680
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-bcm2835.c b/kernel/drivers/xenomai/gpio/gpio-bcm2835.c
new file mode 120000
index 0000000..d0f7323
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-bcm2835.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-cherryview.c b/kernel/drivers/xenomai/gpio/gpio-cherryview.c
new file mode 120000
index 0000000..014eadf
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-cherryview.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-core.c b/kernel/drivers/xenomai/gpio/gpio-core.c
new file mode 120000
index 0000000..2990f4e
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-core.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-mxc.c b/kernel/drivers/xenomai/gpio/gpio-mxc.c
new file mode 120000
index 0000000..14b366f
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-mxc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-omap.c b/kernel/drivers/xenomai/gpio/gpio-omap.c
new file mode 120000
index 0000000..aed52b8
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-omap.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-sun8i-h3.c b/kernel/drivers/xenomai/gpio/gpio-sun8i-h3.c
new file mode 120000
index 0000000..a8558d1
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-sun8i-h3.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-xilinx.c b/kernel/drivers/xenomai/gpio/gpio-xilinx.c
new file mode 120000
index 0000000..8cb0b56
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-xilinx.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-zynq7000.c b/kernel/drivers/xenomai/gpio/gpio-zynq7000.c
new file mode 120000
index 0000000..268930c
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-zynq7000.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpiopwm/Kconfig b/kernel/drivers/xenomai/gpiopwm/Kconfig
new file mode 120000
index 0000000..cb8e52a
--- /dev/null
+++ b/kernel/drivers/xenomai/gpiopwm/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpiopwm/Makefile b/kernel/drivers/xenomai/gpiopwm/Makefile
new file mode 120000
index 0000000..298cfaa
--- /dev/null
+++ b/kernel/drivers/xenomai/gpiopwm/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpiopwm/gpiopwm.c b/kernel/drivers/xenomai/gpiopwm/gpiopwm.c
new file mode 120000
index 0000000..b2ccb0a
--- /dev/null
+++ b/kernel/drivers/xenomai/gpiopwm/gpiopwm.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/Kconfig b/kernel/drivers/xenomai/ipc/Kconfig
new file mode 120000
index 0000000..d014c12
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/Makefile b/kernel/drivers/xenomai/ipc/Makefile
new file mode 120000
index 0000000..a2a14a0
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/bufp.c b/kernel/drivers/xenomai/ipc/bufp.c
new file mode 120000
index 0000000..45476ee
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/bufp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/iddp.c b/kernel/drivers/xenomai/ipc/iddp.c
new file mode 120000
index 0000000..0d81c84
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/iddp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/internal.h b/kernel/drivers/xenomai/ipc/internal.h
new file mode 120000
index 0000000..386d5da
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/internal.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/rtipc.c b/kernel/drivers/xenomai/ipc/rtipc.c
new file mode 120000
index 0000000..3806453
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/rtipc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/xddp.c b/kernel/drivers/xenomai/ipc/xddp.c
new file mode 120000
index 0000000..c727995
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/xddp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/Kconfig b/kernel/drivers/xenomai/net/Kconfig
new file mode 120000
index 0000000..c97ece1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/Makefile b/kernel/drivers/xenomai/net/Makefile
new file mode 120000
index 0000000..23918b1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/addons/Kconfig b/kernel/drivers/xenomai/net/addons/Kconfig
new file mode 120000
index 0000000..6a3ebd6
--- /dev/null
+++ b/kernel/drivers/xenomai/net/addons/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/addons/Makefile b/kernel/drivers/xenomai/net/addons/Makefile
new file mode 120000
index 0000000..a094573
--- /dev/null
+++ b/kernel/drivers/xenomai/net/addons/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/addons/cap.c b/kernel/drivers/xenomai/net/addons/cap.c
new file mode 120000
index 0000000..21a6de5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/addons/cap.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/addons/proxy.c b/kernel/drivers/xenomai/net/addons/proxy.c
new file mode 120000
index 0000000..6c4011d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/addons/proxy.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/8139too.c b/kernel/drivers/xenomai/net/drivers/8139too.c
new file mode 120000
index 0000000..711169f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/8139too.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/Kconfig b/kernel/drivers/xenomai/net/drivers/Kconfig
new file mode 120000
index 0000000..7ba614e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/Makefile b/kernel/drivers/xenomai/net/drivers/Makefile
new file mode 120000
index 0000000..b73d187
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/at91_ether.c b/kernel/drivers/xenomai/net/drivers/at91_ether.c
new file mode 120000
index 0000000..fe06f6f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/at91_ether.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/Makefile b/kernel/drivers/xenomai/net/drivers/e1000/Makefile
new file mode 120000
index 0000000..31ae439
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000.h b/kernel/drivers/xenomai/net/drivers/e1000/e1000.h
new file mode 120000
index 0000000..5ce29b3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.c b/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.c
new file mode 120000
index 0000000..db7bc01
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.h b/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.h
new file mode 120000
index 0000000..f0bd1f2
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_main.c b/kernel/drivers/xenomai/net/drivers/e1000/e1000_main.c
new file mode 120000
index 0000000..e94b664
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_main.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_osdep.h b/kernel/drivers/xenomai/net/drivers/e1000/e1000_osdep.h
new file mode 120000
index 0000000..f0ae997
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_osdep.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_param.c b/kernel/drivers/xenomai/net/drivers/e1000/e1000_param.c
new file mode 120000
index 0000000..afa14a4
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_param.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/kcompat.h b/kernel/drivers/xenomai/net/drivers/e1000/kcompat.h
new file mode 120000
index 0000000..aaea99e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/kcompat.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/80003es2lan.c b/kernel/drivers/xenomai/net/drivers/e1000e/80003es2lan.c
new file mode 120000
index 0000000..decbd6d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/80003es2lan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/82571.c b/kernel/drivers/xenomai/net/drivers/e1000e/82571.c
new file mode 120000
index 0000000..551403a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/82571.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/Makefile b/kernel/drivers/xenomai/net/drivers/e1000e/Makefile
new file mode 120000
index 0000000..490bf68
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/defines.h b/kernel/drivers/xenomai/net/drivers/e1000e/defines.h
new file mode 120000
index 0000000..2655b5f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/defines.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/e1000.h b/kernel/drivers/xenomai/net/drivers/e1000e/e1000.h
new file mode 120000
index 0000000..58ddbc8
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/e1000.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/hw.h b/kernel/drivers/xenomai/net/drivers/e1000e/hw.h
new file mode 120000
index 0000000..3495bb7
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/hw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/ich8lan.c b/kernel/drivers/xenomai/net/drivers/e1000e/ich8lan.c
new file mode 120000
index 0000000..620a5a7
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/ich8lan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/lib.c b/kernel/drivers/xenomai/net/drivers/e1000e/lib.c
new file mode 120000
index 0000000..f7dccb0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/lib.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/netdev.c b/kernel/drivers/xenomai/net/drivers/e1000e/netdev.c
new file mode 120000
index 0000000..de5cec7
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/netdev.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/param.c b/kernel/drivers/xenomai/net/drivers/e1000e/param.c
new file mode 120000
index 0000000..d4e2427
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/param.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/phy.c b/kernel/drivers/xenomai/net/drivers/e1000e/phy.c
new file mode 120000
index 0000000..7f57b2e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/phy.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/eepro100.c b/kernel/drivers/xenomai/net/drivers/eepro100.c
new file mode 120000
index 0000000..68f0bd9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/eepro100.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/eth1394.c b/kernel/drivers/xenomai/net/drivers/eth1394.c
new file mode 120000
index 0000000..0d2385c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/eth1394.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/3c59x.c b/kernel/drivers/xenomai/net/drivers/experimental/3c59x.c
new file mode 120000
index 0000000..f392120
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/3c59x.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/Kconfig b/kernel/drivers/xenomai/net/drivers/experimental/Kconfig
new file mode 120000
index 0000000..a8b7aa6
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/Makefile b/kernel/drivers/xenomai/net/drivers/experimental/Makefile
new file mode 120000
index 0000000..b2cd54b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/Makefile b/kernel/drivers/xenomai/net/drivers/experimental/e1000/Makefile
new file mode 120000
index 0000000..ef5e157
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000.h
new file mode 120000
index 0000000..5517ffe
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c
new file mode 120000
index 0000000..b90773e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h
new file mode 120000
index 0000000..3c91d39
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c
new file mode 120000
index 0000000..107b3fc
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c
new file mode 120000
index 0000000..af49424
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h
new file mode 120000
index 0000000..4a6b12d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c
new file mode 120000
index 0000000..b5fece5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c
new file mode 120000
index 0000000..123b677
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h
new file mode 120000
index 0000000..f6c618b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c
new file mode 120000
index 0000000..8be311a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h
new file mode 120000
index 0000000..4e913d2
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c
new file mode 120000
index 0000000..be7440a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h
new file mode 120000
index 0000000..d8f9c11
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h
new file mode 120000
index 0000000..c7b46f5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c
new file mode 120000
index 0000000..56ff658
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h
new file mode 120000
index 0000000..a98c043
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c
new file mode 120000
index 0000000..24e724c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h
new file mode 120000
index 0000000..af038e9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c
new file mode 120000
index 0000000..b9c80ae
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h
new file mode 120000
index 0000000..7b49cc1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c
new file mode 120000
index 0000000..0b4dca5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c
new file mode 120000
index 0000000..592efe5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h
new file mode 120000
index 0000000..195e5a6
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c
new file mode 120000
index 0000000..6b1f68b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h
new file mode 120000
index 0000000..7e9c493
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h
new file mode 120000
index 0000000..940b211
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c
new file mode 120000
index 0000000..d579c65
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c
new file mode 120000
index 0000000..1cd2488
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h
new file mode 120000
index 0000000..01bc39c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h
new file mode 120000
index 0000000..6e60c43
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h
new file mode 120000
index 0000000..042a181
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig
new file mode 120000
index 0000000..81d9074
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Makefile b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Makefile
new file mode 120000
index 0000000..38e0133
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c
new file mode 120000
index 0000000..17f9400
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h
new file mode 120000
index 0000000..d2ac490
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h
new file mode 120000
index 0000000..110ca4d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c
new file mode 120000
index 0000000..0814623
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/freescale/Makefile b/kernel/drivers/xenomai/net/drivers/freescale/Makefile
new file mode 120000
index 0000000..22c40e9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/freescale/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/freescale/fec.h b/kernel/drivers/xenomai/net/drivers/freescale/fec.h
new file mode 120000
index 0000000..3bba88c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/freescale/fec.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/freescale/fec_main.c b/kernel/drivers/xenomai/net/drivers/freescale/fec_main.c
new file mode 120000
index 0000000..acf4436
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/freescale/fec_main.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/freescale/fec_ptp.c b/kernel/drivers/xenomai/net/drivers/freescale/fec_ptp.c
new file mode 120000
index 0000000..95e170b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/freescale/fec_ptp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/Makefile b/kernel/drivers/xenomai/net/drivers/igb/Makefile
new file mode 120000
index 0000000..40d6051
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.c
new file mode 120000
index 0000000..5289bc3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.h
new file mode 120000
index 0000000..09ad7d0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_defines.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_defines.h
new file mode 120000
index 0000000..d771f53
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_defines.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_hw.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_hw.h
new file mode 120000
index 0000000..ded3741
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_hw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.c
new file mode 120000
index 0000000..d9f065f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.h
new file mode 120000
index 0000000..2169a38
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.c
new file mode 120000
index 0000000..8009a7b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.h
new file mode 120000
index 0000000..992439d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.c
new file mode 120000
index 0000000..5890d94
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.h
new file mode 120000
index 0000000..1c63b6e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.c
new file mode 120000
index 0000000..7c68ffe
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.h
new file mode 120000
index 0000000..ffcb83d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.c
new file mode 120000
index 0000000..7e0bd20
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.h
new file mode 120000
index 0000000..ef0bb5d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_regs.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_regs.h
new file mode 120000
index 0000000..391d5df
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_regs.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/igb.h b/kernel/drivers/xenomai/net/drivers/igb/igb.h
new file mode 120000
index 0000000..18c03a5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/igb.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/igb_hwmon.c b/kernel/drivers/xenomai/net/drivers/igb/igb_hwmon.c
new file mode 120000
index 0000000..72486f0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/igb_hwmon.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/igb_main.c b/kernel/drivers/xenomai/net/drivers/igb/igb_main.c
new file mode 120000
index 0000000..d26d2cd
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/igb_main.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/loopback.c b/kernel/drivers/xenomai/net/drivers/loopback.c
new file mode 120000
index 0000000..f1ab73a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/loopback.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/macb.c b/kernel/drivers/xenomai/net/drivers/macb.c
new file mode 120000
index 0000000..a227f81
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/macb.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile
new file mode 120000
index 0000000..8ded6f9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c
new file mode 120000
index 0000000..3d95c32
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h
new file mode 120000
index 0000000..25f9694
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c b/kernel/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c
new file mode 120000
index 0000000..3bb85ab
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc8xx_enet.c b/kernel/drivers/xenomai/net/drivers/mpc8xx_enet.c
new file mode 120000
index 0000000..299866a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc8xx_enet.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc8xx_fec.c b/kernel/drivers/xenomai/net/drivers/mpc8xx_fec.c
new file mode 120000
index 0000000..fae6619
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc8xx_fec.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/natsemi.c b/kernel/drivers/xenomai/net/drivers/natsemi.c
new file mode 120000
index 0000000..0f25145
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/natsemi.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/pcnet32.c b/kernel/drivers/xenomai/net/drivers/pcnet32.c
new file mode 120000
index 0000000..1cede2f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/pcnet32.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/r8169.c b/kernel/drivers/xenomai/net/drivers/r8169.c
new file mode 120000
index 0000000..56b7ae9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/r8169.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/rt_at91_ether.h b/kernel/drivers/xenomai/net/drivers/rt_at91_ether.h
new file mode 120000
index 0000000..69fb95d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/rt_at91_ether.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_at91_ether.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/rt_eth1394.h b/kernel/drivers/xenomai/net/drivers/rt_eth1394.h
new file mode 120000
index 0000000..5cf6d26
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/rt_eth1394.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_eth1394.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/rt_macb.h b/kernel/drivers/xenomai/net/drivers/rt_macb.h
new file mode 120000
index 0000000..431147b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/rt_macb.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_macb.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/21142.c b/kernel/drivers/xenomai/net/drivers/tulip/21142.c
new file mode 120000
index 0000000..266476c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/21142.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/21142.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/Makefile b/kernel/drivers/xenomai/net/drivers/tulip/Makefile
new file mode 120000
index 0000000..186e9f5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/eeprom.c b/kernel/drivers/xenomai/net/drivers/tulip/eeprom.c
new file mode 120000
index 0000000..c6fa166
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/eeprom.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/eeprom.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/interrupt.c b/kernel/drivers/xenomai/net/drivers/tulip/interrupt.c
new file mode 120000
index 0000000..89e9c98
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/interrupt.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/interrupt.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/media.c b/kernel/drivers/xenomai/net/drivers/tulip/media.c
new file mode 120000
index 0000000..041379d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/media.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/media.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/pnic.c b/kernel/drivers/xenomai/net/drivers/tulip/pnic.c
new file mode 120000
index 0000000..637f52d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/pnic.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/pnic2.c b/kernel/drivers/xenomai/net/drivers/tulip/pnic2.c
new file mode 120000
index 0000000..3689226
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/pnic2.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic2.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/tulip.h b/kernel/drivers/xenomai/net/drivers/tulip/tulip.h
new file mode 120000
index 0000000..097fe09
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/tulip.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/tulip_core.c b/kernel/drivers/xenomai/net/drivers/tulip/tulip_core.c
new file mode 120000
index 0000000..e894d6d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/tulip_core.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip_core.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/via-rhine.c b/kernel/drivers/xenomai/net/drivers/via-rhine.c
new file mode 120000
index 0000000..393ebe0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/via-rhine.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/via-rhine.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/Kconfig b/kernel/drivers/xenomai/net/stack/Kconfig
new file mode 120000
index 0000000..9b30843
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/Makefile b/kernel/drivers/xenomai/net/stack/Makefile
new file mode 120000
index 0000000..00de54e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/corectl.c b/kernel/drivers/xenomai/net/stack/corectl.c
new file mode 120000
index 0000000..673509b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/corectl.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/corectl.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/eth.c b/kernel/drivers/xenomai/net/stack/eth.c
new file mode 120000
index 0000000..37a7b72
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/eth.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/eth.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ethernet/eth.h b/kernel/drivers/xenomai/net/stack/include/ethernet/eth.h
new file mode 120000
index 0000000..b9481c0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ethernet/eth.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ethernet/eth.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/af_inet.h b/kernel/drivers/xenomai/net/stack/include/ipv4/af_inet.h
new file mode 120000
index 0000000..00c836d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/af_inet.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/af_inet.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/arp.h b/kernel/drivers/xenomai/net/stack/include/ipv4/arp.h
new file mode 120000
index 0000000..da86d2b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/arp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/arp.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/icmp.h b/kernel/drivers/xenomai/net/stack/include/ipv4/icmp.h
new file mode 120000
index 0000000..a0fb11c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/icmp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/icmp.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h
new file mode 120000
index 0000000..1c11a2b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_fragment.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/ip_input.h b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_input.h
new file mode 120000
index 0000000..62c806d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_input.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_input.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/ip_output.h b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_output.h
new file mode 120000
index 0000000..e808ec3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_output.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_output.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/ip_sock.h b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_sock.h
new file mode 120000
index 0000000..d90d3a2
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_sock.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_sock.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/protocol.h b/kernel/drivers/xenomai/net/stack/include/ipv4/protocol.h
new file mode 120000
index 0000000..643408a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/protocol.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/protocol.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/route.h b/kernel/drivers/xenomai/net/stack/include/ipv4/route.h
new file mode 120000
index 0000000..d289da0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/route.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/route.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/tcp.h b/kernel/drivers/xenomai/net/stack/include/ipv4/tcp.h
new file mode 120000
index 0000000..43f4287
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/tcp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/tcp.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/udp.h b/kernel/drivers/xenomai/net/stack/include/ipv4/udp.h
new file mode 120000
index 0000000..b65b654
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/udp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/udp.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4_chrdev.h b/kernel/drivers/xenomai/net/stack/include/ipv4_chrdev.h
new file mode 120000
index 0000000..b31a416
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4_chrdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4_chrdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/nomac_chrdev.h b/kernel/drivers/xenomai/net/stack/include/nomac_chrdev.h
new file mode 120000
index 0000000..366c1b2
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/nomac_chrdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/nomac_chrdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h
new file mode 120000
index 0000000..7e2a1fd
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h
new file mode 120000
index 0000000..45e3024
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_client_event.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h
new file mode 120000
index 0000000..2339ba4
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_conn_event.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h
new file mode 120000
index 0000000..7e12020
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_event.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h
new file mode 120000
index 0000000..86f06b7
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_file.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h
new file mode 120000
index 0000000..6e4e6f9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_frame.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h
new file mode 120000
index 0000000..17fe163
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_ioctl.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h
new file mode 120000
index 0000000..804f5f3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_proc.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h
new file mode 120000
index 0000000..57e6e2b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_timer.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg_chrdev.h b/kernel/drivers/xenomai/net/stack/include/rtcfg_chrdev.h
new file mode 120000
index 0000000..a8453a6
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg_chrdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg_chrdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtdev.h b/kernel/drivers/xenomai/net/stack/include/rtdev.h
new file mode 120000
index 0000000..3c4d68d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtdev_mgr.h b/kernel/drivers/xenomai/net/stack/include/rtdev_mgr.h
new file mode 120000
index 0000000..e594fb0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtdev_mgr.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev_mgr.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac.h b/kernel/drivers/xenomai/net/stack/include/rtmac.h
new file mode 120000
index 0000000..476f312
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h
new file mode 120000
index 0000000..88578ef
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h
new file mode 120000
index 0000000..569606f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_dev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h
new file mode 120000
index 0000000..9a0ad0a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_ioctl.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h
new file mode 120000
index 0000000..284e21b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_proto.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h
new file mode 120000
index 0000000..e562573
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_disc.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h
new file mode 120000
index 0000000..3876ab3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proc.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h
new file mode 120000
index 0000000..88fee6b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proto.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h
new file mode 120000
index 0000000..504f7a5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_vnic.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h
new file mode 120000
index 0000000..a332fc0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma.h
\ No newline at end of file
diff --git
a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h new file mode 120000 index 0000000..00e9e92 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_dev.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h new file mode 120000 index 0000000..4d3d2db --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_ioctl.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h new file mode 120000 index 0000000..d93a3dc --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_proto.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h new file mode 120000 index 0000000..38db4a3 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_worker.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_checksum.h b/kernel/drivers/xenomai/net/stack/include/rtnet_checksum.h new file mode 120000 index 0000000..ff42b33 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtnet_checksum.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_checksum.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_chrdev.h b/kernel/drivers/xenomai/net/stack/include/rtnet_chrdev.h new file mode 120000 index 0000000..d22aed3 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtnet_chrdev.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_chrdev.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_internal.h b/kernel/drivers/xenomai/net/stack/include/rtnet_internal.h new file mode 120000 index 0000000..424b223 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtnet_internal.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_internal.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_iovec.h b/kernel/drivers/xenomai/net/stack/include/rtnet_iovec.h new file mode 120000 index 0000000..4c1e22b --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtnet_iovec.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_iovec.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_port.h b/kernel/drivers/xenomai/net/stack/include/rtnet_port.h new file mode 120000 index 0000000..eface39 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtnet_port.h @@ -0,0 +1 @@ 
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_port.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_rtpc.h b/kernel/drivers/xenomai/net/stack/include/rtnet_rtpc.h new file mode 120000 index 0000000..cc7a107 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtnet_rtpc.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_rtpc.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_socket.h b/kernel/drivers/xenomai/net/stack/include/rtnet_socket.h new file mode 120000 index 0000000..20aeea1 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtnet_socket.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_socket.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtskb.h b/kernel/drivers/xenomai/net/stack/include/rtskb.h new file mode 120000 index 0000000..3289f85 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtskb.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtskb_fifo.h b/kernel/drivers/xenomai/net/stack/include/rtskb_fifo.h new file mode 120000 index 0000000..418a2f5 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtskb_fifo.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb_fifo.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtwlan.h b/kernel/drivers/xenomai/net/stack/include/rtwlan.h new file mode 120000 index 0000000..560fc39 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtwlan.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/rtwlan_io.h b/kernel/drivers/xenomai/net/stack/include/rtwlan_io.h new file mode 120000 index 0000000..e90bc6e --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/rtwlan_io.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan_io.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/stack_mgr.h b/kernel/drivers/xenomai/net/stack/include/stack_mgr.h new file mode 120000 index 0000000..a8caad8 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/stack_mgr.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/stack_mgr.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/include/tdma_chrdev.h b/kernel/drivers/xenomai/net/stack/include/tdma_chrdev.h new file mode 120000 index 0000000..c6e60fc --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/include/tdma_chrdev.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/tdma_chrdev.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/iovec.c b/kernel/drivers/xenomai/net/stack/iovec.c new file mode 120000 index 0000000..45a892b --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/iovec.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/iovec.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/Kconfig 
b/kernel/drivers/xenomai/net/stack/ipv4/Kconfig new file mode 120000 index 0000000..7f181fb --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/Makefile b/kernel/drivers/xenomai/net/stack/ipv4/Makefile new file mode 120000 index 0000000..bc89433 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/af_inet.c b/kernel/drivers/xenomai/net/stack/ipv4/af_inet.c new file mode 120000 index 0000000..d887393 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/af_inet.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/af_inet.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/arp.c b/kernel/drivers/xenomai/net/stack/ipv4/arp.c new file mode 120000 index 0000000..81a5aa3 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/arp.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/arp.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/icmp.c b/kernel/drivers/xenomai/net/stack/ipv4/icmp.c new file mode 120000 index 0000000..8673d88 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/icmp.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/icmp.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/ip_fragment.c b/kernel/drivers/xenomai/net/stack/ipv4/ip_fragment.c new file mode 120000 index 0000000..805c5b4 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/ip_fragment.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_fragment.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/ip_input.c b/kernel/drivers/xenomai/net/stack/ipv4/ip_input.c new file mode 120000 index 0000000..981f17c --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/ip_input.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_input.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/ip_output.c b/kernel/drivers/xenomai/net/stack/ipv4/ip_output.c new file mode 120000 index 0000000..75cecb0 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/ip_output.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_output.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/ip_sock.c b/kernel/drivers/xenomai/net/stack/ipv4/ip_sock.c new file mode 120000 index 0000000..8b59934 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/ip_sock.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_sock.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/protocol.c b/kernel/drivers/xenomai/net/stack/ipv4/protocol.c new file mode 120000 index 0000000..634f4e4 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/protocol.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/protocol.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/route.c 
b/kernel/drivers/xenomai/net/stack/ipv4/route.c new file mode 120000 index 0000000..94f66a2 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/route.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/route.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/Kconfig b/kernel/drivers/xenomai/net/stack/ipv4/tcp/Kconfig new file mode 120000 index 0000000..b7e9ae1 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/Makefile b/kernel/drivers/xenomai/net/stack/ipv4/tcp/Makefile new file mode 120000 index 0000000..3d49e2d --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/tcp.c b/kernel/drivers/xenomai/net/stack/ipv4/tcp/tcp.c new file mode 120000 index 0000000..23bd53e --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/tcp.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/tcp.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c b/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c new file mode 120000 index 0000000..0bf7b27 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h b/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h new file mode 120000 index 0000000..5b8a527 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/udp/Kconfig b/kernel/drivers/xenomai/net/stack/ipv4/udp/Kconfig new file mode 120000 index 0000000..cf6793b --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/udp/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/udp/Makefile b/kernel/drivers/xenomai/net/stack/ipv4/udp/Makefile new file mode 120000 index 0000000..9e6bdf6 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/udp/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/ipv4/udp/udp.c b/kernel/drivers/xenomai/net/stack/ipv4/udp/udp.c new file mode 120000 index 0000000..ac053dc --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/ipv4/udp/udp.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/udp.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/packet/Kconfig b/kernel/drivers/xenomai/net/stack/packet/Kconfig new file mode 120000 index 0000000..7eed8ca --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/packet/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Kconfig \ 
No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/packet/Makefile b/kernel/drivers/xenomai/net/stack/packet/Makefile new file mode 120000 index 0000000..8542ff3 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/packet/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/packet/af_packet.c b/kernel/drivers/xenomai/net/stack/packet/af_packet.c new file mode 120000 index 0000000..c460e64 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/packet/af_packet.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/af_packet.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/Kconfig b/kernel/drivers/xenomai/net/stack/rtcfg/Kconfig new file mode 120000 index 0000000..2f4060e --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/Makefile b/kernel/drivers/xenomai/net/stack/rtcfg/Makefile new file mode 120000 index 0000000..98efb5e --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c new file mode 120000 index 0000000..e1443a5 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_client_event.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c new file mode 120000 index 0000000..c87c3be --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_conn_event.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c new file mode 120000 index 0000000..40b0e21 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_event.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c new file mode 120000 index 0000000..a283f7d --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_file.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c new file mode 120000 index 0000000..47b9f15 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_frame.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c new file mode 120000 index 0000000..3b8a27f --- 
/dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_ioctl.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c new file mode 120000 index 0000000..fe38d0b --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_module.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c new file mode 120000 index 0000000..0bfb928 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_proc.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c new file mode 120000 index 0000000..de1d308 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_timer.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtdev.c b/kernel/drivers/xenomai/net/stack/rtdev.c new file mode 120000 index 0000000..d8c935c --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtdev.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtdev_mgr.c b/kernel/drivers/xenomai/net/stack/rtdev_mgr.c new file mode 120000 index 0000000..fdcb124 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtdev_mgr.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev_mgr.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/Kconfig b/kernel/drivers/xenomai/net/stack/rtmac/Kconfig new file mode 120000 index 0000000..a603e39 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/Makefile b/kernel/drivers/xenomai/net/stack/rtmac/Makefile new file mode 120000 index 0000000..b2433a5 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/Kconfig b/kernel/drivers/xenomai/net/stack/rtmac/nomac/Kconfig new file mode 120000 index 0000000..11eabd8 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/Makefile b/kernel/drivers/xenomai/net/stack/rtmac/nomac/Makefile new file mode 120000 index 0000000..a268292 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c 
b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c new file mode 120000 index 0000000..8cacc9a --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_dev.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c new file mode 120000 index 0000000..a5429a9 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_ioctl.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c new file mode 120000 index 0000000..49f992e --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_module.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c new file mode 120000 index 0000000..4717f17 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_proto.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_disc.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_disc.c new file mode 120000 index 0000000..7e5725b --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_disc.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_disc.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_module.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_module.c new file mode 120000 index 0000000..2905432 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_module.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_module.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proc.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proc.c new file mode 120000 index 0000000..406357f --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proc.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proc.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proto.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proto.c new file mode 120000 index 0000000..d4095f8 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proto.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proto.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_syms.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_syms.c new file mode 120000 index 0000000..13ba402 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_syms.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_syms.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c new file mode 120000 index 0000000..d6e8a3c 
--- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_vnic.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/Kconfig b/kernel/drivers/xenomai/net/stack/rtmac/tdma/Kconfig new file mode 120000 index 0000000..5cec7e1 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/Makefile b/kernel/drivers/xenomai/net/stack/rtmac/tdma/Makefile new file mode 120000 index 0000000..50923b3 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c new file mode 120000 index 0000000..2c0fbb0 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_dev.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c new file mode 120000 index 0000000..f9a94c9 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_ioctl.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c new file mode 120000 index 0000000..824d271 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_module.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c new file mode 120000 index 0000000..9b337d7 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_proto.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c new file mode 120000 index 0000000..0896aee --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_worker.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtnet_chrdev.c b/kernel/drivers/xenomai/net/stack/rtnet_chrdev.c new file mode 120000 index 0000000..9803648 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtnet_chrdev.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_chrdev.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtnet_module.c b/kernel/drivers/xenomai/net/stack/rtnet_module.c new file mode 120000 index 0000000..446daa3 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtnet_module.c @@ -0,0 +1 @@ 
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_module.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtnet_rtpc.c b/kernel/drivers/xenomai/net/stack/rtnet_rtpc.c new file mode 120000 index 0000000..dd7b978 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtnet_rtpc.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_rtpc.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtskb.c b/kernel/drivers/xenomai/net/stack/rtskb.c new file mode 120000 index 0000000..34fda4a --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtskb.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtskb.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/rtwlan.c b/kernel/drivers/xenomai/net/stack/rtwlan.c new file mode 120000 index 0000000..b8b1f7c --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/rtwlan.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtwlan.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/socket.c b/kernel/drivers/xenomai/net/stack/socket.c new file mode 120000 index 0000000..901d381 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/socket.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/socket.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/net/stack/stack_mgr.c b/kernel/drivers/xenomai/net/stack/stack_mgr.c new file mode 120000 index 0000000..a8e5ad1 --- /dev/null +++ b/kernel/drivers/xenomai/net/stack/stack_mgr.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/stack_mgr.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/serial/16550A.c b/kernel/drivers/xenomai/serial/16550A.c new file mode 120000 index 0000000..1a3cb18 --- /dev/null +++ b/kernel/drivers/xenomai/serial/16550A.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/serial/16550A_io.h b/kernel/drivers/xenomai/serial/16550A_io.h new file mode 120000 index 0000000..42d9558 --- /dev/null +++ b/kernel/drivers/xenomai/serial/16550A_io.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_io.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/serial/16550A_pci.h b/kernel/drivers/xenomai/serial/16550A_pci.h new file mode 120000 index 0000000..006336c --- /dev/null +++ b/kernel/drivers/xenomai/serial/16550A_pci.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pci.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/serial/16550A_pnp.h b/kernel/drivers/xenomai/serial/16550A_pnp.h new file mode 120000 index 0000000..ba90fce --- /dev/null +++ b/kernel/drivers/xenomai/serial/16550A_pnp.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pnp.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/serial/Kconfig b/kernel/drivers/xenomai/serial/Kconfig new file mode 120000 index 0000000..9366f46 --- /dev/null +++ b/kernel/drivers/xenomai/serial/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/serial/Makefile b/kernel/drivers/xenomai/serial/Makefile new file mode 120000 index 
0000000..eb08892 --- /dev/null +++ b/kernel/drivers/xenomai/serial/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/serial/mpc52xx_uart.c b/kernel/drivers/xenomai/serial/mpc52xx_uart.c new file mode 120000 index 0000000..4c548ef --- /dev/null +++ b/kernel/drivers/xenomai/serial/mpc52xx_uart.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/mpc52xx_uart.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/serial/rt_imx_uart.c b/kernel/drivers/xenomai/serial/rt_imx_uart.c new file mode 120000 index 0000000..cc37903 --- /dev/null +++ b/kernel/drivers/xenomai/serial/rt_imx_uart.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/rt_imx_uart.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/spi/Kconfig b/kernel/drivers/xenomai/spi/Kconfig new file mode 120000 index 0000000..d0bf131 --- /dev/null +++ b/kernel/drivers/xenomai/spi/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/spi/Makefile b/kernel/drivers/xenomai/spi/Makefile new file mode 120000 index 0000000..a62ea3b --- /dev/null +++ b/kernel/drivers/xenomai/spi/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/spi/spi-bcm2835.c b/kernel/drivers/xenomai/spi/spi-bcm2835.c new file mode 120000 index 0000000..8fb2df6 --- /dev/null +++ b/kernel/drivers/xenomai/spi/spi-bcm2835.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-bcm2835.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/spi/spi-device.c b/kernel/drivers/xenomai/spi/spi-device.c new file mode 120000 index 0000000..47dc016 --- /dev/null +++ b/kernel/drivers/xenomai/spi/spi-device.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/spi/spi-device.h b/kernel/drivers/xenomai/spi/spi-device.h new file mode 120000 index 0000000..1dd400a --- /dev/null +++ b/kernel/drivers/xenomai/spi/spi-device.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/spi/spi-master.c b/kernel/drivers/xenomai/spi/spi-master.c new file mode 120000 index 0000000..d6f160d --- /dev/null +++ b/kernel/drivers/xenomai/spi/spi-master.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/spi/spi-master.h b/kernel/drivers/xenomai/spi/spi-master.h new file mode 120000 index 0000000..343d478 --- /dev/null +++ b/kernel/drivers/xenomai/spi/spi-master.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.h \ No newline at end of file diff --git a/kernel/drivers/xenomai/spi/spi-omap2-mcspi-rt.c b/kernel/drivers/xenomai/spi/spi-omap2-mcspi-rt.c new file mode 120000 index 0000000..82aec04 --- /dev/null +++ b/kernel/drivers/xenomai/spi/spi-omap2-mcspi-rt.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-omap2-mcspi-rt.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/spi/spi-sun6i.c 
b/kernel/drivers/xenomai/spi/spi-sun6i.c new file mode 120000 index 0000000..885e4e0 --- /dev/null +++ b/kernel/drivers/xenomai/spi/spi-sun6i.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-sun6i.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/testing/Kconfig b/kernel/drivers/xenomai/testing/Kconfig new file mode 120000 index 0000000..6c82f99 --- /dev/null +++ b/kernel/drivers/xenomai/testing/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/testing/Makefile b/kernel/drivers/xenomai/testing/Makefile new file mode 120000 index 0000000..137da61 --- /dev/null +++ b/kernel/drivers/xenomai/testing/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/testing/heapcheck.c b/kernel/drivers/xenomai/testing/heapcheck.c new file mode 120000 index 0000000..b912a63 --- /dev/null +++ b/kernel/drivers/xenomai/testing/heapcheck.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/heapcheck.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/testing/rtdmtest.c b/kernel/drivers/xenomai/testing/rtdmtest.c new file mode 120000 index 0000000..873a071 --- /dev/null +++ b/kernel/drivers/xenomai/testing/rtdmtest.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/rtdmtest.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/testing/switchtest.c b/kernel/drivers/xenomai/testing/switchtest.c new file mode 120000 index 0000000..cd9cbf8 --- /dev/null +++ b/kernel/drivers/xenomai/testing/switchtest.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/switchtest.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/testing/timerbench.c b/kernel/drivers/xenomai/testing/timerbench.c new file mode 120000 index 0000000..8568fdb --- /dev/null +++ b/kernel/drivers/xenomai/testing/timerbench.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/timerbench.c \ No newline at end of file diff --git a/kernel/drivers/xenomai/udd/Kconfig b/kernel/drivers/xenomai/udd/Kconfig new file mode 120000 index 0000000..1cf8e77 --- /dev/null +++ b/kernel/drivers/xenomai/udd/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/udd/Kconfig \ No newline at end of file diff --git a/kernel/drivers/xenomai/udd/Makefile b/kernel/drivers/xenomai/udd/Makefile new file mode 120000 index 0000000..daeb2ea --- /dev/null +++ b/kernel/drivers/xenomai/udd/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/udd/Makefile \ No newline at end of file diff --git a/kernel/drivers/xenomai/udd/udd.c b/kernel/drivers/xenomai/udd/udd.c new file mode 120000 index 0000000..b5e71ff --- /dev/null +++ b/kernel/drivers/xenomai/udd/udd.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/udd/udd.c \ No newline at end of file diff --git a/kernel/fs/eventfd.c b/kernel/fs/eventfd.c index 4a14295..cb99868 100644 --- a/kernel/fs/eventfd.c +++ b/kernel/fs/eventfd.c @@ -266,17 +266,17 @@ return sizeof(ucnt); } -static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count, - loff_t *ppos) +static ssize_t eventfd_write(struct kiocb *iocb, struct iov_iter *from) { + struct file *file = iocb->ki_filp; 
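/*
 * A usage note, not part of the patch: switching from .write to
 * .write_iter hands this handler a struct kiocb plus an iov_iter
 * instead of a raw user pointer, so write(), writev() and other
 * kiocb-based paths share one implementation. Userspace is unaffected;
 * an eventfd write still transfers exactly one 64-bit counter:
 *
 *	uint64_t cnt = 1;
 *	write(efd, &cnt, sizeof(cnt));
 *
 * Anything shorter fails with -EINVAL, per the iov_iter_count() check
 * below.
 */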
struct eventfd_ctx *ctx = file->private_data; ssize_t res; __u64 ucnt; DECLARE_WAITQUEUE(wait, current); - if (count < sizeof(ucnt)) + if (iov_iter_count(from) < sizeof(ucnt)) return -EINVAL; - if (copy_from_user(&ucnt, buf, sizeof(ucnt))) + if (copy_from_iter(&ucnt, sizeof(ucnt), from) != sizeof(ucnt)) return -EFAULT; if (ucnt == ULLONG_MAX) return -EINVAL; @@ -333,7 +333,7 @@ .release = eventfd_release, .poll = eventfd_poll, .read_iter = eventfd_read, - .write = eventfd_write, + .write_iter = eventfd_write, .llseek = noop_llseek, }; diff --git a/kernel/fs/exec.c b/kernel/fs/exec.c index b798885..30f75c0 100644 --- a/kernel/fs/exec.c +++ b/kernel/fs/exec.c @@ -34,6 +34,7 @@ #include <linux/swap.h> #include <linux/string.h> #include <linux/init.h> +#include <linux/irq_pipeline.h> #include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/sched/signal.h> @@ -68,6 +69,7 @@ #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/tlb.h> +#include <asm/dovetail.h> #include <trace/events/task.h> #include "internal.h" @@ -981,6 +983,7 @@ struct task_struct *tsk; struct mm_struct *old_mm, *active_mm; int ret; + unsigned long flags; /* Notify parent that we're no longer interested in the old VM */ tsk = current; @@ -1013,6 +1016,7 @@ local_irq_disable(); active_mm = tsk->active_mm; + protect_inband_mm(flags); tsk->active_mm = mm; tsk->mm = mm; /* @@ -1021,10 +1025,17 @@ * lazy tlb mm refcounting when these are updated by context * switches. Not all architectures can handle irqs off over * activate_mm yet. + * + * irq_pipeline: activate_mm() allowing irqs off context is a + * requirement. e.g. TLB shootdown must not involve IPIs. We + * make sure protect_inband_mm() is in effect while switching + * in and activating the new mm by forcing + * CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM on. */ if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) local_irq_enable(); activate_mm(active_mm, mm); + unprotect_inband_mm(flags); if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM)) local_irq_enable(); tsk->mm->vmacache_seqnum = 0; @@ -1302,6 +1313,9 @@ if (retval) goto out_unlock; + /* Tell Dovetail about the ongoing exec(). */ + arch_dovetail_exec_prepare(); + /* * Ensure that the uaccess routines can actually operate on userspace * pointers: diff --git a/kernel/fs/fcntl.c b/kernel/fs/fcntl.c index fcf34f8..0b131a8 100644 --- a/kernel/fs/fcntl.c +++ b/kernel/fs/fcntl.c @@ -1044,7 +1044,7 @@ * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY * is defined as O_NONBLOCK on some platforms and not on others. 
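 * (On the change just below: the expected bit count rises from 21 to
 * 22 because this tree's companion core defines one extra valid open
 * flag -- presumably O_OOB for out-of-band I/O; HWEIGHT32 merely
 * re-counts the remaining flag bits.)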
*/ - BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != + BUILD_BUG_ON(22 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) | __FMODE_EXEC | __FMODE_NONOTIFY)); diff --git a/kernel/fs/file.c b/kernel/fs/file.c index 97a0cd3..6d84335 100644 --- a/kernel/fs/file.c +++ b/kernel/fs/file.c @@ -429,6 +429,7 @@ if (set & 1) { struct file * file = xchg(&fdt->fd[i], NULL); if (file) { + uninstall_inband_fd(i, file, files); filp_close(file, files); cond_resched(); } @@ -646,6 +647,7 @@ fdt = files_fdtable(files); BUG_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); + install_inband_fd(fd, file, files); spin_unlock(&files->file_lock); return; } @@ -654,6 +656,7 @@ fdt = rcu_dereference_sched(files->fdt); BUG_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); + install_inband_fd(fd, file, files); rcu_read_unlock_sched(); } @@ -682,6 +685,7 @@ goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); + uninstall_inband_fd(fd, file, files); out_unlock: spin_unlock(&files->file_lock); @@ -799,6 +803,8 @@ goto out_err; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); + uninstall_inband_fd(fd, file, files); + spin_unlock(&files->file_lock); get_file(file); *res = file; return 0; @@ -850,6 +856,7 @@ continue; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); + uninstall_inband_fd(fd, file, files); spin_unlock(&files->file_lock); filp_close(file, files); cond_resched(); @@ -1088,6 +1095,7 @@ __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); + replace_inband_fd(fd, file, files); spin_unlock(&files->file_lock); if (tofree) diff --git a/kernel/fs/ioctl.c b/kernel/fs/ioctl.c index ac3b386..ad1d738 100644 --- a/kernel/fs/ioctl.c +++ b/kernel/fs/ioctl.c @@ -790,6 +790,22 @@ } EXPORT_SYMBOL(compat_ptr_ioctl); +/** + * compat_ptr_oob_ioctl - generic implementation of .compat_oob_ioctl file operation + * + * The equivalent of compat_ptr_ioctl, dealing with out-of-band ioctl + * calls. Management of this handler is delegated to the code + * implementing the out-of-band ioctl() syscall in the companion core. + */ +long compat_ptr_oob_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + if (!file->f_op->oob_ioctl) + return -ENOIOCTLCMD; + + return file->f_op->oob_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); +} +EXPORT_SYMBOL(compat_ptr_oob_ioctl); + COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, compat_ulong_t, arg) { diff --git a/kernel/fs/udf/inode.c b/kernel/fs/udf/inode.c index d32b836..e94a18b 100644 --- a/kernel/fs/udf/inode.c +++ b/kernel/fs/udf/inode.c @@ -438,6 +438,12 @@ iinfo->i_next_alloc_goal++; } + /* + * Block beyond EOF and prealloc extents? Just discard preallocation + * as it is not useful and complicates things. + */ + if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents) + udf_discard_prealloc(inode); udf_clear_extent_cache(inode); phys = inode_getblk(inode, block, &err, &new); if (!phys) @@ -487,8 +493,6 @@ uint32_t add; int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); struct super_block *sb = inode->i_sb; - struct kernel_lb_addr prealloc_loc = {}; - uint32_t prealloc_len = 0; struct udf_inode_info *iinfo; int err; @@ -507,19 +511,6 @@ iinfo->i_lenExtents = (iinfo->i_lenExtents + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); - } - - /* Last extent are just preallocated blocks? 
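 * (The branch removed here used to stash the preallocation extent and
 * reattach it at the out: label below; with udf_discard_prealloc() now
 * called before any extension -- see the inode.c hunks above -- there
 * is no preallocation left to preserve.)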
*/ - if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == - EXT_NOT_RECORDED_ALLOCATED) { - /* Save the extent so that we can reattach it to the end */ - prealloc_loc = last_ext->extLocation; - prealloc_len = last_ext->extLength; - /* Mark the extent as a hole */ - last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); - last_ext->extLocation.logicalBlockNum = 0; - last_ext->extLocation.partitionReferenceNum = 0; } /* Can we merge with the previous extent? */ @@ -549,7 +540,7 @@ * more extents, we may need to enter possible following * empty indirect extent. */ - if (new_block_bytes || prealloc_len) + if (new_block_bytes) udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); } @@ -583,17 +574,6 @@ } out: - /* Do we have some preallocated blocks saved? */ - if (prealloc_len) { - err = udf_add_aext(inode, last_pos, &prealloc_loc, - prealloc_len, 1); - if (err) - return err; - last_ext->extLocation = prealloc_loc; - last_ext->extLength = prealloc_len; - count++; - } - /* last_pos should point to the last written extent... */ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) last_pos->offset -= sizeof(struct short_ad); @@ -609,13 +589,17 @@ static void udf_do_extend_final_block(struct inode *inode, struct extent_position *last_pos, struct kernel_long_ad *last_ext, - uint32_t final_block_len) + uint32_t new_elen) { - struct super_block *sb = inode->i_sb; uint32_t added_bytes; - added_bytes = final_block_len - - (last_ext->extLength & (sb->s_blocksize - 1)); + /* + * Extent already large enough? It may be already rounded up to block + * size... + */ + if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) + return; + added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); last_ext->extLength += added_bytes; UDF_I(inode)->i_lenExtents += added_bytes; @@ -632,12 +616,12 @@ int8_t etype; struct super_block *sb = inode->i_sb; sector_t first_block = newsize >> sb->s_blocksize_bits, offset; - unsigned long partial_final_block; + loff_t new_elen; int adsize; struct udf_inode_info *iinfo = UDF_I(inode); struct kernel_long_ad extent; int err = 0; - int within_final_block; + bool within_last_ext; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) adsize = sizeof(struct short_ad); @@ -646,8 +630,17 @@ else BUG(); + /* + * When creating hole in file, just don't bother with preserving + * preallocation. It likely won't be very useful anyway. + */ + udf_discard_prealloc(inode); + etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset); - within_final_block = (etype != -1); + within_last_ext = (etype != -1); + /* We don't expect extents past EOF... */ + WARN_ON_ONCE(within_last_ext && + elen > ((loff_t)offset + 1) << inode->i_blkbits); if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) || (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { @@ -663,19 +656,17 @@ extent.extLength |= etype << 30; } - partial_final_block = newsize & (sb->s_blocksize - 1); + new_elen = ((loff_t)offset << inode->i_blkbits) | + (newsize & (sb->s_blocksize - 1)); /* File has extent covering the new size (could happen when extending * inside a block)? 
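 * (Note for the two branches below: new_elen now encodes the full
 * length of the last extent -- whole blocks via offset << i_blkbits
 * plus the partial tail of newsize -- so both the in-extent and the
 * extension paths take the same argument instead of just the partial
 * final block.)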
*/ - if (within_final_block) { + if (within_last_ext) { /* Extending file within the last file block */ - udf_do_extend_final_block(inode, &epos, &extent, - partial_final_block); + udf_do_extend_final_block(inode, &epos, &extent, new_elen); } else { - loff_t add = ((loff_t)offset << sb->s_blocksize_bits) | - partial_final_block; - err = udf_do_extend_file(inode, &epos, &extent, add); + err = udf_do_extend_file(inode, &epos, &extent, new_elen); } if (err < 0) @@ -776,10 +767,11 @@ goto out_free; } - /* Are we beyond EOF? */ + /* Are we beyond EOF and preallocated extent? */ if (etype == -1) { int ret; loff_t hole_len; + isBeyondEOF = true; if (count) { if (c) diff --git a/kernel/fs/udf/truncate.c b/kernel/fs/udf/truncate.c index 532cda9..036ebd8 100644 --- a/kernel/fs/udf/truncate.c +++ b/kernel/fs/udf/truncate.c @@ -120,60 +120,42 @@ void udf_discard_prealloc(struct inode *inode) { - struct extent_position epos = { NULL, 0, {0, 0} }; + struct extent_position epos = {}; + struct extent_position prev_epos = {}; struct kernel_lb_addr eloc; uint32_t elen; uint64_t lbcount = 0; int8_t etype = -1, netype; - int adsize; struct udf_inode_info *iinfo = UDF_I(inode); + int bsize = 1 << inode->i_blkbits; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB || - inode->i_size == iinfo->i_lenExtents) + ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize)) return; - - if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) - adsize = sizeof(struct short_ad); - else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) - adsize = sizeof(struct long_ad); - else - adsize = 0; epos.block = iinfo->i_location; /* Find the last extent in the file */ - while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { - etype = netype; + while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 0)) != -1) { + brelse(prev_epos.bh); + prev_epos = epos; + if (prev_epos.bh) + get_bh(prev_epos.bh); + + etype = udf_next_aext(inode, &epos, &eloc, &elen, 1); lbcount += elen; } if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { - epos.offset -= adsize; lbcount -= elen; - extent_trunc(inode, &epos, &eloc, etype, elen, 0); - if (!epos.bh) { - iinfo->i_lenAlloc = - epos.offset - - udf_file_entry_alloc_offset(inode); - mark_inode_dirty(inode); - } else { - struct allocExtDesc *aed = - (struct allocExtDesc *)(epos.bh->b_data); - aed->lengthAllocDescs = - cpu_to_le32(epos.offset - - sizeof(struct allocExtDesc)); - if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || - UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) - udf_update_tag(epos.bh->b_data, epos.offset); - else - udf_update_tag(epos.bh->b_data, - sizeof(struct allocExtDesc)); - mark_buffer_dirty_inode(epos.bh, inode); - } + udf_delete_aext(inode, prev_epos); + udf_free_blocks(inode->i_sb, inode, &eloc, 0, + DIV_ROUND_UP(elen, 1 << inode->i_blkbits)); } /* This inode entry is in-memory only and thus we don't have to mark * the inode dirty */ iinfo->i_lenExtents = lbcount; brelse(epos.bh); + brelse(prev_epos.bh); } static void udf_update_alloc_ext_desc(struct inode *inode, diff --git a/kernel/include/asm-generic/atomic.h b/kernel/include/asm-generic/atomic.h index 11f96f4..34100ba 100644 --- a/kernel/include/asm-generic/atomic.h +++ b/kernel/include/asm-generic/atomic.h @@ -76,9 +76,9 @@ { \ unsigned long flags; \ \ - raw_local_irq_save(flags); \ + flags = hard_local_irq_save(); \ v->counter = v->counter c_op i; \ - raw_local_irq_restore(flags); \ + hard_local_irq_restore(flags); \ } #define ATOMIC_OP_RETURN(op, c_op) \ @@ -87,9 +87,9 @@ unsigned long flags; \ int ret; \ \ - 
raw_local_irq_save(flags); \ + flags = hard_local_irq_save(); \ ret = (v->counter = v->counter c_op i); \ - raw_local_irq_restore(flags); \ + hard_local_irq_restore(flags); \ \ return ret; \ } @@ -100,10 +100,10 @@ unsigned long flags; \ int ret; \ \ - raw_local_irq_save(flags); \ + flags = hard_local_irq_save(); \ ret = v->counter; \ v->counter = v->counter c_op i; \ - raw_local_irq_restore(flags); \ + hard_local_irq_restore(flags); \ \ return ret; \ } diff --git a/kernel/include/asm-generic/cmpxchg-local.h b/kernel/include/asm-generic/cmpxchg-local.h index f17f14f..67d712f 100644 --- a/kernel/include/asm-generic/cmpxchg-local.h +++ b/kernel/include/asm-generic/cmpxchg-local.h @@ -23,7 +23,7 @@ if (size == 8 && sizeof(unsigned long) != 8) wrong_size_cmpxchg(ptr); - raw_local_irq_save(flags); + flags = hard_local_irq_save(); switch (size) { case 1: prev = *(u8 *)ptr; if (prev == old) @@ -44,7 +44,7 @@ default: wrong_size_cmpxchg(ptr); } - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); return prev; } @@ -57,11 +57,11 @@ u64 prev; unsigned long flags; - raw_local_irq_save(flags); + flags = hard_local_irq_save(); prev = *(u64 *)ptr; if (prev == old) *(u64 *)ptr = new; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); return prev; } diff --git a/kernel/include/asm-generic/cmpxchg.h b/kernel/include/asm-generic/cmpxchg.h index 9a24510..475206b 100644 --- a/kernel/include/asm-generic/cmpxchg.h +++ b/kernel/include/asm-generic/cmpxchg.h @@ -32,10 +32,10 @@ #ifdef __xchg_u8 return __xchg_u8(x, ptr); #else - local_irq_save(flags); + flags = hard_local_irq_save(); ret = *(volatile u8 *)ptr; *(volatile u8 *)ptr = x; - local_irq_restore(flags); + hard_local_irq_restore(flags); return ret; #endif /* __xchg_u8 */ @@ -43,10 +43,10 @@ #ifdef __xchg_u16 return __xchg_u16(x, ptr); #else - local_irq_save(flags); + flags = hard_local_irq_save(); ret = *(volatile u16 *)ptr; *(volatile u16 *)ptr = x; - local_irq_restore(flags); + hard_local_irq_restore(flags); return ret; #endif /* __xchg_u16 */ @@ -54,10 +54,10 @@ #ifdef __xchg_u32 return __xchg_u32(x, ptr); #else - local_irq_save(flags); + flags = hard_local_irq_save(); ret = *(volatile u32 *)ptr; *(volatile u32 *)ptr = x; - local_irq_restore(flags); + hard_local_irq_restore(flags); return ret; #endif /* __xchg_u32 */ @@ -66,10 +66,10 @@ #ifdef __xchg_u64 return __xchg_u64(x, ptr); #else - local_irq_save(flags); + flags = hard_local_irq_save(); ret = *(volatile u64 *)ptr; *(volatile u64 *)ptr = x; - local_irq_restore(flags); + hard_local_irq_restore(flags); return ret; #endif /* __xchg_u64 */ #endif /* CONFIG_64BIT */ diff --git a/kernel/include/asm-generic/irq_pipeline.h b/kernel/include/asm-generic/irq_pipeline.h new file mode 100644 index 0000000..0f81ed0 --- /dev/null +++ b/kernel/include/asm-generic/irq_pipeline.h @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>. 
+ */ +#ifndef __ASM_GENERIC_IRQ_PIPELINE_H +#define __ASM_GENERIC_IRQ_PIPELINE_H + +#include <linux/kconfig.h> +#include <linux/types.h> + +#ifdef CONFIG_IRQ_PIPELINE + +unsigned long inband_irq_save(void); +void inband_irq_restore(unsigned long flags); +void inband_irq_enable(void); +void inband_irq_disable(void); +int inband_irqs_disabled(void); + +#define hard_cond_local_irq_enable() hard_local_irq_enable() +#define hard_cond_local_irq_disable() hard_local_irq_disable() +#define hard_cond_local_irq_save() hard_local_irq_save() +#define hard_cond_local_irq_restore(__flags) hard_local_irq_restore(__flags) + +#define hard_local_irq_save() native_irq_save() +#define hard_local_irq_restore(__flags) native_irq_restore(__flags) +#define hard_local_irq_enable() native_irq_enable() +#define hard_local_irq_disable() native_irq_disable() +#define hard_local_save_flags() native_save_flags() + +#define hard_irqs_disabled() native_irqs_disabled() +#define hard_irqs_disabled_flags(__flags) native_irqs_disabled_flags(__flags) + +void irq_pipeline_nmi_enter(void); +void irq_pipeline_nmi_exit(void); + +/* Swap then merge virtual and hardware interrupt states. */ +#define irqs_merge_flags(__flags, __stalled) \ + ({ \ + unsigned long __combo = \ + arch_irqs_virtual_to_native_flags(__stalled) | \ + arch_irqs_native_to_virtual_flags(__flags); \ + __combo; \ + }) + +/* Split the merged state back into virtual and hardware interrupt states. */ +#define irqs_split_flags(__combo, __stall_r) \ + ({ \ + unsigned long __virt = (__combo); \ + *(__stall_r) = hard_irqs_disabled_flags(__combo); \ + __virt &= ~arch_irqs_virtual_to_native_flags(*(__stall_r)); \ + arch_irqs_virtual_to_native_flags(__virt); \ + }) + +#define hard_local_irq_sync() native_irq_sync() + +#else /* !CONFIG_IRQ_PIPELINE */ + +#define hard_local_save_flags() ({ unsigned long __flags; \ + raw_local_save_flags(__flags); __flags; }) +#define hard_local_irq_enable() raw_local_irq_enable() +#define hard_local_irq_disable() raw_local_irq_disable() +#define hard_local_irq_save() ({ unsigned long __flags; \ + raw_local_irq_save(__flags); __flags; }) +#define hard_local_irq_restore(__flags) raw_local_irq_restore(__flags) + +#define hard_cond_local_irq_enable() do { } while(0) +#define hard_cond_local_irq_disable() do { } while(0) +#define hard_cond_local_irq_save() 0 +#define hard_cond_local_irq_restore(__flags) do { (void)(__flags); } while(0) + +#define hard_irqs_disabled() irqs_disabled() +#define hard_irqs_disabled_flags(__flags) raw_irqs_disabled_flags(__flags) + +static inline void irq_pipeline_nmi_enter(void) { } +static inline void irq_pipeline_nmi_exit(void) { } + +#define hard_local_irq_sync() do { } while (0) + +#endif /* !CONFIG_IRQ_PIPELINE */ + +#ifdef CONFIG_DEBUG_IRQ_PIPELINE +void check_inband_stage(void); +#define check_hard_irqs_disabled() \ + WARN_ON_ONCE(!hard_irqs_disabled()) +#else +static inline void check_inband_stage(void) { } +static inline int check_hard_irqs_disabled(void) { return 0; } +#endif + +extern bool irq_pipeline_oopsing; + +static __always_inline bool irqs_pipelined(void) +{ + return IS_ENABLED(CONFIG_IRQ_PIPELINE); +} + +static __always_inline bool irq_pipeline_debug(void) +{ + return IS_ENABLED(CONFIG_DEBUG_IRQ_PIPELINE) && + !irq_pipeline_oopsing; +} + +static __always_inline bool irq_pipeline_debug_locking(void) +{ + return IS_ENABLED(CONFIG_DEBUG_HARD_LOCKS); +} + +#endif /* __ASM_GENERIC_IRQ_PIPELINE_H */ diff --git a/kernel/include/asm-generic/percpu.h b/kernel/include/asm-generic/percpu.h index 6432a7f..8a35f48 100644 ---
a/kernel/include/asm-generic/percpu.h +++ b/kernel/include/asm-generic/percpu.h @@ -125,9 +125,9 @@ ({ \ typeof(pcp) ___ret; \ unsigned long ___flags; \ - raw_local_irq_save(___flags); \ + ___flags = hard_local_irq_save(); \ ___ret = raw_cpu_generic_read(pcp); \ - raw_local_irq_restore(___flags); \ + hard_local_irq_restore(___flags); \ ___ret; \ }) @@ -144,9 +144,9 @@ #define this_cpu_generic_to_op(pcp, val, op) \ do { \ unsigned long __flags; \ - raw_local_irq_save(__flags); \ + __flags = hard_local_irq_save(); \ raw_cpu_generic_to_op(pcp, val, op); \ - raw_local_irq_restore(__flags); \ + hard_local_irq_restore(__flags); \ } while (0) @@ -154,9 +154,9 @@ ({ \ typeof(pcp) __ret; \ unsigned long __flags; \ - raw_local_irq_save(__flags); \ + __flags = hard_local_irq_save(); \ __ret = raw_cpu_generic_add_return(pcp, val); \ - raw_local_irq_restore(__flags); \ + hard_local_irq_restore(__flags); \ __ret; \ }) @@ -164,9 +164,9 @@ ({ \ typeof(pcp) __ret; \ unsigned long __flags; \ - raw_local_irq_save(__flags); \ + __flags = hard_local_irq_save(); \ __ret = raw_cpu_generic_xchg(pcp, nval); \ - raw_local_irq_restore(__flags); \ + hard_local_irq_restore(__flags); \ __ret; \ }) @@ -174,9 +174,9 @@ ({ \ typeof(pcp) __ret; \ unsigned long __flags; \ - raw_local_irq_save(__flags); \ + __flags = hard_local_irq_save(); \ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \ - raw_local_irq_restore(__flags); \ + hard_local_irq_restore(__flags); \ __ret; \ }) @@ -184,10 +184,10 @@ ({ \ int __ret; \ unsigned long __flags; \ - raw_local_irq_save(__flags); \ + __flags = hard_local_irq_save(); \ __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ oval1, oval2, nval1, nval2); \ - raw_local_irq_restore(__flags); \ + hard_local_irq_restore(__flags); \ __ret; \ }) diff --git a/kernel/include/asm-generic/xenomai/dovetail/thread.h b/kernel/include/asm-generic/xenomai/dovetail/thread.h new file mode 120000 index 0000000..3e37870 --- /dev/null +++ b/kernel/include/asm-generic/xenomai/dovetail/thread.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h \ No newline at end of file diff --git a/kernel/include/asm-generic/xenomai/ipipe/thread.h b/kernel/include/asm-generic/xenomai/ipipe/thread.h new file mode 120000 index 0000000..e113f79 --- /dev/null +++ b/kernel/include/asm-generic/xenomai/ipipe/thread.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h \ No newline at end of file diff --git a/kernel/include/asm-generic/xenomai/machine.h b/kernel/include/asm-generic/xenomai/machine.h new file mode 120000 index 0000000..0b119c7 --- /dev/null +++ b/kernel/include/asm-generic/xenomai/machine.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h \ No newline at end of file diff --git a/kernel/include/asm-generic/xenomai/pci_ids.h b/kernel/include/asm-generic/xenomai/pci_ids.h new file mode 120000 index 0000000..fb9916d --- /dev/null +++ b/kernel/include/asm-generic/xenomai/pci_ids.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h \ No newline at end of file diff --git a/kernel/include/asm-generic/xenomai/syscall.h b/kernel/include/asm-generic/xenomai/syscall.h new file mode 120000 index 0000000..7f6597c --- /dev/null +++ b/kernel/include/asm-generic/xenomai/syscall.h @@ -0,0 +1 @@ 
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h \ No newline at end of file diff --git a/kernel/include/asm-generic/xenomai/syscall32.h b/kernel/include/asm-generic/xenomai/syscall32.h new file mode 120000 index 0000000..8f1d676 --- /dev/null +++ b/kernel/include/asm-generic/xenomai/syscall32.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h \ No newline at end of file diff --git a/kernel/include/asm-generic/xenomai/wrappers.h b/kernel/include/asm-generic/xenomai/wrappers.h new file mode 120000 index 0000000..07d4764 --- /dev/null +++ b/kernel/include/asm-generic/xenomai/wrappers.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h \ No newline at end of file diff --git a/kernel/include/dovetail/irq.h b/kernel/include/dovetail/irq.h new file mode 100644 index 0000000..ac8b531 --- /dev/null +++ b/kernel/include/dovetail/irq.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DOVETAIL_IRQ_H +#define _DOVETAIL_IRQ_H + +/* Placeholders for pre- and post-IRQ handling. */ + +static inline void irq_enter_pipeline(void) { } + +static inline void irq_exit_pipeline(void) { } + +#endif /* !_DOVETAIL_IRQ_H */ diff --git a/kernel/include/dovetail/mm_info.h b/kernel/include/dovetail/mm_info.h new file mode 100644 index 0000000..504bd1d --- /dev/null +++ b/kernel/include/dovetail/mm_info.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DOVETAIL_MM_INFO_H +#define _DOVETAIL_MM_INFO_H + +/* + * Placeholder for per-mm state information defined by the co-kernel. + */ + +struct oob_mm_state { +}; + +#endif /* !_DOVETAIL_MM_INFO_H */ diff --git a/kernel/include/dovetail/netdevice.h b/kernel/include/dovetail/netdevice.h new file mode 100644 index 0000000..06e8205 --- /dev/null +++ b/kernel/include/dovetail/netdevice.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DOVETAIL_NETDEVICE_H +#define _DOVETAIL_NETDEVICE_H + +/* + * Placeholder for per-device state information defined by the + * out-of-band network stack. + */ + +struct oob_netdev_state { +}; + +#endif /* !_DOVETAIL_NETDEVICE_H */ diff --git a/kernel/include/dovetail/poll.h b/kernel/include/dovetail/poll.h new file mode 100644 index 0000000..d15b14f --- /dev/null +++ b/kernel/include/dovetail/poll.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DOVETAIL_POLL_H +#define _DOVETAIL_POLL_H + +/* + * Placeholder for the out-of-band poll operation descriptor. + */ + +struct oob_poll_wait { +}; + +#endif /* !_DOVETAIL_POLL_H */ diff --git a/kernel/include/dovetail/spinlock.h b/kernel/include/dovetail/spinlock.h new file mode 100644 index 0000000..381031a --- /dev/null +++ b/kernel/include/dovetail/spinlock.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DOVETAIL_SPINLOCK_H +#define _DOVETAIL_SPINLOCK_H + +/* Placeholders for hard/hybrid spinlock modifiers. 
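+ *
+ * Editor's sketch (hypothetical, not part of this patch): a co-kernel
+ * providing real implementations would typically turn these empty
+ * hooks into tracking calls, e.g.:
+ *
+ *	static inline void hard_spin_lock_prepare(struct raw_spinlock *lock)
+ *	{
+ *		note_oob_spinlock(lock);
+ *	}
+ *
+ * where note_oob_spinlock() is an invented name, for illustration only.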
*/ + +struct raw_spinlock; + +static inline void hard_spin_lock_prepare(struct raw_spinlock *lock) +{ } + +static inline void hard_spin_unlock_finish(struct raw_spinlock *lock) +{ } + +static inline void hard_spin_trylock_prepare(struct raw_spinlock *lock) +{ } + +static inline void hard_spin_trylock_fail(struct raw_spinlock *lock) +{ } + +#endif /* !_DOVETAIL_SPINLOCK_H */ diff --git a/kernel/include/dovetail/thread_info.h b/kernel/include/dovetail/thread_info.h new file mode 100644 index 0000000..4dea8bf --- /dev/null +++ b/kernel/include/dovetail/thread_info.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DOVETAIL_THREAD_INFO_H +#define _DOVETAIL_THREAD_INFO_H + +/* + * Placeholder for per-thread state information defined by the + * co-kernel. + */ + +struct oob_thread_state { +}; + +#endif /* !_DOVETAIL_THREAD_INFO_H */ diff --git a/kernel/include/linux/clockchips.h b/kernel/include/linux/clockchips.h index 8ae9a95..bda5d7d 100644 --- a/kernel/include/linux/clockchips.h +++ b/kernel/include/linux/clockchips.h @@ -15,6 +15,7 @@ # include <linux/cpumask.h> # include <linux/ktime.h> # include <linux/notifier.h> +# include <linux/irqstage.h> struct clock_event_device; struct module; @@ -31,6 +32,7 @@ * from DETACHED or SHUTDOWN. * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily * stopped. + * RESERVED: Device is controlled by an out-of-band core via a proxy. */ enum clock_event_state { CLOCK_EVT_STATE_DETACHED, @@ -38,6 +40,7 @@ CLOCK_EVT_STATE_PERIODIC, CLOCK_EVT_STATE_ONESHOT, CLOCK_EVT_STATE_ONESHOT_STOPPED, + CLOCK_EVT_STATE_RESERVED, }; /* @@ -67,6 +70,17 @@ */ # define CLOCK_EVT_FEAT_HRTIMER 0x000080 +/* + * Interrupt pipeline support: + * + * - Clockevent device can work with pipelined timer events (i.e. proxied). + * - Device currently delivers high-precision events via out-of-band interrupts. + * - Device acts as a proxy for timer interrupt pipelining. 
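+ *
+ * Editor's illustration (assumed usage, not part of the original
+ * patch): a timer driver advertising pipelined delivery would add
+ * CLOCK_EVT_FEAT_PIPELINE to its feature mask before registration,
+ * e.g.:
+ *
+ *	ced->features |= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE;
+ *	clockevents_config_and_register(ced, freq, min_delta, max_delta);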
+ */ +# define CLOCK_EVT_FEAT_PIPELINE 0x000100 +# define CLOCK_EVT_FEAT_OOB 0x000200 +# define CLOCK_EVT_FEAT_PROXY 0x000400 + /** * struct clock_event_device - clock event device descriptor * @event_handler: Assigned by the framework to be called by the low @@ -91,7 +105,7 @@ * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration * @name: ptr to clock event name * @rating: variable to rate clock event devices - * @irq: IRQ number (only for non CPU local devices) + * @irq: IRQ number (only for non CPU local devices, or pipelined timers) * @bound_on: Bound on CPU * @cpumask: cpumask to indicate for which CPUs this device works * @list: list head for the management code @@ -137,6 +151,11 @@ return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED; } +static inline bool clockevent_state_reserved(struct clock_event_device *dev) +{ + return dev->state_use_accessors == CLOCK_EVT_STATE_RESERVED; +} + static inline bool clockevent_state_shutdown(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN; @@ -155,6 +174,11 @@ static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev) { return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED; +} + +static inline bool clockevent_is_oob(struct clock_event_device *dev) +{ + return !!(dev->features & CLOCK_EVT_FEAT_OOB); } /* @@ -186,6 +210,8 @@ extern void clockevents_config_and_register(struct clock_event_device *dev, u32 freq, unsigned long min_delta, unsigned long max_delta); +extern void clockevents_switch_state(struct clock_event_device *dev, + enum clock_event_state state); extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); @@ -215,6 +241,49 @@ static inline void tick_setup_hrtimer_broadcast(void) { } # endif +#ifdef CONFIG_IRQ_PIPELINE + +struct clock_proxy_device { + struct clock_event_device proxy_device; + struct clock_event_device *real_device; + void (*handle_oob_event)(struct clock_event_device *dev); + void (*__setup_handler)(struct clock_proxy_device *dev); + void (*__original_handler)(struct clock_event_device *dev); +}; + +void tick_notify_proxy(void); + +static inline +void clockevents_handle_event(struct clock_event_device *ced) +{ + /* + * If called from the in-band stage, or for delivering a + * high-precision timer event to the out-of-band stage, call + * the event handler immediately. + * + * Otherwise, ced is still the in-band tick device for the + * current CPU, so just relay the incoming tick to the in-band + * stage via tick_notify_proxy(). This situation can happen + * when all CPUs receive the same out-of-band IRQ from a given + * clock event device, but only a subset of the online CPUs has + * enabled a proxy. 
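+ *
+ * Editor's sketch (assumed usage, not spelled out in this patch):
+ * the hard interrupt handler of a proxied timer would funnel each
+ * tick through this helper, so the dispatch decision below applies
+ * to every event:
+ *
+ *	static irqreturn_t proxy_timer_irq(int irq, void *dev_id)
+ *	{
+ *		struct clock_event_device *ced = dev_id;
+ *
+ *		clockevents_handle_event(ced);
+ *		return IRQ_HANDLED;
+ *	}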
+ */ + if (clockevent_is_oob(ced) || running_inband()) + ced->event_handler(ced); + else + tick_notify_proxy(); +} + +#else + +static inline +void clockevents_handle_event(struct clock_event_device *ced) +{ + ced->event_handler(ced); +} + +#endif /* !CONFIG_IRQ_PIPELINE */ + #else /* !CONFIG_GENERIC_CLOCKEVENTS: */ static inline void clockevents_suspend(void) { } diff --git a/kernel/include/linux/clocksource.h b/kernel/include/linux/clocksource.h index 83a3ebf..9665974 100644 --- a/kernel/include/linux/clocksource.h +++ b/kernel/include/linux/clocksource.h @@ -13,12 +13,15 @@ #include <linux/timex.h> #include <linux/time.h> #include <linux/list.h> +#include <linux/hashtable.h> #include <linux/cache.h> #include <linux/timer.h> +#include <linux/cdev.h> #include <linux/init.h> #include <linux/of.h> #include <asm/div64.h> #include <asm/io.h> +#include <uapi/linux/clocksource.h> struct clocksource; struct module; @@ -28,7 +31,14 @@ #include <asm/clocksource.h> #endif + #include <vdso/clocksource.h> + +enum clocksource_vdso_type { + CLOCKSOURCE_VDSO_NONE = 0, + CLOCKSOURCE_VDSO_ARCHITECTED, + CLOCKSOURCE_VDSO_MMIO, /* <= Must be last. */ +}; /** * struct clocksource - hardware abstraction for a free running counter @@ -101,6 +111,7 @@ struct list_head list; int rating; enum vdso_clock_mode vdso_clock_mode; + enum clocksource_vdso_type vdso_type; unsigned long flags; int (*enable)(struct clocksource *cs); @@ -118,6 +129,36 @@ u64 wd_last; #endif struct module *owner; +}; + +struct clocksource_mmio { + void __iomem *reg; + struct clocksource clksrc; +}; + +struct clocksource_user_mmio { + struct clocksource_mmio mmio; + void __iomem *reg_upper; + unsigned int bits_lower; + unsigned int mask_lower; + unsigned int mask_upper; + enum clksrc_user_mmio_type type; + unsigned long phys_lower; + unsigned long phys_upper; + unsigned int id; + struct device *dev; + struct cdev cdev; + DECLARE_HASHTABLE(mappings, 10); + struct spinlock lock; + struct list_head link; +}; + +struct clocksource_mmio_regs { + void __iomem *reg_upper; + void __iomem *reg_lower; + unsigned int bits_upper; + unsigned int bits_lower; + unsigned long (*revmap)(void *); }; /* @@ -264,10 +305,21 @@ extern u64 clocksource_mmio_readl_down(struct clocksource *); extern u64 clocksource_mmio_readw_up(struct clocksource *); extern u64 clocksource_mmio_readw_down(struct clocksource *); +extern u64 clocksource_dual_mmio_readw_up(struct clocksource *); +extern u64 clocksource_dual_mmio_readl_up(struct clocksource *); extern int clocksource_mmio_init(void __iomem *, const char *, unsigned long, int, unsigned, u64 (*)(struct clocksource *)); +extern int clocksource_user_mmio_init(struct clocksource_user_mmio *ucs, + const struct clocksource_mmio_regs *regs, + unsigned long hz); + +extern int clocksource_user_single_mmio_init( + void __iomem *base, const char *name, + unsigned long hz, int rating, unsigned int bits, + u64 (*read)(struct clocksource *)); + extern int clocksource_i8253_init(void); #define TIMER_OF_DECLARE(name, compat, fn) \ diff --git a/kernel/include/linux/console.h b/kernel/include/linux/console.h index 4b1e26c..1413a45 100644 --- a/kernel/include/linux/console.h +++ b/kernel/include/linux/console.h @@ -141,6 +141,7 @@ struct console { char name[16]; void (*write)(struct console *, const char *, unsigned); + void (*write_raw)(struct console *, const char *, unsigned); int (*read)(struct console *, char *, unsigned); struct tty_driver *(*device)(struct console *, int *); void (*unblank)(void); diff --git 
a/kernel/include/linux/context_tracking_state.h b/kernel/include/linux/context_tracking_state.h index 65a60d3..814b57d 100644 --- a/kernel/include/linux/context_tracking_state.h +++ b/kernel/include/linux/context_tracking_state.h @@ -28,7 +28,7 @@ static __always_inline bool context_tracking_enabled(void) { - return static_branch_unlikely(&context_tracking_key); + return static_branch_unlikely(&context_tracking_key) && running_inband(); } static __always_inline bool context_tracking_enabled_cpu(int cpu) diff --git a/kernel/include/linux/dmaengine.h b/kernel/include/linux/dmaengine.h index 08537ef..e8bc400 100644 --- a/kernel/include/linux/dmaengine.h +++ b/kernel/include/linux/dmaengine.h @@ -62,6 +62,7 @@ DMA_ASYNC_TX, DMA_SLAVE, DMA_CYCLIC, + DMA_OOB, DMA_INTERLEAVE, DMA_COMPLETION_NO_ORDER, DMA_REPEAT, @@ -191,6 +192,13 @@ * transaction is marked with DMA_PREP_REPEAT will cause the new transaction * to never be processed and stay in the issued queue forever. The flag is * ignored if the previous transaction is not a repeated transaction. + * @DMA_OOB_INTERRUPT - if DMA_OOB is supported, handle the completion + * interrupt for this transaction from the out-of-band stage (implies + * DMA_PREP_INTERRUPT). This includes calling the completion callback routine + * from such context if defined for the transaction. + * @DMA_OOB_PULSE - if DMA_OOB is supported, (slave) transactions on the + * out-of-band channel should be triggered manually by a call to + * dma_pulse_oob() (implies DMA_OOB_INTERRUPT). */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), @@ -203,6 +211,8 @@ DMA_PREP_CMD = (1 << 7), DMA_PREP_REPEAT = (1 << 8), DMA_PREP_LOAD_EOT = (1 << 9), + DMA_OOB_INTERRUPT = (1 << 10), + DMA_OOB_PULSE = (1 << 11), }; /** @@ -940,6 +950,7 @@ dma_cookie_t cookie, struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); + int (*device_pulse_oob)(struct dma_chan *chan); void (*device_release)(struct dma_device *dev); /* debugfs support */ #ifdef CONFIG_DEBUG_FS @@ -983,11 +994,22 @@ dir, flags, NULL); } +static inline bool dmaengine_oob_valid(struct dma_chan *chan, + unsigned long flags) +{ + return !(dovetailing() && + flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE) && + !test_bit(DMA_OOB, chan->device->cap_mask.bits)); +} + static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags) { if (!chan || !chan->device || !chan->device->device_prep_slave_sg) + return NULL; + + if (!dmaengine_oob_valid(chan, flags)) return NULL; return chan->device->device_prep_slave_sg(chan, sgl, sg_len, @@ -1015,6 +1037,9 @@ unsigned long flags) { if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic) + return NULL; + + if (!dmaengine_oob_valid(chan, flags)) return NULL; return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, @@ -1422,6 +1447,22 @@ } /** + * dma_pulse_oob - manual trigger of an out-of-band transaction + * @chan: target DMA channel + * + * Trigger the next out-of-band transaction immediately. 
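+ *
+ * Editor's sketch (assumed usage, not part of the original patch),
+ * following the DMA_OOB_PULSE semantics documented above:
+ *
+ *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
+ *			DMA_MEM_TO_DEV, DMA_OOB_PULSE);
+ *	dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);
+ *	...
+ *	ret = dma_pulse_oob(chan);	(from the out-of-band stage)
+ *
+ * Returns -ENOTSUPP if the channel cannot trigger out-of-band
+ * transactions manually.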
+ */ +static inline int dma_pulse_oob(struct dma_chan *chan) +{ + int ret = -ENOTSUPP; + + if (chan->device->device_pulse_oob) + ret = chan->device->device_pulse_oob(chan); + + return ret; +} + +/** * dma_async_is_tx_complete - poll for transaction completion * @chan: DMA channel * @cookie: transaction identifier to check status of diff --git a/kernel/include/linux/dovetail.h b/kernel/include/linux/dovetail.h new file mode 100644 index 0000000..9dcbfc5 --- /dev/null +++ b/kernel/include/linux/dovetail.h @@ -0,0 +1,325 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>. + */ +#ifndef _LINUX_DOVETAIL_H +#define _LINUX_DOVETAIL_H + +#ifdef CONFIG_DOVETAIL + +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/thread_info.h> +#include <linux/irqstage.h> +#include <uapi/asm-generic/dovetail.h> +#include <asm/dovetail.h> + +struct pt_regs; +struct task_struct; +struct file; +struct files_struct; + +enum inband_event_type { + INBAND_TASK_SIGNAL, + INBAND_TASK_MIGRATION, + INBAND_TASK_EXIT, + INBAND_TASK_RETUSER, + INBAND_TASK_PTSTEP, + INBAND_TASK_PTSTOP, + INBAND_TASK_PTCONT, + INBAND_PROCESS_CLEANUP, +}; + +struct dovetail_migration_data { + struct task_struct *task; + int dest_cpu; +}; + +struct dovetail_altsched_context { + struct task_struct *task; + struct mm_struct *active_mm; + bool borrowed_mm; +}; + +#define protect_inband_mm(__flags) \ + do { \ + (__flags) = hard_cond_local_irq_save(); \ + barrier(); \ + } while (0) \ + +#define unprotect_inband_mm(__flags) \ + do { \ + barrier(); \ + hard_cond_local_irq_restore(__flags); \ + } while (0) \ + +void inband_task_init(struct task_struct *p); + +int pipeline_syscall(unsigned int nr, struct pt_regs *regs); + +void __oob_trap_notify(unsigned int exception, + struct pt_regs *regs); + +static __always_inline void oob_trap_notify(unsigned int exception, + struct pt_regs *regs) +{ + if (running_oob() && !test_thread_local_flags(_TLF_OOBTRAP)) + __oob_trap_notify(exception, regs); +} + +void __oob_trap_unwind(unsigned int exception, + struct pt_regs *regs); + +static __always_inline void oob_trap_unwind(unsigned int exception, + struct pt_regs *regs) +{ + if (test_thread_local_flags(_TLF_OOBTRAP)) + __oob_trap_unwind(exception, regs); +} + +void inband_event_notify(enum inband_event_type, + void *data); + +void inband_clock_was_set(void); + +static inline void inband_signal_notify(struct task_struct *p) +{ + if (test_ti_local_flags(task_thread_info(p), _TLF_DOVETAIL)) + inband_event_notify(INBAND_TASK_SIGNAL, p); +} + +static inline void inband_migration_notify(struct task_struct *p, int cpu) +{ + if (test_ti_local_flags(task_thread_info(p), _TLF_DOVETAIL)) { + struct dovetail_migration_data d = { + .task = p, + .dest_cpu = cpu, + }; + inband_event_notify(INBAND_TASK_MIGRATION, &d); + } +} + +static inline void inband_exit_notify(void) +{ + inband_event_notify(INBAND_TASK_EXIT, NULL); +} + +static inline void inband_cleanup_notify(struct mm_struct *mm) +{ + /* + * Notify regardless of _TLF_DOVETAIL: current may have + * resources to clean up although it might not be interested + * in other kernel events. 
+ */ + inband_event_notify(INBAND_PROCESS_CLEANUP, mm); +} + +static inline void inband_ptstop_notify(void) +{ + if (test_thread_local_flags(_TLF_DOVETAIL)) + inband_event_notify(INBAND_TASK_PTSTOP, current); +} + +static inline void inband_ptcont_notify(void) +{ + if (test_thread_local_flags(_TLF_DOVETAIL)) + inband_event_notify(INBAND_TASK_PTCONT, current); +} + +static inline void inband_ptstep_notify(struct task_struct *tracee) +{ + if (test_ti_local_flags(task_thread_info(tracee), _TLF_DOVETAIL)) + inband_event_notify(INBAND_TASK_PTSTEP, tracee); +} + +static inline +void prepare_inband_switch(struct task_struct *next) +{ + struct task_struct *prev = current; + + if (test_ti_local_flags(task_thread_info(next), _TLF_DOVETAIL)) + __this_cpu_write(irq_pipeline.rqlock_owner, prev); +} + +void inband_retuser_notify(void); + +bool inband_switch_tail(void); + +void oob_trampoline(void); + +void arch_inband_task_init(struct task_struct *p); + +int dovetail_start(void); + +void dovetail_stop(void); + +void dovetail_init_altsched(struct dovetail_altsched_context *p); + +void dovetail_start_altsched(void); + +void dovetail_stop_altsched(void); + +__must_check int dovetail_leave_inband(void); + +static inline void dovetail_leave_oob(void) +{ + clear_thread_local_flags(_TLF_OOB|_TLF_OFFSTAGE); + clear_thread_flag(TIF_MAYDAY); +} + +void dovetail_resume_inband(void); + +bool dovetail_context_switch(struct dovetail_altsched_context *out, + struct dovetail_altsched_context *in, + bool leave_inband); + +static inline +struct oob_thread_state *dovetail_current_state(void) +{ + return ¤t_thread_info()->oob_state; +} + +static inline +struct oob_thread_state *dovetail_task_state(struct task_struct *p) +{ + return &task_thread_info(p)->oob_state; +} + +static inline +struct oob_mm_state *dovetail_mm_state(void) +{ + if (current->flags & PF_KTHREAD) + return NULL; + + return ¤t->mm->oob_state; +} + +void dovetail_call_mayday(struct pt_regs *regs); + +static inline void dovetail_send_mayday(struct task_struct *castaway) +{ + struct thread_info *ti = task_thread_info(castaway); + + if (test_ti_local_flags(ti, _TLF_DOVETAIL)) + set_ti_thread_flag(ti, TIF_MAYDAY); +} + +static inline void dovetail_request_ucall(struct task_struct *task) +{ + struct thread_info *ti = task_thread_info(task); + + if (test_ti_local_flags(ti, _TLF_DOVETAIL)) + set_ti_thread_flag(ti, TIF_RETUSER); +} + +static inline void dovetail_clear_ucall(void) +{ + if (test_thread_flag(TIF_RETUSER)) + clear_thread_flag(TIF_RETUSER); +} + +void install_inband_fd(unsigned int fd, struct file *file, + struct files_struct *files); + +void uninstall_inband_fd(unsigned int fd, struct file *file, + struct files_struct *files); + +void replace_inband_fd(unsigned int fd, struct file *file, + struct files_struct *files); + +#else /* !CONFIG_DOVETAIL */ + +/* We may have arch-specific placeholders. */ +#include <asm/dovetail.h> + +struct files_struct; + +#define protect_inband_mm(__flags) \ + do { (void)(__flags); } while (0) + +#define unprotect_inband_mm(__flags) \ + do { (void)(__flags); } while (0) + +static inline +void inband_task_init(struct task_struct *p) { } + +static inline void arch_dovetail_exec_prepare(void) +{ } + +/* + * Keep the trap helpers as macros, we might not be able to resolve + * trap numbers if CONFIG_DOVETAIL is off. 
+ */ +#define oob_trap_notify(__exception, __regs) do { } while (0) +#define oob_trap_unwind(__exception, __regs) do { } while (0) + +static inline +int pipeline_syscall(unsigned int nr, struct pt_regs *regs) +{ + return 0; +} + +static inline void inband_signal_notify(struct task_struct *p) { } + +static inline +void inband_migration_notify(struct task_struct *p, int cpu) { } + +static inline void inband_exit_notify(void) { } + +static inline void inband_cleanup_notify(struct mm_struct *mm) { } + +static inline void inband_retuser_notify(void) { } + +static inline void inband_ptstop_notify(void) { } + +static inline void inband_ptcont_notify(void) { } + +static inline void inband_ptstep_notify(struct task_struct *tracee) { } + +static inline void oob_trampoline(void) { } + +static inline void prepare_inband_switch(struct task_struct *next) { } + +static inline bool inband_switch_tail(void) +{ + /* Matches converse disabling in prepare_task_switch(). */ + hard_cond_local_irq_enable(); + return false; +} + +static inline void dovetail_request_ucall(struct task_struct *task) { } + +static inline void dovetail_clear_ucall(void) { } + +static inline void inband_clock_was_set(void) { } + +static inline +void install_inband_fd(unsigned int fd, struct file *file, + struct files_struct *files) { } + +static inline +void uninstall_inband_fd(unsigned int fd, struct file *file, + struct files_struct *files) { } + +static inline +void replace_inband_fd(unsigned int fd, struct file *file, + struct files_struct *files) { } + +#endif /* !CONFIG_DOVETAIL */ + +static __always_inline bool dovetailing(void) +{ + return IS_ENABLED(CONFIG_DOVETAIL); +} + +static __always_inline bool dovetail_debug(void) +{ + return IS_ENABLED(CONFIG_DEBUG_DOVETAIL); +} + +#ifndef arch_dovetail_is_syscall +#define arch_dovetail_is_syscall(__nr) ((__nr) == __NR_prctl) +#endif + +#endif /* _LINUX_DOVETAIL_H */ diff --git a/kernel/include/linux/dw_apb_timer.h b/kernel/include/linux/dw_apb_timer.h index 82ebf92..d69dbd0 100644 --- a/kernel/include/linux/dw_apb_timer.h +++ b/kernel/include/linux/dw_apb_timer.h @@ -30,7 +30,7 @@ struct dw_apb_clocksource { struct dw_apb_timer timer; - struct clocksource cs; + struct clocksource_user_mmio ummio; }; void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced); diff --git a/kernel/include/linux/entry-common.h b/kernel/include/linux/entry-common.h index 46c4247..3d7d78c 100644 --- a/kernel/include/linux/entry-common.h +++ b/kernel/include/linux/entry-common.h @@ -72,6 +72,14 @@ _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ ARCH_EXIT_TO_USER_MODE_WORK) +/* + * Status codes of syscall entry when Dovetail is enabled. Must not + * conflict with valid syscall numbers, nor with -1, which seccomp + * uses to skip a syscall.
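+ *
+ * Editor's sketch of the intended use (an assumption, the consumer is
+ * not shown in this patch): the arch-level syscall entry is expected
+ * to match the value handed back by the entry hook against these
+ * codes, along the lines of:
+ *
+ *	nr = syscall_enter_from_user_mode(regs, nr);
+ *	if (nr == EXIT_SYSCALL_OOB)
+ *		return;		(fully handled on the out-of-band stage)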
+ */ +#define EXIT_SYSCALL_OOB (-2) +#define EXIT_SYSCALL_TAIL (-3) + /** * arch_check_user_regs - Architecture specific sanity check for user mode regs * @regs: Pointer to currents pt_regs @@ -181,7 +189,7 @@ #ifndef local_irq_enable_exit_to_user static inline void local_irq_enable_exit_to_user(unsigned long ti_work) { - local_irq_enable(); + local_irq_enable_full(); } #endif @@ -196,7 +204,7 @@ #ifndef local_irq_disable_exit_to_user static inline void local_irq_disable_exit_to_user(void) { - local_irq_disable(); + local_irq_disable_full(); } #endif @@ -341,6 +349,12 @@ */ void irqentry_exit_to_user_mode(struct pt_regs *regs); +enum irqentry_info { + IRQENTRY_INBAND_UNSTALLED = 0, + IRQENTRY_INBAND_STALLED, + IRQENTRY_OOB, +}; + #ifndef irqentry_state /** * struct irqentry_state - Opaque object for exception state storage @@ -348,6 +362,7 @@ * exit path has to invoke rcu_irq_exit(). * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that * lockdep state is restored correctly on exit from nmi. + * @stage_info: Information about pipeline state and current stage on IRQ entry. * * This opaque object is filled in by the irqentry_*_enter() functions and * must be passed back into the corresponding irqentry_*_exit() functions @@ -362,6 +377,9 @@ bool exit_rcu; bool lockdep; }; +#ifdef CONFIG_IRQ_PIPELINE + enum irqentry_info stage_info; +#endif } irqentry_state_t; #endif diff --git a/kernel/include/linux/fcntl.h b/kernel/include/linux/fcntl.h index 766fcd9..5cb2aa2 100644 --- a/kernel/include/linux/fcntl.h +++ b/kernel/include/linux/fcntl.h @@ -10,7 +10,7 @@ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \ O_APPEND | O_NDELAY | O_NONBLOCK | __O_SYNC | O_DSYNC | \ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \ - O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) + O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE | O_OOB) /* List of all valid flags for the how->upgrade_mask argument: */ #define VALID_UPGRADE_FLAGS \ diff --git a/kernel/include/linux/fs.h b/kernel/include/linux/fs.h index 7297765..d167e43 100644 --- a/kernel/include/linux/fs.h +++ b/kernel/include/linux/fs.h @@ -56,6 +56,7 @@ struct kobject; struct pipe_inode_info; struct poll_table_struct; +struct oob_poll_wait; struct kstatfs; struct vm_area_struct; struct vfsmount; @@ -963,6 +964,7 @@ #endif /* needed for tty driver, and maybe others */ void *private_data; + void *oob_data; #ifdef CONFIG_EPOLL /* Used by fs/eventpoll.c to link all the hooks to this file */ @@ -1800,8 +1802,11 @@ #ifdef CONFIG_COMPAT extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +extern long compat_ptr_oob_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); #else #define compat_ptr_ioctl NULL +#define compat_ptr_oob_ioctl NULL #endif /* @@ -1888,6 +1893,11 @@ __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); + ssize_t (*oob_read) (struct file *, char __user *, size_t); + ssize_t (*oob_write) (struct file *, const char __user *, size_t); + long (*oob_ioctl) (struct file *, unsigned int, unsigned long); + long (*compat_oob_ioctl) (struct file *, unsigned int, unsigned long); + __poll_t (*oob_poll) (struct file *, struct oob_poll_wait *); int (*mmap) (struct file *, struct vm_area_struct *); unsigned long mmap_supported_flags; int (*open) (struct inode *, struct file *); diff --git a/kernel/include/linux/hardirq.h 
b/kernel/include/linux/hardirq.h index 754f67a..955b6ce 100644 --- a/kernel/include/linux/hardirq.h +++ b/kernel/include/linux/hardirq.h @@ -7,6 +7,7 @@ #include <linux/lockdep.h> #include <linux/ftrace_irq.h> #include <linux/vtime.h> +#include <asm-generic/irq_pipeline.h> #include <asm/hardirq.h> extern void synchronize_irq(unsigned int irq); @@ -122,6 +123,7 @@ #define nmi_enter() \ do { \ + irq_pipeline_nmi_enter(); \ __nmi_enter(); \ lockdep_hardirq_enter(); \ rcu_nmi_enter(); \ @@ -147,6 +149,22 @@ rcu_nmi_exit(); \ lockdep_hardirq_exit(); \ __nmi_exit(); \ + irq_pipeline_nmi_exit(); \ } while (0) +static inline bool start_irq_flow(void) +{ + return !irqs_pipelined() || in_pipeline(); +} + +static inline bool on_pipeline_entry(void) +{ + return irqs_pipelined() && in_pipeline(); +} + +static inline bool in_hard_irq(void) +{ + return irqs_pipelined() ? in_pipeline() : in_irq(); +} + #endif /* LINUX_HARDIRQ_H */ diff --git a/kernel/include/linux/intel-iommu.h b/kernel/include/linux/intel-iommu.h index 142ec79..c1be3c0 100644 --- a/kernel/include/linux/intel-iommu.h +++ b/kernel/include/linux/intel-iommu.h @@ -576,7 +576,7 @@ u64 ecap; u64 vccap; u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ - raw_spinlock_t register_lock; /* protect register handling */ + hard_spinlock_t register_lock; /* protect register handling */ int seq_id; /* sequence id of the iommu */ int agaw; /* agaw of this iommu */ int msagaw; /* max sagaw of this iommu */ diff --git a/kernel/include/linux/interrupt.h b/kernel/include/linux/interrupt.h index 386ddf4..c89728f 100644 --- a/kernel/include/linux/interrupt.h +++ b/kernel/include/linux/interrupt.h @@ -61,6 +61,12 @@ * interrupt handler after suspending interrupts. For system * wakeup devices users need to implement wakeup detection in * their interrupt handlers. + * IRQF_OOB - Interrupt is attached to an out-of-band handler living + * on the heading stage of the interrupt pipeline + * (CONFIG_IRQ_PIPELINE). It may be delivered to the + * handler any time interrupts are enabled in the CPU, + * regardless of the (virtualized) interrupt state + * maintained by local_irq_save/disable(). */ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -74,6 +80,7 @@ #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 +#define IRQF_OOB 0x00080000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -514,9 +521,29 @@ * to ensure that after a local_irq_disable(), interrupts have * really been disabled in hardware. Such architectures need to * implement the following hook. + * + * Those cases also apply when interrupt pipelining is in effect, + * since we are virtualizing the interrupt disable state here too. */ #ifndef hard_irq_disable -#define hard_irq_disable() do { } while(0) +#define hard_irq_disable() hard_cond_local_irq_disable() +#endif + +/* + * Unlike what other virtualized interrupt disabling schemes may assume, we + * can't expect local_irq_restore() to turn hard interrupts on when + * pipelining. hard_irq_enable() is introduced to be paired with + * hard_irq_disable(), for unconditionally turning them on. The only + * sane sequence mixing virtual and real disable state manipulation + * is: + * + * 1. local_irq_save/disable + * 2. hard_irq_disable + * 3. hard_irq_enable + * 4.
local_irq_restore/enable + */ +#ifndef hard_irq_enable +#define hard_irq_enable() hard_cond_local_irq_enable() #endif /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high diff --git a/kernel/include/linux/irq.h b/kernel/include/linux/irq.h index b2b956d..44bd457 100644 --- a/kernel/include/linux/irq.h +++ b/kernel/include/linux/irq.h @@ -16,6 +16,7 @@ #include <linux/irqhandler.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> +#include <linux/irq_work.h> #include <linux/topology.h> #include <linux/io.h> #include <linux/slab.h> @@ -73,6 +74,11 @@ * IRQ_DISABLE_UNLAZY - Disable lazy irq disable * IRQ_HIDDEN - Don't show up in /proc/interrupts * IRQ_RAW - Skip tick management and irqtime accounting + * IRQ_OOB - Interrupt can be delivered to the out-of-band handler + * when pipelining is enabled (CONFIG_IRQ_PIPELINE), + * regardless of the (virtualized) interrupt state + * maintained by local_irq_save/disable(). + * IRQ_CHAINED - Interrupt is chained. */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -101,13 +107,15 @@ IRQ_DISABLE_UNLAZY = (1 << 19), IRQ_HIDDEN = (1 << 20), IRQ_RAW = (1 << 21), + IRQ_OOB = (1 << 22), + IRQ_CHAINED = (1 << 23), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ - IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN) + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN | IRQ_OOB) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) @@ -173,6 +181,7 @@ * irq_domain * @chip_data: platform-specific per-chip private data for the chip * methods, to allow shared chip implementations + * @move_work: irq_work for setaffinity deferral when pipelining irqs */ struct irq_data { u32 mask; @@ -183,6 +192,9 @@ struct irq_domain *domain; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_data *parent_data; +#endif +#if defined(CONFIG_IRQ_PIPELINE) && defined(CONFIG_GENERIC_PENDING_IRQ) + struct irq_work move_work; #endif void *chip_data; }; @@ -221,6 +233,7 @@ * irq_chip::irq_set_affinity() when deactivated. * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if * irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set. 
+ * IRQD_SETAFFINITY_BLOCKED - Pending affinity setting on hold (IRQ_PIPELINE) */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -247,6 +260,7 @@ IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30), + IRQD_SETAFFINITY_BLOCKED = (1 << 31), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -254,6 +268,21 @@ static inline bool irqd_is_setaffinity_pending(struct irq_data *d) { return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING; +} + +static inline void irqd_set_move_blocked(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_SETAFFINITY_BLOCKED; +} + +static inline void irqd_clr_move_blocked(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_SETAFFINITY_BLOCKED; +} + +static inline bool irqd_is_setaffinity_blocked(struct irq_data *d) +{ + return irqs_pipelined() && __irqd_to_state(d) & IRQD_SETAFFINITY_BLOCKED; } static inline bool irqd_is_per_cpu(struct irq_data *d) @@ -570,6 +599,7 @@ * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs * in the suspend path if they are in disabled state * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup + * IRQCHIP_PIPELINE_SAFE: Chip can work in pipelined mode */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), @@ -583,6 +613,7 @@ IRQCHIP_SUPPORTS_NMI = (1 << 8), IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), + IRQCHIP_PIPELINE_SAFE = (1 << 11), }; #include <linux/irqdesc.h> @@ -660,6 +691,7 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc); extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); +extern void handle_synthetic_irq(struct irq_desc *desc); extern void handle_fasteoi_nmi(struct irq_desc *desc); extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc); @@ -813,7 +845,13 @@ extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry); -extern struct irq_data *irq_get_irq_data(unsigned int irq); + +static inline struct irq_data *irq_get_irq_data(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + return desc ? &desc->irq_data : NULL; +} static inline struct irq_chip *irq_get_chip(unsigned int irq) { @@ -1056,7 +1094,7 @@ * different flow mechanisms (level/edge) for it. */ struct irq_chip_generic { - raw_spinlock_t lock; + hard_spinlock_t lock; void __iomem *reg_base; u32 (*reg_readl)(void __iomem *addr); void (*reg_writel)(u32 val, void __iomem *addr); @@ -1183,6 +1221,12 @@ #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) +#ifdef CONFIG_IRQ_PIPELINE + +int irq_switch_oob(unsigned int irq, bool on); + +#endif /* CONFIG_IRQ_PIPELINE */ + #ifdef CONFIG_SMP static inline void irq_gc_lock(struct irq_chip_generic *gc) { diff --git a/kernel/include/linux/irq_pipeline.h b/kernel/include/linux/irq_pipeline.h new file mode 100644 index 0000000..cbeb010 --- /dev/null +++ b/kernel/include/linux/irq_pipeline.h @@ -0,0 +1,145 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2002 Philippe Gerum <rpm@xenomai.org>. + * 2006 Gilles Chanteperdrix. + * 2007 Jan Kiszka.
+ */ +#ifndef _LINUX_IRQ_PIPELINE_H +#define _LINUX_IRQ_PIPELINE_H + +struct cpuidle_device; +struct cpuidle_state; +struct irq_desc; + +#ifdef CONFIG_IRQ_PIPELINE + +#include <linux/compiler.h> +#include <linux/irqdomain.h> +#include <linux/percpu.h> +#include <linux/interrupt.h> +#include <linux/irqstage.h> +#include <linux/thread_info.h> +#include <asm/irqflags.h> + +void irq_pipeline_init_early(void); + +void irq_pipeline_init(void); + +void arch_irq_pipeline_init(void); + +void generic_pipeline_irq_desc(struct irq_desc *desc, + struct pt_regs *regs); + +int irq_inject_pipeline(unsigned int irq); + +void synchronize_pipeline(void); + +static __always_inline void synchronize_pipeline_on_irq(void) +{ + /* + * Optimize if we preempted the high priority oob stage: we + * don't need to synchronize the pipeline unless there is a + * pending interrupt for it. + */ + if (running_inband() || + stage_irqs_pending(this_oob_staged())) + synchronize_pipeline(); +} + +bool handle_oob_irq(struct irq_desc *desc); + +void arch_do_IRQ_pipelined(struct irq_desc *desc); + +#ifdef CONFIG_SMP +void irq_send_oob_ipi(unsigned int ipi, + const struct cpumask *cpumask); +#endif /* CONFIG_SMP */ + +void irq_pipeline_oops(void); + +bool irq_cpuidle_enter(struct cpuidle_device *dev, + struct cpuidle_state *state); + +int run_oob_call(int (*fn)(void *arg), void *arg); + +extern bool irq_pipeline_active; + +static inline bool inband_unsafe(void) +{ + return running_oob() || + (hard_irqs_disabled() && irq_pipeline_active); +} + +static inline bool inband_irq_pending(void) +{ + check_hard_irqs_disabled(); + + return stage_irqs_pending(this_inband_staged()); +} + +struct irq_stage_data * +handle_irq_pipelined_prepare(struct pt_regs *regs); + +int handle_irq_pipelined_finish(struct irq_stage_data *prevd, + struct pt_regs *regs); + +int handle_irq_pipelined(struct pt_regs *regs); + +void sync_inband_irqs(void); + +extern struct irq_domain *synthetic_irq_domain; + +#else /* !CONFIG_IRQ_PIPELINE */ + +#include <linux/irqstage.h> +#include <linux/hardirq.h> + +static inline +void irq_pipeline_init_early(void) { } + +static inline +void irq_pipeline_init(void) { } + +static inline +void irq_pipeline_oops(void) { } + +static inline int +generic_pipeline_irq_desc(struct irq_desc *desc, + struct pt_regs *regs) +{ + return 0; +} + +static inline bool handle_oob_irq(struct irq_desc *desc) +{ + return false; +} + +static inline bool irq_cpuidle_enter(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + return true; +} + +static inline bool inband_unsafe(void) +{ + return false; +} + +static inline bool inband_irq_pending(void) +{ + return false; +} + +static inline void sync_inband_irqs(void) { } + +#endif /* !CONFIG_IRQ_PIPELINE */ + +#if !defined(CONFIG_IRQ_PIPELINE) || !defined(CONFIG_SPARSE_IRQ) +static inline void uncache_irq_desc(unsigned int irq) { } +#else +void uncache_irq_desc(unsigned int irq); +#endif + +#endif /* _LINUX_IRQ_PIPELINE_H */ diff --git a/kernel/include/linux/irqdesc.h b/kernel/include/linux/irqdesc.h index 5745491..f134909 100644 --- a/kernel/include/linux/irqdesc.h +++ b/kernel/include/linux/irqdesc.h @@ -68,7 +68,7 @@ unsigned int irqs_unhandled; atomic_t threads_handled; int threads_handled_last; - raw_spinlock_t lock; + hybrid_spinlock_t lock; struct cpumask *percpu_enabled; const struct cpumask *percpu_affinity; #ifdef CONFIG_SMP @@ -154,6 +154,8 @@ int generic_handle_irq(unsigned int irq); +void generic_pipeline_irq(unsigned int irq, struct pt_regs *regs); + #ifdef 
CONFIG_HANDLE_DOMAIN_IRQ /* * Convert a HW interrupt number to a logical one using a IRQ domain, @@ -164,11 +166,26 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, bool lookup, struct pt_regs *regs); +#ifdef CONFIG_IRQ_PIPELINE +unsigned int irq_find_mapping(struct irq_domain *host, + irq_hw_number_t hwirq); + +static inline int handle_domain_irq(struct irq_domain *domain, + unsigned int hwirq, struct pt_regs *regs) +{ + unsigned int irq = irq_find_mapping(domain, hwirq); + + generic_pipeline_irq(irq, regs); + + return 0; +} +#else static inline int handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, struct pt_regs *regs) { return __handle_domain_irq(domain, hwirq, true, regs); } +#endif /* !CONFIG_IRQ_PIPELINE */ #ifdef CONFIG_IRQ_DOMAIN int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, @@ -252,6 +269,14 @@ return desc->status_use_accessors & IRQ_PER_CPU_DEVID; } +static inline int irq_is_oob(unsigned int irq) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + return desc->status_use_accessors & IRQ_OOB; +} + static inline void irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, struct lock_class_key *request_class) diff --git a/kernel/include/linux/irqflags.h b/kernel/include/linux/irqflags.h index 3ed4e87..051c727 100644 --- a/kernel/include/linux/irqflags.h +++ b/kernel/include/linux/irqflags.h @@ -13,6 +13,7 @@ #define _LINUX_TRACE_IRQFLAGS_H #include <linux/typecheck.h> +#include <asm-generic/irq_pipeline.h> #include <asm/irqflags.h> #include <asm/percpu.h> @@ -52,7 +53,9 @@ extern void trace_hardirqs_on_prepare(void); extern void trace_hardirqs_off_finish(void); extern void trace_hardirqs_on(void); +extern void trace_hardirqs_on_pipelined(void); extern void trace_hardirqs_off(void); +extern void trace_hardirqs_off_pipelined(void); # define lockdep_hardirq_context() (raw_cpu_read(hardirq_context)) # define lockdep_softirq_context(p) ((p)->softirq_context) @@ -122,7 +125,9 @@ # define trace_hardirqs_on_prepare() do { } while (0) # define trace_hardirqs_off_finish() do { } while (0) # define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_on_pipelined() do { } while (0) # define trace_hardirqs_off() do { } while (0) +# define trace_hardirqs_off_pipelined() do { } while (0) # define lockdep_hardirq_context() 0 # define lockdep_softirq_context(p) 0 # define lockdep_hardirqs_enabled() 0 @@ -228,6 +233,38 @@ #endif /* CONFIG_TRACE_IRQFLAGS */ +#ifdef CONFIG_IRQ_PIPELINE +#define local_irq_enable_full() \ + do { \ + hard_local_irq_enable(); \ + local_irq_enable(); \ + } while (0) + +#define local_irq_disable_full() \ + do { \ + hard_local_irq_disable(); \ + local_irq_disable(); \ + } while (0) + +#define local_irq_save_full(__flags) \ + do { \ + hard_local_irq_disable(); \ + local_irq_save(__flags); \ + } while (0) + +#define local_irq_restore_full(__flags) \ + do { \ + if (!irqs_disabled_flags(__flags)) \ + hard_local_irq_enable(); \ + local_irq_restore(__flags); \ + } while (0) +#else +#define local_irq_enable_full() local_irq_enable() +#define local_irq_disable_full() local_irq_disable() +#define local_irq_save_full(__flags) local_irq_save(__flags) +#define local_irq_restore_full(__flags) local_irq_restore(__flags) +#endif + #define local_save_flags(flags) raw_local_save_flags(flags) /* diff --git a/kernel/include/linux/irqstage.h b/kernel/include/linux/irqstage.h new file mode 100644 index 0000000..46bfb84 --- /dev/null +++ b/kernel/include/linux/irqstage.h @@ -0,0 +1,398 @@ +/* + * 
SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2016, 2019 Philippe Gerum <rpm@xenomai.org>. + */ +#ifndef _LINUX_IRQSTAGE_H +#define _LINUX_IRQSTAGE_H + +#ifdef CONFIG_IRQ_PIPELINE + +#include <linux/percpu.h> +#include <linux/bitops.h> +#include <linux/preempt.h> +#include <linux/sched.h> +#include <asm/irq_pipeline.h> + +struct kvm_oob_notifier; + +struct irq_stage { + int index; + const char *name; +}; + +extern struct irq_stage inband_stage; + +extern struct irq_stage oob_stage; + +struct irq_event_map; + +struct irq_log { + unsigned long index_0; + struct irq_event_map *map; +}; + +/* Per-CPU, per-stage data. */ +struct irq_stage_data { + struct irq_log log; + struct irq_stage *stage; +#ifdef CONFIG_DEBUG_IRQ_PIPELINE + int cpu; +#endif +}; + +/* Per-CPU pipeline descriptor. */ +struct irq_pipeline_data { + struct irq_stage_data stages[2]; + struct pt_regs tick_regs; +#ifdef CONFIG_DOVETAIL + struct task_struct *task_inflight; + struct task_struct *rqlock_owner; +#ifdef CONFIG_KVM + struct kvm_oob_notifier *vcpu_notify; +#endif +#endif +}; + +DECLARE_PER_CPU(struct irq_pipeline_data, irq_pipeline); + +/* + * The low-level stall bit accessors. Should be used by the Dovetail + * core implementation exclusively, inband_irq_*() and oob_irq_*() + * accessors are available to common code. + */ + +#define INBAND_STALL_BIT 0 +#define OOB_STALL_BIT 1 + +static __always_inline void init_task_stall_bits(struct task_struct *p) +{ + __set_bit(INBAND_STALL_BIT, &p->stall_bits); + __clear_bit(OOB_STALL_BIT, &p->stall_bits); +} + +static __always_inline void stall_inband_nocheck(void) +{ + __set_bit(INBAND_STALL_BIT, ¤t->stall_bits); + barrier(); +} + +static __always_inline void stall_inband(void) +{ + WARN_ON_ONCE(irq_pipeline_debug() && running_oob()); + stall_inband_nocheck(); +} + +static __always_inline void unstall_inband_nocheck(void) +{ + barrier(); + __clear_bit(INBAND_STALL_BIT, ¤t->stall_bits); +} + +static __always_inline void unstall_inband(void) +{ + WARN_ON_ONCE(irq_pipeline_debug() && running_oob()); + unstall_inband_nocheck(); +} + +static __always_inline int test_and_stall_inband_nocheck(void) +{ + return __test_and_set_bit(INBAND_STALL_BIT, ¤t->stall_bits); +} + +static __always_inline int test_and_stall_inband(void) +{ + WARN_ON_ONCE(irq_pipeline_debug() && running_oob()); + return test_and_stall_inband_nocheck(); +} + +static __always_inline int test_inband_stall(void) +{ + return test_bit(INBAND_STALL_BIT, ¤t->stall_bits); +} + +static __always_inline void stall_oob(void) +{ + __set_bit(OOB_STALL_BIT, ¤t->stall_bits); + barrier(); +} + +static __always_inline void unstall_oob(void) +{ + barrier(); + __clear_bit(OOB_STALL_BIT, ¤t->stall_bits); +} + +static __always_inline int test_and_stall_oob(void) +{ + return __test_and_set_bit(OOB_STALL_BIT, ¤t->stall_bits); +} + +static __always_inline int test_oob_stall(void) +{ + return test_bit(OOB_STALL_BIT, ¤t->stall_bits); +} + +/** + * this_staged - IRQ stage data on the current CPU + * + * Return the address of @stage's data on the current CPU. IRQs must + * be hard disabled to prevent CPU migration. + */ +static __always_inline +struct irq_stage_data *this_staged(struct irq_stage *stage) +{ + return &raw_cpu_ptr(irq_pipeline.stages)[stage->index]; +} + +/** + * percpu_inband_staged - IRQ stage data on specified CPU + * + * Return the address of @stage's data on @cpu. + * + * This is the slowest accessor, use it carefully. Prefer + * this_staged() for requests referring to the current + * CPU. 
Additionally, if the target stage is known at build time, + * consider using this_{inband, oob}_staged() instead. + */ +static __always_inline +struct irq_stage_data *percpu_inband_staged(struct irq_stage *stage, int cpu) +{ + return &per_cpu(irq_pipeline.stages, cpu)[stage->index]; +} + +/** + * this_inband_staged - return the address of the pipeline context + * data for the inband stage on the current CPU. CPU migration must be + * disabled. + * + * This accessor is recommended when the stage we refer to is known at + * build time to be the inband one. + */ +static __always_inline struct irq_stage_data *this_inband_staged(void) +{ + return raw_cpu_ptr(&irq_pipeline.stages[0]); +} + +/** + * this_oob_staged - return the address of the pipeline context data + * for the registered oob stage on the current CPU. CPU migration must + * be disabled. + * + * This accessor is recommended when the stage we refer to is known at + * build time to be the registered oob stage. This address is always + * different from the context data of the inband stage, even in + * absence of registered oob stage. + */ +static __always_inline struct irq_stage_data *this_oob_staged(void) +{ + return raw_cpu_ptr(&irq_pipeline.stages[1]); +} + +static __always_inline struct irq_stage_data *__current_irq_staged(void) +{ + return &raw_cpu_ptr(irq_pipeline.stages)[stage_level()]; +} + +/** + * current_irq_staged - return the address of the pipeline context + * data for the current stage. CPU migration must be disabled. + */ +#define current_irq_staged __current_irq_staged() + +static __always_inline +void check_staged_locality(struct irq_stage_data *pd) +{ +#ifdef CONFIG_DEBUG_IRQ_PIPELINE + /* + * Setting our context with another processor's is a really + * bad idea, our caller definitely went loopy. + */ + WARN_ON_ONCE(raw_smp_processor_id() != pd->cpu); +#endif +} + +/** + * switch_oob(), switch_inband() - switch the current CPU to the + * specified stage context. CPU migration must be disabled. + * + * Calling these routines is the only sane and safe way to change the + * interrupt stage for the current CPU. Don't bypass them, ever. + * Really. + */ +static __always_inline +void switch_oob(struct irq_stage_data *pd) +{ + check_staged_locality(pd); + if (!(preempt_count() & STAGE_MASK)) + preempt_count_add(STAGE_OFFSET); +} + +static __always_inline +void switch_inband(struct irq_stage_data *pd) +{ + check_staged_locality(pd); + if (preempt_count() & STAGE_MASK) + preempt_count_sub(STAGE_OFFSET); +} + +static __always_inline +void set_current_irq_staged(struct irq_stage_data *pd) +{ + if (pd->stage == &inband_stage) + switch_inband(pd); + else + switch_oob(pd); +} + +static __always_inline struct irq_stage *__current_irq_stage(void) +{ + /* + * We don't have to hard disable irqs while accessing the + * per-CPU stage data here, because there is no way we could + * switch stage and CPU at the same time. + */ + return __current_irq_staged()->stage; +} + +#define current_irq_stage __current_irq_stage() + +static __always_inline bool oob_stage_present(void) +{ + return oob_stage.index != 0; +} + +/** + * stage_irqs_pending() - Whether we have interrupts pending + * (i.e. logged) on the current CPU for the given stage. Hard IRQs + * must be disabled. 
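+ *
+ * For illustration, this is precisely how inband_irq_pending() in
+ * <linux/irq_pipeline.h> uses it:
+ *
+ *	return stage_irqs_pending(this_inband_staged());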
+ */ +static __always_inline int stage_irqs_pending(struct irq_stage_data *pd) +{ + return pd->log.index_0 != 0; +} + +void sync_current_irq_stage(void); + +void sync_irq_stage(struct irq_stage *top); + +void irq_post_stage(struct irq_stage *stage, + unsigned int irq); + +static __always_inline void irq_post_oob(unsigned int irq) +{ + irq_post_stage(&oob_stage, irq); +} + +static __always_inline void irq_post_inband(unsigned int irq) +{ + irq_post_stage(&inband_stage, irq); +} + +static __always_inline void oob_irq_disable(void) +{ + hard_local_irq_disable(); + stall_oob(); +} + +static __always_inline unsigned long oob_irq_save(void) +{ + hard_local_irq_disable(); + return test_and_stall_oob(); +} + +static __always_inline int oob_irqs_disabled(void) +{ + return test_oob_stall(); +} + +void oob_irq_enable(void); + +void __oob_irq_restore(unsigned long x); + +static __always_inline void oob_irq_restore(unsigned long x) +{ + if ((x ^ test_oob_stall()) & 1) + __oob_irq_restore(x); +} + +bool stage_disabled(void); + +unsigned long test_and_lock_stage(int *irqsoff); + +void unlock_stage(unsigned long irqstate); + +#define stage_save_flags(__irqstate) \ + do { \ + unsigned long __flags = hard_local_save_flags(); \ + (__irqstate) = irqs_merge_flags(__flags, \ + irqs_disabled()); \ + } while (0) + +int enable_oob_stage(const char *name); + +int arch_enable_oob_stage(void); + +void disable_oob_stage(void); + +#else /* !CONFIG_IRQ_PIPELINE */ + +#include <linux/irqflags.h> + +void call_is_nop_without_pipelining(void); + +static __always_inline void stall_inband(void) { } + +static __always_inline void unstall_inband(void) { } + +static __always_inline int test_and_stall_inband(void) +{ + return false; +} + +static __always_inline int test_inband_stall(void) +{ + return false; +} + +static __always_inline bool oob_stage_present(void) +{ + return false; +} + +static __always_inline bool stage_disabled(void) +{ + return irqs_disabled(); +} + +static __always_inline void irq_post_inband(unsigned int irq) +{ + call_is_nop_without_pipelining(); +} + +#define test_and_lock_stage(__irqsoff) \ + ({ \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + *(__irqsoff) = irqs_disabled_flags(__flags); \ + __flags; \ + }) + +#define unlock_stage(__flags) raw_local_irq_restore(__flags) + +#define stage_save_flags(__flags) raw_local_save_flags(__flags) + +static __always_inline void stall_inband_nocheck(void) +{ } + +static __always_inline void unstall_inband_nocheck(void) +{ } + +static __always_inline int test_and_stall_inband_nocheck(void) +{ + return irqs_disabled(); +} + +#endif /* !CONFIG_IRQ_PIPELINE */ + +#endif /* !_LINUX_IRQSTAGE_H */ diff --git a/kernel/include/linux/kernel.h b/kernel/include/linux/kernel.h index c333dc6..b45aaf3 100644 --- a/kernel/include/linux/kernel.h +++ b/kernel/include/linux/kernel.h @@ -15,6 +15,7 @@ #include <linux/typecheck.h> #include <linux/printk.h> #include <linux/build_bug.h> +#include <asm-generic/irq_pipeline.h> #include <asm/byteorder.h> #include <asm/div64.h> #include <uapi/linux/kernel.h> @@ -195,9 +196,12 @@ #ifdef CONFIG_PREEMPT_VOLUNTARY extern int _cond_resched(void); -# define might_resched() _cond_resched() +# define might_resched() do { \ + check_inband_stage(); \ + _cond_resched(); \ + } while (0) #else -# define might_resched() do { } while (0) +# define might_resched() check_inband_stage() #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP diff --git a/kernel/include/linux/kvm_host.h b/kernel/include/linux/kvm_host.h index 9cb0a3d..36d741e 100644 --- 
a/kernel/include/linux/kvm_host.h +++ b/kernel/include/linux/kvm_host.h @@ -14,6 +14,7 @@ #include <linux/mm.h> #include <linux/mmu_notifier.h> #include <linux/preempt.h> +#include <linux/dovetail.h> #include <linux/msi.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -260,10 +261,23 @@ unsigned len; }; +/* + * Called when the host is about to leave the inband stage. Typically + * used for switching the current vcpu out of guest mode before a + * companion core reinstates an oob task context. + */ +struct kvm_oob_notifier { + void (*handler)(struct kvm_oob_notifier *nfy); + bool put_vcpu; +}; + struct kvm_vcpu { struct kvm *kvm; #ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier preempt_notifier; +#endif +#ifdef CONFIG_DOVETAIL + struct kvm_oob_notifier oob_notifier; #endif int cpu; int vcpu_id; /* id given by userspace at creation */ @@ -1502,6 +1516,47 @@ } #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */ +#if defined(CONFIG_DOVETAIL) && defined(CONFIG_KVM) +static inline void inband_init_vcpu(struct kvm_vcpu *vcpu, + void (*preempt_handler)(struct kvm_oob_notifier *nfy)) +{ + vcpu->oob_notifier.handler = preempt_handler; + vcpu->oob_notifier.put_vcpu = false; +} + +static inline void inband_enter_guest(struct kvm_vcpu *vcpu) +{ + struct irq_pipeline_data *p = raw_cpu_ptr(&irq_pipeline); + WRITE_ONCE(p->vcpu_notify, &vcpu->oob_notifier); +} + +static inline void inband_exit_guest(void) +{ + struct irq_pipeline_data *p = raw_cpu_ptr(&irq_pipeline); + WRITE_ONCE(p->vcpu_notify, NULL); +} + +static inline void inband_set_vcpu_release_state(struct kvm_vcpu *vcpu, + bool pending) +{ + vcpu->oob_notifier.put_vcpu = pending; +} +#else +static inline void inband_init_vcpu(struct kvm_vcpu *vcpu, + void (*preempt_handler)(struct kvm_oob_notifier *nfy)) +{ } + +static inline void inband_enter_guest(struct kvm_vcpu *vcpu) +{ } + +static inline void inband_exit_guest(void) +{ } + +static inline void inband_set_vcpu_release_state(struct kvm_vcpu *vcpu, + bool pending) +{ } +#endif + typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, diff --git a/kernel/include/linux/lockdep.h b/kernel/include/linux/lockdep.h index 2c25863..3a6b855 100644 --- a/kernel/include/linux/lockdep.h +++ b/kernel/include/linux/lockdep.h @@ -214,29 +214,30 @@ * of dependencies wrong: they are either too broad (they need a class-split) * or they are too narrow (they suffer from a false class-split): */ -#define lockdep_set_class(lock, key) \ - lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer, \ - (lock)->dep_map.lock_type) +#define lockdep_set_class(lock, key) \ + lockdep_init_map_type(LOCKDEP_ALT_DEPMAP(lock), #key, key, 0, \ + LOCKDEP_ALT_DEPMAP(lock)->wait_type_inner, \ + LOCKDEP_ALT_DEPMAP(lock)->wait_type_outer, \ + LOCKDEP_ALT_DEPMAP(lock)->lock_type) -#define lockdep_set_class_and_name(lock, key, name) \ - lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer, \ - (lock)->dep_map.lock_type) +#define lockdep_set_class_and_name(lock, key, name) \ + lockdep_init_map_type(LOCKDEP_ALT_DEPMAP(lock), name, key, 0, \ + LOCKDEP_ALT_DEPMAP(lock)->wait_type_inner, \ + LOCKDEP_ALT_DEPMAP(lock)->wait_type_outer, \ + LOCKDEP_ALT_DEPMAP(lock)->lock_type) -#define lockdep_set_class_and_subclass(lock, key, sub) \ - lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \ - 
(lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer, \ - (lock)->dep_map.lock_type) +#define lockdep_set_class_and_subclass(lock, key, sub) \ + lockdep_init_map_type(LOCKDEP_ALT_DEPMAP(lock), #key, key, sub, \ + LOCKDEP_ALT_DEPMAP(lock)->wait_type_inner, \ + LOCKDEP_ALT_DEPMAP(lock)->wait_type_outer, \ + LOCKDEP_ALT_DEPMAP(lock)->lock_type) #define lockdep_set_subclass(lock, sub) \ - lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ - (lock)->dep_map.wait_type_inner, \ - (lock)->dep_map.wait_type_outer, \ - (lock)->dep_map.lock_type) + lockdep_init_map_type(LOCKDEP_ALT_DEPMAP(lock), #lock, \ + LOCKDEP_ALT_DEPMAP(lock)->key, sub, \ + LOCKDEP_ALT_DEPMAP(lock)->wait_type_inner, \ + LOCKDEP_ALT_DEPMAP(lock)->wait_type_outer, \ + LOCKDEP_ALT_DEPMAP(lock)->lock_type) #define lockdep_set_novalidate_class(lock) \ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) @@ -244,7 +245,8 @@ /* * Compare locking classes */ -#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key) +#define lockdep_match_class(lock, key) \ + lockdep_match_key(LOCKDEP_ALT_DEPMAP(lock), key) static inline int lockdep_match_key(struct lockdep_map *lock, struct lock_class_key *key) @@ -282,8 +284,8 @@ return lock_is_held_type(lock, -1); } -#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) -#define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r)) +#define lockdep_is_held(lock) lock_is_held(LOCKDEP_ALT_DEPMAP(lock)) +#define lockdep_is_held_type(lock, r) lock_is_held_type(LOCKDEP_ALT_DEPMAP(lock), (r)) extern void lock_set_class(struct lockdep_map *lock, const char *name, struct lock_class_key *key, unsigned int subclass, @@ -306,26 +308,27 @@ #define lockdep_depth(tsk) (debug_locks ? 
(tsk)->lockdep_depth : 0) #define lockdep_assert_held(l) do { \ - WARN_ON(debug_locks && !lockdep_is_held(l)); \ + WARN_ON(debug_locks && !LOCKDEP_HARD_DEBUG_RET(l, 1, lockdep_is_held(l))); \ } while (0) #define lockdep_assert_held_write(l) do { \ - WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ + WARN_ON(debug_locks && !LOCKDEP_HARD_DEBUG_RET(l, 1, lockdep_is_held_type(l, 0))); \ } while (0) #define lockdep_assert_held_read(l) do { \ - WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \ + WARN_ON(debug_locks && !LOCKDEP_HARD_DEBUG_RET(l, 1, lockdep_is_held_type(l, 1))); \ } while (0) #define lockdep_assert_held_once(l) do { \ - WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ + WARN_ON_ONCE(debug_locks && !LOCKDEP_HARD_DEBUG_RET(l, 1, lockdep_is_held(l))); \ } while (0) #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) -#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) -#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c)) -#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c)) +#define lockdep_pin_lock(l) LOCKDEP_HARD_DEBUG_RET(l, ({ struct pin_cookie cookie; cookie;} ), \ + lock_pin_lock(LOCKDEP_ALT_DEPMAP(l))) +#define lockdep_repin_lock(l,c) LOCKDEP_HARD_DEBUG(l,, lock_repin_lock(LOCKDEP_ALT_DEPMAP(l), (c))) +#define lockdep_unpin_lock(l,c) LOCKDEP_HARD_DEBUG(l,, lock_unpin_lock(LOCKDEP_ALT_DEPMAP(l), (c))) #else /* !CONFIG_LOCKDEP */ @@ -552,22 +555,22 @@ #ifdef CONFIG_PROVE_LOCKING # define might_lock(lock) \ do { \ - typecheck(struct lockdep_map *, &(lock)->dep_map); \ - lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \ - lock_release(&(lock)->dep_map, _THIS_IP_); \ + typecheck(struct lockdep_map *, LOCKDEP_ALT_DEPMAP(lock)); \ + lock_acquire(LOCKDEP_ALT_DEPMAP(lock), 0, 0, 0, 1, NULL, _THIS_IP_); \ + lock_release(LOCKDEP_ALT_DEPMAP(lock), _THIS_IP_); \ } while (0) # define might_lock_read(lock) \ do { \ - typecheck(struct lockdep_map *, &(lock)->dep_map); \ - lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ - lock_release(&(lock)->dep_map, _THIS_IP_); \ + typecheck(struct lockdep_map *, LOCKDEP_ALT_DEPMAP(lock)); \ + lock_acquire(LOCKDEP_ALT_DEPMAP(lock), 0, 0, 1, 1, NULL, _THIS_IP_); \ + lock_release(LOCKDEP_ALT_DEPMAP(lock), _THIS_IP_); \ } while (0) # define might_lock_nested(lock, subclass) \ do { \ - typecheck(struct lockdep_map *, &(lock)->dep_map); \ - lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \ + typecheck(struct lockdep_map *, LOCKDEP_ALT_DEPMAP(lock)); \ + lock_acquire(LOCKDEP_ALT_DEPMAP(lock), subclass, 0, 1, 1, NULL, \ _THIS_IP_); \ - lock_release(&(lock)->dep_map, _THIS_IP_); \ + lock_release(LOCKDEP_ALT_DEPMAP(lock), _THIS_IP_); \ } while (0) DECLARE_PER_CPU(int, hardirqs_enabled); @@ -576,14 +579,32 @@ #define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion)) +#define __lockdep_check_irqs_enabled() \ + ({ !hard_irqs_disabled() && \ + (running_oob() || this_cpu_read(hardirqs_enabled)); }) + #define lockdep_assert_irqs_enabled() \ -do { \ - WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \ -} while (0) + do { \ + WARN_ON_ONCE(__lockdep_enabled && \ + !__lockdep_check_irqs_enabled()); \ + } while (0) + +#define __lockdep_check_irqs_disabled() \ + ({ hard_irqs_disabled() || \ + (running_inband() && !this_cpu_read(hardirqs_enabled)); }) #define lockdep_assert_irqs_disabled() \ + do { \ + WARN_ON_ONCE(__lockdep_enabled && \ + !__lockdep_check_irqs_disabled()); \ + } while (0) + +#define lockdep_read_irqs_state() \ + ({ 
this_cpu_read(hardirqs_enabled); }) + +#define lockdep_write_irqs_state(__state) \ do { \ - WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \ + this_cpu_write(hardirqs_enabled, __state); \ } while (0) #define lockdep_assert_in_irq() \ @@ -596,7 +617,7 @@ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ __lockdep_enabled && \ (preempt_count() != 0 || \ - !this_cpu_read(hardirqs_enabled))); \ + __lockdep_check_irqs_disabled())); \ } while (0) #define lockdep_assert_preemption_disabled() \ @@ -604,7 +625,7 @@ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ __lockdep_enabled && \ (preempt_count() == 0 && \ - this_cpu_read(hardirqs_enabled))); \ + __lockdep_check_irqs_enabled())); \ } while (0) #else @@ -614,6 +635,8 @@ # define lockdep_assert_irqs_enabled() do { } while (0) # define lockdep_assert_irqs_disabled() do { } while (0) +# define lockdep_read_irqs_state() 0 +# define lockdep_write_irqs_state(__state) do { (void)(__state); } while (0) # define lockdep_assert_in_irq() do { } while (0) # define lockdep_assert_preemption_enabled() do { } while (0) diff --git a/kernel/include/linux/mm.h b/kernel/include/linux/mm.h index dfefcfa..7e23752 100644 --- a/kernel/include/linux/mm.h +++ b/kernel/include/linux/mm.h @@ -20,6 +20,7 @@ #include <linux/pfn.h> #include <linux/percpu-refcount.h> #include <linux/bit_spinlock.h> +#include <linux/dovetail.h> #include <linux/shrinker.h> #include <linux/resource.h> #include <linux/page_ext.h> diff --git a/kernel/include/linux/mm_types.h b/kernel/include/linux/mm_types.h index c853f61..5820ea3 100644 --- a/kernel/include/linux/mm_types.h +++ b/kernel/include/linux/mm_types.h @@ -19,6 +19,8 @@ #include <asm/mmu.h> +#include <dovetail/mm_info.h> + #ifndef AT_VECTOR_SIZE_ARCH #define AT_VECTOR_SIZE_ARCH 0 #endif @@ -593,6 +595,9 @@ #ifdef CONFIG_HUGETLB_PAGE atomic_long_t hugetlb_usage; #endif +#ifdef CONFIG_DOVETAIL + struct oob_mm_state oob_state; +#endif struct work_struct async_put_work; #ifdef CONFIG_IOMMU_SUPPORT diff --git a/kernel/include/linux/net.h b/kernel/include/linux/net.h index e201a7f..f963695 100644 --- a/kernel/include/linux/net.h +++ b/kernel/include/linux/net.h @@ -79,6 +79,7 @@ #ifndef SOCK_NONBLOCK #define SOCK_NONBLOCK O_NONBLOCK #endif +#define SOCK_OOB O_OOB #endif /* ARCH_HAS_SOCKET_TYPES */ diff --git a/kernel/include/linux/netdevice.h b/kernel/include/linux/netdevice.h index 7e7a003..d25e9aa 100644 --- a/kernel/include/linux/netdevice.h +++ b/kernel/include/linux/netdevice.h @@ -41,6 +41,7 @@ #endif #include <net/netprio_cgroup.h> #include <net/xdp.h> +#include <net/netoob.h> #include <linux/netdev_features.h> #include <linux/neighbour.h> @@ -296,6 +297,7 @@ __LINK_STATE_LINKWATCH_PENDING, __LINK_STATE_DORMANT, __LINK_STATE_TESTING, + __LINK_STATE_OOB, }; @@ -1534,6 +1536,13 @@ ANDROID_KABI_RESERVE(6); ANDROID_KABI_RESERVE(7); ANDROID_KABI_RESERVE(8); +#ifdef CONFIG_NET_OOB + struct sk_buff * (*ndo_alloc_oob_skb)(struct net_device *dev, + dma_addr_t *dma_addr); + void (*ndo_free_oob_skb)(struct net_device *dev, + struct sk_buff *skb, + dma_addr_t dma_addr); +#endif }; /** @@ -1725,6 +1734,7 @@ * @tlsdev_ops: Transport Layer Security offload operations * @header_ops: Includes callbacks for creating,parsing,caching,etc * of Layer 2 headers. 
+ * @net_oob_context: Out-of-band networking context (oob stage diversion) * * @flags: Interface flags (a la BSD) * @priv_flags: Like 'flags' but invisible to userspace, @@ -1982,6 +1992,10 @@ #if IS_ENABLED(CONFIG_TLS_DEVICE) const struct tlsdev_ops *tlsdev_ops; +#endif + +#ifdef CONFIG_NET_OOB + struct oob_netdev_context oob_context; #endif const struct header_ops *header_ops; @@ -4190,6 +4204,86 @@ void netif_device_attach(struct net_device *dev); +#ifdef CONFIG_NET_OOB + +static inline bool netif_oob_diversion(const struct net_device *dev) +{ + return test_bit(__LINK_STATE_OOB, &dev->state); +} + +static inline void netif_enable_oob_diversion(struct net_device *dev) +{ + return set_bit(__LINK_STATE_OOB, &dev->state); +} + +static inline void netif_disable_oob_diversion(struct net_device *dev) +{ + clear_bit(__LINK_STATE_OOB, &dev->state); + smp_mb__after_atomic(); +} + +int netif_xmit_oob(struct sk_buff *skb); + +static inline bool netdev_is_oob_capable(struct net_device *dev) +{ + return !!(dev->oob_context.flags & IFF_OOB_CAPABLE); +} + +static inline void netdev_enable_oob_port(struct net_device *dev) +{ + dev->oob_context.flags |= IFF_OOB_PORT; +} + +static inline void netdev_disable_oob_port(struct net_device *dev) +{ + dev->oob_context.flags &= ~IFF_OOB_PORT; +} + +static inline bool netdev_is_oob_port(struct net_device *dev) +{ + return !!(dev->oob_context.flags & IFF_OOB_PORT); +} + +static inline struct sk_buff *netdev_alloc_oob_skb(struct net_device *dev, + dma_addr_t *dma_addr) +{ + return dev->netdev_ops->ndo_alloc_oob_skb(dev, dma_addr); +} + +static inline void netdev_free_oob_skb(struct net_device *dev, + struct sk_buff *skb, + dma_addr_t dma_addr) +{ + dev->netdev_ops->ndo_free_oob_skb(dev, skb, dma_addr); +} + +#else + +static inline bool netif_oob_diversion(const struct net_device *dev) +{ + return false; +} + +static inline bool netdev_is_oob_capable(struct net_device *dev) +{ + return false; +} + +static inline void netdev_enable_oob_port(struct net_device *dev) +{ +} + +static inline void netdev_disable_oob_port(struct net_device *dev) +{ +} + +static inline bool netdev_is_oob_port(struct net_device *dev) +{ + return false; +} + +#endif + /* * Network interface message level settings */ diff --git a/kernel/include/linux/poll.h b/kernel/include/linux/poll.h index 1cdc32b..2701db2 100644 --- a/kernel/include/linux/poll.h +++ b/kernel/include/linux/poll.h @@ -10,6 +10,7 @@ #include <linux/fs.h> #include <linux/sysctl.h> #include <linux/uaccess.h> +#include <dovetail/poll.h> #include <uapi/linux/poll.h> #include <uapi/linux/eventpoll.h> diff --git a/kernel/include/linux/preempt.h b/kernel/include/linux/preempt.h index 7d9c1c0..58c21bc 100644 --- a/kernel/include/linux/preempt.h +++ b/kernel/include/linux/preempt.h @@ -27,17 +27,23 @@ * SOFTIRQ_MASK: 0x0000ff00 * HARDIRQ_MASK: 0x000f0000 * NMI_MASK: 0x00f00000 + * PIPELINE_MASK: 0x01000000 + * STAGE_MASK: 0x02000000 * PREEMPT_NEED_RESCHED: 0x80000000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 #define HARDIRQ_BITS 4 #define NMI_BITS 4 +#define PIPELINE_BITS 1 +#define STAGE_BITS 1 #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) #define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) +#define PIPELINE_SHIFT (NMI_SHIFT + NMI_BITS) +#define STAGE_SHIFT (PIPELINE_SHIFT + PIPELINE_BITS) #define __IRQ_MASK(x) ((1UL << (x))-1) @@ -45,11 +51,15 @@ #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) #define HARDIRQ_MASK 
(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) #define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) +#define PIPELINE_MASK (__IRQ_MASK(PIPELINE_BITS) << PIPELINE_SHIFT) +#define STAGE_MASK (__IRQ_MASK(STAGE_BITS) << STAGE_SHIFT) #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) +#define PIPELINE_OFFSET (1UL << PIPELINE_SHIFT) +#define STAGE_OFFSET (1UL << STAGE_SHIFT) #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) @@ -82,6 +92,9 @@ #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ | NMI_MASK)) +/* The current IRQ stage level: 0=inband, 1=oob */ +#define stage_level() ((preempt_count() & STAGE_MASK) >> STAGE_SHIFT) + /* * Are we doing bottom half or hardware interrupt processing? * @@ -91,6 +104,7 @@ * in_serving_softirq() - We're in softirq context * in_nmi() - We're in NMI context * in_task() - We're in task context + * in_pipeline() - We're on pipeline entry * * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really * should not be used in new code. @@ -102,6 +116,7 @@ #define in_nmi() (preempt_count() & NMI_MASK) #define in_task() (!(preempt_count() & \ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) +#define in_pipeline() (preempt_count() & PIPELINE_MASK) /* * The preempt_count offset after preempt_disable(); @@ -180,7 +195,8 @@ #define preempt_enable_no_resched() sched_preempt_enable_no_resched() -#define preemptible() (preempt_count() == 0 && !irqs_disabled()) +#define preemptible() (preempt_count() == 0 && \ + !hard_irqs_disabled() && !irqs_disabled()) #ifdef CONFIG_PREEMPTION #define preempt_enable() \ @@ -352,4 +368,43 @@ preempt_enable(); } +#ifdef CONFIG_IRQ_PIPELINE + +static __always_inline bool running_inband(void) +{ + return stage_level() == 0; +} + +static __always_inline bool running_oob(void) +{ + return !running_inband(); +} + +unsigned long hard_preempt_disable(void); +void hard_preempt_enable(unsigned long flags); + +#else + +static __always_inline bool running_inband(void) +{ + return true; +} + +static __always_inline bool running_oob(void) +{ + return false; +} + +#define hard_preempt_disable() \ +({ \ + preempt_disable(); \ + 0; \ +}) +#define hard_preempt_enable(__flags) \ + do { \ + preempt_enable(); \ + (void)(__flags); \ + } while (0) +#endif + #endif /* __LINUX_PREEMPT_H */ diff --git a/kernel/include/linux/printk.h b/kernel/include/linux/printk.h index 14d13ec..270fa48 100644 --- a/kernel/include/linux/printk.h +++ b/kernel/include/linux/printk.h @@ -160,7 +160,22 @@ static inline void printk_nmi_direct_exit(void) { } #endif /* PRINTK_NMI */ + struct dev_printk_info; + +#ifdef CONFIG_RAW_PRINTK +void raw_puts(const char *s, size_t len); +void raw_vprintk(const char *fmt, va_list ap); +asmlinkage __printf(1, 2) +void raw_printk(const char *fmt, ...); +#else +static inline __cold +void raw_puts(const char *s, size_t len) { } +static inline __cold +void raw_vprintk(const char *s, va_list ap) { } +static inline __printf(1, 2) __cold +void raw_printk(const char *s, ...) 
{ } +#endif #ifdef CONFIG_PRINTK asmlinkage __printf(4, 0) @@ -512,7 +527,7 @@ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ \ - if (__ratelimit(&_rs)) \ + if (running_oob() || __ratelimit(&_rs)) \ printk(fmt, ##__VA_ARGS__); \ }) #else diff --git a/kernel/include/linux/rcupdate.h b/kernel/include/linux/rcupdate.h index 095b3b3..a4388ef 100644 --- a/kernel/include/linux/rcupdate.h +++ b/kernel/include/linux/rcupdate.h @@ -118,6 +118,14 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { } #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ +#ifdef CONFIG_IRQ_PIPELINE +void rcu_oob_prepare_lock(void); +void rcu_oob_finish_lock(void); +#else +#define rcu_oob_prepare_lock() do { } while (0) +#define rcu_oob_finish_lock() do { } while (0) +#endif + /** * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers * @a: Code that RCU needs to pay attention to. @@ -290,7 +298,7 @@ static inline int rcu_read_lock_sched_held(void) { - return !preemptible(); + return !running_inband() || !preemptible(); } static inline int rcu_read_lock_any_held(void) @@ -646,6 +654,7 @@ */ static __always_inline void rcu_read_lock(void) { + rcu_oob_prepare_lock(); __rcu_read_lock(); __acquire(RCU); rcu_lock_acquire(&rcu_lock_map); @@ -702,6 +711,7 @@ "rcu_read_unlock() used illegally while idle"); __release(RCU); __rcu_read_unlock(); + rcu_oob_finish_lock(); rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ } @@ -755,6 +765,7 @@ static inline void rcu_read_lock_sched(void) { preempt_disable(); + rcu_oob_prepare_lock(); __acquire(RCU_SCHED); rcu_lock_acquire(&rcu_sched_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), @@ -779,6 +790,7 @@ "rcu_read_unlock_sched() used illegally while idle"); rcu_lock_release(&rcu_sched_lock_map); __release(RCU_SCHED); + rcu_oob_finish_lock(); preempt_enable(); } diff --git a/kernel/include/linux/regmap.h b/kernel/include/linux/regmap.h index 751ca38..4df7952 100644 --- a/kernel/include/linux/regmap.h +++ b/kernel/include/linux/regmap.h @@ -369,6 +369,7 @@ int (*reg_write)(void *context, unsigned int reg, unsigned int val); bool fast_io; + bool oob_io; unsigned int max_register; const struct regmap_access_table *wr_table; diff --git a/kernel/include/linux/sched.h b/kernel/include/linux/sched.h index d3cc279..38cac8c 100644 --- a/kernel/include/linux/sched.h +++ b/kernel/include/linux/sched.h @@ -119,6 +119,12 @@ #define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) +#ifdef CONFIG_DOVETAIL +#define task_is_off_stage(task) test_ti_local_flags(task_thread_info(task), _TLF_OFFSTAGE) +#else +#define task_is_off_stage(task) 0 +#endif + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP /* @@ -1052,6 +1058,10 @@ int irq_config; #endif +#ifdef CONFIG_IRQ_PIPELINE + unsigned long stall_bits; +#endif + #ifdef CONFIG_LOCKDEP # define MAX_LOCK_DEPTH 48UL u64 curr_chain_key; diff --git a/kernel/include/linux/sched/coredump.h b/kernel/include/linux/sched/coredump.h index dfd82ea..0b06940 100644 --- a/kernel/include/linux/sched/coredump.h +++ b/kernel/include/linux/sched/coredump.h @@ -74,6 +74,7 @@ #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ #define MMF_MULTIPROCESS 27 /* mm is shared between processes */ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) +#define MMF_DOVETAILED 31 /* mm belongs to a dovetailed process */ #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ MMF_DISABLE_THP_MASK) diff --git a/kernel/include/linux/skbuff.h b/kernel/include/linux/skbuff.h index f73efb3..3556cf6 100644 --- 
a/kernel/include/linux/skbuff.h +++ b/kernel/include/linux/skbuff.h @@ -793,6 +793,12 @@ #ifdef CONFIG_SKB_EXTENSIONS __u8 active_extensions; #endif +#ifdef CONFIG_NET_OOB + __u8 oob:1; + __u8 oob_clone:1; + __u8 oob_cloned:1; +#endif + /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() */ @@ -1102,6 +1108,69 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size); struct sk_buff *build_skb_around(struct sk_buff *skb, void *data, unsigned int frag_size); +#ifdef CONFIG_NET_OOB + +static inline void __skb_oob_copy(struct sk_buff *new, + const struct sk_buff *old) +{ + new->oob = old->oob; + new->oob_clone = old->oob_clone; + new->oob_cloned = old->oob_cloned; +} + +static inline bool skb_is_oob(const struct sk_buff *skb) +{ + return skb->oob; +} + +static inline bool skb_is_oob_clone(const struct sk_buff *skb) +{ + return skb->oob_clone; +} + +static inline bool skb_has_oob_clone(const struct sk_buff *skb) +{ + return skb->oob_cloned; +} + +struct sk_buff *__netdev_alloc_oob_skb(struct net_device *dev, + size_t len, size_t headroom, + gfp_t gfp_mask); +void __netdev_free_oob_skb(struct net_device *dev, struct sk_buff *skb); +void netdev_reset_oob_skb(struct net_device *dev, struct sk_buff *skb, + size_t headroom); +struct sk_buff *skb_alloc_oob_head(gfp_t gfp_mask); +void skb_morph_oob_skb(struct sk_buff *n, struct sk_buff *skb); +bool skb_release_oob_skb(struct sk_buff *skb, int *dref); + +static inline bool recycle_oob_skb(struct sk_buff *skb) +{ + bool skb_oob_recycle(struct sk_buff *skb); + + if (!skb->oob) + return false; + + return skb_oob_recycle(skb); +} + +#else /* !CONFIG_NET_OOB */ + +static inline void __skb_oob_copy(struct sk_buff *new, + const struct sk_buff *old) +{ +} + +static inline bool skb_is_oob(const struct sk_buff *skb) +{ + return false; +} + +static inline bool recycle_oob_skb(struct sk_buff *skb) +{ + return false; +} + +#endif /* !CONFIG_NET_OOB */ /** * alloc_skb - allocate a network buffer diff --git a/kernel/include/linux/smp.h b/kernel/include/linux/smp.h index 7ce15c3..44888c0 100644 --- a/kernel/include/linux/smp.h +++ b/kernel/include/linux/smp.h @@ -241,6 +241,21 @@ #define get_cpu() ({ preempt_disable(); __smp_processor_id(); }) #define put_cpu() preempt_enable() +#ifdef CONFIG_IRQ_PIPELINE +#define hard_get_cpu(flags) ({ \ + (flags) = hard_preempt_disable(); \ + raw_smp_processor_id(); \ + }) +#define hard_put_cpu(flags) hard_preempt_enable(flags) +#else +#define hard_get_cpu(flags) ({ (void)(flags); get_cpu(); }) +#define hard_put_cpu(flags) \ + do { \ + (void)(flags); \ + put_cpu(); \ + } while (0) +#endif + /* * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: diff --git a/kernel/include/linux/socket.h b/kernel/include/linux/socket.h index c3b35d1..eb8bf6a 100644 --- a/kernel/include/linux/socket.h +++ b/kernel/include/linux/socket.h @@ -223,8 +223,9 @@ * reuses AF_INET address family */ #define AF_XDP 44 /* XDP sockets */ +#define AF_OOB 45 /* Out-of-band domain sockets */ -#define AF_MAX 45 /* For now.. */ +#define AF_MAX 46 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -274,6 +275,7 @@ #define PF_QIPCRTR AF_QIPCRTR #define PF_SMC AF_SMC #define PF_XDP AF_XDP +#define PF_OOB AF_OOB #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
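*/

/*
 * A speculative userland sketch for the AF_OOB family added above,
 * assuming the companion core registers this address family
 * (illustration only):
 *
 *	int sock = socket(AF_OOB, SOCK_RAW, 0);
 *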
*/ diff --git a/kernel/include/linux/spi/spi.h b/kernel/include/linux/spi/spi.h index f50c766..04f35c4 100644 --- a/kernel/include/linux/spi/spi.h +++ b/kernel/include/linux/spi/spi.h @@ -9,6 +9,7 @@ #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/slab.h> +#include <linux/dmaengine.h> #include <linux/kthread.h> #include <linux/completion.h> #include <linux/scatterlist.h> @@ -253,6 +254,7 @@ struct spi_message; struct spi_transfer; +struct spi_oob_transfer; /** * struct spi_driver - Host side "protocol" driver @@ -352,6 +354,7 @@ * @io_mutex: mutex for physical bus access * @bus_lock_spinlock: spinlock for SPI bus locking * @bus_lock_mutex: mutex for exclusion of multiple callers + * @bus_oob_lock_sem: semaphore for exclusion during oob operations * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This @@ -534,6 +537,10 @@ spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; +#ifdef CONFIG_SPI_OOB + struct semaphore bus_oob_lock_sem; +#endif + /* flag indicating that the SPI bus is locked for exclusive use */ bool bus_lock_flag; @@ -626,6 +633,14 @@ int (*unprepare_message)(struct spi_controller *ctlr, struct spi_message *message); int (*slave_abort)(struct spi_controller *ctlr); + int (*prepare_oob_transfer)(struct spi_controller *ctlr, + struct spi_oob_transfer *xfer); + void (*start_oob_transfer)(struct spi_controller *ctlr, + struct spi_oob_transfer *xfer); + void (*pulse_oob_transfer)(struct spi_controller *ctlr, + struct spi_oob_transfer *xfer); + void (*terminate_oob_transfer)(struct spi_controller *ctlr, + struct spi_oob_transfer *xfer); /* * These hooks are for drivers that use a generic implementation @@ -1137,6 +1152,90 @@ kfree(m); } +struct spi_oob_transfer { + struct spi_device *spi; + dma_addr_t dma_addr; + size_t aligned_frame_len; + void *io_buffer; /* 2 x aligned_frame_len */ + struct dma_async_tx_descriptor *txd; + struct dma_async_tx_descriptor *rxd; + u32 effective_speed_hz; + /* + * Caller-defined settings for the transfer. + */ + struct spi_oob_setup { + u32 frame_len; + u32 speed_hz; + u8 bits_per_word; + dma_async_tx_callback xfer_done; + } setup; +}; + +static inline off_t spi_get_oob_rxoff(struct spi_oob_transfer *xfer) +{ + /* RX area is in first half of the I/O buffer. */ + return 0; +} + +static inline off_t spi_get_oob_txoff(struct spi_oob_transfer *xfer) +{ + /* TX area is in second half of the I/O buffer. 
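*/
+	/*
+	 * Hedged aside (illustrative, not part of this API): with the
+	 * I/O buffer mmapped at iobuf, a caller would locate both areas
+	 * as:
+	 *
+	 *	rx = iobuf + spi_get_oob_rxoff(&xfer);
+	 *	tx = iobuf + spi_get_oob_txoff(&xfer);
+	 *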
*/ + return xfer->aligned_frame_len; +} + +static inline size_t spi_get_oob_iolen(struct spi_oob_transfer *xfer) +{ + return xfer->aligned_frame_len * 2; +} + +#ifdef CONFIG_SPI_OOB + +struct vm_area_struct; + +int spi_prepare_oob_transfer(struct spi_device *spi, + struct spi_oob_transfer *xfer); + +void spi_start_oob_transfer(struct spi_oob_transfer *xfer); + +int spi_pulse_oob_transfer(struct spi_oob_transfer *xfer); + +void spi_terminate_oob_transfer(struct spi_oob_transfer *xfer); + +int spi_mmap_oob_transfer(struct vm_area_struct *vma, + struct spi_oob_transfer *xfer); + +#else + +static inline +int spi_prepare_oob_transfer(struct spi_device *spi, + struct spi_oob_transfer *xfer) +{ + return -ENOTSUPP; +} + +static inline +void spi_start_oob_transfer(struct spi_oob_transfer *xfer) +{ } + +static inline +int spi_pulse_oob_transfer(struct spi_oob_transfer *xfer) +{ + return -EIO; +} + +static inline +void spi_terminate_oob_transfer(struct spi_oob_transfer *xfer) +{ } + +static inline +int spi_mmap_oob_transfer(struct vm_area_struct *vma, + struct spi_oob_transfer *xfer) +{ + return -ENXIO; +} + +#endif + extern int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup, struct spi_delay *hold, diff --git a/kernel/include/linux/spinlock.h b/kernel/include/linux/spinlock.h index 7989784..311854f 100644 --- a/kernel/include/linux/spinlock.h +++ b/kernel/include/linux/spinlock.h @@ -97,21 +97,27 @@ struct lock_class_key *key, short inner); # define raw_spin_lock_init(lock) \ + LOCK_ALTERNATIVES(lock, spin_lock_init, \ do { \ static struct lock_class_key __key; \ \ - __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \ -} while (0) + __raw_spin_lock_init(__RAWLOCK(lock), #lock, &__key, LD_WAIT_SPIN); \ +} while (0)) #else # define raw_spin_lock_init(lock) \ - do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) + LOCK_ALTERNATIVES(lock, spin_lock_init, \ + do { *(__RAWLOCK(lock)) = __RAW_SPIN_LOCK_UNLOCKED(__RAWLOCK(lock)); } while (0)) #endif -#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) +#define raw_spin_is_locked(lock) \ + LOCK_ALTERNATIVES_RET(lock, spin_is_locked, \ + arch_spin_is_locked(&(__RAWLOCK(lock))->raw_lock)) #ifdef arch_spin_is_contended -#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) +#define raw_spin_is_contended(lock) \ + LOCK_ALTERNATIVES_RET(lock, spin_is_contended, \ + arch_spin_is_contended(&(__RAWLOCK(lock))->raw_lock)) #else #define raw_spin_is_contended(lock) (((void)(lock), 0)) #endif /*arch_spin_is_contended*/ @@ -220,13 +226,19 @@ * various methods are defined as nops in the case they are not * required. */ -#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) +#define raw_spin_trylock(lock) \ + __cond_lock(lock, \ + LOCK_ALTERNATIVES_RET(lock, \ + spin_trylock, _raw_spin_trylock(__RAWLOCK(lock)))) -#define raw_spin_lock(lock) _raw_spin_lock(lock) +#define raw_spin_lock(lock) \ + LOCK_ALTERNATIVES(lock, spin_lock, _raw_spin_lock(__RAWLOCK(lock))) #ifdef CONFIG_DEBUG_LOCK_ALLOC + # define raw_spin_lock_nested(lock, subclass) \ - _raw_spin_lock_nested(lock, subclass) + LOCK_ALTERNATIVES(lock, spin_lock_nested, \ + _raw_spin_lock_nested(__RAWLOCK(lock), subclass), subclass) # define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ @@ -239,18 +251,20 @@ * warns about set-but-not-used variables when building with * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. 
*/ -# define raw_spin_lock_nested(lock, subclass) \ - _raw_spin_lock(((void)(subclass), (lock))) +# define raw_spin_lock_nested(lock, subclass) \ + LOCK_ALTERNATIVES(lock, spin_lock_nested, \ + _raw_spin_lock(((void)(subclass), __RAWLOCK(lock))), subclass) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -#define raw_spin_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - flags = _raw_spin_lock_irqsave(lock); \ - } while (0) +#define raw_spin_lock_irqsave(lock, flags) \ + LOCK_ALTERNATIVES(lock, spin_lock_irqsave, \ + do { \ + typecheck(unsigned long, flags); \ + flags = _raw_spin_lock_irqsave(__RAWLOCK(lock)); \ + } while (0), flags) #ifdef CONFIG_DEBUG_LOCK_ALLOC #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ @@ -268,45 +282,55 @@ #else -#define raw_spin_lock_irqsave(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _raw_spin_lock_irqsave(lock, flags); \ - } while (0) +#define raw_spin_lock_irqsave(lock, flags) \ + LOCK_ALTERNATIVES(lock, spin_lock_irqsave, \ + do { \ + typecheck(unsigned long, flags); \ + _raw_spin_lock_irqsave(__RAWLOCK(lock), flags); \ + } while (0), flags) #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ raw_spin_lock_irqsave(lock, flags) #endif -#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) +#define raw_spin_lock_irq(lock) \ + LOCK_ALTERNATIVES(lock, spin_lock_irq, \ + _raw_spin_lock_irq(__RAWLOCK(lock))) #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) -#define raw_spin_unlock(lock) _raw_spin_unlock(lock) -#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) +#define raw_spin_unlock(lock) \ + LOCK_ALTERNATIVES(lock, spin_unlock, \ + _raw_spin_unlock(__RAWLOCK(lock))) +#define raw_spin_unlock_irq(lock) \ + LOCK_ALTERNATIVES(lock, spin_unlock_irq, \ + _raw_spin_unlock_irq(__RAWLOCK(lock))) -#define raw_spin_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ - _raw_spin_unlock_irqrestore(lock, flags); \ - } while (0) +#define raw_spin_unlock_irqrestore(lock, flags) \ + LOCK_ALTERNATIVES(lock, spin_unlock_irqrestore, \ + do { \ + typecheck(unsigned long, flags); \ + _raw_spin_unlock_irqrestore(__RAWLOCK(lock), flags); \ + } while (0), flags) #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) #define raw_spin_trylock_bh(lock) \ __cond_lock(lock, _raw_spin_trylock_bh(lock)) #define raw_spin_trylock_irq(lock) \ + LOCK_ALTERNATIVES_RET(lock, spin_trylock_irq, \ ({ \ local_irq_disable(); \ - raw_spin_trylock(lock) ? \ + raw_spin_trylock(__RAWLOCK(lock)) ? \ 1 : ({ local_irq_enable(); 0; }); \ -}) +})) #define raw_spin_trylock_irqsave(lock, flags) \ + LOCK_ALTERNATIVES_RET(lock, spin_trylock_irqsave, \ ({ \ local_irq_save(flags); \ - raw_spin_trylock(lock) ? \ + raw_spin_trylock(__RAWLOCK(lock)) ? \ 1 : ({ local_irq_restore(flags); 0; }); \ -}) +}), flags) /* Include rwlock functions */ #include <linux/rwlock.h> @@ -320,12 +344,20 @@ # include <linux/spinlock_api_up.h> #endif +/* Pull the lock types specific to the IRQ pipeline. 
*/ +#ifdef CONFIG_IRQ_PIPELINE +#include <linux/spinlock_pipeline.h> +#else +static inline void check_spinlock_context(void) { } +#endif + /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) { + check_spinlock_context(); return &lock->rlock; } diff --git a/kernel/include/linux/spinlock_api_up.h b/kernel/include/linux/spinlock_api_up.h index d0d1888..6895779 100644 --- a/kernel/include/linux/spinlock_api_up.h +++ b/kernel/include/linux/spinlock_api_up.h @@ -30,20 +30,32 @@ #define __LOCK(lock) \ do { preempt_disable(); ___LOCK(lock); } while (0) +#define __HARD_LOCK(lock) \ + do { ___LOCK(lock); } while (0) + #define __LOCK_BH(lock) \ do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0) #define __LOCK_IRQ(lock) \ do { local_irq_disable(); __LOCK(lock); } while (0) +#define __HARD_LOCK_IRQ(lock) \ + do { hard_local_irq_disable(); __HARD_LOCK(lock); } while (0) + #define __LOCK_IRQSAVE(lock, flags) \ do { local_irq_save(flags); __LOCK(lock); } while (0) + +#define __HARD_LOCK_IRQSAVE(lock, flags) \ + do { flags = hard_local_irq_save(); __HARD_LOCK(lock); } while (0) #define ___UNLOCK(lock) \ do { __release(lock); (void)(lock); } while (0) #define __UNLOCK(lock) \ do { preempt_enable(); ___UNLOCK(lock); } while (0) + +#define __HARD_UNLOCK(lock) \ + do { ___UNLOCK(lock); } while (0) #define __UNLOCK_BH(lock) \ do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \ @@ -52,9 +64,15 @@ #define __UNLOCK_IRQ(lock) \ do { local_irq_enable(); __UNLOCK(lock); } while (0) +#define __HARD_UNLOCK_IRQ(lock) \ + do { hard_local_irq_enable(); __HARD_UNLOCK(lock); } while (0) + #define __UNLOCK_IRQRESTORE(lock, flags) \ do { local_irq_restore(flags); __UNLOCK(lock); } while (0) +#define __HARD_UNLOCK_IRQRESTORE(lock, flags) \ + do { hard_local_irq_restore(flags); __HARD_UNLOCK(lock); } while (0) + #define _raw_spin_lock(lock) __LOCK(lock) #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) #define _raw_read_lock(lock) __LOCK(lock) diff --git a/kernel/include/linux/spinlock_pipeline.h b/kernel/include/linux/spinlock_pipeline.h new file mode 100644 index 0000000..1652735 --- /dev/null +++ b/kernel/include/linux/spinlock_pipeline.h @@ -0,0 +1,387 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>. + */ +#ifndef __LINUX_SPINLOCK_PIPELINE_H +#define __LINUX_SPINLOCK_PIPELINE_H + +#ifndef __LINUX_SPINLOCK_H +# error "Please don't include this file directly. Use spinlock.h." +#endif + +#include <dovetail/spinlock.h> + +#define hard_spin_lock_irqsave(__rlock, __flags) \ + do { \ + (__flags) = __hard_spin_lock_irqsave(__rlock); \ + } while (0) + +#define hard_spin_trylock_irqsave(__rlock, __flags) \ + ({ \ + int __locked; \ + (__flags) = __hard_spin_trylock_irqsave(__rlock, &__locked); \ + __locked; \ + }) + +#define hybrid_spin_lock_init(__rlock) hard_spin_lock_init(__rlock) + +/* + * CAUTION: We don't want the hand-coded irq-enable of + * do_raw_spin_lock_flags(), hard locked sections assume that + * interrupts are not re-enabled during lock-acquire. 
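A sketch
+ * of the intended pairing (illustration only, rlock being some raw
+ * spinlock):
+ *
+ *	unsigned long flags;
+ *
+ *	hard_spin_lock_irqsave(&rlock, flags);
+ *	... hard IRQs remain off throughout ...
+ *	hard_spin_unlock_irqrestore(&rlock, flags);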
+ */ +#define hard_lock_acquire(__rlock, __try, __ip) \ + do { \ + hard_spin_lock_prepare(__rlock); \ + if (irq_pipeline_debug_locking()) { \ + spin_acquire(&(__rlock)->dep_map, 0, __try, __ip); \ + LOCK_CONTENDED(__rlock, do_raw_spin_trylock, do_raw_spin_lock); \ + } else { \ + do_raw_spin_lock(__rlock); \ + } \ + } while (0) + +#define hard_lock_acquire_nested(__rlock, __subclass, __ip) \ + do { \ + hard_spin_lock_prepare(__rlock); \ + if (irq_pipeline_debug_locking()) { \ + spin_acquire(&(__rlock)->dep_map, __subclass, 0, __ip); \ + LOCK_CONTENDED(__rlock, do_raw_spin_trylock, do_raw_spin_lock); \ + } else { \ + do_raw_spin_lock(__rlock); \ + } \ + } while (0) + +#define hard_trylock_acquire(__rlock, __try, __ip) \ + do { \ + if (irq_pipeline_debug_locking()) \ + spin_acquire(&(__rlock)->dep_map, 0, __try, __ip); \ + } while (0) + +#define hard_lock_release(__rlock, __ip) \ + do { \ + if (irq_pipeline_debug_locking()) \ + spin_release(&(__rlock)->dep_map, __ip); \ + do_raw_spin_unlock(__rlock); \ + hard_spin_unlock_finish(__rlock); \ + } while (0) + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + +#ifdef CONFIG_DEBUG_SPINLOCK +/* + * Hard spinlocks are not checked for invalid wait context in-band + * wise (LD_WAIT_INV). We could be smarter with handling a specific + * wait type for them, so that we could detect hard_spin_lock -> + * {raw_}spin_lock for instance, but we already have + * check_inband_stage() calls all over the place in the latter API, so + * that kind of misusage would be detected regardless. + */ +#define hard_spin_lock_init(__lock) \ + do { \ + static struct lock_class_key __key; \ + __raw_spin_lock_init((raw_spinlock_t *)__lock, #__lock, &__key, LD_WAIT_INV); \ + } while (0) +#else +#define hard_spin_lock_init(__rlock) \ + do { *(__rlock) = __RAW_SPIN_LOCK_UNLOCKED(__rlock); } while (0) +#endif + +/* + * XXX: no preempt_enable/disable when hard locking. 
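Unlike
+ * raw_spin_lock(), these helpers leave the preempt count untouched:
+ * the section runs with hard IRQs off, which already bars migration.
+ * E.g. (sketch):
+ *
+ *	hard_spin_lock(&rlock);		no preempt_disable() behind this
+ *	...
+ *	hard_spin_unlock(&rlock);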
+ */ + +static inline +void hard_spin_lock(struct raw_spinlock *rlock) +{ + hard_lock_acquire(rlock, 0, _THIS_IP_); +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static inline +void hard_spin_lock_nested(struct raw_spinlock *rlock, int subclass) +{ + hard_lock_acquire_nested(rlock, subclass, _THIS_IP_); +} +#else +static inline +void hard_spin_lock_nested(struct raw_spinlock *rlock, int subclass) +{ + hard_spin_lock(rlock); +} +#endif + +static inline +void hard_spin_unlock(struct raw_spinlock *rlock) +{ + hard_lock_release(rlock, _THIS_IP_); +} + +static inline +void hard_spin_lock_irq(struct raw_spinlock *rlock) +{ + hard_local_irq_disable(); + hard_lock_acquire(rlock, 0, _THIS_IP_); +} + +static inline +void hard_spin_unlock_irq(struct raw_spinlock *rlock) +{ + hard_lock_release(rlock, _THIS_IP_); + hard_local_irq_enable(); +} + +static inline +void hard_spin_unlock_irqrestore(struct raw_spinlock *rlock, + unsigned long flags) +{ + hard_lock_release(rlock, _THIS_IP_); + hard_local_irq_restore(flags); +} + +static inline +unsigned long __hard_spin_lock_irqsave(struct raw_spinlock *rlock) +{ + unsigned long flags = hard_local_irq_save(); + + hard_lock_acquire(rlock, 0, _THIS_IP_); + + return flags; +} + +static inline +int hard_spin_trylock(struct raw_spinlock *rlock) +{ + hard_spin_trylock_prepare(rlock); + + if (do_raw_spin_trylock(rlock)) { + hard_trylock_acquire(rlock, 1, _THIS_IP_); + return 1; + } + + hard_spin_trylock_fail(rlock); + + return 0; +} + +static inline +unsigned long __hard_spin_trylock_irqsave(struct raw_spinlock *rlock, + int *locked) +{ + unsigned long flags = hard_local_irq_save(); + *locked = hard_spin_trylock(rlock); + return *locked ? flags : ({ hard_local_irq_restore(flags); flags; }); +} + +static inline +int hard_spin_trylock_irq(struct raw_spinlock *rlock) +{ + hard_local_irq_disable(); + return hard_spin_trylock(rlock) ? 
: ({ hard_local_irq_enable(); 0; }); +} + +static inline +int hard_spin_is_locked(struct raw_spinlock *rlock) +{ + return arch_spin_is_locked(&rlock->raw_lock); +} + +static inline +int hard_spin_is_contended(struct raw_spinlock *rlock) +{ +#ifdef CONFIG_GENERIC_LOCKBREAK + return rlock->break_lock; +#elif defined(arch_spin_is_contended) + return arch_spin_is_contended(&rlock->raw_lock); +#else + return 0; +#endif +} + +#else /* !SMP && !DEBUG_SPINLOCK */ + +#define hard_spin_lock_init(__rlock) do { (void)(__rlock); } while (0) +#define hard_spin_lock(__rlock) __HARD_LOCK(__rlock) +#define hard_spin_lock_nested(__rlock, __subclass) \ + do { __HARD_LOCK(__rlock); (void)(__subclass); } while (0) +#define hard_spin_unlock(__rlock) __HARD_UNLOCK(__rlock) +#define hard_spin_lock_irq(__rlock) __HARD_LOCK_IRQ(__rlock) +#define hard_spin_unlock_irq(__rlock) __HARD_UNLOCK_IRQ(__rlock) +#define hard_spin_unlock_irqrestore(__rlock, __flags) \ + __HARD_UNLOCK_IRQRESTORE(__rlock, __flags) +#define __hard_spin_lock_irqsave(__rlock) \ + ({ \ + unsigned long __flags; \ + __HARD_LOCK_IRQSAVE(__rlock, __flags); \ + __flags; \ + }) +#define __hard_spin_trylock_irqsave(__rlock, __locked) \ + ({ \ + unsigned long __flags; \ + __HARD_LOCK_IRQSAVE(__rlock, __flags); \ + *(__locked) = 1; \ + __flags; \ + }) +#define hard_spin_trylock(__rlock) ({ __HARD_LOCK(__rlock); 1; }) +#define hard_spin_trylock_irq(__rlock) ({ __HARD_LOCK_IRQ(__rlock); 1; }) +#define hard_spin_is_locked(__rlock) ((void)(__rlock), 0) +#define hard_spin_is_contended(__rlock) ((void)(__rlock), 0) +#endif /* !SMP && !DEBUG_SPINLOCK */ + +/* + * In the pipeline entry context, the regular preemption and root + * stall logic do not apply since we may actually have preempted any + * critical section of the kernel which is protected by regular + * locking (spin or stall), or we may even have preempted the + * out-of-band stage. Therefore, we just need to grab the raw spinlock + * underlying a hybrid spinlock to exclude other CPUs. + * + * NOTE: When entering the pipeline, IRQs are already hard disabled. 
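The net effect can be
+ * sketched as (illustration only):
+ *
+ *	hybrid_spin_lock(&rlock);
+ *		in_pipeline() -> grab the raw lock only
+ *		otherwise     -> usual stall/preempt handling, then grab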
+ */ + +void __hybrid_spin_lock(struct raw_spinlock *rlock); +void __hybrid_spin_lock_nested(struct raw_spinlock *rlock, int subclass); + +static inline void hybrid_spin_lock(struct raw_spinlock *rlock) +{ + if (in_pipeline()) + hard_lock_acquire(rlock, 0, _THIS_IP_); + else + __hybrid_spin_lock(rlock); +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static inline +void hybrid_spin_lock_nested(struct raw_spinlock *rlock, int subclass) +{ + if (in_pipeline()) + hard_lock_acquire_nested(rlock, subclass, _THIS_IP_); + else + __hybrid_spin_lock_nested(rlock, subclass); +} +#else +static inline +void hybrid_spin_lock_nested(struct raw_spinlock *rlock, int subclass) +{ + hybrid_spin_lock(rlock); +} +#endif + +void __hybrid_spin_unlock(struct raw_spinlock *rlock); + +static inline void hybrid_spin_unlock(struct raw_spinlock *rlock) +{ + if (in_pipeline()) + hard_lock_release(rlock, _THIS_IP_); + else + __hybrid_spin_unlock(rlock); +} + +void __hybrid_spin_lock_irq(struct raw_spinlock *rlock); + +static inline void hybrid_spin_lock_irq(struct raw_spinlock *rlock) +{ + if (in_pipeline()) + hard_lock_acquire(rlock, 0, _THIS_IP_); + else + __hybrid_spin_lock_irq(rlock); +} + +void __hybrid_spin_unlock_irq(struct raw_spinlock *rlock); + +static inline void hybrid_spin_unlock_irq(struct raw_spinlock *rlock) +{ + if (in_pipeline()) + hard_lock_release(rlock, _THIS_IP_); + else + __hybrid_spin_unlock_irq(rlock); +} + +unsigned long __hybrid_spin_lock_irqsave(struct raw_spinlock *rlock); + +#define hybrid_spin_lock_irqsave(__rlock, __flags) \ + do { \ + if (in_pipeline()) { \ + hard_lock_acquire(__rlock, 0, _THIS_IP_); \ + (__flags) = hard_local_save_flags(); \ + } else \ + (__flags) = __hybrid_spin_lock_irqsave(__rlock); \ + } while (0) + +void __hybrid_spin_unlock_irqrestore(struct raw_spinlock *rlock, + unsigned long flags); + +static inline void hybrid_spin_unlock_irqrestore(struct raw_spinlock *rlock, + unsigned long flags) +{ + + if (in_pipeline()) + hard_lock_release(rlock, _THIS_IP_); + else + __hybrid_spin_unlock_irqrestore(rlock, flags); +} + +int __hybrid_spin_trylock(struct raw_spinlock *rlock); + +static inline int hybrid_spin_trylock(struct raw_spinlock *rlock) +{ + if (in_pipeline()) { + hard_spin_trylock_prepare(rlock); + if (do_raw_spin_trylock(rlock)) { + hard_trylock_acquire(rlock, 1, _THIS_IP_); + return 1; + } + hard_spin_trylock_fail(rlock); + return 0; + } + + return __hybrid_spin_trylock(rlock); +} + +int __hybrid_spin_trylock_irqsave(struct raw_spinlock *rlock, + unsigned long *flags); + +#define hybrid_spin_trylock_irqsave(__rlock, __flags) \ + ({ \ + int __ret = 1; \ + if (in_pipeline()) { \ + hard_spin_trylock_prepare(__rlock); \ + if (do_raw_spin_trylock(__rlock)) { \ + hard_trylock_acquire(__rlock, 1, _THIS_IP_); \ + (__flags) = hard_local_save_flags(); \ + } else { \ + hard_spin_trylock_fail(__rlock); \ + __ret = 0; \ + } \ + } else { \ + __ret = __hybrid_spin_trylock_irqsave(__rlock, &(__flags)); \ + } \ + __ret; \ + }) + +static inline int hybrid_spin_trylock_irq(struct raw_spinlock *rlock) +{ + unsigned long flags; + return hybrid_spin_trylock_irqsave(rlock, flags); +} + +static inline +int hybrid_spin_is_locked(struct raw_spinlock *rlock) +{ + return hard_spin_is_locked(rlock); +} + +static inline +int hybrid_spin_is_contended(struct raw_spinlock *rlock) +{ + return hard_spin_is_contended(rlock); +} + +#ifdef CONFIG_DEBUG_IRQ_PIPELINE +void check_spinlock_context(void); +#else +static inline void check_spinlock_context(void) { } +#endif + +#endif /* __LINUX_SPINLOCK_PIPELINE_H */ 
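As an aside, here is a hedged sketch of how a driver built on top of this patch could share state between an out-of-band handler and inband code through a hard lock; demo_lock, demo_state and demo_oob_handler are made-up names, and the generic raw_spin_*() entry points dispatch to the hard_spin_*() variants via LOCK_ALTERNATIVES() from spinlock_types.h below:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_HARD_SPINLOCK(demo_lock);
static int demo_state;

/* Runs from the oob stage, hard IRQs off: raw acquire only. */
static irqreturn_t demo_oob_handler(int irq, void *dev_id)
{
	raw_spin_lock(&demo_lock);		/* -> hard_spin_lock() */
	demo_state++;
	raw_spin_unlock(&demo_lock);		/* -> hard_spin_unlock() */
	return IRQ_HANDLED;
}

/* Inband side uses the hard-IRQ-safe variant of the same API. */
static void demo_inband_reset(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags); /* -> hard_spin_lock_irqsave() */
	demo_state = 0;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}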
diff --git a/kernel/include/linux/spinlock_types.h b/kernel/include/linux/spinlock_types.h index b981caa..c385825 100644 --- a/kernel/include/linux/spinlock_types.h +++ b/kernel/include/linux/spinlock_types.h @@ -43,9 +43,15 @@ .name = #lockname, \ .wait_type_inner = LD_WAIT_CONFIG, \ } +# define HARD_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_INV, \ + } #else # define RAW_SPIN_DEP_MAP_INIT(lockname) # define SPIN_DEP_MAP_INIT(lockname) +# define HARD_SPIN_DEP_MAP_INIT(lockname) #endif #ifdef CONFIG_DEBUG_SPINLOCK @@ -96,6 +102,154 @@ #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#ifdef CONFIG_IRQ_PIPELINE + +void __bad_spinlock_type(void); + +#define __RAWLOCK(x) ((struct raw_spinlock *)(x)) + +#define LOCK_ALTERNATIVES(__lock, __base_op, __raw_form, __args...) \ + do { \ + if (__builtin_types_compatible_p(typeof(__lock), \ + raw_spinlock_t *)) \ + __raw_form; \ + else if (__builtin_types_compatible_p(typeof(__lock), \ + hard_spinlock_t *)) \ + hard_ ## __base_op(__RAWLOCK(__lock), ##__args); \ + else if (__builtin_types_compatible_p(typeof(__lock), \ + hybrid_spinlock_t *)) \ + hybrid_ ## __base_op(__RAWLOCK(__lock), ##__args); \ + else \ + __bad_spinlock_type(); \ + } while (0) + +#define LOCK_ALTERNATIVES_RET(__lock, __base_op, __raw_form, __args...) \ + ({ \ + long __ret = 0; \ + if (__builtin_types_compatible_p(typeof(__lock), \ + raw_spinlock_t *)) \ + __ret = __raw_form; \ + else if (__builtin_types_compatible_p(typeof(__lock), \ + hard_spinlock_t *)) \ + __ret = hard_ ## __base_op(__RAWLOCK(__lock), ##__args); \ + else if (__builtin_types_compatible_p(typeof(__lock), \ + hybrid_spinlock_t *)) \ + __ret = hybrid_ ## __base_op(__RAWLOCK(__lock), ##__args); \ + else \ + __bad_spinlock_type(); \ + __ret; \ + }) + +#define LOCKDEP_ALT_DEPMAP(__lock) \ + ({ \ + struct lockdep_map *__ret; \ + if (__builtin_types_compatible_p(typeof(&(__lock)->dep_map), \ + struct phony_lockdep_map *)) \ + __ret = &__RAWLOCK(__lock)->dep_map; \ + else \ + __ret = (struct lockdep_map *)(&(__lock)->dep_map); \ + __ret; \ + }) + +#define LOCKDEP_HARD_DEBUG(__lock, __nodebug, __debug) \ + do { \ + if (__builtin_types_compatible_p(typeof(__lock), \ + raw_spinlock_t *) || \ + irq_pipeline_debug_locking()) { \ + __debug; \ + } else { \ + __nodebug; \ + } \ + } while (0) + +#define LOCKDEP_HARD_DEBUG_RET(__lock, __nodebug, __debug) \ + ({ \ + typeof(__nodebug) __ret; \ + if (__builtin_types_compatible_p(typeof(__lock), \ + raw_spinlock_t *) || \ + irq_pipeline_debug_locking()) { \ + __ret = (__debug); \ + } else { \ + __ret = (__nodebug); \ + } \ + __ret; \ + }) + +#define __HARD_SPIN_LOCK_INITIALIZER(x) { \ + .rlock = { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(x) \ + HARD_SPIN_DEP_MAP_INIT(x) \ + } \ + } + +#define __HARD_SPIN_LOCK_UNLOCKED(x) \ + (hard_spinlock_t) __HARD_SPIN_LOCK_INITIALIZER(x) + +#define DEFINE_HARD_SPINLOCK(x) hard_spinlock_t x = __HARD_SPIN_LOCK_UNLOCKED(x) + +#define DECLARE_HARD_SPINLOCK(x) hard_spinlock_t x + +/* + * The presence of a phony depmap is tested by LOCKDEP_ALT_DEPMAP() to + * locate the real depmap without enumerating every spinlock type + * which may contain one. 
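E.g. (sketch,
+ * demo being a made-up lock name):
+ *
+ *	DEFINE_HARD_SPINLOCK(demo);
+ *	lockdep_assert_held(&demo);
+ *
+ * resolves through LOCKDEP_ALT_DEPMAP() to the dep_map embedded in
+ * demo.rlock, whereas a plain raw_spinlock_t keeps using its own map.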
+ */ +struct phony_lockdep_map { }; + +typedef struct hard_spinlock { + /* XXX: offset_of(struct hard_spinlock, rlock) == 0 */ + struct raw_spinlock rlock; + struct phony_lockdep_map dep_map; +} hard_spinlock_t; + +#define DEFINE_MUTABLE_SPINLOCK(x) hybrid_spinlock_t x = { \ + .rlock = __RAW_SPIN_LOCK_UNLOCKED(x), \ + } + +#define DECLARE_MUTABLE_SPINLOCK(x) hybrid_spinlock_t x + +typedef struct hybrid_spinlock { + /* XXX: offset_of(struct hybrid_spinlock, rlock) == 0 */ + struct raw_spinlock rlock; + unsigned long hwflags; + struct phony_lockdep_map dep_map; +} hybrid_spinlock_t; + +#else + +typedef raw_spinlock_t hard_spinlock_t; + +typedef raw_spinlock_t hybrid_spinlock_t; + +#define LOCK_ALTERNATIVES(__lock, __base_op, __raw_form, __args...) \ + __raw_form + +#define LOCK_ALTERNATIVES_RET(__lock, __base_op, __raw_form, __args...) \ + __raw_form + +#define LOCKDEP_ALT_DEPMAP(__lock) (&(__lock)->dep_map) + +#define LOCKDEP_HARD_DEBUG(__lock, __nondebug, __debug) do { __debug; } while (0) + +#define LOCKDEP_HARD_DEBUG_RET(__lock, __nondebug, __debug) ({ __debug; }) + +#define DEFINE_HARD_SPINLOCK(x) DEFINE_RAW_SPINLOCK(x) + +#define DECLARE_HARD_SPINLOCK(x) raw_spinlock_t x + +#define DEFINE_MUTABLE_SPINLOCK(x) DEFINE_RAW_SPINLOCK(x) + +#define DECLARE_MUTABLE_SPINLOCK(x) raw_spinlock_t x + +#define __RAWLOCK(x) (x) + +#define __HARD_SPIN_LOCK_UNLOCKED(__lock) __RAW_SPIN_LOCK_UNLOCKED(__lock) + +#define __HARD_SPIN_LOCK_INITIALIZER(__lock) __RAW_SPIN_LOCK_UNLOCKED(__lock) + +#endif /* CONFIG_IRQ_PIPELINE */ + #include <linux/rwlock_types.h> #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/kernel/include/linux/stop_machine.h b/kernel/include/linux/stop_machine.h index ddafb3c..82c56df 100644 --- a/kernel/include/linux/stop_machine.h +++ b/kernel/include/linux/stop_machine.h @@ -6,6 +6,7 @@ #include <linux/cpumask.h> #include <linux/smp.h> #include <linux/list.h> +#include <linux/interrupt.h> /* * stop_cpu[s]() is simplistic per-cpu maximum priority cpu @@ -143,7 +144,9 @@ unsigned long flags; int ret; local_irq_save(flags); + hard_irq_disable(); ret = fn(data); + hard_irq_enable(); local_irq_restore(flags); return ret; } diff --git a/kernel/include/linux/thread_info.h b/kernel/include/linux/thread_info.h index f3040b0..83187b9 100644 --- a/kernel/include/linux/thread_info.h +++ b/kernel/include/linux/thread_info.h @@ -170,6 +170,72 @@ static inline void arch_setup_new_exec(void) { } #endif +#ifdef ti_local_flags +/* + * If the arch defines a set of per-thread synchronous flags, provide + * generic accessors to them. 
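For instance
+ * (sketch), the Dovetail bits rely on these accessors, as in
+ * task_is_off_stage() from the sched.h hunk earlier in this patch:
+ *
+ *	if (test_ti_local_flags(task_thread_info(p), _TLF_OFFSTAGE))
+ *		...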
+ */ +static __always_inline +void set_ti_local_flags(struct thread_info *ti, unsigned int mask) +{ + ti_local_flags(ti) |= mask; +} + +static __always_inline void set_thread_local_flags(unsigned int mask) +{ + set_ti_local_flags(current_thread_info(), mask); +} + +static __always_inline +int test_and_set_ti_local_flags(struct thread_info *ti, unsigned int mask) +{ + int old = ti_local_flags(ti) & mask; + ti_local_flags(ti) |= mask; + return old != 0; +} + +static __always_inline int test_and_set_thread_local_flags(unsigned int mask) +{ + return test_and_set_ti_local_flags(current_thread_info(), mask); +} + +static __always_inline +void clear_ti_local_flags(struct thread_info *ti, unsigned int mask) +{ + ti_local_flags(ti) &= ~mask; +} + +static __always_inline +int test_and_clear_ti_local_flags(struct thread_info *ti, unsigned int mask) +{ + int old = ti_local_flags(ti) & mask; + ti_local_flags(ti) &= ~mask; + return old != 0; +} + +static __always_inline int test_and_clear_thread_local_flags(unsigned int mask) +{ + return test_and_clear_ti_local_flags(current_thread_info(), mask); +} + +static __always_inline void clear_thread_local_flags(unsigned int mask) +{ + clear_ti_local_flags(current_thread_info(), mask); +} + +static __always_inline +bool test_ti_local_flags(struct thread_info *ti, unsigned int mask) +{ + return (ti_local_flags(ti) & mask) != 0; +} + +static __always_inline bool test_thread_local_flags(unsigned int mask) +{ + return test_ti_local_flags(current_thread_info(), mask); +} + +#endif /* ti_local_flags */ + #endif /* __KERNEL__ */ #endif /* _LINUX_THREAD_INFO_H */ diff --git a/kernel/include/linux/tick.h b/kernel/include/linux/tick.h index 7340613..b9896c0 100644 --- a/kernel/include/linux/tick.h +++ b/kernel/include/linux/tick.h @@ -20,6 +20,14 @@ extern void tick_resume_local(void); extern void tick_handover_do_timer(void); extern void tick_cleanup_dead_cpu(int cpu); + +#ifdef CONFIG_IRQ_PIPELINE +int tick_install_proxy(void (*setup_proxy)(struct clock_proxy_device *dev), + const struct cpumask *cpumask); +void tick_uninstall_proxy(const struct cpumask *cpumask); +void tick_notify_proxy(void); +#endif + #else /* CONFIG_GENERIC_CLOCKEVENTS */ static inline void tick_init(void) { } static inline void tick_suspend_local(void) { } diff --git a/kernel/include/linux/tracepoint.h b/kernel/include/linux/tracepoint.h index c51a002..c8629cd 100644 --- a/kernel/include/linux/tracepoint.h +++ b/kernel/include/linux/tracepoint.h @@ -175,6 +175,10 @@ * The reason for this is to handle the "void" prototype. If a tracepoint * has a "void" prototype, then it is invalid to declare a function * as "(void *, void)". + * + * IRQ pipeline: we may not depend on RCU for data which may be + * manipulated from the out-of-band stage, so rcuidle has to be false + * if running_oob(). 
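Sketch of the resulting
+ * expansion (matching the __DECLARE_TRACE_RCU() change below):
+ *
+ *	__DO_TRACE(name, proto, args, cond, running_inband());
+ *
+ * i.e. the rcuidle path is only ever taken from the inband stage.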
*/ #define __DO_TRACE(name, proto, args, cond, rcuidle) \ do { \ @@ -223,7 +227,7 @@ __DO_TRACE(name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args), \ - TP_CONDITION(cond), 1); \ + TP_CONDITION(cond), running_inband()); \ } #else #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) diff --git a/kernel/include/linux/vmalloc.h b/kernel/include/linux/vmalloc.h index 167a953..761ed17 100644 --- a/kernel/include/linux/vmalloc.h +++ b/kernel/include/linux/vmalloc.h @@ -244,6 +244,7 @@ int register_vmap_purge_notifier(struct notifier_block *nb); int unregister_vmap_purge_notifier(struct notifier_block *nb); +void arch_advertise_page_mapping(unsigned long start, unsigned long end); /* Allow disabling lazy TLB flushing */ extern bool lazy_vunmap_enable; diff --git a/kernel/include/linux/wakeup_reason.h b/kernel/include/linux/wakeup_reason.h index 54f5caa..2fe0fd0 100644 --- a/kernel/include/linux/wakeup_reason.h +++ b/kernel/include/linux/wakeup_reason.h @@ -20,7 +20,7 @@ #define MAX_SUSPEND_ABORT_LEN 256 -#ifdef CONFIG_SUSPEND +#if IS_ENABLED(CONFIG_SUSPEND) && !IS_ENABLED(CONFIG_DOVETAIL) void log_irq_wakeup_reason(int irq); void log_threaded_irq_wakeup_reason(int irq, int parent_irq); void log_suspend_abort_reason(const char *fmt, ...); diff --git a/kernel/include/linux/xenomai/wrappers.h b/kernel/include/linux/xenomai/wrappers.h new file mode 120000 index 0000000..3cdb0a9 --- /dev/null +++ b/kernel/include/linux/xenomai/wrappers.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h \ No newline at end of file diff --git a/kernel/include/net/netoob.h b/kernel/include/net/netoob.h new file mode 100644 index 0000000..907376a --- /dev/null +++ b/kernel/include/net/netoob.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NET_OOBNET_H +#define _NET_OOBNET_H + +#include <dovetail/netdevice.h> + +/* Device supports direct out-of-band operations (RX & TX) */ +#define IFF_OOB_CAPABLE BIT(0) +/* Device is an out-of-band port */ +#define IFF_OOB_PORT BIT(1) + +struct oob_netdev_context { + int flags; + struct oob_netdev_state dev_state; +}; + +#endif /* !_NET_OOBNET_H */ diff --git a/kernel/include/net/sock.h b/kernel/include/net/sock.h index c604052..ea5f2fa 100644 --- a/kernel/include/net/sock.h +++ b/kernel/include/net/sock.h @@ -540,6 +540,9 @@ ANDROID_KABI_RESERVE(8); ANDROID_OEM_DATA(1); +#ifdef CONFIG_NET_OOB + void *oob_data; +#endif }; enum sk_pacing { diff --git a/kernel/include/trace/events/cobalt-core.h b/kernel/include/trace/events/cobalt-core.h new file mode 120000 index 0000000..735e8e8 --- /dev/null +++ b/kernel/include/trace/events/cobalt-core.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h \ No newline at end of file diff --git a/kernel/include/trace/events/cobalt-posix.h b/kernel/include/trace/events/cobalt-posix.h new file mode 120000 index 0000000..9dc0fe2 --- /dev/null +++ b/kernel/include/trace/events/cobalt-posix.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h \ No newline at end of file diff --git a/kernel/include/trace/events/cobalt-rtdm.h b/kernel/include/trace/events/cobalt-rtdm.h new file mode 120000 index 0000000..79c5693 --- /dev/null +++ b/kernel/include/trace/events/cobalt-rtdm.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h \ No newline at end of file diff --git a/kernel/include/trace/events/irq.h 
b/kernel/include/trace/events/irq.h index bb70f46..4642f45 100644 --- a/kernel/include/trace/events/irq.h +++ b/kernel/include/trace/events/irq.h @@ -100,6 +100,48 @@ __entry->irq, __entry->ret ? "handled" : "unhandled") ); +/** + * irq_pipeline_entry - called when an external irq enters the pipeline + * @irq: irq number + */ +TRACE_EVENT(irq_pipeline_entry, + + TP_PROTO(int irq), + + TP_ARGS(irq), + + TP_STRUCT__entry( + __field( int, irq ) + ), + + TP_fast_assign( + __entry->irq = irq; + ), + + TP_printk("irq=%d", __entry->irq) +); + +/** + * irq_pipeline_exit - called when an external irq leaves the pipeline + * @irq: irq number + */ +TRACE_EVENT(irq_pipeline_exit, + + TP_PROTO(int irq), + + TP_ARGS(irq), + + TP_STRUCT__entry( + __field( int, irq ) + ), + + TP_fast_assign( + __entry->irq = irq; + ), + + TP_printk("irq=%d", __entry->irq) +); + DECLARE_EVENT_CLASS(softirq, TP_PROTO(unsigned int vec_nr), diff --git a/kernel/include/uapi/asm-generic/dovetail.h b/kernel/include/uapi/asm-generic/dovetail.h new file mode 100644 index 0000000..795aa38 --- /dev/null +++ b/kernel/include/uapi/asm-generic/dovetail.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_GENERIC_DOVETAIL_H +#define __ASM_GENERIC_DOVETAIL_H + +#define __OOB_SYSCALL_BIT 0x10000000 + +#endif /* !__ASM_GENERIC_DOVETAIL_H */ diff --git a/kernel/include/uapi/asm-generic/fcntl.h b/kernel/include/uapi/asm-generic/fcntl.h index 9dc0bf0..11415c6 100644 --- a/kernel/include/uapi/asm-generic/fcntl.h +++ b/kernel/include/uapi/asm-generic/fcntl.h @@ -89,6 +89,15 @@ #define __O_TMPFILE 020000000 #endif +/* + * Tells the open call that out-of-band operations should be enabled + * for the file (if supported). Can also be passed along to socket(2) + * via the type argument as SOCK_OOB. + */ +#ifndef O_OOB +#define O_OOB 010000000000 +#endif + /* a horrid kludge trying to make sure that this will fail on old kernels */ #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY) #define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT) diff --git a/kernel/include/uapi/linux/clocksource.h b/kernel/include/uapi/linux/clocksource.h new file mode 100644 index 0000000..a0a1c27 --- /dev/null +++ b/kernel/include/uapi/linux/clocksource.h @@ -0,0 +1,33 @@ +/* + * Definitions for user-mappable clock sources. 
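+ *
+ * These types describe the layout of a clock source's readout
+ * registers, so that user-space code (e.g. the vDSO, or a program
+ * mapping the registers via CLKSRC_USER_MMIO_MAP below) can read
+ * timestamps without entering the kernel.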
+ *
+ * Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+#ifndef _UAPI_LINUX_CLOCKSOURCE_H
+#define _UAPI_LINUX_CLOCKSOURCE_H
+
+enum clksrc_user_mmio_type {
+	CLKSRC_MMIO_L_UP,
+	CLKSRC_MMIO_L_DOWN,
+	CLKSRC_MMIO_W_UP,
+	CLKSRC_MMIO_W_DOWN,
+	CLKSRC_DMMIO_L_UP,
+	CLKSRC_DMMIO_W_UP,
+
+	CLKSRC_MMIO_TYPE_NR,
+};
+
+struct clksrc_user_mmio_info {
+	enum clksrc_user_mmio_type type;
+	void *reg_lower;
+	unsigned int mask_lower;
+	unsigned int bits_lower;
+	void *reg_upper;
+	unsigned int mask_upper;
+};
+
+#define CLKSRC_USER_MMIO_MAX 16
+
+#define CLKSRC_USER_MMIO_MAP _IOWR(0xC1, 0, struct clksrc_user_mmio_info)
+
+#endif /* _UAPI_LINUX_CLOCKSOURCE_H */
diff --git a/kernel/include/vdso/datapage.h b/kernel/include/vdso/datapage.h
index 73eb622..e2cd1e8 100644
--- a/kernel/include/vdso/datapage.h
+++ b/kernel/include/vdso/datapage.h
@@ -106,9 +106,34 @@
 	u32 hrtimer_res;
 	u32 __unused;
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+	u32 cs_type_seq;
+	char cs_mmdev[16];
+#endif
+
 	struct arch_vdso_data arch_data;
 };
+#if defined(CONFIG_GENERIC_CLOCKSOURCE_VDSO) && !defined(ENABLE_COMPAT_VDSO)
+
+#include <linux/clocksource.h>
+
+struct clksrc_info;
+
+typedef u64 vdso_read_cycles_t(const struct clksrc_info *info);
+
+struct clksrc_info {
+	vdso_read_cycles_t *read_cycles;
+	struct clksrc_user_mmio_info mmio;
+};
+
+struct vdso_priv {
+	u32 current_cs_type_seq;
+	struct clksrc_info clksrc_info[CLOCKSOURCE_VDSO_MMIO + CLKSRC_USER_MMIO_MAX];
+};
+
+#endif /* CONFIG_GENERIC_CLOCKSOURCE_VDSO && !ENABLE_COMPAT_VDSO */
+
 /*
  * We use the hidden visibility to prevent the compiler from generating a GOT
  * relocation. Not only is going through a GOT useless (the entry couldn't and
diff --git a/kernel/include/xenomai/cobalt/kernel/ancillaries.h b/kernel/include/xenomai/cobalt/kernel/ancillaries.h
new file mode 120000
index 0000000..b1779f7
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/ancillaries.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/arith.h b/kernel/include/xenomai/cobalt/kernel/arith.h
new file mode 120000
index 0000000..02a5eaa
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/arith.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/assert.h b/kernel/include/xenomai/cobalt/kernel/assert.h
new file mode 120000
index 0000000..4764f28
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/assert.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/bufd.h b/kernel/include/xenomai/cobalt/kernel/bufd.h
new file mode 120000
index 0000000..dea345e
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/bufd.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/clock.h b/kernel/include/xenomai/cobalt/kernel/clock.h
new file mode 120000
index 0000000..bb75117
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/clock.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/compat.h b/kernel/include/xenomai/cobalt/kernel/compat.h
new file mode 120000
index 0000000..806f38c
--- /dev/null
+++
b/kernel/include/xenomai/cobalt/kernel/compat.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/heap.h b/kernel/include/xenomai/cobalt/kernel/heap.h new file mode 120000 index 0000000..bf74265 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/heap.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/init.h b/kernel/include/xenomai/cobalt/kernel/init.h new file mode 120000 index 0000000..769d0be --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/init.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/intr.h b/kernel/include/xenomai/cobalt/kernel/intr.h new file mode 120000 index 0000000..82c1f25 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/intr.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/list.h b/kernel/include/xenomai/cobalt/kernel/list.h new file mode 120000 index 0000000..811ee1d --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/list.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/lock.h b/kernel/include/xenomai/cobalt/kernel/lock.h new file mode 120000 index 0000000..8513e93 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/lock.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/map.h b/kernel/include/xenomai/cobalt/kernel/map.h new file mode 120000 index 0000000..114ea04 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/map.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/pipe.h b/kernel/include/xenomai/cobalt/kernel/pipe.h new file mode 120000 index 0000000..dda8199 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/pipe.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/ppd.h b/kernel/include/xenomai/cobalt/kernel/ppd.h new file mode 120000 index 0000000..7afa5ef --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/ppd.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/registry.h b/kernel/include/xenomai/cobalt/kernel/registry.h new file mode 120000 index 0000000..e92a257 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/registry.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/sched-idle.h b/kernel/include/xenomai/cobalt/kernel/sched-idle.h new file mode 120000 index 0000000..c882e34 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/sched-idle.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h \ No newline at end of file diff --git 
a/kernel/include/xenomai/cobalt/kernel/sched-quota.h b/kernel/include/xenomai/cobalt/kernel/sched-quota.h new file mode 120000 index 0000000..96dd8fa --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/sched-quota.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/sched-rt.h b/kernel/include/xenomai/cobalt/kernel/sched-rt.h new file mode 120000 index 0000000..c70900d --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/sched-rt.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/sched-sporadic.h b/kernel/include/xenomai/cobalt/kernel/sched-sporadic.h new file mode 120000 index 0000000..c4c1024 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/sched-sporadic.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/sched-tp.h b/kernel/include/xenomai/cobalt/kernel/sched-tp.h new file mode 120000 index 0000000..3ad87af --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/sched-tp.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/sched-weak.h b/kernel/include/xenomai/cobalt/kernel/sched-weak.h new file mode 120000 index 0000000..bba38c1 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/sched-weak.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/sched.h b/kernel/include/xenomai/cobalt/kernel/sched.h new file mode 120000 index 0000000..1f6c51f --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/sched.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/schedparam.h b/kernel/include/xenomai/cobalt/kernel/schedparam.h new file mode 120000 index 0000000..bf1c35a --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/schedparam.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/schedqueue.h b/kernel/include/xenomai/cobalt/kernel/schedqueue.h new file mode 120000 index 0000000..8c2abcb --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/schedqueue.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/select.h b/kernel/include/xenomai/cobalt/kernel/select.h new file mode 120000 index 0000000..37337d9 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/select.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/stat.h b/kernel/include/xenomai/cobalt/kernel/stat.h new file mode 120000 index 0000000..21ad687 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/stat.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/synch.h 
b/kernel/include/xenomai/cobalt/kernel/synch.h new file mode 120000 index 0000000..df9eaf7 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/synch.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/thread.h b/kernel/include/xenomai/cobalt/kernel/thread.h new file mode 120000 index 0000000..35fb1f0 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/thread.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/time.h b/kernel/include/xenomai/cobalt/kernel/time.h new file mode 120000 index 0000000..d85138d --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/time.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/timer.h b/kernel/include/xenomai/cobalt/kernel/timer.h new file mode 120000 index 0000000..b32c1d2 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/timer.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/trace.h b/kernel/include/xenomai/cobalt/kernel/trace.h new file mode 120000 index 0000000..e27bfc3 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/trace.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/tree.h b/kernel/include/xenomai/cobalt/kernel/tree.h new file mode 120000 index 0000000..bad47ea --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/tree.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/vdso.h b/kernel/include/xenomai/cobalt/kernel/vdso.h new file mode 120000 index 0000000..7cec828 --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/vdso.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/kernel/vfile.h b/kernel/include/xenomai/cobalt/kernel/vfile.h new file mode 120000 index 0000000..63e86ab --- /dev/null +++ b/kernel/include/xenomai/cobalt/kernel/vfile.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/asm-generic/arith.h b/kernel/include/xenomai/cobalt/uapi/asm-generic/arith.h new file mode 120000 index 0000000..c44382c --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/asm-generic/arith.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/asm-generic/features.h b/kernel/include/xenomai/cobalt/uapi/asm-generic/features.h new file mode 120000 index 0000000..b2baff9 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/asm-generic/features.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/asm-generic/syscall.h b/kernel/include/xenomai/cobalt/uapi/asm-generic/syscall.h new file mode 120000 index 
0000000..54b1276 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/asm-generic/syscall.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/cond.h b/kernel/include/xenomai/cobalt/uapi/cond.h new file mode 120000 index 0000000..52c870b --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/cond.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/corectl.h b/kernel/include/xenomai/cobalt/uapi/corectl.h new file mode 120000 index 0000000..b6747f6 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/corectl.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/event.h b/kernel/include/xenomai/cobalt/uapi/event.h new file mode 120000 index 0000000..dae845f --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/event.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/heap.h b/kernel/include/xenomai/cobalt/uapi/kernel/heap.h new file mode 120000 index 0000000..4b7b7e7 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/kernel/heap.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/limits.h b/kernel/include/xenomai/cobalt/uapi/kernel/limits.h new file mode 120000 index 0000000..b2d6b1a --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/kernel/limits.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/pipe.h b/kernel/include/xenomai/cobalt/uapi/kernel/pipe.h new file mode 120000 index 0000000..29e61ce --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/kernel/pipe.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/synch.h b/kernel/include/xenomai/cobalt/uapi/kernel/synch.h new file mode 120000 index 0000000..96af408 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/kernel/synch.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/thread.h b/kernel/include/xenomai/cobalt/uapi/kernel/thread.h new file mode 120000 index 0000000..41c6343 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/kernel/thread.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/trace.h b/kernel/include/xenomai/cobalt/uapi/kernel/trace.h new file mode 120000 index 0000000..f18e012 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/kernel/trace.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/types.h b/kernel/include/xenomai/cobalt/uapi/kernel/types.h new file mode 120000 index 0000000..740cd9e --- /dev/null +++ 
b/kernel/include/xenomai/cobalt/uapi/kernel/types.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/urw.h b/kernel/include/xenomai/cobalt/uapi/kernel/urw.h new file mode 120000 index 0000000..88675ba --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/kernel/urw.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/vdso.h b/kernel/include/xenomai/cobalt/uapi/kernel/vdso.h new file mode 120000 index 0000000..3679dc4 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/kernel/vdso.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/monitor.h b/kernel/include/xenomai/cobalt/uapi/monitor.h new file mode 120000 index 0000000..1fef3c2 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/monitor.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/mutex.h b/kernel/include/xenomai/cobalt/uapi/mutex.h new file mode 120000 index 0000000..aaea1b2 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/mutex.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/sched.h b/kernel/include/xenomai/cobalt/uapi/sched.h new file mode 120000 index 0000000..4b96766 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/sched.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/sem.h b/kernel/include/xenomai/cobalt/uapi/sem.h new file mode 120000 index 0000000..2284303 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/sem.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/signal.h b/kernel/include/xenomai/cobalt/uapi/signal.h new file mode 120000 index 0000000..8330646 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/signal.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/syscall.h b/kernel/include/xenomai/cobalt/uapi/syscall.h new file mode 120000 index 0000000..f3c6f55 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/syscall.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/thread.h b/kernel/include/xenomai/cobalt/uapi/thread.h new file mode 120000 index 0000000..23043e0 --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/thread.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h \ No newline at end of file diff --git a/kernel/include/xenomai/cobalt/uapi/time.h b/kernel/include/xenomai/cobalt/uapi/time.h new file mode 120000 index 0000000..60b0c9c --- /dev/null +++ b/kernel/include/xenomai/cobalt/uapi/time.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h \ No newline at end of file diff --git 
a/kernel/include/xenomai/linux/stdarg.h b/kernel/include/xenomai/linux/stdarg.h new file mode 120000 index 0000000..dc30f53 --- /dev/null +++ b/kernel/include/xenomai/linux/stdarg.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/clock.h b/kernel/include/xenomai/pipeline/clock.h new file mode 120000 index 0000000..0605361 --- /dev/null +++ b/kernel/include/xenomai/pipeline/clock.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/inband_work.h b/kernel/include/xenomai/pipeline/inband_work.h new file mode 120000 index 0000000..8c96199 --- /dev/null +++ b/kernel/include/xenomai/pipeline/inband_work.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/irq.h b/kernel/include/xenomai/pipeline/irq.h new file mode 120000 index 0000000..db68bdf --- /dev/null +++ b/kernel/include/xenomai/pipeline/irq.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/kevents.h b/kernel/include/xenomai/pipeline/kevents.h new file mode 120000 index 0000000..c441eb1 --- /dev/null +++ b/kernel/include/xenomai/pipeline/kevents.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/lock.h b/kernel/include/xenomai/pipeline/lock.h new file mode 120000 index 0000000..f0a97be --- /dev/null +++ b/kernel/include/xenomai/pipeline/lock.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/machine.h b/kernel/include/xenomai/pipeline/machine.h new file mode 120000 index 0000000..2bf5b78 --- /dev/null +++ b/kernel/include/xenomai/pipeline/machine.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/pipeline.h b/kernel/include/xenomai/pipeline/pipeline.h new file mode 120000 index 0000000..e050cd6 --- /dev/null +++ b/kernel/include/xenomai/pipeline/pipeline.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/sched.h b/kernel/include/xenomai/pipeline/sched.h new file mode 120000 index 0000000..fcb384a --- /dev/null +++ b/kernel/include/xenomai/pipeline/sched.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/sirq.h b/kernel/include/xenomai/pipeline/sirq.h new file mode 120000 index 0000000..0335464 --- /dev/null +++ b/kernel/include/xenomai/pipeline/sirq.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/thread.h b/kernel/include/xenomai/pipeline/thread.h new file mode 120000 index 0000000..bdc0546 --- /dev/null 
+++ b/kernel/include/xenomai/pipeline/thread.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/tick.h b/kernel/include/xenomai/pipeline/tick.h new file mode 120000 index 0000000..5c20516 --- /dev/null +++ b/kernel/include/xenomai/pipeline/tick.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/trace.h b/kernel/include/xenomai/pipeline/trace.h new file mode 120000 index 0000000..74b9e80 --- /dev/null +++ b/kernel/include/xenomai/pipeline/trace.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/vdso_fallback.h b/kernel/include/xenomai/pipeline/vdso_fallback.h new file mode 120000 index 0000000..4ca7a0d --- /dev/null +++ b/kernel/include/xenomai/pipeline/vdso_fallback.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h \ No newline at end of file diff --git a/kernel/include/xenomai/pipeline/wrappers.h b/kernel/include/xenomai/pipeline/wrappers.h new file mode 120000 index 0000000..7f1efd0 --- /dev/null +++ b/kernel/include/xenomai/pipeline/wrappers.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/buffer.h b/kernel/include/xenomai/rtdm/analogy/buffer.h new file mode 120000 index 0000000..c75cce3 --- /dev/null +++ b/kernel/include/xenomai/rtdm/analogy/buffer.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/channel_range.h b/kernel/include/xenomai/rtdm/analogy/channel_range.h new file mode 120000 index 0000000..bec9757 --- /dev/null +++ b/kernel/include/xenomai/rtdm/analogy/channel_range.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/command.h b/kernel/include/xenomai/rtdm/analogy/command.h new file mode 120000 index 0000000..6f997c9 --- /dev/null +++ b/kernel/include/xenomai/rtdm/analogy/command.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/context.h b/kernel/include/xenomai/rtdm/analogy/context.h new file mode 120000 index 0000000..fd74ca2 --- /dev/null +++ b/kernel/include/xenomai/rtdm/analogy/context.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/device.h b/kernel/include/xenomai/rtdm/analogy/device.h new file mode 120000 index 0000000..bf80883 --- /dev/null +++ b/kernel/include/xenomai/rtdm/analogy/device.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/driver.h b/kernel/include/xenomai/rtdm/analogy/driver.h new file mode 120000 index 0000000..aa75656 --- /dev/null +++ 
b/kernel/include/xenomai/rtdm/analogy/driver.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/instruction.h b/kernel/include/xenomai/rtdm/analogy/instruction.h new file mode 120000 index 0000000..e82a550 --- /dev/null +++ b/kernel/include/xenomai/rtdm/analogy/instruction.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/rtdm_helpers.h b/kernel/include/xenomai/rtdm/analogy/rtdm_helpers.h new file mode 120000 index 0000000..a0e1b96 --- /dev/null +++ b/kernel/include/xenomai/rtdm/analogy/rtdm_helpers.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/subdevice.h b/kernel/include/xenomai/rtdm/analogy/subdevice.h new file mode 120000 index 0000000..a11623e --- /dev/null +++ b/kernel/include/xenomai/rtdm/analogy/subdevice.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/analogy/transfer.h b/kernel/include/xenomai/rtdm/analogy/transfer.h new file mode 120000 index 0000000..df69c91 --- /dev/null +++ b/kernel/include/xenomai/rtdm/analogy/transfer.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/autotune.h b/kernel/include/xenomai/rtdm/autotune.h new file mode 120000 index 0000000..ba8efcb --- /dev/null +++ b/kernel/include/xenomai/rtdm/autotune.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/can.h b/kernel/include/xenomai/rtdm/can.h new file mode 120000 index 0000000..8195f8a --- /dev/null +++ b/kernel/include/xenomai/rtdm/can.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/cobalt.h b/kernel/include/xenomai/rtdm/cobalt.h new file mode 120000 index 0000000..b7bbe77 --- /dev/null +++ b/kernel/include/xenomai/rtdm/cobalt.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/compat.h b/kernel/include/xenomai/rtdm/compat.h new file mode 120000 index 0000000..23cff61 --- /dev/null +++ b/kernel/include/xenomai/rtdm/compat.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/driver.h b/kernel/include/xenomai/rtdm/driver.h new file mode 120000 index 0000000..bd8e46a --- /dev/null +++ b/kernel/include/xenomai/rtdm/driver.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/fd.h b/kernel/include/xenomai/rtdm/fd.h new file mode 120000 index 0000000..804d905 --- /dev/null +++ b/kernel/include/xenomai/rtdm/fd.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h \ No newline at end of file diff --git 
a/kernel/include/xenomai/rtdm/gpio.h b/kernel/include/xenomai/rtdm/gpio.h new file mode 120000 index 0000000..c808633 --- /dev/null +++ b/kernel/include/xenomai/rtdm/gpio.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/gpiopwm.h b/kernel/include/xenomai/rtdm/gpiopwm.h new file mode 120000 index 0000000..967bde3 --- /dev/null +++ b/kernel/include/xenomai/rtdm/gpiopwm.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/ipc.h b/kernel/include/xenomai/rtdm/ipc.h new file mode 120000 index 0000000..4c62921 --- /dev/null +++ b/kernel/include/xenomai/rtdm/ipc.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/net.h b/kernel/include/xenomai/rtdm/net.h new file mode 120000 index 0000000..7eaab5c --- /dev/null +++ b/kernel/include/xenomai/rtdm/net.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/rtdm.h b/kernel/include/xenomai/rtdm/rtdm.h new file mode 120000 index 0000000..097a4e3 --- /dev/null +++ b/kernel/include/xenomai/rtdm/rtdm.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/serial.h b/kernel/include/xenomai/rtdm/serial.h new file mode 120000 index 0000000..9552598 --- /dev/null +++ b/kernel/include/xenomai/rtdm/serial.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/testing.h b/kernel/include/xenomai/rtdm/testing.h new file mode 120000 index 0000000..2b183c3 --- /dev/null +++ b/kernel/include/xenomai/rtdm/testing.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/analogy.h b/kernel/include/xenomai/rtdm/uapi/analogy.h new file mode 120000 index 0000000..827e76a --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/analogy.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/autotune.h b/kernel/include/xenomai/rtdm/uapi/autotune.h new file mode 120000 index 0000000..a5ac10a --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/autotune.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/can.h b/kernel/include/xenomai/rtdm/uapi/can.h new file mode 120000 index 0000000..af8f0cc --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/can.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/gpio.h b/kernel/include/xenomai/rtdm/uapi/gpio.h new file mode 120000 index 0000000..2526036 --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/gpio.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/gpiopwm.h 
b/kernel/include/xenomai/rtdm/uapi/gpiopwm.h new file mode 120000 index 0000000..cbf4b3c --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/gpiopwm.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/ipc.h b/kernel/include/xenomai/rtdm/uapi/ipc.h new file mode 120000 index 0000000..dcc43c0 --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/ipc.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/net.h b/kernel/include/xenomai/rtdm/uapi/net.h new file mode 120000 index 0000000..155c861 --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/net.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/rtdm.h b/kernel/include/xenomai/rtdm/uapi/rtdm.h new file mode 120000 index 0000000..d6262f9 --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/rtdm.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/serial.h b/kernel/include/xenomai/rtdm/uapi/serial.h new file mode 120000 index 0000000..bd64996 --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/serial.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/spi.h b/kernel/include/xenomai/rtdm/uapi/spi.h new file mode 120000 index 0000000..dfa76d6 --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/spi.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/testing.h b/kernel/include/xenomai/rtdm/uapi/testing.h new file mode 120000 index 0000000..eb092d8 --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/testing.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/uapi/udd.h b/kernel/include/xenomai/rtdm/uapi/udd.h new file mode 120000 index 0000000..0cba891 --- /dev/null +++ b/kernel/include/xenomai/rtdm/uapi/udd.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h \ No newline at end of file diff --git a/kernel/include/xenomai/rtdm/udd.h b/kernel/include/xenomai/rtdm/udd.h new file mode 120000 index 0000000..f822979 --- /dev/null +++ b/kernel/include/xenomai/rtdm/udd.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h \ No newline at end of file diff --git a/kernel/include/xenomai/version.h b/kernel/include/xenomai/version.h new file mode 120000 index 0000000..74a7bf3 --- /dev/null +++ b/kernel/include/xenomai/version.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/xenomai/version.h \ No newline at end of file diff --git a/kernel/init/Kconfig b/kernel/init/Kconfig index 2c92f94..39812ff 100644 --- a/kernel/init/Kconfig +++ b/kernel/init/Kconfig @@ -1531,7 +1531,19 @@ config PRINTK_NMI def_bool y depends on PRINTK - depends on HAVE_NMI + depends on HAVE_NMI || IRQ_PIPELINE + +config RAW_PRINTK + bool "Enable support for raw printk" + default n + help + This option enables a printk variant called raw_printk() for + writing all output unmodified to a raw console 
channel
+	  immediately, without any header or preparation whatsoever,
+	  usable from any context.
+
+	  Unlike early_printk() console devices, raw_printk() devices
+	  can live past the boot sequence.
 config BUG
 	bool "BUG() support" if EXPERT
@@ -2449,3 +2461,52 @@
 if !ROCKCHIP_MINI_KERNEL
 source "init/Kconfig.gki"
 endif
+menuconfig XENOMAI
+	depends on X86_TSC || !X86
+	bool "Xenomai/cobalt"
+	select IPIPE if HAVE_IPIPE_SUPPORT
+	select IPIPE_WANT_APIREV_2 if IPIPE
+	select DOVETAIL if HAVE_DOVETAIL
+	select DOVETAIL_LEGACY_SYSCALL_RANGE if HAVE_DOVETAIL
+	default y
+	help
+	  Xenomai's Cobalt core is a real-time extension to the Linux
+	  kernel, which exhibits very short interrupt and scheduling
+	  latency, without affecting the regular kernel services.
+
+	  This option enables the set of extended kernel services
+	  required to run real-time applications in user-space over
+	  the Xenomai libraries.
+
+	  Please visit http://xenomai.org for more information.
+
+if XENOMAI
+source "arch/arm64/xenomai/Kconfig"
+endif
+
+if MIGRATION
+comment "WARNING! Page migration (CONFIG_MIGRATION) may increase"
+comment "latency."
+endif
+
+if APM || CPU_FREQ || ACPI_PROCESSOR || INTEL_IDLE
+comment "WARNING! At least one of APM, CPU frequency scaling, ACPI 'processor'"
+comment "or CPU idle features is enabled. Any of these options may"
+comment "cause trouble with Xenomai. You should disable them."
+endif
+
+config XENO_VERSION_MAJOR
+	int
+	default 3
+
+config XENO_VERSION_MINOR
+	int
+	default 2
+
+config XENO_REVISION_LEVEL
+	int
+	default 4
+
+config XENO_VERSION_STRING
+	string
+	default "3.2.4"
diff --git a/kernel/init/Makefile b/kernel/init/Makefile
index 6bc37f6..ce21edc 100644
--- a/kernel/init/Makefile
+++ b/kernel/init/Makefile
@@ -34,4 +34,4 @@
 	@$($(quiet)chk_compile.h)
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@	\
 	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)"	\
-	"$(CONFIG_PREEMPT_RT)" $(CONFIG_CC_VERSION_TEXT) "$(LD)"
+	"$(CONFIG_PREEMPT_RT)" "$(CONFIG_IRQ_PIPELINE)" $(CONFIG_CC_VERSION_TEXT) "$(LD)"
diff --git a/kernel/init/main.c b/kernel/init/main.c
index 45ca352..6443f34 100644
--- a/kernel/init/main.c
+++ b/kernel/init/main.c
@@ -52,6 +52,7 @@
 #include <linux/tick.h>
 #include <linux/sched/isolation.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 #include <linux/taskstats_kern.h>
 #include <linux/delayacct.h>
 #include <linux/unistd.h>
@@ -847,13 +848,14 @@
 	char *command_line;
 	char *after_dashes;
+	stall_inband_nocheck();
 	set_task_stack_end_magic(&init_task);
 	smp_setup_processor_id();
 	debug_objects_early_init();
 	cgroup_init_early();
-	local_irq_disable();
+	local_irq_disable_full();
 	early_boot_irqs_disabled = true;
 	/*
@@ -913,6 +915,7 @@
 	setup_log_buf(0);
 	vfs_caches_init_early();
 	sort_main_extable();
+	irq_pipeline_init_early();
 	trap_init();
 	mm_init();
@@ -958,6 +961,7 @@
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
+	irq_pipeline_init();
 	tick_init();
 	rcu_init_nohz();
 	init_timers();
@@ -983,7 +987,7 @@
 	WARN(!irqs_disabled(), "Interrupts were enabled early\n");
 	early_boot_irqs_disabled = false;
-	local_irq_enable();
+	local_irq_enable_full();
 	kmem_cache_init_late();
diff --git a/kernel/kernel/Kconfig.dovetail b/kernel/kernel/Kconfig.dovetail
new file mode 100644
index 0000000..c9ec30d
--- /dev/null
+++ b/kernel/kernel/Kconfig.dovetail
@@ -0,0 +1,23 @@
+
+# DOVETAIL dual-kernel interface
+config HAVE_DOVETAIL
+	bool
+
+# Selecting ARCH_WANT_IRQS_OFF_ACTIVATE_MM in this generic Kconfig
+# portion is ugly, but the whole
ARCH_WANT_IRQS_OFF_ACTIVATE_MM logic
+# is a temporary kludge which is meant to disappear anyway. See
+# the related comments in exec_mmap() for details.
+config DOVETAIL
+	bool "Dovetail interface"
+	depends on HAVE_DOVETAIL
+	select IRQ_PIPELINE
+	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+	default n
+	help
+	  Activate this option if you want to enable the interface for
+	  running a secondary kernel side-by-side with Linux (aka
+	  "dual kernel" configuration).
+
+config DOVETAIL_LEGACY_SYSCALL_RANGE
+	depends on DOVETAIL
+	def_bool y
diff --git a/kernel/kernel/Makefile b/kernel/kernel/Makefile
index 6ee614d..cafe18f 100644
--- a/kernel/kernel/Makefile
+++ b/kernel/kernel/Makefile
@@ -106,6 +106,7 @@
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-$(CONFIG_DOVETAIL) += dovetail.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
 obj-$(CONFIG_BPF) += bpf/
 obj-$(CONFIG_KCSAN) += kcsan/
@@ -155,3 +156,5 @@
 	$(call cmd,genikh)
 clean-files := kheaders_data.tar.xz kheaders.md5
+
+obj-$(CONFIG_XENOMAI) += xenomai/
diff --git a/kernel/kernel/debug/debug_core.c b/kernel/kernel/debug/debug_core.c
index 0f31b22..6e034f0 100644
--- a/kernel/kernel/debug/debug_core.c
+++ b/kernel/kernel/debug/debug_core.c
@@ -111,8 +111,8 @@
  */
 atomic_t kgdb_active = ATOMIC_INIT(-1);
 EXPORT_SYMBOL_GPL(kgdb_active);
-static DEFINE_RAW_SPINLOCK(dbg_master_lock);
-static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
+static DEFINE_HARD_SPINLOCK(dbg_master_lock);
+static DEFINE_HARD_SPINLOCK(dbg_slave_lock);
 /*
  * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
@@ -612,7 +612,7 @@
 	 * Interrupts will be restored by the 'trap return' code, except when
 	 * single stepping.
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cpu = ks->cpu;
 	kgdb_info[cpu].debuggerinfo = regs;
@@ -666,7 +666,7 @@
 		smp_mb__before_atomic();
 		atomic_dec(&slaves_in_kgdb);
 		dbg_touch_watchdogs();
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		rcu_read_unlock();
 		return 0;
 	}
@@ -685,7 +685,7 @@
 		atomic_set(&kgdb_active, -1);
 		raw_spin_unlock(&dbg_master_lock);
 		dbg_touch_watchdogs();
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		rcu_read_unlock();
 		goto acquirelock;
@@ -721,8 +721,11 @@
 		atomic_set(ks->send_ready, 1);
 	/* Signal the other CPUs to enter kgdb_wait() */
-	else if ((!kgdb_single_step) && kgdb_do_roundup)
+	else if ((!kgdb_single_step) && kgdb_do_roundup && running_inband()) {
+		hard_cond_local_irq_enable();
 		kgdb_roundup_cpus();
+		hard_cond_local_irq_disable();
+	}
 #endif
 	/*
@@ -834,7 +837,7 @@
 	atomic_set(&kgdb_active, -1);
 	raw_spin_unlock(&dbg_master_lock);
 	dbg_touch_watchdogs();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	rcu_read_unlock();
 	return kgdb_info[cpu].ret_state;
@@ -957,7 +960,7 @@
 	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
 		return;
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	gdbstub_msg_write(s, count);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
diff --git a/kernel/kernel/dovetail.c b/kernel/kernel/dovetail.c
new file mode 100644
index 0000000..4d1e4c3
--- /dev/null
+++ b/kernel/kernel/dovetail.c
@@ -0,0 +1,450 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>.
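+ *
+ * Generic glue for the Dovetail interface: alternate scheduling
+ * control, out-of-band syscall routing, trap notification, and the
+ * helpers finalizing in-band <-> out-of-band stage transitions for
+ * tasks shared with a companion core.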
+ */ +#include <linux/timekeeper_internal.h> +#include <linux/sched/signal.h> +#include <linux/irq_pipeline.h> +#include <linux/dovetail.h> +#include <asm/unistd.h> +#include <asm/syscall.h> +#include <uapi/asm-generic/dovetail.h> + +static bool dovetail_enabled; + +void __weak arch_inband_task_init(struct task_struct *p) +{ +} + +void inband_task_init(struct task_struct *p) +{ + struct thread_info *ti = task_thread_info(p); + + clear_ti_local_flags(ti, _TLF_DOVETAIL|_TLF_OOB|_TLF_OFFSTAGE); + arch_inband_task_init(p); +} + +void dovetail_init_altsched(struct dovetail_altsched_context *p) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + check_inband_stage(); + p->task = tsk; + p->active_mm = mm; + p->borrowed_mm = false; + + /* + * Make sure the current process will not share any private + * page with its child upon fork(), sparing it the random + * latency induced by COW. MMF_DOVETAILED is never cleared once + * set. We serialize with dup_mmap() which holds the mm write + * lock. + */ + if (!(tsk->flags & PF_KTHREAD) && + !test_bit(MMF_DOVETAILED, &mm->flags)) { + mmap_write_lock(mm); + __set_bit(MMF_DOVETAILED, &mm->flags); + mmap_write_unlock(mm); + } +} +EXPORT_SYMBOL_GPL(dovetail_init_altsched); + +void dovetail_start_altsched(void) +{ + check_inband_stage(); + set_thread_local_flags(_TLF_DOVETAIL); +} +EXPORT_SYMBOL_GPL(dovetail_start_altsched); + +void dovetail_stop_altsched(void) +{ + clear_thread_local_flags(_TLF_DOVETAIL); + clear_thread_flag(TIF_MAYDAY); +} +EXPORT_SYMBOL_GPL(dovetail_stop_altsched); + +int __weak handle_oob_syscall(struct pt_regs *regs) +{ + return 0; +} + +int __weak handle_pipelined_syscall(struct irq_stage *stage, + struct pt_regs *regs) +{ + return 0; /* i.e. propagate to in-band handler. */ +} + +void __weak handle_oob_mayday(struct pt_regs *regs) +{ +} + +static inline +void call_mayday(struct thread_info *ti, struct pt_regs *regs) +{ + clear_ti_thread_flag(ti, TIF_MAYDAY); + handle_oob_mayday(regs); +} + +void dovetail_call_mayday(struct pt_regs *regs) +{ + struct thread_info *ti = current_thread_info(); + unsigned long flags; + + flags = hard_local_irq_save(); + call_mayday(ti, regs); + hard_local_irq_restore(flags); +} + +void inband_retuser_notify(void) +{ + clear_thread_flag(TIF_RETUSER); + inband_event_notify(INBAND_TASK_RETUSER, current); + /* CAUTION: we might have switched out-of-band here. */ +} + +int __pipeline_syscall(struct pt_regs *regs) +{ + struct thread_info *ti = current_thread_info(); + struct irq_stage *caller_stage, *target_stage; + struct irq_stage_data *p, *this_context; + unsigned long flags; + int ret = 0; + + /* + * We should definitely not pipeline a syscall through the + * slow path with IRQs off. + */ + WARN_ON_ONCE(dovetail_debug() && hard_irqs_disabled()); + + if (!dovetail_enabled) + return 0; + + flags = hard_local_irq_save(); + caller_stage = current_irq_stage; + this_context = current_irq_staged; + target_stage = &oob_stage; +next: + p = this_staged(target_stage); + set_current_irq_staged(p); + hard_local_irq_restore(flags); + ret = handle_pipelined_syscall(caller_stage, regs); + flags = hard_local_irq_save(); + /* + * Be careful about stage switching _and_ CPU migration that + * might have happened as a result of handing over the syscall + * to the out-of-band handler. + * + * - if a stage migration is detected, fetch the new + * per-stage, per-CPU context pointer. + * + * - if no stage migration happened, switch back to the + * initial call stage, on a possibly different CPU though. 
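+	 *
+	 * Either way, hard irqs are off again at this point, so the
+	 * per-CPU stage context pointers evaluated below cannot
+	 * change under our feet.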
+	 */
+	if (current_irq_stage != target_stage) {
+		this_context = current_irq_staged;
+	} else {
+		p = this_staged(this_context->stage);
+		set_current_irq_staged(p);
+	}
+
+	if (this_context->stage == &inband_stage) {
+		if (target_stage != &inband_stage && ret == 0) {
+			target_stage = &inband_stage;
+			goto next;
+		}
+		p = this_inband_staged();
+		if (stage_irqs_pending(p))
+			sync_current_irq_stage();
+	} else {
+		if (test_ti_thread_flag(ti, TIF_MAYDAY))
+			call_mayday(ti, regs);
+	}
+
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline bool maybe_oob_syscall(unsigned int nr, struct pt_regs *regs)
+{
+	/*
+	 * Check whether the companion core might be interested in the
+	 * incoming syscall. If the legacy syscall range is supported,
+	 * pass the request to the core if __OOB_SYSCALL_BIT is set in
+	 * @nr. Otherwise, only check whether an oob syscall is folded
+	 * into a prctl() request.
+	 */
+	if (IS_ENABLED(CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE)) {
+		if (nr & __OOB_SYSCALL_BIT)
+			return true;
+	}
+
+	return arch_dovetail_is_syscall(nr) &&
+		(syscall_get_arg0(regs) & __OOB_SYSCALL_BIT);
+}
+
+int pipeline_syscall(unsigned int nr, struct pt_regs *regs)
+{
+	struct thread_info *ti = current_thread_info();
+	unsigned long local_flags = READ_ONCE(ti_local_flags(ti));
+	int ret;
+
+	WARN_ON_ONCE(dovetail_debug() && hard_irqs_disabled());
+
+	/*
+	 * If the syscall signature belongs to the out-of-band syscall
+	 * set and we are running out-of-band, pass the request
+	 * directly to the companion core by calling the oob syscall
+	 * handler.
+	 *
+	 * Otherwise, if this is an out-of-band syscall or alternate
+	 * scheduling is enabled for the caller, propagate the syscall
+	 * through the pipeline stages, so that:
+	 *
+	 * - the core can manipulate the current execution stage for
+	 * handling the request, which includes switching the current
+	 * thread back to the in-band context if the syscall is a
+	 * native one, or promoting it to the oob stage if handling an
+	 * oob syscall requires this.
+	 *
+	 * - the core can receive the initial oob syscall a thread
+	 * might have to emit for enabling dovetailing from the
+	 * in-band stage.
+	 *
+	 * Native syscalls from common (non-dovetailed) threads are
+	 * not subject to pipelining, but flow down to the in-band
+	 * system call handler directly.
+	 *
+	 * Sanity check: we bark on returning from a syscall on a
+	 * stalled in-band stage, which combined with running with
+	 * hard irqs on might cause interrupts to linger in the log
+	 * after exiting to user.
+	 */
+
+	if ((local_flags & _TLF_OOB) && maybe_oob_syscall(nr, regs)) {
+		ret = handle_oob_syscall(regs);
+		if (!IS_ENABLED(CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE))
+			WARN_ON_ONCE(dovetail_debug() && !ret);
+		local_flags = READ_ONCE(ti_local_flags(ti));
+		if (likely(ret)) {
+			if (local_flags & _TLF_OOB) {
+				if (test_ti_thread_flag(ti, TIF_MAYDAY))
+					dovetail_call_mayday(regs);
+				return 1; /* don't pass down, no tail work. */
+			} else {
+				WARN_ON_ONCE(dovetail_debug() && irqs_disabled());
+				return -1; /* don't pass down, do tail work. */
+			}
+		}
+	}
+
+	if ((local_flags & _TLF_DOVETAIL) || maybe_oob_syscall(nr, regs)) {
+		ret = __pipeline_syscall(regs);
+		local_flags = READ_ONCE(ti_local_flags(ti));
+		if (local_flags & _TLF_OOB)
+			return 1; /* don't pass down, no tail work. */
+		if (ret) {
+			WARN_ON_ONCE(dovetail_debug() && irqs_disabled());
+			return -1; /* don't pass down, do tail work. */
+		}
+	}
+
+	return 0; /* pass syscall down to the in-band dispatcher.
*/ +} + +void __weak handle_oob_trap_entry(unsigned int trapnr, struct pt_regs *regs) +{ +} + +noinstr void __oob_trap_notify(unsigned int exception, + struct pt_regs *regs) +{ + unsigned long flags; + + /* + * We send a notification about exceptions raised over a + * registered oob stage only. The trap_entry handler expects + * hard irqs off on entry. It may demote the current context + * to the in-band stage, may return with hard irqs on. + */ + if (dovetail_enabled) { + set_thread_local_flags(_TLF_OOBTRAP); + flags = hard_local_irq_save(); + instrumentation_begin(); + handle_oob_trap_entry(exception, regs); + instrumentation_end(); + hard_local_irq_restore(flags); + } +} + +void __weak handle_oob_trap_exit(unsigned int trapnr, struct pt_regs *regs) +{ +} + +noinstr void __oob_trap_unwind(unsigned int exception, struct pt_regs *regs) +{ + /* + * The trap_exit handler runs only if trap_entry was called + * for the same trap occurrence. It expects hard irqs off on + * entry, may switch the current context back to the oob + * stage. Must return with hard irqs off. + */ + hard_local_irq_disable(); + clear_thread_local_flags(_TLF_OOBTRAP); + instrumentation_begin(); + handle_oob_trap_exit(exception, regs); + instrumentation_end(); +} + +void __weak handle_inband_event(enum inband_event_type event, void *data) +{ +} + +void inband_event_notify(enum inband_event_type event, void *data) +{ + check_inband_stage(); + + if (dovetail_enabled) + handle_inband_event(event, data); +} + +void __weak resume_oob_task(struct task_struct *p) +{ +} + +static void finalize_oob_transition(void) /* hard IRQs off */ +{ + struct irq_pipeline_data *pd; + struct irq_stage_data *p; + struct thread_info *ti; + struct task_struct *t; + + pd = raw_cpu_ptr(&irq_pipeline); + t = pd->task_inflight; + if (t == NULL) + return; + + /* + * @t which is in flight to the oob stage might have received + * a signal while waiting in off-stage state to be actually + * scheduled out. We can't act upon that signal safely from + * here, we simply let the task complete the migration process + * to the oob stage. The pending signal will be handled when + * the task eventually exits the out-of-band context by the + * converse migration. + */ + pd->task_inflight = NULL; + ti = task_thread_info(t); + + /* + * The transition handler in the companion core assumes the + * oob stage is stalled, fix this up. + */ + stall_oob(); + resume_oob_task(t); + unstall_oob(); + p = this_oob_staged(); + if (stage_irqs_pending(p)) + /* Current stage (in-band) != p->stage (oob). */ + sync_irq_stage(p->stage); +} + +void oob_trampoline(void) +{ + unsigned long flags; + + check_inband_stage(); + flags = hard_local_irq_save(); + finalize_oob_transition(); + hard_local_irq_restore(flags); +} + +bool inband_switch_tail(void) +{ + bool oob; + + check_hard_irqs_disabled(); + + /* + * We may run this code either over the inband or oob + * contexts. If inband, we may have a thread blocked in + * dovetail_leave_inband(), waiting for the companion core to + * schedule it back in over the oob context, in which case + * finalize_oob_transition() should take care of it. If oob, + * the core just switched us back, and we may update the + * context markers before returning to context_switch(). + * + * Since the preemption count does not reflect the active + * stage yet upon inband -> oob transition, we figure out + * which one we are on by testing _TLF_OFFSTAGE. 
Having this + * bit set when running the inband switch tail code means that + * we are completing such transition for the current task, + * switched in by dovetail_context_switch() over the oob + * stage. If so, update the context markers appropriately. + */ + oob = test_thread_local_flags(_TLF_OFFSTAGE); + if (oob) { + /* + * The companion core assumes a stalled stage on exit + * from dovetail_leave_inband(). + */ + stall_oob(); + set_thread_local_flags(_TLF_OOB); + if (!IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT)) { + WARN_ON_ONCE(dovetail_debug() && + (preempt_count() & STAGE_MASK)); + preempt_count_add(STAGE_OFFSET); + } + } else { + finalize_oob_transition(); + hard_local_irq_enable(); + } + + return oob; +} + +void __weak inband_clock_was_set(void) +{ +} + +void __weak install_inband_fd(unsigned int fd, struct file *file, + struct files_struct *files) +{ +} + +void __weak uninstall_inband_fd(unsigned int fd, struct file *file, + struct files_struct *files) +{ +} + +void __weak replace_inband_fd(unsigned int fd, struct file *file, + struct files_struct *files) +{ +} + +int dovetail_start(void) +{ + check_inband_stage(); + + if (dovetail_enabled) + return -EBUSY; + + if (!oob_stage_present()) + return -EAGAIN; + + dovetail_enabled = true; + smp_wmb(); + + return 0; +} +EXPORT_SYMBOL_GPL(dovetail_start); + +void dovetail_stop(void) +{ + check_inband_stage(); + + dovetail_enabled = false; + smp_wmb(); +} +EXPORT_SYMBOL_GPL(dovetail_stop); diff --git a/kernel/kernel/entry/common.c b/kernel/kernel/entry/common.c index 09f5885..5f043bb 100644 --- a/kernel/kernel/entry/common.c +++ b/kernel/kernel/entry/common.c @@ -2,6 +2,7 @@ #include <linux/context_tracking.h> #include <linux/entry-common.h> +#include <linux/irq_pipeline.h> #include <linux/livepatch.h> #include <linux/audit.h> @@ -71,10 +72,45 @@ return ret ? : syscall; } +static __always_inline void +syscall_enter_from_user_enable_irqs(void) +{ + if (running_inband()) { + /* + * If pipelining interrupts, prepare for emulating a + * stall -> unstall transition (we are currently + * unstalled), fixing up the IRQ trace state in order + * to keep lockdep happy (and silent). + */ + stall_inband_nocheck(); + hard_cond_local_irq_enable(); + local_irq_enable(); + } else { + /* + * We are running on the out-of-band stage, don't mess + * with the in-band interrupt state. This is none of + * our business. We may manipulate the hardware state + * only. + */ + hard_local_irq_enable(); + } +} + static __always_inline long __syscall_enter_from_user_work(struct pt_regs *regs, long syscall) { unsigned long ti_work; + int ret; + + /* + * Pipeline the syscall to the companion core if the current + * task wants this. Compiled out if not dovetailing. + */ + ret = pipeline_syscall(syscall, regs); + if (ret > 0) /* out-of-band, bail out. */ + return EXIT_SYSCALL_OOB; + if (ret < 0) /* in-band, tail work only. */ + return EXIT_SYSCALL_TAIL; ti_work = READ_ONCE(current_thread_info()->flags); if (ti_work & SYSCALL_ENTER_WORK) @@ -95,7 +131,7 @@ enter_from_user_mode(regs); instrumentation_begin(); - local_irq_enable(); + syscall_enter_from_user_enable_irqs(); ret = __syscall_enter_from_user_work(regs, syscall); instrumentation_end(); @@ -106,7 +142,7 @@ { enter_from_user_mode(regs); instrumentation_begin(); - local_irq_enable(); + syscall_enter_from_user_enable_irqs(); instrumentation_end(); } @@ -121,6 +157,7 @@ * 3) Invoke architecture specific last minute exit code, e.g. speculation * mitigations, etc. 
* 4) Tell lockdep that interrupts are enabled + * 5) Unstall the in-band stage of the interrupt pipeline if current */ static __always_inline void exit_to_user_mode(void) { @@ -132,6 +169,8 @@ user_enter_irqoff(); arch_exit_to_user_mode(); lockdep_hardirqs_on(CALLER_ADDR0); + if (running_inband()) + unstall_inband(); } /* Workaround to allow gradual conversion of architecture code */ @@ -155,6 +194,12 @@ while (ti_work & EXIT_TO_USER_MODE_WORK) { local_irq_enable_exit_to_user(ti_work); + + /* + * Check that local_irq_enable_exit_to_user() does the + * right thing when pipelining. + */ + WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()); if (ti_work & _TIF_NEED_RESCHED) schedule(); @@ -182,6 +227,7 @@ * enabled above. */ local_irq_disable_exit_to_user(); + WARN_ON_ONCE(irq_pipeline_debug() && !hard_irqs_disabled()); ti_work = READ_ONCE(current_thread_info()->flags); } @@ -189,16 +235,36 @@ return ti_work; } +static inline bool do_retuser(unsigned long ti_work) +{ + if (dovetailing() && (ti_work & _TIF_RETUSER)) { + hard_local_irq_enable(); + inband_retuser_notify(); + hard_local_irq_disable(); + /* RETUSER might have switched oob */ + return running_inband(); + } + + return false; +} + static void exit_to_user_mode_prepare(struct pt_regs *regs) { - unsigned long ti_work = READ_ONCE(current_thread_info()->flags); + unsigned long ti_work; + + check_hard_irqs_disabled(); lockdep_assert_irqs_disabled(); +again: + ti_work = READ_ONCE(current_thread_info()->flags); if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK)) ti_work = exit_to_user_mode_loop(regs, ti_work); arch_exit_to_user_mode_prepare(regs, ti_work); + + if (do_retuser(ti_work)) + goto again; /* Ensure that the address limit is intact and no locks are held */ addr_limit_user_check(); @@ -252,7 +318,7 @@ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr)) - local_irq_enable(); + local_irq_enable_full(); } rseq_syscall(regs); @@ -261,8 +327,15 @@ * Do one-time syscall specific work. If these work items are * enabled, we want to run them exactly once per syscall exit with * interrupts enabled. + * + * Dovetail: if this does not look like an in-band syscall, it + * has to belong to the companion core. Typically, + * __OOB_SYSCALL_BIT would be set in this value. Skip the + * work for those syscalls. */ - if (unlikely(cached_flags & SYSCALL_EXIT_WORK)) + if (unlikely((cached_flags & SYSCALL_EXIT_WORK) && + (!irqs_pipelined() || + syscall_get_nr(current, regs) < NR_syscalls))) syscall_exit_work(regs, cached_flags); } @@ -278,6 +351,8 @@ noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs) { + WARN_ON_ONCE(irq_pipeline_debug() && irqs_disabled()); + stall_inband_nocheck(); enter_from_user_mode(regs); } @@ -293,12 +368,36 @@ { irqentry_state_t ret = { .exit_rcu = false, +#ifdef CONFIG_IRQ_PIPELINE + .stage_info = IRQENTRY_INBAND_STALLED, +#endif }; +#ifdef CONFIG_IRQ_PIPELINE + if (running_oob()) { + WARN_ON_ONCE(irq_pipeline_debug() && oob_irqs_disabled()); + ret.stage_info = IRQENTRY_OOB; + return ret; + } +#endif + if (user_mode(regs)) { +#ifdef CONFIG_IRQ_PIPELINE + ret.stage_info = IRQENTRY_INBAND_UNSTALLED; +#endif irqentry_enter_from_user_mode(regs); return ret; } + +#ifdef CONFIG_IRQ_PIPELINE + /* + * IRQ pipeline: If we trapped from kernel space, the virtual + * state may or may not match the hardware state. Since hard + * irqs are off on entry, we have to stall the in-band stage. 
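+	 *
+	 * test_and_stall_inband_nocheck() returns the previous stall
+	 * state: recording IRQENTRY_INBAND_UNSTALLED when the stage
+	 * was found unstalled tells irqentry_exit() that it must
+	 * unstall it again and synchronize the in-band log on the
+	 * way out.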
+	 */
+	if (!test_and_stall_inband_nocheck())
+		ret.stage_info = IRQENTRY_INBAND_UNSTALLED;
+#endif
 
 	/*
 	 * If this entry hit the idle task invoke rcu_irq_enter() whether
@@ -366,14 +465,91 @@
 	}
 }
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+static inline
+bool irqexit_may_preempt_schedule(irqentry_state_t state,
+				struct pt_regs *regs)
+{
+	return state.stage_info == IRQENTRY_INBAND_UNSTALLED;
+}
+
+#else
+
+static inline
+bool irqexit_may_preempt_schedule(irqentry_state_t state,
+				struct pt_regs *regs)
+{
+	return !regs_irqs_disabled(regs);
+}
+
+#endif
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static bool irqentry_syncstage(irqentry_state_t state) /* hard irqs off */
+{
+	/*
+	 * If pipelining interrupts, enable in-band IRQs then
+	 * synchronize the interrupt log on exit if:
+	 *
+	 * - irqentry_enter() stalled the stage in order to mirror the
+	 * hardware state.
+	 *
+	 * - we were coming from oob, thus went through a stage migration
+	 * that was caused by taking a CPU exception, e.g., a fault.
+	 *
+	 * We run before preempt_schedule_irq() may be called later on
+	 * by preemptible kernels, so that any rescheduling request
+	 * triggered by in-band IRQ handlers is considered.
+	 */
+	if (state.stage_info == IRQENTRY_INBAND_UNSTALLED ||
+	    state.stage_info == IRQENTRY_OOB) {
+		unstall_inband_nocheck();
+		synchronize_pipeline_on_irq();
+		stall_inband_nocheck();
+		return true;
+	}
+
+	return false;
+}
+
+static void irqentry_unstall(void)
+{
+	unstall_inband_nocheck();
+}
+
+#else
+
+static bool irqentry_syncstage(irqentry_state_t state)
+{
+	return false;
+}
+
+static void irqentry_unstall(void)
+{
+}
+
+#endif
+
 noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
 {
+	bool synchronized = false;
+
+	if (running_oob())
+		return;
+
 	lockdep_assert_irqs_disabled();
 
 	/* Check whether this returns to user mode */
 	if (user_mode(regs)) {
 		irqentry_exit_to_user_mode(regs);
-	} else if (!regs_irqs_disabled(regs)) {
+		return;
+	}
+
+	synchronized = irqentry_syncstage(state);
+
+	if (irqexit_may_preempt_schedule(state, regs)) {
 		/*
 		 * If RCU was not watching on entry this needs to be done
 		 * carefully and needs the same ordering of lockdep/tracing
@@ -387,7 +563,7 @@
 			instrumentation_end();
 			rcu_irq_exit();
 			lockdep_hardirqs_on(CALLER_ADDR0);
-			return;
+			goto out;
 		}
 
 		instrumentation_begin();
@@ -404,6 +580,12 @@
 		if (state.exit_rcu)
 			rcu_irq_exit();
 	}
+
+out:
+	if (synchronized)
+		irqentry_unstall();
+
+	return;
 }
 
 irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
diff --git a/kernel/kernel/exit.c b/kernel/kernel/exit.c
index 86e4031..9150d66 100644
--- a/kernel/kernel/exit.c
+++ b/kernel/kernel/exit.c
@@ -14,6 +14,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/sched/cputime.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 #include <linux/module.h>
 #include <linux/capability.h>
 #include <linux/completion.h>
@@ -767,6 +768,7 @@
 
 	io_uring_files_cancel();
 	exit_signals(tsk);	/* sets PF_EXITING */
+	inband_exit_notify();
 
 	/* sync mm's RSS info before statistics gathering */
 	if (tsk->mm)
diff --git a/kernel/kernel/fork.c b/kernel/kernel/fork.c
index f73e3e6..978ecbf 100644
--- a/kernel/kernel/fork.c
+++ b/kernel/kernel/fork.c
@@ -48,6 +48,7 @@
 #include <linux/cpu.h>
 #include <linux/cgroup.h>
 #include <linux/security.h>
+#include <linux/dovetail.h>
 #include <linux/hugetlb.h>
 #include <linux/seccomp.h>
 #include <linux/swap.h>
@@ -938,6 +939,7 @@
 #endif
 
 	setup_thread_stack(tsk, orig);
+	inband_task_init(tsk);
 	clear_user_return_notifier(tsk);
 	clear_tsk_need_resched(tsk);
set_task_stack_end_magic(tsk); @@ -1083,6 +1085,9 @@ #endif mm_init_uprobes_state(mm); hugetlb_count_init(mm); +#ifdef CONFIG_DOVETAIL + memset(&mm->oob_state, 0, sizeof(mm->oob_state)); +#endif if (current->mm) { mm->flags = current->mm->flags & MMF_INIT_MASK; @@ -1131,6 +1136,7 @@ exit_aio(mm); ksm_exit(mm); khugepaged_exit(mm); /* must run before exit_mmap */ + inband_cleanup_notify(mm); /* ditto. */ exit_mmap(mm); mm_put_huge_zero_page(mm); set_mm_exe_file(mm, NULL); diff --git a/kernel/kernel/irq/Kconfig b/kernel/kernel/irq/Kconfig index 1bd144e..d170936 100644 --- a/kernel/kernel/irq/Kconfig +++ b/kernel/kernel/irq/Kconfig @@ -142,6 +142,19 @@ If you don't know what to do here, say N. +# Interrupt pipeline +config HAVE_IRQ_PIPELINE + bool + +config IRQ_PIPELINE + bool "Interrupt pipeline" + depends on HAVE_IRQ_PIPELINE + select IRQ_DOMAIN + default n + help + Activate this option if you want the interrupt pipeline to be + compiled in. + endmenu config GENERIC_IRQ_MULTI_HANDLER diff --git a/kernel/kernel/irq/Makefile b/kernel/kernel/irq/Makefile index b4f5371..b6e43ec 100644 --- a/kernel/kernel/irq/Makefile +++ b/kernel/kernel/irq/Makefile @@ -9,6 +9,8 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o obj-$(CONFIG_IRQ_SIM) += irq_sim.o +obj-$(CONFIG_IRQ_PIPELINE) += pipeline.o +obj-$(CONFIG_IRQ_PIPELINE_TORTURE_TEST) += irqptorture.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o diff --git a/kernel/kernel/irq/chip.c b/kernel/kernel/irq/chip.c index 520b9fa..13edfa8 100644 --- a/kernel/kernel/irq/chip.c +++ b/kernel/kernel/irq/chip.c @@ -15,6 +15,7 @@ #include <linux/kernel_stat.h> #include <linux/irqdomain.h> #include <linux/wakeup_reason.h> +#include <linux/irq_pipeline.h> #include <trace/events/irq.h> @@ -49,6 +50,10 @@ if (!chip) chip = &no_irq_chip; + else + WARN_ONCE(irqs_pipelined() && + (chip->flags & IRQCHIP_PIPELINE_SAFE) == 0, + "irqchip %s is not pipeline-safe!", chip->name); desc->irq_data.chip = chip; irq_put_desc_unlock(desc, flags); @@ -155,14 +160,6 @@ return 0; } EXPORT_SYMBOL(irq_set_chip_data); - -struct irq_data *irq_get_irq_data(unsigned int irq) -{ - struct irq_desc *desc = irq_to_desc(irq); - - return desc ? &desc->irq_data : NULL; -} -EXPORT_SYMBOL_GPL(irq_get_irq_data); static void irq_state_clr_disabled(struct irq_desc *desc) { @@ -386,7 +383,8 @@ */ void irq_disable(struct irq_desc *desc) { - __irq_disable(desc, irq_settings_disable_unlazy(desc)); + __irq_disable(desc, + irq_settings_disable_unlazy(desc) || irqs_pipelined()); } void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) @@ -532,8 +530,22 @@ * If the interrupt is an armed wakeup source, mark it pending * and suspended, disable it and notify the pm core about the * event. + * + * When pipelining, the logic is as follows: + * + * - from a pipeline entry context, we might have preempted + * the oob stage, or irqs might be [virtually] off, so we may + * not run the in-band PM code. Just make sure any wakeup + * interrupt is detected later on when the flow handler + * re-runs from the in-band stage. + * + * - from the in-band context, run the PM wakeup check. 
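+	 *
+	 * (Hence we only peek at the wakeup-armed state below from
+	 * the entry context; irq_pm_check_wakeup() itself belongs to
+	 * the in-band PM core and must not be called from there.)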
*/ - if (irq_pm_check_wakeup(desc)) + if (irqs_pipelined()) { + WARN_ON_ONCE(irq_pipeline_debug() && !in_pipeline()); + if (irqd_is_wakeup_armed(&desc->irq_data)) + return true; + } else if (irq_pm_check_wakeup(desc)) return false; /* @@ -557,8 +569,13 @@ { raw_spin_lock(&desc->lock); - if (!irq_may_run(desc)) + if (start_irq_flow() && !irq_may_run(desc)) goto out_unlock; + + if (on_pipeline_entry()) { + handle_oob_irq(desc); + goto out_unlock; + } desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); @@ -594,8 +611,13 @@ raw_spin_lock(&desc->lock); - if (!irq_may_run(desc)) + if (start_irq_flow() && !irq_may_run(desc)) goto out_unlock; + + if (on_pipeline_entry()) { + handle_oob_irq(desc); + goto out_unlock; + } desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); @@ -617,6 +639,20 @@ raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_untracked_irq); + +static inline void cond_eoi_irq(struct irq_desc *desc) +{ + struct irq_chip *chip = desc->irq_data.chip; + + if (!(chip->flags & IRQCHIP_EOI_THREADED)) + chip->irq_eoi(&desc->irq_data); +} + +static inline void mask_cond_eoi_irq(struct irq_desc *desc) +{ + mask_irq(desc); + cond_eoi_irq(desc); +} /* * Called unconditionally from handle_level_irq() and only for oneshot @@ -648,10 +684,19 @@ void handle_level_irq(struct irq_desc *desc) { raw_spin_lock(&desc->lock); - mask_ack_irq(desc); - if (!irq_may_run(desc)) + if (start_irq_flow()) { + mask_ack_irq(desc); + + if (!irq_may_run(desc)) + goto out_unlock; + } + + if (on_pipeline_entry()) { + if (handle_oob_irq(desc)) + goto out_unmask; goto out_unlock; + } desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); @@ -666,7 +711,7 @@ kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); - +out_unmask: cond_unmask_irq(desc); out_unlock: @@ -677,7 +722,10 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) { if (!(desc->istate & IRQS_ONESHOT)) { - chip->irq_eoi(&desc->irq_data); + if (!irqs_pipelined()) + chip->irq_eoi(&desc->irq_data); + else if (!irqd_irq_disabled(&desc->irq_data)) + unmask_irq(desc); return; } /* @@ -688,9 +736,11 @@ */ if (!irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { - chip->irq_eoi(&desc->irq_data); + if (!irqs_pipelined()) + chip->irq_eoi(&desc->irq_data); unmask_irq(desc); - } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { + } else if (!irqs_pipelined() && + !(chip->flags & IRQCHIP_EOI_THREADED)) { chip->irq_eoi(&desc->irq_data); } } @@ -710,8 +760,16 @@ raw_spin_lock(&desc->lock); - if (!irq_may_run(desc)) + if (start_irq_flow() && !irq_may_run(desc)) goto out; + + if (on_pipeline_entry()) { + if (handle_oob_irq(desc)) + chip->irq_eoi(&desc->irq_data); + else + mask_cond_eoi_irq(desc); + goto out_unlock; + } desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); @@ -726,13 +784,13 @@ } kstat_incr_irqs_this_cpu(desc); - if (desc->istate & IRQS_ONESHOT) + if (!irqs_pipelined() && (desc->istate & IRQS_ONESHOT)) mask_irq(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); - +out_unlock: raw_spin_unlock(&desc->lock); return; out: @@ -792,30 +850,42 @@ */ void handle_edge_irq(struct irq_desc *desc) { + struct irq_chip *chip = irq_desc_get_chip(desc); + raw_spin_lock(&desc->lock); - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + if (start_irq_flow()) { + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - if (!irq_may_run(desc)) { - desc->istate |= IRQS_PENDING; - mask_ack_irq(desc); - goto out_unlock; + if (!irq_may_run(desc)) { + desc->istate |= IRQS_PENDING; + mask_ack_irq(desc); + goto out_unlock; + 
} + + /* + * If its disabled or no action available then mask it + * and get out of here. + */ + if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { + desc->istate |= IRQS_PENDING; + mask_ack_irq(desc); + goto out_unlock; + } } - /* - * If its disabled or no action available then mask it and get - * out of here. - */ - if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { - desc->istate |= IRQS_PENDING; - mask_ack_irq(desc); + if (on_pipeline_entry()) { + chip->irq_ack(&desc->irq_data); + desc->istate |= IRQS_EDGE; + handle_oob_irq(desc); goto out_unlock; } kstat_incr_irqs_this_cpu(desc); /* Start handling the irq */ - desc->irq_data.chip->irq_ack(&desc->irq_data); + if (!irqs_pipelined()) + chip->irq_ack(&desc->irq_data); do { if (unlikely(!desc->action)) { @@ -840,6 +910,8 @@ !irqd_irq_disabled(&desc->irq_data)); out_unlock: + if (on_pipeline_entry()) + desc->istate &= ~IRQS_EDGE; raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL(handle_edge_irq); @@ -858,11 +930,20 @@ raw_spin_lock(&desc->lock); - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + if (start_irq_flow()) { + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); - if (!irq_may_run(desc)) { - desc->istate |= IRQS_PENDING; - goto out_eoi; + if (!irq_may_run(desc)) { + desc->istate |= IRQS_PENDING; + goto out_eoi; + } + } + + if (on_pipeline_entry()) { + desc->istate |= IRQS_EDGE; + if (handle_oob_irq(desc)) + goto out_eoi; + goto out; } /* @@ -887,6 +968,9 @@ out_eoi: chip->irq_eoi(&desc->irq_data); +out: + if (on_pipeline_entry()) + desc->istate &= ~IRQS_EDGE; raw_spin_unlock(&desc->lock); } #endif @@ -900,6 +984,18 @@ void handle_percpu_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); + bool handled; + + if (on_pipeline_entry()) { + if (chip->irq_ack) + chip->irq_ack(&desc->irq_data); + handled = handle_oob_irq(desc); + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); + if (!handled && chip->irq_mask) + chip->irq_mask(&desc->irq_data); + return; + } /* * PER CPU interrupts are not serialized. Do not touch @@ -907,13 +1003,17 @@ */ __kstat_incr_irqs_this_cpu(desc); - if (chip->irq_ack) - chip->irq_ack(&desc->irq_data); - - handle_irq_event_percpu(desc); - - if (chip->irq_eoi) - chip->irq_eoi(&desc->irq_data); + if (irqs_pipelined()) { + handle_irq_event_percpu(desc); + if (chip->irq_unmask) + chip->irq_unmask(&desc->irq_data); + } else { + if (chip->irq_ack) + chip->irq_ack(&desc->irq_data); + handle_irq_event_percpu(desc); + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); + } } /** @@ -933,6 +1033,18 @@ struct irqaction *action = desc->action; unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; + bool handled; + + if (on_pipeline_entry()) { + if (chip->irq_ack) + chip->irq_ack(&desc->irq_data); + handled = handle_oob_irq(desc); + if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); + if (!handled && chip->irq_mask) + chip->irq_mask(&desc->irq_data); + return; + } /* * PER CPU interrupts are not serialized. Do not touch @@ -940,7 +1052,7 @@ */ __kstat_incr_irqs_this_cpu(desc); - if (chip->irq_ack) + if (!irqs_pipelined() && chip->irq_ack) chip->irq_ack(&desc->irq_data); if (likely(action)) { @@ -958,8 +1070,11 @@ enabled ? 
" and unmasked" : "", irq, cpu); } - if (chip->irq_eoi) - chip->irq_eoi(&desc->irq_data); + if (irqs_pipelined()) { + if (chip->irq_unmask) + chip->irq_unmask(&desc->irq_data); + } else if (chip->irq_eoi) + chip->irq_eoi(&desc->irq_data); } /** @@ -979,10 +1094,21 @@ unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; - __kstat_incr_irqs_this_cpu(desc); - if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); + + if (on_pipeline_entry()) { + handle_oob_irq(desc); + return; + } + + /* Trap spurious IPIs if pipelined. */ + if (irqs_pipelined() && !action) { + print_irq_desc(irq, desc); + return; + } + + __kstat_incr_irqs_this_cpu(desc); trace_irq_handler_entry(irq, action); res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); @@ -1076,6 +1202,7 @@ desc->handle_irq = handle; } + irq_settings_set_chained(desc); irq_settings_set_noprobe(desc); irq_settings_set_norequest(desc); irq_settings_set_nothread(desc); @@ -1251,8 +1378,17 @@ raw_spin_lock(&desc->lock); - if (!irq_may_run(desc)) + if (start_irq_flow() && !irq_may_run(desc)) goto out; + + if (on_pipeline_entry()) { + chip->irq_ack(&desc->irq_data); + if (handle_oob_irq(desc)) + chip->irq_eoi(&desc->irq_data); + else + mask_cond_eoi_irq(desc); + goto out_unlock; + } desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); @@ -1267,11 +1403,13 @@ } kstat_incr_irqs_this_cpu(desc); - if (desc->istate & IRQS_ONESHOT) - mask_irq(desc); + if (!irqs_pipelined()) { + if (desc->istate & IRQS_ONESHOT) + mask_irq(desc); - /* Start handling the irq */ - desc->irq_data.chip->irq_ack(&desc->irq_data); + /* Start handling the irq */ + chip->irq_ack(&desc->irq_data); + } handle_irq_event(desc); @@ -1282,6 +1420,7 @@ out: if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) chip->irq_eoi(&desc->irq_data); +out_unlock: raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); @@ -1301,10 +1440,21 @@ struct irq_chip *chip = desc->irq_data.chip; raw_spin_lock(&desc->lock); - mask_ack_irq(desc); - if (!irq_may_run(desc)) - goto out; + if (start_irq_flow()) { + mask_ack_irq(desc); + + if (!irq_may_run(desc)) + goto out; + } + + if (on_pipeline_entry()) { + if (handle_oob_irq(desc)) + chip->irq_eoi(&desc->irq_data); + else + cond_eoi_irq(desc); + goto out_unlock; + } desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); @@ -1319,7 +1469,7 @@ } kstat_incr_irqs_this_cpu(desc); - if (desc->istate & IRQS_ONESHOT) + if (!irqs_pipelined() && (desc->istate & IRQS_ONESHOT)) mask_irq(desc); handle_irq_event(desc); @@ -1331,6 +1481,7 @@ out: if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) chip->irq_eoi(&desc->irq_data); +out_unlock: raw_spin_unlock(&desc->lock); } EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); diff --git a/kernel/kernel/irq/cpuhotplug.c b/kernel/kernel/irq/cpuhotplug.c index cf8d4f7..867db10 100644 --- a/kernel/kernel/irq/cpuhotplug.c +++ b/kernel/kernel/irq/cpuhotplug.c @@ -156,6 +156,9 @@ { struct irq_desc *desc; unsigned int irq; + unsigned long flags; + + flags = hard_local_irq_save(); for_each_active_irq(irq) { bool affinity_broken; @@ -170,6 +173,8 @@ irq, smp_processor_id()); } } + + hard_local_irq_restore(flags); } static bool hk_should_isolate(struct irq_data *data, unsigned int cpu) diff --git a/kernel/kernel/irq/debug.h b/kernel/kernel/irq/debug.h index 8ccb326..40f7268 100644 --- a/kernel/kernel/irq/debug.h +++ b/kernel/kernel/irq/debug.h @@ -33,6 +33,8 @@ ___P(IRQ_NOREQUEST); ___P(IRQ_NOTHREAD); ___P(IRQ_NOAUTOEN); + ___P(IRQ_OOB); + ___P(IRQ_CHAINED); ___PS(IRQS_AUTODETECT); ___PS(IRQS_REPLAY); diff --git a/kernel/kernel/irq/dummychip.c 
b/kernel/kernel/irq/dummychip.c index 0b0cdf2..7bf8cbe 100644 --- a/kernel/kernel/irq/dummychip.c +++ b/kernel/kernel/irq/dummychip.c @@ -43,7 +43,7 @@ .irq_enable = noop, .irq_disable = noop, .irq_ack = ack_bad, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE, }; /* @@ -59,6 +59,6 @@ .irq_ack = noop, .irq_mask = noop, .irq_unmask = noop, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE, }; EXPORT_SYMBOL_GPL(dummy_irq_chip); diff --git a/kernel/kernel/irq/generic-chip.c b/kernel/kernel/irq/generic-chip.c index 79cb6d0..6ca043e 100644 --- a/kernel/kernel/irq/generic-chip.c +++ b/kernel/kernel/irq/generic-chip.c @@ -16,7 +16,7 @@ #include "internals.h" static LIST_HEAD(gc_list); -static DEFINE_RAW_SPINLOCK(gc_lock); +static DEFINE_HARD_SPINLOCK(gc_lock); /** * irq_gc_noop - NOOP function diff --git a/kernel/kernel/irq/handle.c b/kernel/kernel/irq/handle.c index 8806444..e4fe9c6 100644 --- a/kernel/kernel/irq/handle.c +++ b/kernel/kernel/irq/handle.c @@ -32,9 +32,16 @@ { unsigned int irq = irq_desc_get_irq(desc); + /* Let the in-band stage report the issue. */ + if (on_pipeline_entry()) { + ack_bad_irq(irq); + return; + } + print_irq_desc(irq, desc); kstat_incr_irqs_this_cpu(desc); - ack_bad_irq(irq); + if (!irqs_pipelined()) + ack_bad_irq(irq); } EXPORT_SYMBOL_GPL(handle_bad_irq); diff --git a/kernel/kernel/irq/internals.h b/kernel/kernel/irq/internals.h index e58342a..341c8f6 100644 --- a/kernel/kernel/irq/internals.h +++ b/kernel/kernel/irq/internals.h @@ -52,6 +52,7 @@ * IRQS_PENDING - irq is pending and replayed later * IRQS_SUSPENDED - irq is suspended * IRQS_NMI - irq line is used to deliver NMIs + * IRQS_EDGE - irq line received an edge event */ enum { IRQS_AUTODETECT = 0x00000001, @@ -64,6 +65,7 @@ IRQS_SUSPENDED = 0x00000800, IRQS_TIMINGS = 0x00001000, IRQS_NMI = 0x00002000, + IRQS_EDGE = 0x00004000, }; #include "debug.h" diff --git a/kernel/kernel/irq/irqdesc.c b/kernel/kernel/irq/irqdesc.c index 2f35de3..846c2c8 100644 --- a/kernel/kernel/irq/irqdesc.c +++ b/kernel/kernel/irq/irqdesc.c @@ -16,6 +16,7 @@ #include <linux/bitmap.h> #include <linux/irqdomain.h> #include <linux/sysfs.h> +#include <linux/irq_pipeline.h> #include "internals.h" @@ -453,6 +454,7 @@ * irq_sysfs_init() as well. */ irq_sysfs_del(desc); + uncache_irq_desc(irq); delete_irq_desc(irq); /* @@ -633,9 +635,12 @@ #endif /* !CONFIG_SPARSE_IRQ */ /** - * generic_handle_irq - Invoke the handler for a particular irq + * generic_handle_irq - Handle a particular irq * @irq: The irq number to handle * + * The handler is invoked, unless we are entering the interrupt + * pipeline, in which case the incoming IRQ is only scheduled for + * deferred delivery. */ int generic_handle_irq(unsigned int irq) { @@ -646,7 +651,7 @@ return -EINVAL; data = irq_desc_get_irq_data(desc); - if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data))) + if (WARN_ON_ONCE(!in_hard_irq() && handle_enforce_irqctx(data))) return -EPERM; generic_handle_irq_desc(desc); diff --git a/kernel/kernel/irq/irqptorture.c b/kernel/kernel/irq/irqptorture.c new file mode 100644 index 0000000..2518c47 --- /dev/null +++ b/kernel/kernel/irq/irqptorture.c @@ -0,0 +1,254 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>. 
+ */ +#include <linux/kernel.h> +#include <linux/torture.h> +#include <linux/printk.h> +#include <linux/delay.h> +#include <linux/tick.h> +#include <linux/smp.h> +#include <linux/cpumask.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irq_pipeline.h> +#include <linux/stop_machine.h> +#include <linux/irq_work.h> +#include <linux/completion.h> +#include <linux/slab.h> +#include "settings.h" + +static void torture_event_handler(struct clock_event_device *dev) +{ + /* + * We are running on the oob stage, in NMI-like mode. Schedule + * a tick on the proxy device to satisfy the corresponding + * timing request asap. + */ + tick_notify_proxy(); +} + +static void setup_proxy(struct clock_proxy_device *dev) +{ + dev->handle_oob_event = torture_event_handler; +} + +static int start_tick_takeover_test(void) +{ + return tick_install_proxy(setup_proxy, cpu_online_mask); +} + +static void stop_tick_takeover_test(void) +{ + tick_uninstall_proxy(cpu_online_mask); +} + +struct stop_machine_p_data { + int origin_cpu; + cpumask_var_t disable_mask; +}; + +static int stop_machine_handler(void *arg) +{ + struct stop_machine_p_data *p = arg; + int cpu = raw_smp_processor_id(); + + /* + * The stop_machine() handler must run with hard + * IRQs off, note the current state in the result mask. + */ + if (hard_irqs_disabled()) + cpumask_set_cpu(cpu, p->disable_mask); + + if (cpu != p->origin_cpu) + pr_alert("irq_pipeline" TORTURE_FLAG + " CPU%d responds to stop_machine()\n", cpu); + return 0; +} + +/* + * We test stop_machine() as a way to validate IPI handling in a + * pipelined interrupt context. + */ +static int test_stop_machine(void) +{ + struct stop_machine_p_data d; + cpumask_var_t tmp_mask; + int ret = -ENOMEM, cpu; + + if (!zalloc_cpumask_var(&d.disable_mask, GFP_KERNEL)) { + WARN_ON(1); + return ret; + } + + if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) { + WARN_ON(1); + goto fail; + } + + ret = -EINVAL; + d.origin_cpu = raw_smp_processor_id(); + pr_alert("irq_pipeline" TORTURE_FLAG + " CPU%d initiates stop_machine()\n", + d.origin_cpu); + + ret = stop_machine(stop_machine_handler, &d, cpu_online_mask); + WARN_ON(ret); + if (ret) + goto fail; + + /* + * Check whether all handlers did run with hard IRQs off. If + * some of them did not, then we have a problem with the stop + * IRQ delivery. 
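+	 *
+	 * (cpumask_xor() below leaves a bit set for every online CPU
+	 * whose handler did not run with hard irqs off, i.e. exactly
+	 * the set we complain about.)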
+	 */
+	cpumask_xor(tmp_mask, cpu_online_mask, d.disable_mask);
+	if (!cpumask_empty(tmp_mask)) {
+		for_each_cpu(cpu, tmp_mask)
+			pr_alert("irq_pipeline" TORTURE_FLAG
+				" CPU%d: hard IRQs ON in stop_machine()"
+				" handler!\n", cpu);
+	}
+
+	free_cpumask_var(tmp_mask);
+fail:
+	free_cpumask_var(d.disable_mask);
+
+	return ret;
+}
+
+static struct irq_work_tester {
+	struct irq_work work;
+	struct completion done;
+} irq_work_tester;
+
+static void irq_work_handler(struct irq_work *work)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (!running_inband()) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			" CPU%d: irq_work handler not running on"
+			" in-band stage?!\n", cpu);
+		return;
+	}
+
+	if (work != &irq_work_tester.work)
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			" CPU%d: irq_work handler received broken"
+			" arg?!\n", cpu);
+	else {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			" CPU%d: irq_work handled\n", cpu);
+		complete(&irq_work_tester.done);
+	}
+}
+
+static int trigger_oob_work(void *arg)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (!running_oob()) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			" CPU%d: escalated request not running on"
+			" oob stage?!\n", cpu);
+		return -EINVAL;
+	}
+
+	if ((struct irq_work_tester *)arg != &irq_work_tester) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			" CPU%d: escalation handler received broken"
+			" arg?!\n", cpu);
+		return -EINVAL;
+	}
+
+	irq_work_queue(&irq_work_tester.work);
+	pr_alert("irq_pipeline" TORTURE_FLAG
+		" CPU%d: stage escalation request works\n",
+		cpu);
+
+	return 0;
+}
+
+static int test_interstage_work_injection(void)
+{
+	struct irq_work_tester *p = &irq_work_tester;
+	int ret, cpu = raw_smp_processor_id();
+	unsigned long rem;
+
+	init_completion(&p->done);
+	init_irq_work(&p->work, irq_work_handler);
+
+	/* Trigger over the in-band stage. */
+	irq_work_queue(&p->work);
+	rem = wait_for_completion_timeout(&p->done, HZ / 10);
+	if (!rem) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			" CPU%d: irq_work trigger from in-band stage not handled!\n",
+			cpu);
+		return -EINVAL;
+	}
+
+	pr_alert("irq_pipeline" TORTURE_FLAG
+		" CPU%d: in-band->in-band irq_work trigger works\n", cpu);
+
+	reinit_completion(&p->done);
+
+	/* Now try over the oob stage. */
+	ret = run_oob_call(trigger_oob_work, p);
+	if (ret)
+		return ret;
+
+	rem = wait_for_completion_timeout(&p->done, HZ / 10);
+	if (!rem) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			" CPU%d: irq_work trigger from oob"
+			" stage not handled!\n", cpu);
+		return -EINVAL;
+	}
+
+	pr_alert("irq_pipeline" TORTURE_FLAG
+		" CPU%d: oob->in-band irq_work trigger works\n",
+		cpu);
+
+	return 0;
+}
+
+static int __init irqp_torture_init(void)
+{
+	int ret;
+
+	pr_info("Starting IRQ pipeline tests...");
+
+	ret = enable_oob_stage("torture");
+	if (ret) {
+		if (ret == -EBUSY)
+			pr_alert("irq_pipeline" TORTURE_FLAG
+				" won't run, oob stage '%s' is already installed",
+				oob_stage.name);
+
+		return ret;
+	}
+
+	ret = test_stop_machine();
+	if (ret)
+		goto out;
+
+	ret = start_tick_takeover_test();
+	if (ret)
+		goto out;
+
+	ret = test_interstage_work_injection();
+	if (!ret)
+		msleep(1000);
+
+	stop_tick_takeover_test();
+out:
+	disable_oob_stage();
+	pr_info("IRQ pipeline tests %s.", ret ? "FAILED" : "OK");
+
+	return 0;
+}
+late_initcall(irqp_torture_init);
diff --git a/kernel/kernel/irq/manage.c b/kernel/kernel/irq/manage.c
index 76da8de..4b06f5a 100644
--- a/kernel/kernel/irq/manage.c
+++ b/kernel/kernel/irq/manage.c
@@ -10,6 +10,7 @@
 
 #include <linux/irq.h>
 #include <linux/kthread.h>
+#include <linux/kconfig.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
@@ -829,6 +830,50 @@
 }
 EXPORT_SYMBOL(irq_set_irq_wake);
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+/**
+ *	irq_switch_oob - Control out-of-band setting for a registered IRQ descriptor
+ *	@irq: interrupt to control
+ *	@on: true to enable, false to disable out-of-band handling
+ *
+ *	Enable/disable out-of-band handling for an IRQ. At least one
+ *	action must have been previously registered for such an
+ *	interrupt.
+ *
+ *	The previously registered action(s) need not bear the
+ *	IRQF_OOB flag for the IRQ to be switched to out-of-band
+ *	handling. This call enables switching pre-installed IRQs from
+ *	in-band to out-of-band handling.
+ *
+ *	NOTE: This routine affects all action handlers sharing the
+ *	IRQ.
+ */
+int irq_switch_oob(unsigned int irq, bool on)
+{
+	struct irq_desc *desc;
+	unsigned long flags;
+	int ret = 0;
+
+	desc = irq_get_desc_lock(irq, &flags, 0);
+	if (!desc)
+		return -EINVAL;
+
+	if (!desc->action)
+		ret = -EINVAL;
+	else if (on)
+		irq_settings_set_oob(desc);
+	else
+		irq_settings_clr_oob(desc);
+
+	irq_put_desc_unlock(desc, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(irq_switch_oob);
+
+#endif /* CONFIG_IRQ_PIPELINE */
+
 /*
  * Internal function that tells the architecture code whether a
  * particular irq has been exclusively allocated or is available
@@ -845,7 +890,8 @@
 
 	if (irq_settings_can_request(desc)) {
 		if (!desc->action ||
-		    irqflags & desc->action->flags & IRQF_SHARED)
+		    ((irqflags & desc->action->flags & IRQF_SHARED) &&
+		     !((irqflags ^ desc->action->flags) & IRQF_OOB)))
 			canrequest = 1;
 	}
 	irq_put_desc_unlock(desc, flags);
@@ -1419,6 +1465,21 @@
 
 	new->irq = irq;
 
+	ret = -EINVAL;
+	/*
+	 * Out-of-band interrupts can be shared but not threaded. We
+	 * silently ignore the OOB setting if interrupt pipelining is
+	 * disabled.
+	 */
+	if (!irqs_pipelined())
+		new->flags &= ~IRQF_OOB;
+	else if (new->flags & IRQF_OOB) {
+		if (new->thread_fn)
+			goto out_mput;
+		new->flags |= IRQF_NO_THREAD;
+		new->flags &= ~IRQF_ONESHOT;
+	}
+
 	/*
 	 * If the trigger type is not specified by the caller,
 	 * then use the default for this interrupt.
@@ -1432,10 +1493,8 @@
 	 */
 	nested = irq_settings_is_nested_thread(desc);
 	if (nested) {
-		if (!new->thread_fn) {
-			ret = -EINVAL;
+		if (!new->thread_fn)
 			goto out_mput;
-		}
 		/*
 		 * Replace the primary handler which was provided from
 		 * the driver for non nested interrupt handling by the
@@ -1519,7 +1578,7 @@
 		 * the same type (level, edge, polarity). So both flag
 		 * fields must have IRQF_SHARED set and the bits which
 		 * set the trigger type must match. Also all must
-		 * agree on ONESHOT.
+		 * agree on ONESHOT and OOB.
 		 * Interrupt lines used for NMIs cannot be shared.
*/ unsigned int oldtype; @@ -1544,7 +1603,7 @@ if (!((old->flags & new->flags) & IRQF_SHARED) || (oldtype != (new->flags & IRQF_TRIGGER_MASK)) || - ((old->flags ^ new->flags) & IRQF_ONESHOT)) + ((old->flags ^ new->flags) & (IRQF_OOB|IRQF_ONESHOT))) goto mismatch; /* All handlers must agree on per-cpuness */ @@ -1661,6 +1720,9 @@ if (new->flags & IRQF_ONESHOT) desc->istate |= IRQS_ONESHOT; + + if (new->flags & IRQF_OOB) + irq_settings_set_oob(desc); /* Exclude IRQ from balancing if requested */ if (new->flags & IRQF_NOBALANCING) { @@ -1809,6 +1871,8 @@ irq_settings_clr_disable_unlazy(desc); /* Only shutdown. Deactivate after synchronize_hardirq() */ irq_shutdown(desc); + /* Turn off OOB handling (after shutdown). */ + irq_settings_clr_oob(desc); } #ifdef CONFIG_SMP @@ -1845,14 +1909,15 @@ #ifdef CONFIG_DEBUG_SHIRQ /* - * It's a shared IRQ -- the driver ought to be prepared for an IRQ - * event to happen even now it's being freed, so let's make sure that - * is so by doing an extra call to the handler .... + * It's a shared IRQ (with in-band handler) -- the driver + * ought to be prepared for an IRQ event to happen even now + * it's being freed, so let's make sure that is so by doing an + * extra call to the handler .... * * ( We do this after actually deregistering it, to make sure that a * 'real' IRQ doesn't run in parallel with our fake. ) */ - if (action->flags & IRQF_SHARED) { + if ((action->flags & (IRQF_SHARED|IRQF_OOB)) == IRQF_SHARED) { local_irq_save(flags); action->handler(irq, dev_id); local_irq_restore(flags); @@ -2473,7 +2538,7 @@ * __request_percpu_irq - allocate a percpu interrupt line * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. - * @flags: Interrupt type flags (IRQF_TIMER only) + * @flags: Interrupt type flags (IRQF_TIMER and/or IRQF_OOB only) * @devname: An ascii name for the claiming device * @dev_id: A percpu cookie passed back to the handler function * @@ -2502,7 +2567,7 @@ !irq_settings_is_per_cpu_devid(desc)) return -EINVAL; - if (flags && flags != IRQF_TIMER) + if (flags & ~(IRQF_TIMER|IRQF_OOB)) return -EINVAL; action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); diff --git a/kernel/kernel/irq/msi.c b/kernel/kernel/irq/msi.c index b47d95b..8fc1d87 100644 --- a/kernel/kernel/irq/msi.c +++ b/kernel/kernel/irq/msi.c @@ -272,6 +272,9 @@ struct irq_chip *chip = info->chip; BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask); + WARN_ONCE(IS_ENABLED(CONFIG_IRQ_PIPELINE) && + (chip->flags & IRQCHIP_PIPELINE_SAFE) == 0, + "MSI domain irqchip %s is not pipeline-safe!", chip->name); if (!chip->irq_set_affinity) chip->irq_set_affinity = msi_domain_set_affinity; } diff --git a/kernel/kernel/irq/pipeline.c b/kernel/kernel/irq/pipeline.c new file mode 100644 index 0000000..f64d731 --- /dev/null +++ b/kernel/kernel/irq/pipeline.c @@ -0,0 +1,1764 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>. 
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irq_pipeline.h>
+#include <linux/irq_work.h>
+#include <linux/jhash.h>
+#include <linux/debug_locks.h>
+#include <linux/dovetail.h>
+#include <dovetail/irq.h>
+#include <trace/events/irq.h>
+#include "internals.h"
+
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+#define trace_on_debug
+#else
+#define trace_on_debug notrace
+#endif
+
+struct irq_stage inband_stage = {
+	.name = "Linux",
+};
+EXPORT_SYMBOL_GPL(inband_stage);
+
+struct irq_stage oob_stage;
+EXPORT_SYMBOL_GPL(oob_stage);
+
+struct irq_domain *synthetic_irq_domain;
+EXPORT_SYMBOL_GPL(synthetic_irq_domain);
+
+bool irq_pipeline_oopsing;
+EXPORT_SYMBOL_GPL(irq_pipeline_oopsing);
+
+bool irq_pipeline_active;
+EXPORT_SYMBOL_GPL(irq_pipeline_active);
+
+#define IRQ_L1_MAPSZ	BITS_PER_LONG
+#define IRQ_L2_MAPSZ	(BITS_PER_LONG * BITS_PER_LONG)
+#define IRQ_FLAT_MAPSZ	DIV_ROUND_UP(IRQ_BITMAP_BITS, BITS_PER_LONG)
+
+#if IRQ_FLAT_MAPSZ > IRQ_L2_MAPSZ
+#define __IRQ_STAGE_MAP_LEVELS	4	/* up to 4/16M vectors */
+#elif IRQ_FLAT_MAPSZ > IRQ_L1_MAPSZ
+#define __IRQ_STAGE_MAP_LEVELS	3	/* up to 64/256M vectors */
+#else
+#define __IRQ_STAGE_MAP_LEVELS	2	/* up to 1024/4096 vectors */
+#endif
+
+struct irq_event_map {
+#if __IRQ_STAGE_MAP_LEVELS >= 3
+	unsigned long index_1[IRQ_L1_MAPSZ];
+#if __IRQ_STAGE_MAP_LEVELS >= 4
+	unsigned long index_2[IRQ_L2_MAPSZ];
+#endif
+#endif
+	unsigned long flat[IRQ_FLAT_MAPSZ];
+};
+
+#ifdef CONFIG_SMP
+
+static struct irq_event_map bootup_irq_map __initdata;
+
+static DEFINE_PER_CPU(struct irq_event_map, irq_map_array[2]);
+
+DEFINE_PER_CPU(struct irq_pipeline_data, irq_pipeline) = {
+	.stages = {
+		[0] = {
+			.log = {
+				.map = &bootup_irq_map,
+			},
+			.stage = &inband_stage,
+		},
+	},
+};
+
+#else /* !CONFIG_SMP */
+
+static struct irq_event_map inband_irq_map;
+
+static struct irq_event_map oob_irq_map;
+
+DEFINE_PER_CPU(struct irq_pipeline_data, irq_pipeline) = {
+	.stages = {
+		[0] = {
+			.log = {
+				.map = &inband_irq_map,
+			},
+			.stage = &inband_stage,
+		},
+		[1] = {
+			.log = {
+				.map = &oob_irq_map,
+			},
+		},
+	},
+};
+
+#endif /* !CONFIG_SMP */
+
+EXPORT_PER_CPU_SYMBOL(irq_pipeline);
+
+static void sirq_noop(struct irq_data *data) { }
+
+/* Virtual interrupt controller for synthetic IRQs. */
+static struct irq_chip sirq_chip = {
+	.name		= "SIRQC",
+	.irq_enable	= sirq_noop,
+	.irq_disable	= sirq_noop,
+	.flags		= IRQCHIP_PIPELINE_SAFE | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int sirq_map(struct irq_domain *d, unsigned int irq,
+		    irq_hw_number_t hwirq)
+{
+	irq_set_percpu_devid(irq);
+	irq_set_chip_and_handler(irq, &sirq_chip, handle_synthetic_irq);
+
+	return 0;
+}
+
+static struct irq_domain_ops sirq_domain_ops = {
+	.map	= sirq_map,
+};
+
+#ifdef CONFIG_SPARSE_IRQ
+/*
+ * The performance of the radix tree in sparse mode gets really ugly
+ * under mm stress on some hw; use a local descriptor cache to ease
+ * the pain.
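+ *
+ * The cache is keyed by a Jenkins hash of the IRQ number: a miss
+ * falls back to a regular irq_to_desc() lookup, then refills the
+ * matching slot.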
+ */
+#define DESC_CACHE_SZ  128
+
+static struct irq_desc *desc_cache[DESC_CACHE_SZ] __cacheline_aligned;
+
+static inline u32 hash_irq(unsigned int irq)
+{
+	return jhash(&irq, sizeof(irq), irq) % DESC_CACHE_SZ;
+}
+
+static __always_inline
+struct irq_desc *irq_to_cached_desc(unsigned int irq)
+{
+	int hval = hash_irq(irq);
+	struct irq_desc *desc = desc_cache[hval];
+
+	if (unlikely(desc == NULL || irq_desc_get_irq(desc) != irq)) {
+		desc = irq_to_desc(irq);
+		desc_cache[hval] = desc;
+	}
+
+	return desc;
+}
+
+void uncache_irq_desc(unsigned int irq)
+{
+	int hval = hash_irq(irq);
+
+	desc_cache[hval] = NULL;
+}
+
+#else
+
+static struct irq_desc *irq_to_cached_desc(unsigned int irq)
+{
+	return irq_to_desc(irq);
+}
+
+#endif
+
+/**
+ *	handle_synthetic_irq -  synthetic irq handler
+ *	@desc:	the interrupt description structure for this irq
+ *
+ *	Handles synthetic interrupts flowing down the IRQ pipeline
+ *	with per-CPU semantics.
+ *
+ *	CAUTION: synthetic IRQs may be used to map hardware-generated
+ *	events (e.g. IPIs or traps), so we must start handling them
+ *	as common interrupts.
+ */
+void handle_synthetic_irq(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irqaction *action;
+	irqreturn_t ret;
+	void *dev_id;
+
+	if (on_pipeline_entry()) {
+		handle_oob_irq(desc);
+		return;
+	}
+
+	action = desc->action;
+	if (action == NULL) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING
+			       "CPU%d: WARNING: synthetic IRQ%d has no action.\n",
+			       smp_processor_id(), irq);
+		return;
+	}
+
+	__kstat_incr_irqs_this_cpu(desc);
+	trace_irq_handler_entry(irq, action);
+	dev_id = raw_cpu_ptr(action->percpu_dev_id);
+	ret = action->handler(irq, dev_id);
+	trace_irq_handler_exit(irq, action, ret);
+}
+
+void sync_irq_stage(struct irq_stage *top)
+{
+	struct irq_stage_data *p;
+	struct irq_stage *stage;
+
+	/* We must enter over the inband stage with hardirqs off. */
+	if (irq_pipeline_debug()) {
+		WARN_ON_ONCE(!hard_irqs_disabled());
+		WARN_ON_ONCE(current_irq_stage != &inband_stage);
+	}
+
+	stage = top;
+
+	for (;;) {
+		if (stage == &inband_stage) {
+			if (test_inband_stall())
+				break;
+		} else {
+			if (test_oob_stall())
+				break;
+		}
+
+		p = this_staged(stage);
+		if (stage_irqs_pending(p)) {
+			if (stage == &inband_stage)
+				sync_current_irq_stage();
+			else {
+				/* Switch to oob before synchronizing. */
+				switch_oob(p);
+				sync_current_irq_stage();
+				/* Then back to the inband stage. */
+				switch_inband(this_inband_staged());
+			}
+		}
+
+		if (stage == &inband_stage)
+			break;
+
+		stage = &inband_stage;
+	}
+}
+
+void synchronize_pipeline(void) /* hardirqs off */
+{
+	struct irq_stage *top = &oob_stage;
+	int stalled = test_oob_stall();
+
+	if (unlikely(!oob_stage_present())) {
+		top = &inband_stage;
+		stalled = test_inband_stall();
+	}
+
+	if (current_irq_stage != top)
+		sync_irq_stage(top);
+	else if (!stalled)
+		sync_current_irq_stage();
+}
+
+static void __inband_irq_enable(void)
+{
+	struct irq_stage_data *p;
+	unsigned long flags;
+
+	check_inband_stage();
+
+	flags = hard_local_irq_save();
+
+	unstall_inband_nocheck();
+
+	p = this_inband_staged();
+	if (unlikely(stage_irqs_pending(p) && !in_pipeline())) {
+		sync_current_irq_stage();
+		hard_local_irq_restore(flags);
+		preempt_check_resched();
+	} else {
+		hard_local_irq_restore(flags);
+	}
+}
+
+/**
+ *	inband_irq_enable - enable interrupts for the inband stage
+ *
+ *	Enable interrupts for the inband stage, allowing interrupts to
+ *	preempt the in-band code. If in-band IRQs are pending for the
If in-band IRQs are pending for the + * inband stage in the per-CPU log at the time of this call, they + * are played back. + * + * The caller is expected to tell the tracer about the change, by + * calling trace_hardirqs_on(). + */ +notrace void inband_irq_enable(void) +{ + /* + * We are NOT supposed to enter this code with hard IRQs off. + * If we do, then the caller might be wrongly assuming that + * invoking local_irq_enable() implies enabling hard + * interrupts like the legacy I-pipe did, which is not the + * case anymore. Relax this requirement when oopsing, since + * the kernel may be in a weird state. + */ + WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()); + __inband_irq_enable(); +} +EXPORT_SYMBOL(inband_irq_enable); + +/** + * inband_irq_disable - disable interrupts for the inband stage + * + * Disable interrupts for the inband stage, disabling in-band + * interrupts. Out-of-band interrupts can still be taken and + * delivered to their respective handlers though. + */ +notrace void inband_irq_disable(void) +{ + check_inband_stage(); + stall_inband_nocheck(); +} +EXPORT_SYMBOL(inband_irq_disable); + +/** + * inband_irqs_disabled - test the virtual interrupt state + * + * Returns non-zero if interrupts are currently disabled for the + * inband stage, zero otherwise. + * + * May be used from the oob stage too (e.g. for tracing + * purpose). + */ +noinstr int inband_irqs_disabled(void) +{ + return test_inband_stall(); +} +EXPORT_SYMBOL(inband_irqs_disabled); + +/** + * inband_irq_save - test and disable (virtual) interrupts + * + * Save the virtual interrupt state then disables interrupts for + * the inband stage. + * + * Returns the original interrupt state. + */ +trace_on_debug unsigned long inband_irq_save(void) +{ + check_inband_stage(); + return test_and_stall_inband_nocheck(); +} +EXPORT_SYMBOL(inband_irq_save); + +/** + * inband_irq_restore - restore the (virtual) interrupt state + * @x: Interrupt state to restore + * + * Restore the virtual interrupt state from x. If the inband + * stage is unstalled as a consequence of this operation, any + * interrupt pending for the inband stage in the per-CPU log is + * played back. + */ +trace_on_debug void inband_irq_restore(unsigned long flags) +{ + if (flags) + inband_irq_disable(); + else + __inband_irq_enable(); +} +EXPORT_SYMBOL(inband_irq_restore); + +/** + * oob_irq_enable - enable interrupts in the CPU + * + * Enable interrupts in the CPU, allowing out-of-band interrupts + * to preempt any code. If out-of-band IRQs are pending in the + * per-CPU log for the oob stage at the time of this call, they + * are played back. + */ +trace_on_debug void oob_irq_enable(void) +{ + struct irq_stage_data *p; + + hard_local_irq_disable(); + + unstall_oob(); + + p = this_oob_staged(); + if (unlikely(stage_irqs_pending(p))) + synchronize_pipeline(); + + hard_local_irq_enable(); +} +EXPORT_SYMBOL(oob_irq_enable); + +/** + * oob_irq_restore - restore the hardware interrupt state + * @x: Interrupt state to restore + * + * Restore the harware interrupt state from x. If the oob stage + * is unstalled as a consequence of this operation, any interrupt + * pending for the oob stage in the per-CPU log is played back + * prior to turning IRQs on. + * + * NOTE: Stalling the oob stage must always be paired with + * disabling hard irqs and conversely when calling + * oob_irq_restore(), otherwise the latter would badly misbehave + * in unbalanced conditions. 
+ */ +trace_on_debug void __oob_irq_restore(unsigned long flags) /* hw interrupt off */ +{ + struct irq_stage_data *p = this_oob_staged(); + + check_hard_irqs_disabled(); + + if (!flags) { + unstall_oob(); + if (unlikely(stage_irqs_pending(p))) + synchronize_pipeline(); + hard_local_irq_enable(); + } +} +EXPORT_SYMBOL(__oob_irq_restore); + +/** + * stage_disabled - test the interrupt state of the current stage + * + * Returns non-zero if interrupts are currently disabled for the + * current interrupt stage, zero otherwise. + * In other words, returns non-zero either if: + * - interrupts are disabled for the OOB context (i.e. hard disabled), + * - the inband stage is current and inband interrupts are disabled. + */ +noinstr bool stage_disabled(void) +{ + bool ret = true; + + if (!hard_irqs_disabled()) { + ret = false; + if (running_inband()) + ret = test_inband_stall(); + } + + return ret; +} +EXPORT_SYMBOL_GPL(stage_disabled); + +/** + * test_and_lock_stage - test and disable interrupts for the current stage + * @irqsoff: Pointer to boolean denoting stage_disabled() + * on entry + * + * Fully disables interrupts for the current stage. When the + * inband stage is current, the stall bit is raised and hardware + * IRQs are masked as well. Only the latter operation is + * performed when the oob stage is current. + * + * Returns the combined interrupt state on entry including the + * real/hardware (in CPU) and virtual (inband stage) states. For + * this reason, [test_and_]lock_stage() must be paired with + * unlock_stage() exclusively. The combined irq state returned by + * the former may NOT be passed to hard_local_irq_restore(). + * + * The interrupt state of the current stage in the return value + * (i.e. stall bit for the inband stage, hardware interrupt bit + * for the oob stage) must be testable using + * arch_irqs_disabled_flags(). + * + * Notice that test_and_lock_stage(), unlock_stage() are raw + * level ops, which substitute to raw_local_irq_save(), + * raw_local_irq_restore() in lockdep code. Therefore, changes to + * the in-band stall bit must not be propagated to the tracing + * core (i.e. no trace_hardirqs_*() annotations). + */ +noinstr unsigned long test_and_lock_stage(int *irqsoff) +{ + unsigned long flags; + int stalled, dummy; + + if (irqsoff == NULL) + irqsoff = &dummy; + + /* + * Combine the hard irq flag and the stall bit into a single + * state word. We need to fill in the stall bit only if the + * inband stage is current, otherwise it is not relevant. + */ + flags = hard_local_irq_save(); + *irqsoff = hard_irqs_disabled_flags(flags); + if (running_inband()) { + stalled = test_and_stall_inband_nocheck(); + flags = irqs_merge_flags(flags, stalled); + if (stalled) + *irqsoff = 1; + } + + /* + * CAUTION: don't ever pass this verbatim to + * hard_local_irq_restore(). Only unlock_stage() knows how to + * decode and use a combined state word. + */ + return flags; +} +EXPORT_SYMBOL_GPL(test_and_lock_stage); + +/** + * unlock_stage - restore interrupts for the current stage + * @flags: Combined interrupt state to restore as received from + * test_and_lock_stage() + * + * Restore the virtual interrupt state if the inband stage is + * current, and the hardware interrupt state unconditionally. + * The per-CPU log is not played for any stage. 
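+ *
+ *	Minimal usage sketch:
+ *
+ *		int irqsoff;
+ *		unsigned long flags;
+ *
+ *		flags = test_and_lock_stage(&irqsoff);
+ *		... critical section, current stage fully locked ...
+ *		unlock_stage(flags);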
+ */ +noinstr void unlock_stage(unsigned long irqstate) +{ + unsigned long flags = irqstate; + int stalled; + + WARN_ON_ONCE(irq_pipeline_debug_locking() && !hard_irqs_disabled()); + + if (running_inband()) { + flags = irqs_split_flags(irqstate, &stalled); + if (!stalled) + unstall_inband_nocheck(); + } + + /* + * The hardware interrupt bit is the only flag which may be + * present in the combined state at this point, all other + * status bits have been cleared by irqs_merge_flags(), so + * don't ever try to reload the hardware status register with + * such value directly! + */ + if (!hard_irqs_disabled_flags(flags)) + hard_local_irq_enable(); +} +EXPORT_SYMBOL_GPL(unlock_stage); + +/** + * sync_inband_irqs - Synchronize the inband log + * + * Play any deferred interrupt which might have been logged for the + * in-band stage while running with hard irqs on but stalled. + * + * Called from the unstalled in-band stage. Returns with hard irqs off. + */ +void sync_inband_irqs(void) +{ + struct irq_stage_data *p; + + check_inband_stage(); + WARN_ON_ONCE(irq_pipeline_debug() && irqs_disabled()); + + if (!hard_irqs_disabled()) + hard_local_irq_disable(); + + p = this_inband_staged(); + if (unlikely(stage_irqs_pending(p))) { + /* Do not pile up preemption frames. */ + preempt_disable_notrace(); + sync_current_irq_stage(); + preempt_enable_no_resched_notrace(); + } +} + +static inline bool irq_post_check(struct irq_stage *stage, unsigned int irq) +{ + if (irq_pipeline_debug()) { + if (WARN_ONCE(!hard_irqs_disabled(), + "hard irqs on posting IRQ%u to %s\n", + irq, stage->name)) + return true; + if (WARN_ONCE(irq >= IRQ_BITMAP_BITS, + "cannot post invalid IRQ%u to %s\n", + irq, stage->name)) + return true; + } + + return false; +} + +#if __IRQ_STAGE_MAP_LEVELS == 4 + +/* Must be called hard irqs off. 
*/ +void irq_post_stage(struct irq_stage *stage, unsigned int irq) +{ + struct irq_stage_data *p = this_staged(stage); + int l0b, l1b, l2b; + + if (irq_post_check(stage, irq)) + return; + + l0b = irq / (BITS_PER_LONG * BITS_PER_LONG * BITS_PER_LONG); + l1b = irq / (BITS_PER_LONG * BITS_PER_LONG); + l2b = irq / BITS_PER_LONG; + + __set_bit(irq, p->log.map->flat); + __set_bit(l2b, p->log.map->index_2); + __set_bit(l1b, p->log.map->index_1); + __set_bit(l0b, &p->log.index_0); +} +EXPORT_SYMBOL_GPL(irq_post_stage); + +#define ltob_1(__n) ((__n) * BITS_PER_LONG) +#define ltob_2(__n) (ltob_1(__n) * BITS_PER_LONG) +#define ltob_3(__n) (ltob_2(__n) * BITS_PER_LONG) + +static inline int pull_next_irq(struct irq_stage_data *p) +{ + unsigned long l0m, l1m, l2m, l3m; + int l0b, l1b, l2b, l3b; + unsigned int irq; + + l0m = p->log.index_0; + if (l0m == 0) + return -1; + l0b = __ffs(l0m); + irq = ltob_3(l0b); + + l1m = p->log.map->index_1[l0b]; + if (unlikely(l1m == 0)) { + WARN_ON_ONCE(1); + return -1; + } + l1b = __ffs(l1m); + irq += ltob_2(l1b); + + l2m = p->log.map->index_2[ltob_1(l0b) + l1b]; + if (unlikely(l2m == 0)) { + WARN_ON_ONCE(1); + return -1; + } + l2b = __ffs(l2m); + irq += ltob_1(l2b); + + l3m = p->log.map->flat[ltob_2(l0b) + ltob_1(l1b) + l2b]; + if (unlikely(l3m == 0)) + return -1; + l3b = __ffs(l3m); + irq += l3b; + + __clear_bit(irq, p->log.map->flat); + if (p->log.map->flat[irq / BITS_PER_LONG] == 0) { + __clear_bit(l2b, &p->log.map->index_2[ltob_1(l0b) + l1b]); + if (p->log.map->index_2[ltob_1(l0b) + l1b] == 0) { + __clear_bit(l1b, &p->log.map->index_1[l0b]); + if (p->log.map->index_1[l0b] == 0) + __clear_bit(l0b, &p->log.index_0); + } + } + + return irq; +} + +#elif __IRQ_STAGE_MAP_LEVELS == 3 + +/* Must be called hard irqs off. */ +void irq_post_stage(struct irq_stage *stage, unsigned int irq) +{ + struct irq_stage_data *p = this_staged(stage); + int l0b, l1b; + + if (irq_post_check(stage, irq)) + return; + + l0b = irq / (BITS_PER_LONG * BITS_PER_LONG); + l1b = irq / BITS_PER_LONG; + + __set_bit(irq, p->log.map->flat); + __set_bit(l1b, p->log.map->index_1); + __set_bit(l0b, &p->log.index_0); +} +EXPORT_SYMBOL_GPL(irq_post_stage); + +static inline int pull_next_irq(struct irq_stage_data *p) +{ + unsigned long l0m, l1m, l2m; + int l0b, l1b, l2b, irq; + + l0m = p->log.index_0; + if (unlikely(l0m == 0)) + return -1; + + l0b = __ffs(l0m); + l1m = p->log.map->index_1[l0b]; + if (l1m == 0) + return -1; + + l1b = __ffs(l1m) + l0b * BITS_PER_LONG; + l2m = p->log.map->flat[l1b]; + if (unlikely(l2m == 0)) { + WARN_ON_ONCE(1); + return -1; + } + + l2b = __ffs(l2m); + irq = l1b * BITS_PER_LONG + l2b; + + __clear_bit(irq, p->log.map->flat); + if (p->log.map->flat[l1b] == 0) { + __clear_bit(l1b, p->log.map->index_1); + if (p->log.map->index_1[l0b] == 0) + __clear_bit(l0b, &p->log.index_0); + } + + return irq; +} + +#else /* __IRQ_STAGE_MAP_LEVELS == 2 */ + +/* Must be called hard irqs off. 
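+ *
+ * Same bookkeeping with the 2-level map (again assuming
+ * BITS_PER_LONG == 64): posting IRQ 70 sets bit 70 in the flat
+ * map (bit 6 of word 1) and bit 1 (70 / 64) in index_0;
+ * pull_next_irq() then recovers 1 * 64 + 6 = 70.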
+ */
+void irq_post_stage(struct irq_stage *stage, unsigned int irq)
+{
+	struct irq_stage_data *p = this_staged(stage);
+	int l0b = irq / BITS_PER_LONG;
+
+	if (irq_post_check(stage, irq))
+		return;
+
+	__set_bit(irq, p->log.map->flat);
+	__set_bit(l0b, &p->log.index_0);
+}
+EXPORT_SYMBOL_GPL(irq_post_stage);
+
+static inline int pull_next_irq(struct irq_stage_data *p)
+{
+	unsigned long l0m, l1m;
+	int l0b, l1b;
+
+	l0m = p->log.index_0;
+	if (l0m == 0)
+		return -1;
+
+	l0b = __ffs(l0m);
+	l1m = p->log.map->flat[l0b];
+	if (unlikely(l1m == 0)) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	l1b = __ffs(l1m);
+	__clear_bit(l1b, &p->log.map->flat[l0b]);
+	if (p->log.map->flat[l0b] == 0)
+		__clear_bit(l0b, &p->log.index_0);
+
+	return l0b * BITS_PER_LONG + l1b;
+}
+
+#endif /* __IRQ_STAGE_MAP_LEVELS == 2 */
+
+/**
+ * hard_preempt_disable - Disable preemption the hard way
+ *
+ * Disable hardware interrupts in the CPU, and disable preemption
+ * if currently running in-band code on the inband stage.
+ *
+ * Return the hardware interrupt state.
+ */
+unsigned long hard_preempt_disable(void)
+{
+	unsigned long flags = hard_local_irq_save();
+
+	if (running_inband())
+		preempt_disable();
+
+	return flags;
+}
+EXPORT_SYMBOL_GPL(hard_preempt_disable);
+
+/**
+ * hard_preempt_enable - Enable preemption the hard way
+ *
+ * Enable preemption if currently running in-band code on the
+ * inband stage, restoring the hardware interrupt state in the CPU.
+ * The per-CPU log is not played for the oob stage.
+ */
+void hard_preempt_enable(unsigned long flags)
+{
+	if (running_inband()) {
+		preempt_enable_no_resched();
+		hard_local_irq_restore(flags);
+		if (!hard_irqs_disabled_flags(flags))
+			preempt_check_resched();
+	} else
+		hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(hard_preempt_enable);
+
+static void handle_unexpected_irq(struct irq_desc *desc, irqreturn_t ret)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irqaction *action;
+
+	/*
+	 * Since IRQ_HANDLED was not received from any handler, we may
+	 * have a problem dealing with an OOB interrupt. The error
+	 * detection logic is as follows:
+	 *
+	 * - check and complain about any bogus return value from an
+	 * out-of-band IRQ handler: we only allow IRQ_HANDLED and
+	 * IRQ_NONE from those routines.
+	 *
+	 * - filter out spurious IRQs which may have been due to bus
+	 * asynchronicity; those tend to happen infrequently and
+	 * should not cause us to pull the brake (see
+	 * note_interrupt()).
+	 *
+	 * - otherwise, stop pipelining the IRQ line after a thousand
+	 * consecutive unhandled events.
+	 *
+	 * NOTE: we should already be holding desc->lock for non
+	 * per-cpu IRQs, since we should only get there from the
+	 * pipeline entry context.
+ */ + + WARN_ON_ONCE(irq_pipeline_debug() && + !irq_settings_is_per_cpu(desc) && + !raw_spin_is_locked(&desc->lock)); + + if (ret != IRQ_NONE) { + printk(KERN_ERR "out-of-band irq event %d: bogus return value %x\n", + irq, ret); + for_each_action_of_desc(desc, action) + printk(KERN_ERR "[<%p>] %pf", + action->handler, action->handler); + printk(KERN_CONT "\n"); + return; + } + + if (time_after(jiffies, desc->last_unhandled + HZ/10)) + desc->irqs_unhandled = 0; + else + desc->irqs_unhandled++; + + desc->last_unhandled = jiffies; + + if (unlikely(desc->irqs_unhandled > 1000)) { + printk(KERN_ERR "out-of-band irq %d: stuck or unexpected\n", irq); + irq_settings_clr_oob(desc); + desc->istate |= IRQS_SPURIOUS_DISABLED; + irq_disable(desc); + } +} + +static inline void incr_irq_kstat(struct irq_desc *desc) +{ + if (irq_settings_is_per_cpu_devid(desc)) + __kstat_incr_irqs_this_cpu(desc); + else + kstat_incr_irqs_this_cpu(desc); +} + +/* + * do_oob_irq() - Handles interrupts over the oob stage. Hard irqs + * off. + */ +static void do_oob_irq(struct irq_desc *desc) +{ + bool percpu_devid = irq_settings_is_per_cpu_devid(desc); + unsigned int irq = irq_desc_get_irq(desc); + irqreturn_t ret = IRQ_NONE, res; + struct irqaction *action; + void *dev_id; + + action = desc->action; + if (unlikely(action == NULL)) + goto done; + + if (percpu_devid) { + trace_irq_handler_entry(irq, action); + dev_id = raw_cpu_ptr(action->percpu_dev_id); + ret = action->handler(irq, dev_id); + trace_irq_handler_exit(irq, action, ret); + } else { + desc->istate &= ~IRQS_PENDING; + if (unlikely(irqd_irq_disabled(&desc->irq_data))) + return; + irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); + raw_spin_unlock(&desc->lock); + for_each_action_of_desc(desc, action) { + trace_irq_handler_entry(irq, action); + dev_id = action->dev_id; + res = action->handler(irq, dev_id); + trace_irq_handler_exit(irq, action, res); + ret |= res; + } + raw_spin_lock(&desc->lock); + irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); + } +done: + incr_irq_kstat(desc); + + if (likely(ret & IRQ_HANDLED)) { + desc->irqs_unhandled = 0; + return; + } + + handle_unexpected_irq(desc, ret); +} + +/* + * Over the inband stage, IRQs must be dispatched by the arch-specific + * arch_do_IRQ_pipelined() routine. + * + * Entered with hardirqs on, inband stalled. + */ +static inline +void do_inband_irq(struct irq_desc *desc) +{ + arch_do_IRQ_pipelined(desc); + WARN_ON_ONCE(irq_pipeline_debug() && !irqs_disabled()); +} + +static inline bool is_active_edge_event(struct irq_desc *desc) +{ + return (desc->istate & IRQS_PENDING) && + !irqd_irq_disabled(&desc->irq_data); +} + +bool handle_oob_irq(struct irq_desc *desc) /* hardirqs off */ +{ + struct irq_stage_data *oobd = this_oob_staged(); + unsigned int irq = irq_desc_get_irq(desc); + int stalled; + + /* + * Flow handlers of chained interrupts have no business + * running here: they should decode the event, invoking + * generic_handle_irq() for each cascaded IRQ. + */ + if (WARN_ON_ONCE(irq_pipeline_debug() && + irq_settings_is_chained(desc))) + return false; + + /* + * If no oob stage is present, all interrupts must go to the + * inband stage through the interrupt log. Otherwise, + * out-of-band IRQs are immediately delivered to the oob + * stage, while in-band IRQs still go through the inband stage + * log. + * + * This routine returns a boolean status telling the caller + * whether an out-of-band interrupt was delivered. 
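+ *
+ * Routing summary (restating the checks below):
+ *
+ *	oob stage present   IRQ marked oob   delivery
+ *	no                  any              in-band stage log
+ *	yes                 no               in-band stage log
+ *	yes                 yes              immediate, oob stage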
+ */ + if (!oob_stage_present() || !irq_settings_is_oob(desc)) { + irq_post_stage(&inband_stage, irq); + return false; + } + + if (WARN_ON_ONCE(irq_pipeline_debug() && running_inband())) + return false; + + stalled = test_and_stall_oob(); + + if (unlikely(desc->istate & IRQS_EDGE)) { + do { + if (is_active_edge_event(desc)) { + if (irqd_irq_masked(&desc->irq_data)) + unmask_irq(desc); + } + do_oob_irq(desc); + } while (is_active_edge_event(desc)); + } else { + do_oob_irq(desc); + } + + /* + * Cascaded interrupts enter handle_oob_irq() on the stalled + * out-of-band stage during the parent invocation. Make sure + * to restore the stall bit accordingly. + */ + if (likely(!stalled)) + unstall_oob(); + + /* + * CPU migration and/or stage switching over the handler are + * NOT allowed. These should take place over + * irq_exit_pipeline(). + */ + if (irq_pipeline_debug()) { + /* No CPU migration allowed. */ + WARN_ON_ONCE(this_oob_staged() != oobd); + /* No stage migration allowed. */ + WARN_ON_ONCE(current_irq_staged != oobd); + } + + return true; +} + +static inline +void copy_timer_regs(struct irq_desc *desc, struct pt_regs *regs) +{ + struct irq_pipeline_data *p; + + if (desc->action == NULL || !(desc->action->flags & __IRQF_TIMER)) + return; + /* + * Given our deferred dispatching model for regular IRQs, we + * record the preempted context registers only for the latest + * timer interrupt, so that the regular tick handler charges + * CPU times properly. It is assumed that no other interrupt + * handler cares for such information. + */ + p = raw_cpu_ptr(&irq_pipeline); + arch_save_timer_regs(&p->tick_regs, regs); +} + +static __always_inline +struct irq_stage_data *switch_stage_on_irq(void) +{ + struct irq_stage_data *prevd = current_irq_staged, *nextd; + + if (oob_stage_present()) { + nextd = this_oob_staged(); + if (prevd != nextd) + switch_oob(nextd); + } + + return prevd; +} + +static __always_inline +void restore_stage_on_irq(struct irq_stage_data *prevd) +{ + /* + * CPU migration and/or stage switching over + * irq_exit_pipeline() are allowed. Our exit logic is as + * follows: + * + * ENTRY EXIT EPILOGUE + * + * oob oob nop + * inband oob switch inband + * oob inband nop + * inband inband nop + */ + if (prevd->stage == &inband_stage && + current_irq_staged == this_oob_staged()) + switch_inband(this_inband_staged()); +} + +/** + * generic_pipeline_irq_desc - Pass an IRQ to the pipeline + * @desc: Descriptor of the IRQ to pass + * @regs: Register file coming from the low-level handling code + * + * Inject an IRQ into the pipeline from a CPU interrupt or trap + * context. A flow handler runs next for this IRQ. + * + * Hard irqs must be off on entry. Caller should have pushed the + * IRQ regs using set_irq_regs(). 
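+ *
+ * The canonical calling sequence is the one implemented by the
+ * generic_pipeline_irq() wrapper right below:
+ *
+ *	old_regs = set_irq_regs(regs);
+ *	generic_pipeline_irq_desc(desc, regs);
+ *	set_irq_regs(old_regs);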
+ */ +void generic_pipeline_irq_desc(struct irq_desc *desc, struct pt_regs *regs) +{ + int irq = irq_desc_get_irq(desc); + + if (irq_pipeline_debug() && !hard_irqs_disabled()) { + hard_local_irq_disable(); + pr_err("IRQ pipeline: interrupts enabled on entry (IRQ%u)\n", irq); + } + + trace_irq_pipeline_entry(irq); + copy_timer_regs(desc, regs); + generic_handle_irq_desc(desc); + trace_irq_pipeline_exit(irq); +} + +void generic_pipeline_irq(unsigned int irq, struct pt_regs *regs) +{ + struct irq_desc *desc = irq_to_cached_desc(irq); + struct pt_regs *old_regs; + + old_regs = set_irq_regs(regs); + generic_pipeline_irq_desc(desc, regs); + set_irq_regs(old_regs); +} + +struct irq_stage_data *handle_irq_pipelined_prepare(struct pt_regs *regs) +{ + struct irq_stage_data *prevd; + + /* + * Running with the oob stage stalled implies hardirqs off. + * For this reason, if the oob stage is stalled when we + * receive an interrupt from the hardware, something is badly + * broken in our interrupt state. Try fixing up, but without + * great hopes. + */ + if (irq_pipeline_debug()) { + if (test_oob_stall()) { + pr_err("IRQ pipeline: out-of-band stage stalled on IRQ entry\n"); + unstall_oob(); + } + WARN_ON(on_pipeline_entry()); + } + + /* + * Switch early on to the out-of-band stage if present, + * anticipating a companion kernel is going to handle the + * incoming event. If not, never mind, we will switch back + * in-band before synchronizing interrupts. + */ + prevd = switch_stage_on_irq(); + + /* Tell the companion core about the entry. */ + irq_enter_pipeline(); + + /* + * Invariant: IRQs may not pile up in the section covered by + * the PIPELINE_OFFSET marker, because: + * + * - out-of-band handlers called from handle_oob_irq() may NOT + * re-enable hard interrupts. Ever. + * + * - synchronizing the in-band log with hard interrupts + * enabled is done outside of this section. + */ + preempt_count_add(PIPELINE_OFFSET); + + /* + * From the standpoint of the in-band context when pipelining + * is in effect, an interrupt entry is unsafe in a similar way + * a NMI is, since it may preempt almost anywhere as IRQs are + * only virtually masked most of the time, including inside + * (virtually) interrupt-free sections. Declare a NMI entry so + * that the low handling code is allowed to enter RCU read + * sides (e.g. handle_domain_irq() needs this to resolve IRQ + * mappings). + */ + rcu_nmi_enter(); + + return prevd; +} + +int handle_irq_pipelined_finish(struct irq_stage_data *prevd, + struct pt_regs *regs) +{ + /* + * Leave the (pseudo-)NMI entry for RCU before the out-of-band + * core might reschedule in irq_exit_pipeline(), and + * interrupts are hard enabled again on this CPU as a result + * of switching context. + */ + rcu_nmi_exit(); + + /* + * Make sure to leave the pipeline entry context before + * allowing the companion core to reschedule, and eventually + * synchronizing interrupts. + */ + preempt_count_sub(PIPELINE_OFFSET); + + /* Allow the companion core to reschedule. */ + irq_exit_pipeline(); + + /* Back to the preempted stage. */ + restore_stage_on_irq(prevd); + + /* + * We have to synchronize interrupts because some might have + * been logged while we were busy handling an out-of-band + * event coming from the hardware: + * + * - as a result of calling an out-of-band handler which in + * turn posted them. + * + * - because we posted them directly for scheduling the + * interrupt to happen from the in-band stage. 
+ */
+	synchronize_pipeline_on_irq();
+
+#ifdef CONFIG_DOVETAIL
+	/*
+	 * Sending MAYDAY is in essence a rare case, so prefer a
+	 * test-then-maybe-clear sequence over test_and_clear.
+	 */
+	if (user_mode(regs) && test_thread_flag(TIF_MAYDAY))
+		dovetail_call_mayday(regs);
+#endif
+
+	return running_inband() && !irqs_disabled();
+}
+
+int handle_irq_pipelined(struct pt_regs *regs)
+{
+	struct irq_stage_data *prevd;
+
+	prevd = handle_irq_pipelined_prepare(regs);
+	handle_arch_irq(regs);
+	return handle_irq_pipelined_finish(prevd, regs);
+}
+
+/**
+ * irq_inject_pipeline - Inject a software-generated IRQ into the
+ * pipeline
+ * @irq: IRQ to inject
+ *
+ * Inject an IRQ into the pipeline by software as if such a
+ * hardware event had happened on the current CPU.
+ */
+int irq_inject_pipeline(unsigned int irq)
+{
+	struct irq_stage_data *oobd, *prevd;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	desc = irq_to_cached_desc(irq);
+	if (desc == NULL)
+		return -EINVAL;
+
+	flags = hard_local_irq_save();
+
+	/*
+	 * Handle the case of an IRQ sent to a stalled oob stage here,
+	 * which lets us trap the same condition in handle_oob_irq()
+	 * in a debug check (see comment there).
+	 */
+	oobd = this_oob_staged();
+	if (oob_stage_present() &&
+	    irq_settings_is_oob(desc) &&
+	    test_oob_stall()) {
+		irq_post_stage(&oob_stage, irq);
+	} else {
+		prevd = switch_stage_on_irq();
+		irq_enter_pipeline();
+		handle_oob_irq(desc);
+		irq_exit_pipeline();
+		restore_stage_on_irq(prevd);
+		synchronize_pipeline_on_irq();
+	}
+
+	hard_local_irq_restore(flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_inject_pipeline);
+
+/*
+ * sync_current_irq_stage() -- Flush the pending IRQs for the current
+ * stage (and processor). This routine flushes the interrupt log (see
+ * "Optimistic interrupt protection" from D. Stodolsky et al. for more
+ * on the deferred interrupt scheme). Every interrupt which has
+ * occurred while the pipeline was stalled gets played.
+ *
+ * CAUTION: CPU migration may occur over this routine if running over
+ * the inband stage.
+ */
+void sync_current_irq_stage(void) /* hard irqs off */
+{
+	struct irq_stage_data *p;
+	struct irq_stage *stage;
+	struct irq_desc *desc;
+	int irq;
+
+	WARN_ON_ONCE(irq_pipeline_debug() && on_pipeline_entry());
+	check_hard_irqs_disabled();
+
+	p = current_irq_staged;
+respin:
+	stage = p->stage;
+	if (stage == &inband_stage) {
+		/*
+		 * Since we manipulate the stall bit directly, we have
+		 * to open code the IRQ state tracing.
+		 */
+		stall_inband_nocheck();
+		trace_hardirqs_off();
+	} else {
+		stall_oob();
+	}
+
+	for (;;) {
+		irq = pull_next_irq(p);
+		if (irq < 0)
+			break;
+		/*
+		 * Make sure the compiler does not reorder wrongly, so
+		 * that all updates to maps are done before the
+		 * handler gets called.
+		 */
+		barrier();
+
+		desc = irq_to_cached_desc(irq);
+
+		if (stage == &inband_stage) {
+			hard_local_irq_enable();
+			do_inband_irq(desc);
+			hard_local_irq_disable();
+		} else {
+			do_oob_irq(desc);
+		}
+
+		/*
+		 * We might have switched from the oob stage to the
+		 * in-band one on return from the handler, in which
+		 * case we might also have migrated to a different CPU
+		 * (the converse in-band -> oob switch is NOT allowed
+		 * though). Reload the current per-cpu context
+		 * pointer, so that we further pull pending interrupts
+		 * from the proper in-band log.
+ */ + p = current_irq_staged; + if (p->stage != stage) { + if (WARN_ON_ONCE(irq_pipeline_debug() && + stage == &inband_stage)) + break; + goto respin; + } + } + + if (stage == &inband_stage) { + trace_hardirqs_on(); + unstall_inband_nocheck(); + } else { + unstall_oob(); + } +} + +#ifndef CONFIG_GENERIC_ENTRY + +/* + * These helpers are normally called from the kernel entry/exit code + * in the asm section by architectures which do not use the generic + * kernel entry code, in order to save the interrupt and lockdep + * states for the in-band stage on entry, restoring them when leaving + * the kernel. The per-architecture arch_kentry_set/get_irqstate() + * calls determine where this information should be kept while running + * in kernel context, indexed on the current register frame. + */ + +#define KENTRY_STALL_BIT BIT(0) /* Tracks INBAND_STALL_BIT */ +#define KENTRY_LOCKDEP_BIT BIT(1) /* Tracks hardirqs_enabled */ + +asmlinkage __visible noinstr void kentry_enter_pipelined(struct pt_regs *regs) +{ + long irqstate = 0; + + WARN_ON(irq_pipeline_debug() && !hard_irqs_disabled()); + + if (!running_inband()) + return; + + if (lockdep_read_irqs_state()) + irqstate |= KENTRY_LOCKDEP_BIT; + + if (irqs_disabled()) + irqstate |= KENTRY_STALL_BIT; + else + trace_hardirqs_off(); + + arch_kentry_set_irqstate(regs, irqstate); +} + +asmlinkage void __visible noinstr kentry_exit_pipelined(struct pt_regs *regs) +{ + long irqstate; + + WARN_ON(irq_pipeline_debug() && !hard_irqs_disabled()); + + if (!running_inband()) + return; + + /* + * If the in-band stage of the kernel is current but the IRQ + * is not going to be delivered because the latter is stalled, + * keep the tracing logic unaware of the receipt, so that no + * false positive is triggered in lockdep (e.g. IN-HARDIRQ-W + * -> HARDIRQ-ON-W). In this case, we still have to restore + * the lockdep irq state independently, since it might not be + * in sync with the stall bit (e.g. raw_local_irq_disable/save + * do flip the stall bit, but are not tracked by lockdep). + */ + + irqstate = arch_kentry_get_irqstate(regs); + if (!(irqstate & KENTRY_STALL_BIT)) { + stall_inband_nocheck(); + trace_hardirqs_on(); + unstall_inband_nocheck(); + } else { + lockdep_write_irqs_state(!!(irqstate & KENTRY_LOCKDEP_BIT)); + } +} + +#endif /* !CONFIG_GENERIC_ENTRY */ + +/** + * run_oob_call - escalate function call to the oob stage + * @fn: address of routine + * @arg: routine argument + * + * Make the specified function run on the oob stage, switching + * the current stage accordingly if needed. The escalated call is + * allowed to perform a stage migration in the process. + */ +int notrace run_oob_call(int (*fn)(void *arg), void *arg) +{ + struct irq_stage_data *p, *old; + struct irq_stage *oob; + unsigned long flags; + int ret, s; + + flags = hard_local_irq_save(); + + /* Switch to the oob stage if not current. */ + p = this_oob_staged(); + oob = p->stage; + old = current_irq_staged; + if (old != p) + switch_oob(p); + + s = test_and_stall_oob(); + barrier(); + ret = fn(arg); + hard_local_irq_disable(); + if (!s) + unstall_oob(); + + /* + * The exit logic is as follows: + * + * ON-ENTRY AFTER-CALL EPILOGUE + * + * oob oob sync current stage if !stalled + * inband oob switch to inband + sync all stages + * oob inband sync all stages + * inband inband sync all stages + * + * Each path which has stalled the oob stage while running on + * the inband stage at some point during the escalation + * process must synchronize all stages of the pipeline on + * exit. 
Otherwise, we may restrict the synchronization scope
+	 * to the current stage when the whole sequence ran on the oob
+	 * stage.
+	 */
+	p = this_oob_staged();
+	if (likely(current_irq_staged == p)) {
+		if (old->stage == oob) {
+			if (!s && stage_irqs_pending(p))
+				sync_current_irq_stage();
+			goto out;
+		}
+		switch_inband(this_inband_staged());
+	}
+
+	sync_irq_stage(oob);
+out:
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(run_oob_call);
+
+int enable_oob_stage(const char *name)
+{
+	struct irq_event_map *map;
+	struct irq_stage_data *p;
+	int cpu, ret;
+
+	if (oob_stage_present())
+		return -EBUSY;
+
+	/* Set up the out-of-band interrupt stage on all CPUs. */
+
+	for_each_possible_cpu(cpu) {
+		p = &per_cpu(irq_pipeline.stages, cpu)[1];
+		map = p->log.map; /* save/restore after memset(). */
+		memset(p, 0, sizeof(*p));
+		p->stage = &oob_stage;
+		memset(map, 0, sizeof(struct irq_event_map));
+		p->log.map = map;
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+		p->cpu = cpu;
+#endif
+	}
+
+	ret = arch_enable_oob_stage();
+	if (ret)
+		return ret;
+
+	oob_stage.name = name;
+	smp_wmb();
+	oob_stage.index = 1;
+
+	pr_info("IRQ pipeline: high-priority %s stage added.\n", name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(enable_oob_stage);
+
+void disable_oob_stage(void)
+{
+	const char *name = oob_stage.name;
+
+	WARN_ON(!running_inband() || !oob_stage_present());
+
+	oob_stage.index = 0;
+	smp_wmb();
+
+	pr_info("IRQ pipeline: %s stage removed.\n", name);
+}
+EXPORT_SYMBOL_GPL(disable_oob_stage);
+
+void irq_pipeline_oops(void)
+{
+	irq_pipeline_oopsing = true;
+	local_irq_disable_full();
+}
+
+/*
+ * Used to save/restore the status bits of the inband stage across runs
+ * of NMI-triggered code, so that we can restore the original pipeline
+ * state before leaving NMI context.
+ */
+static DEFINE_PER_CPU(unsigned long, nmi_saved_stall_bits);
+
+noinstr void irq_pipeline_nmi_enter(void)
+{
+	raw_cpu_write(nmi_saved_stall_bits, current->stall_bits);
+}
+EXPORT_SYMBOL(irq_pipeline_nmi_enter);
+
+noinstr void irq_pipeline_nmi_exit(void)
+{
+	current->stall_bits = raw_cpu_read(nmi_saved_stall_bits);
+}
+EXPORT_SYMBOL(irq_pipeline_nmi_exit);
+
+bool __weak irq_cpuidle_control(struct cpuidle_device *dev,
+				struct cpuidle_state *state)
+{
+	/*
+	 * Allow entering the idle state by default, matching the
+	 * original behavior when CPU_IDLE is turned on.
+	 * irq_cpuidle_control() may be overridden by out-of-band
+	 * code to determine whether the CPU may actually enter the
+	 * idle state.
+	 */
+	return true;
+}
+
+/**
+ * irq_cpuidle_enter - Prepare for entering the next idle state
+ * @dev: CPUIDLE device
+ * @state: CPUIDLE state to be entered
+ *
+ * Flush the in-band interrupt log before the caller idles, so
+ * that no event lingers while we wait for the next IRQ; if some
+ * event was pending there, the caller is asked to abort the
+ * idling process altogether. The companion core is also given
+ * the opportunity to block the idling process by having
+ * irq_cpuidle_control() return @false.
+ *
+ * Returns @true if the caller may proceed with idling, @false
+ * otherwise. The in-band log is guaranteed empty on return, hard
+ * irqs left off so that no event might sneak in until the caller
+ * actually idles.
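+ *
+ * Expected calling pattern from the idle path (a sketch only,
+ * the actual cpuidle glue lives elsewhere):
+ *
+ *	if (irq_cpuidle_enter(dev, state))
+ *		... enter the idle state, hard irqs still off ...
+ *	else
+ *		... abort idling, a pending event needs attention ...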
+ */ +bool irq_cpuidle_enter(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + WARN_ON_ONCE(irq_pipeline_debug() && !irqs_disabled()); + + hard_local_irq_disable(); + + if (stage_irqs_pending(this_inband_staged())) { + unstall_inband_nocheck(); + synchronize_pipeline(); + stall_inband_nocheck(); + trace_hardirqs_off(); + return false; + } + + return irq_cpuidle_control(dev, state); +} + +static unsigned int inband_work_sirq; + +static irqreturn_t inband_work_interrupt(int sirq, void *dev_id) +{ + irq_work_run(); + + return IRQ_HANDLED; +} + +static struct irqaction inband_work = { + .handler = inband_work_interrupt, + .name = "in-band work", + .flags = IRQF_NO_THREAD, +}; + +void irq_local_work_raise(void) +{ + unsigned long flags; + + /* + * irq_work_queue() may be called from the in-band stage too + * in case we want to delay a work until the hard irqs are on + * again, so we may only sync the in-band log when unstalled, + * with hard irqs on. + */ + flags = hard_local_irq_save(); + irq_post_inband(inband_work_sirq); + if (running_inband() && + !hard_irqs_disabled_flags(flags) && !irqs_disabled()) + sync_current_irq_stage(); + hard_local_irq_restore(flags); +} + +#ifdef CONFIG_DEBUG_IRQ_PIPELINE + +#ifdef CONFIG_LOCKDEP +static inline bool lockdep_on_error(void) +{ + return !debug_locks; +} +#else +static inline bool lockdep_on_error(void) +{ + return false; +} +#endif + +notrace void check_inband_stage(void) +{ + struct irq_stage *this_stage; + unsigned long flags; + + flags = hard_local_irq_save(); + + this_stage = current_irq_stage; + if (likely(this_stage == &inband_stage && !test_oob_stall())) { + hard_local_irq_restore(flags); + return; + } + + if (in_nmi() || irq_pipeline_oopsing || lockdep_on_error()) { + hard_local_irq_restore(flags); + return; + } + + /* + * This will disable all further pipeline debug checks, since + * a wrecked interrupt state is likely to trigger many of + * them, ending up in a terrible mess. IOW, the current + * situation must be fixed prior to investigating any + * subsequent issue that might still exist. + */ + irq_pipeline_oopsing = true; + + hard_local_irq_restore(flags); + + if (this_stage != &inband_stage) + pr_err("IRQ pipeline: some code running in oob context '%s'\n" + " called an in-band only routine\n", + this_stage->name); + else + pr_err("IRQ pipeline: oob stage found stalled while modifying in-band\n" + " interrupt state and/or running sleeping code\n"); + + dump_stack(); +} +EXPORT_SYMBOL(check_inband_stage); + +void check_spinlock_context(void) +{ + WARN_ON_ONCE(in_pipeline() || running_oob()); + +} +EXPORT_SYMBOL(check_spinlock_context); + +#endif /* CONFIG_DEBUG_IRQ_PIPELINE */ + +static inline void fixup_percpu_data(void) +{ +#ifdef CONFIG_SMP + struct irq_pipeline_data *p; + int cpu; + + /* + * A temporary event log is used by the inband stage during the + * early boot up (bootup_irq_map), until the per-cpu areas + * have been set up. + * + * Obviously, this code must run over the boot CPU, before SMP + * operations start, with hard IRQs off so that nothing can + * change under our feet. 
+ */ + WARN_ON(!hard_irqs_disabled()); + + memcpy(&per_cpu(irq_map_array, 0)[0], &bootup_irq_map, + sizeof(struct irq_event_map)); + + for_each_possible_cpu(cpu) { + p = &per_cpu(irq_pipeline, cpu); + p->stages[0].stage = &inband_stage; + p->stages[0].log.map = &per_cpu(irq_map_array, cpu)[0]; + p->stages[1].log.map = &per_cpu(irq_map_array, cpu)[1]; +#ifdef CONFIG_DEBUG_IRQ_PIPELINE + p->stages[0].cpu = cpu; + p->stages[1].cpu = cpu; +#endif + } +#endif +} + +void __init irq_pipeline_init_early(void) +{ + /* + * This is called early from start_kernel(), even before the + * actual number of IRQs is known. We are running on the boot + * CPU, hw interrupts are off, and secondary CPUs are still + * lost in space. Careful. + */ + fixup_percpu_data(); +} + +/** + * irq_pipeline_init - Main pipeline core inits + * + * This is step #2 of the 3-step pipeline initialization, which + * should happen right after init_IRQ() has run. The internal + * service interrupts are created along with the synthetic IRQ + * domain, and the arch-specific init chores are performed too. + * + * Interrupt pipelining should be fully functional when this + * routine returns. + */ +void __init irq_pipeline_init(void) +{ + WARN_ON(!hard_irqs_disabled()); + + synthetic_irq_domain = irq_domain_add_nomap(NULL, ~0, + &sirq_domain_ops, + NULL); + inband_work_sirq = irq_create_direct_mapping(synthetic_irq_domain); + setup_percpu_irq(inband_work_sirq, &inband_work); + + /* + * We are running on the boot CPU, hw interrupts are off, and + * secondary CPUs are still lost in space. Now we may run + * arch-specific code for enabling the pipeline. + */ + arch_irq_pipeline_init(); + + irq_pipeline_active = true; + + pr_info("IRQ pipeline enabled\n"); +} + +#ifndef CONFIG_SPARSE_IRQ +EXPORT_SYMBOL_GPL(irq_desc); +#endif diff --git a/kernel/kernel/irq/proc.c b/kernel/kernel/irq/proc.c index 0459b69..cad725f 100644 --- a/kernel/kernel/irq/proc.c +++ b/kernel/kernel/irq/proc.c @@ -518,6 +518,9 @@ #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); #endif +#ifdef CONFIG_IRQ_PIPELINE + seq_printf(p, " %-3s", irq_settings_is_oob(desc) ? 
"oob" : ""); +#endif if (desc->name) seq_printf(p, "-%-8s", desc->name); diff --git a/kernel/kernel/irq/resend.c b/kernel/kernel/irq/resend.c index 8ccd32a..01b9f23 100644 --- a/kernel/kernel/irq/resend.c +++ b/kernel/kernel/irq/resend.c @@ -16,10 +16,11 @@ #include <linux/module.h> #include <linux/random.h> #include <linux/interrupt.h> +#include <linux/irq_pipeline.h> #include "internals.h" -#ifdef CONFIG_HARDIRQS_SW_RESEND +#if defined(CONFIG_HARDIRQS_SW_RESEND) && !defined(CONFIG_IRQ_PIPELINE) /* Bitmap to handle software resend of interrupts: */ static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS); @@ -82,7 +83,12 @@ #else static int irq_sw_resend(struct irq_desc *desc) { +#if defined(CONFIG_HARDIRQS_SW_RESEND) && defined(CONFIG_IRQ_PIPELINE) + irq_inject_pipeline(irq_desc_get_irq(desc)); + return 0; +#else return -EINVAL; +#endif } #endif diff --git a/kernel/kernel/irq/settings.h b/kernel/kernel/irq/settings.h index 0033d45..aa97556 100644 --- a/kernel/kernel/irq/settings.h +++ b/kernel/kernel/irq/settings.h @@ -19,6 +19,8 @@ _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, _IRQ_HIDDEN = IRQ_HIDDEN, _IRQ_RAW = IRQ_RAW, + _IRQ_OOB = IRQ_OOB, + _IRQ_CHAINED = IRQ_CHAINED, _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, }; @@ -35,6 +37,8 @@ #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON #define IRQ_HIDDEN GOT_YOU_MORON #define IRQ_RAW GOT_YOU_MORON +#define IRQ_OOB GOT_YOU_MORON +#define IRQ_CHAINED GOT_YOU_MORON #undef IRQF_MODIFY_MASK #define IRQF_MODIFY_MASK GOT_YOU_MORON @@ -195,3 +199,33 @@ WARN_ON_ONCE(1); return false; } + +static inline bool irq_settings_is_oob(struct irq_desc *desc) +{ + return desc->status_use_accessors & _IRQ_OOB; +} + +static inline void irq_settings_clr_oob(struct irq_desc *desc) +{ + desc->status_use_accessors &= ~_IRQ_OOB; +} + +static inline void irq_settings_set_oob(struct irq_desc *desc) +{ + desc->status_use_accessors |= _IRQ_OOB; +} + +static inline bool irq_settings_is_chained(struct irq_desc *desc) +{ + return desc->status_use_accessors & _IRQ_CHAINED; +} + +static inline void irq_settings_set_chained(struct irq_desc *desc) +{ + desc->status_use_accessors |= _IRQ_CHAINED; +} + +static inline void irq_settings_clr_chained(struct irq_desc *desc) +{ + desc->status_use_accessors &= ~_IRQ_CHAINED; +} diff --git a/kernel/kernel/irq_work.c b/kernel/kernel/irq_work.c index e0ed16d..19417ce 100644 --- a/kernel/kernel/irq_work.c +++ b/kernel/kernel/irq_work.c @@ -49,6 +49,11 @@ */ } +void __weak irq_local_work_raise(void) +{ + arch_irq_work_raise(); +} + /* Enqueue on current CPU, work must already be claimed and preempt disabled */ static void __irq_work_queue_local(struct irq_work *work) { @@ -56,10 +61,10 @@ if (atomic_read(&work->flags) & IRQ_WORK_LAZY) { if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && tick_nohz_tick_stopped()) - arch_irq_work_raise(); + irq_local_work_raise(); } else { if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) - arch_irq_work_raise(); + irq_local_work_raise(); } } diff --git a/kernel/kernel/kthread.c b/kernel/kernel/kthread.c index 9d736f5..896383b 100644 --- a/kernel/kernel/kthread.c +++ b/kernel/kernel/kthread.c @@ -14,6 +14,7 @@ #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/kthread.h> +#include <linux/irq_pipeline.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/cgroup.h> @@ -1331,6 +1332,7 @@ { struct mm_struct *active_mm; struct task_struct *tsk = current; + unsigned long flags; WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD)); WARN_ON_ONCE(tsk->mm); @@ -1339,12 +1341,14 @@ /* Hold off 
tlb flush IPIs while switching mm's */ local_irq_disable(); active_mm = tsk->active_mm; + protect_inband_mm(flags); if (active_mm != mm) { mmgrab(mm); tsk->active_mm = mm; } tsk->mm = mm; switch_mm_irqs_off(active_mm, mm, tsk); + unprotect_inband_mm(flags); local_irq_enable(); task_unlock(tsk); #ifdef finish_arch_post_lock_switch diff --git a/kernel/kernel/locking/Makefile b/kernel/kernel/locking/Makefile index 6d11cfb..c491040 100644 --- a/kernel/kernel/locking/Makefile +++ b/kernel/kernel/locking/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o +obj-$(CONFIG_IRQ_PIPELINE) += pipeline.o obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o diff --git a/kernel/kernel/locking/lockdep.c b/kernel/kernel/locking/lockdep.c index 6cbd2b4..48f5a6b 100644 --- a/kernel/kernel/locking/lockdep.c +++ b/kernel/kernel/locking/lockdep.c @@ -42,6 +42,7 @@ #include <linux/stacktrace.h> #include <linux/debug_locks.h> #include <linux/irqflags.h> +#include <linux/irqstage.h> #include <linux/utsname.h> #include <linux/hash.h> #include <linux/ftrace.h> @@ -104,9 +105,56 @@ static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static struct task_struct *__owner; +static __always_inline bool lockdep_stage_disabled(void) +{ + return stage_disabled(); +} + +#ifdef CONFIG_IRQ_PIPELINE +/* + * If LOCKDEP is enabled, we want irqs to be disabled for both stages + * when traversing the lockdep code for hard and mutable locks (at the + * expense of massive latency overhead though). + */ +static __always_inline unsigned long lockdep_stage_test_and_disable(int *irqsoff) +{ + return test_and_lock_stage(irqsoff); +} + +static __always_inline unsigned long lockdep_stage_disable(void) +{ + return lockdep_stage_test_and_disable(NULL); +} + +static __always_inline void lockdep_stage_restore(unsigned long flags) +{ + unlock_stage(flags); +} + +#else + +#define lockdep_stage_test_and_disable(__irqsoff) \ + ({ \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + *(__irqsoff) = irqs_disabled_flags(__flags); \ + __flags; \ + }) + +#define lockdep_stage_disable() \ + ({ \ + unsigned long __flags; \ + raw_local_irq_save(__flags); \ + __flags; \ + }) + +#define lockdep_stage_restore(__flags) raw_local_irq_restore(__flags) + +#endif /* !CONFIG_IRQ_PIPELINE */ + static inline void lockdep_lock(void) { - DEBUG_LOCKS_WARN_ON(!irqs_disabled()); + DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled()); __this_cpu_inc(lockdep_recursion); arch_spin_lock(&__lock); @@ -115,7 +163,7 @@ static inline void lockdep_unlock(void) { - DEBUG_LOCKS_WARN_ON(!irqs_disabled()); + DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled()); if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current)) return; @@ -882,7 +930,7 @@ /* * We do an RCU walk of the hash, see lockdep_free_key_range(). 
*/ - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled())) return NULL; hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) { @@ -1179,7 +1227,7 @@ return; hash_head = keyhashentry(key); - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); if (!graph_lock()) goto restore_irqs; hlist_for_each_entry_rcu(k, hash_head, hash_entry) { @@ -1190,7 +1238,7 @@ out_unlock: graph_unlock(); restore_irqs: - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(lockdep_register_key); @@ -1239,7 +1287,7 @@ struct lock_class *class; int idx; - DEBUG_LOCKS_WARN_ON(!irqs_disabled()); + DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()); class = look_up_lock_class(lock, subclass); if (likely(class)) @@ -2035,11 +2083,11 @@ __bfs_init_root(&this, class); - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_lock(); ret = __lockdep_count_forward_deps(&this); lockdep_unlock(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); return ret; } @@ -2061,11 +2109,11 @@ __bfs_init_root(&this, class); - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_lock(); ret = __lockdep_count_backward_deps(&this); lockdep_unlock(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); return ret; } @@ -4170,6 +4218,8 @@ */ void lockdep_hardirqs_on_prepare(unsigned long ip) { + unsigned long flags; + if (unlikely(!debug_locks)) return; @@ -4192,38 +4242,43 @@ return; } + flags = hard_cond_local_irq_save(); + /* * We're enabling irqs and according to our state above irqs weren't * already enabled, yet we find the hardware thinks they are in fact * enabled.. someone messed up their IRQ state tracing. */ - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) - return; + if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled())) + goto out; /* * See the fine text that goes along with this variable definition. */ if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled)) - return; + goto out; /* * Can't allow enabling interrupts while in an interrupt handler, * that's general bad form and such. Recursion, limited stack etc.. */ - if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context())) - return; + if (DEBUG_LOCKS_WARN_ON(running_inband() && lockdep_hardirq_context())) + goto out; current->hardirq_chain_key = current->curr_chain_key; lockdep_recursion_inc(); __trace_hardirqs_on_caller(); lockdep_recursion_finish(); +out: + hard_cond_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare); void noinstr lockdep_hardirqs_on(unsigned long ip) { struct irqtrace_events *trace = ¤t->irqtrace; + unsigned long flags; if (unlikely(!debug_locks)) return; @@ -4261,13 +4316,15 @@ return; } + flags = hard_cond_local_irq_save(); + /* * We're enabling irqs and according to our state above irqs weren't * already enabled, yet we find the hardware thinks they are in fact * enabled.. someone messed up their IRQ state tracing. 
*/ - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) - return; + if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled())) + goto out; /* * Ensure the lock stack remained unchanged between @@ -4282,6 +4339,8 @@ trace->hardirq_enable_ip = ip; trace->hardirq_enable_event = ++trace->irq_events; debug_atomic_inc(hardirqs_on_events); +out: + hard_cond_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lockdep_hardirqs_on); @@ -4290,6 +4349,8 @@ */ void noinstr lockdep_hardirqs_off(unsigned long ip) { + unsigned long flags; + if (unlikely(!debug_locks)) return; @@ -4304,12 +4365,14 @@ } else if (__this_cpu_read(lockdep_recursion)) return; + flags = hard_cond_local_irq_save(); + /* * So we're supposed to get called after you mask local IRQs, but for * some reason the hardware doesn't quite think you did a proper job. */ - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) - return; + if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled())) + goto out; if (lockdep_hardirqs_enabled()) { struct irqtrace_events *trace = ¤t->irqtrace; @@ -4324,6 +4387,8 @@ } else { debug_atomic_inc(redundant_hardirqs_off); } +out: + hard_cond_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lockdep_hardirqs_off); @@ -4333,20 +4398,23 @@ void lockdep_softirqs_on(unsigned long ip) { struct irqtrace_events *trace = ¤t->irqtrace; + unsigned long flags; if (unlikely(!lockdep_enabled())) return; + + flags = hard_cond_local_irq_save(); /* * We fancy IRQs being disabled here, see softirq.c, avoids * funny state and nesting things. */ - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) - return; + if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled())) + goto out; if (current->softirqs_enabled) { debug_atomic_inc(redundant_softirqs_on); - return; + goto out; } lockdep_recursion_inc(); @@ -4365,6 +4433,8 @@ if (lockdep_hardirqs_enabled()) mark_held_locks(current, LOCK_ENABLED_SOFTIRQ); lockdep_recursion_finish(); +out: + hard_cond_local_irq_restore(flags); } /* @@ -4372,14 +4442,18 @@ */ void lockdep_softirqs_off(unsigned long ip) { + unsigned long flags; + if (unlikely(!lockdep_enabled())) return; + + flags = hard_cond_local_irq_save(); /* * We fancy IRQs being disabled here, see softirq.c */ - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) - return; + if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled())) + goto out; if (current->softirqs_enabled) { struct irqtrace_events *trace = ¤t->irqtrace; @@ -4397,6 +4471,8 @@ DEBUG_LOCKS_WARN_ON(!softirq_count()); } else debug_atomic_inc(redundant_softirqs_off); +out: + hard_cond_local_irq_restore(flags); } static int @@ -4751,11 +4827,11 @@ if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled())) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_recursion_inc(); register_lock_class(lock, subclass, 1); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } } EXPORT_SYMBOL_GPL(lockdep_init_map_type); @@ -5085,7 +5161,7 @@ struct held_lock *hlock; int first_idx = idx; - if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled())) return 0; for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { @@ -5397,7 +5473,13 @@ static noinstr void check_flags(unsigned long flags) { #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) - if (!debug_locks) + /* + * irq_pipeline: we can't and don't want to check the + * consistency of the irq tracer when running over the + * pipeline entry or oob stage contexts, since the inband + * stall bit does not reflect the current irq state there. 
+ */ + if (on_pipeline_entry() || running_oob() || !debug_locks) return; /* Get the warning out.. */ @@ -5444,13 +5526,13 @@ if (unlikely(!lockdep_enabled())) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_recursion_inc(); check_flags(flags); if (__lock_set_class(lock, name, key, subclass, ip)) check_chain_key(current); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(lock_set_class); @@ -5461,13 +5543,13 @@ if (unlikely(!lockdep_enabled())) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_recursion_inc(); check_flags(flags); if (__lock_downgrade(lock, ip)) check_chain_key(current); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(lock_downgrade); @@ -5532,6 +5614,7 @@ struct lockdep_map *nest_lock, unsigned long ip) { unsigned long flags; + int irqsoff; trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); @@ -5558,14 +5641,14 @@ return; } - raw_local_irq_save(flags); + flags = lockdep_stage_test_and_disable(&irqsoff); check_flags(flags); lockdep_recursion_inc(); __lock_acquire(lock, subclass, trylock, read, check, - irqs_disabled_flags(flags), nest_lock, ip, 0, 0); + irqsoff, nest_lock, ip, 0, 0); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(lock_acquire); @@ -5578,14 +5661,14 @@ if (unlikely(!lockdep_enabled())) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); check_flags(flags); lockdep_recursion_inc(); if (__lock_release(lock, ip)) check_chain_key(current); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(lock_release); @@ -5597,13 +5680,13 @@ if (unlikely(!lockdep_enabled())) return 1; /* avoid false negative lockdep_assert_held() */ - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); check_flags(flags); lockdep_recursion_inc(); ret = __lock_is_held(lock, read); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); return ret; } @@ -5618,13 +5701,13 @@ if (unlikely(!lockdep_enabled())) return cookie; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); check_flags(flags); lockdep_recursion_inc(); cookie = __lock_pin_lock(lock); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); return cookie; } @@ -5637,13 +5720,13 @@ if (unlikely(!lockdep_enabled())) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); check_flags(flags); lockdep_recursion_inc(); __lock_repin_lock(lock, cookie); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(lock_repin_lock); @@ -5654,13 +5737,13 @@ if (unlikely(!lockdep_enabled())) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); check_flags(flags); lockdep_recursion_inc(); __lock_unpin_lock(lock, cookie); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(lock_unpin_lock); @@ -5790,12 +5873,12 @@ if (unlikely(!lock_stat || !lockdep_enabled())) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); check_flags(flags); lockdep_recursion_inc(); __lock_contended(lock, ip); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(lock_contended); @@ -5808,12 +5891,12 @@ if 
(unlikely(!lock_stat || !lockdep_enabled())) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); check_flags(flags); lockdep_recursion_inc(); __lock_acquired(lock, ip); lockdep_recursion_finish(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(lock_acquired); #endif @@ -5828,7 +5911,7 @@ unsigned long flags; int i; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_init_task(current); memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); nr_hardirq_chains = 0; @@ -5837,7 +5920,7 @@ debug_locks = 1; for (i = 0; i < CHAINHASH_SIZE; i++) INIT_HLIST_HEAD(chainhash_table + i); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } /* Remove a class from a lock chain. Must be called with the graph lock held. */ @@ -6014,7 +6097,7 @@ if (WARN_ON_ONCE(ch != &delayed_free.rcu_head)) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_lock(); /* closed head */ @@ -6028,7 +6111,7 @@ call_rcu_zapped(delayed_free.pf + delayed_free.index); lockdep_unlock(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } /* @@ -6071,13 +6154,13 @@ init_data_structures_once(); - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_lock(); pf = get_pending_free(); __lockdep_free_key_range(pf, start, size); call_rcu_zapped(pf); lockdep_unlock(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); /* * Wait for any possible iterators from look_up_lock_class() to pass @@ -6097,12 +6180,12 @@ init_data_structures_once(); - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_lock(); __lockdep_free_key_range(pf, start, size); __free_zapped_classes(pf); lockdep_unlock(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } void lockdep_free_key_range(void *start, unsigned long size) @@ -6173,7 +6256,7 @@ unsigned long flags; int locked; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); locked = graph_lock(); if (!locked) goto out_irq; @@ -6184,7 +6267,7 @@ graph_unlock(); out_irq: - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } /* @@ -6196,12 +6279,12 @@ struct pending_free *pf = delayed_free.pf; unsigned long flags; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_lock(); __lockdep_reset_lock(pf, lock); __free_zapped_classes(pf); lockdep_unlock(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } void lockdep_reset_lock(struct lockdep_map *lock) @@ -6234,7 +6317,7 @@ if (WARN_ON_ONCE(static_obj(key))) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); lockdep_lock(); hlist_for_each_entry_rcu(k, hash_head, hash_entry) { @@ -6251,7 +6334,7 @@ call_rcu_zapped(pf); } lockdep_unlock(); - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); /* Wait until is_dynamic_key() has finished accessing k->hash_entry. 
*/ synchronize_rcu(); @@ -6342,7 +6425,7 @@ if (unlikely(!debug_locks)) return; - raw_local_irq_save(flags); + flags = lockdep_stage_disable(); for (i = 0; i < curr->lockdep_depth; i++) { hlock = curr->held_locks + i; @@ -6353,7 +6436,7 @@ print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); break; } - raw_local_irq_restore(flags); + lockdep_stage_restore(flags); } EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); diff --git a/kernel/kernel/locking/lockdep_internals.h b/kernel/kernel/locking/lockdep_internals.h index bbe9000..6f78acc 100644 --- a/kernel/kernel/locking/lockdep_internals.h +++ b/kernel/kernel/locking/lockdep_internals.h @@ -213,12 +213,12 @@ this_cpu_inc(lockdep_stats.ptr); #define debug_atomic_inc(ptr) { \ - WARN_ON_ONCE(!irqs_disabled()); \ + WARN_ON_ONCE(!hard_irqs_disabled() && !irqs_disabled());\ __this_cpu_inc(lockdep_stats.ptr); \ } #define debug_atomic_dec(ptr) { \ - WARN_ON_ONCE(!irqs_disabled()); \ + WARN_ON_ONCE(!hard_irqs_disabled() && !irqs_disabled());\ __this_cpu_dec(lockdep_stats.ptr); \ } diff --git a/kernel/kernel/locking/pipeline.c b/kernel/kernel/locking/pipeline.c new file mode 100644 index 0000000..fde458e --- /dev/null +++ b/kernel/kernel/locking/pipeline.c @@ -0,0 +1,231 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>. + */ +#include <linux/linkage.h> +#include <linux/preempt.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/irq_pipeline.h> +#include <linux/kconfig.h> + +/* + * A hybrid spinlock behaves in different ways depending on the + * current interrupt stage on entry. + * + * Such spinlock always leaves hard IRQs disabled once locked. In + * addition, it stalls the in-band stage when protecting a critical + * section there, disabling preemption like regular spinlocks do as + * well. This combination preserves the regular locking logic when + * called from the in-band stage, while fully disabling preemption by + * other interrupt stages. + * + * When taken from the pipeline entry context, a hybrid lock behaves + * like a hard spinlock, assuming that hard IRQs are already disabled. + * + * The irq descriptor lock (struct irq_desc) is a typical example of + * such lock, which properly serializes accesses regardless of the + * calling context. + */ +void __hybrid_spin_lock(struct raw_spinlock *rlock) +{ + struct hybrid_spinlock *lock; + unsigned long __flags; + + if (running_inband()) + preempt_disable(); + + __flags = hard_local_irq_save(); + hard_lock_acquire(rlock, 0, _RET_IP_); + lock = container_of(rlock, struct hybrid_spinlock, rlock); + lock->hwflags = __flags; +} +EXPORT_SYMBOL(__hybrid_spin_lock); + +void __hybrid_spin_lock_nested(struct raw_spinlock *rlock, int subclass) +{ + struct hybrid_spinlock *lock; + unsigned long __flags; + + if (running_inband()) + preempt_disable(); + + __flags = hard_local_irq_save(); + hard_lock_acquire_nested(rlock, subclass, _RET_IP_); + lock = container_of(rlock, struct hybrid_spinlock, rlock); + lock->hwflags = __flags; +} +EXPORT_SYMBOL(__hybrid_spin_lock_nested); + +void __hybrid_spin_unlock(struct raw_spinlock *rlock) +{ + struct hybrid_spinlock *lock; + unsigned long __flags; + + /* Pick the flags before releasing the lock. 
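+	 * A new owner could store its own hardware flags into
+	 * ->hwflags as soon as the lock is dropped, so the field has
+	 * to be read beforehand.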
*/ + lock = container_of(rlock, struct hybrid_spinlock, rlock); + __flags = lock->hwflags; + hard_lock_release(rlock, _RET_IP_); + hard_local_irq_restore(__flags); + + if (running_inband()) + preempt_enable(); +} +EXPORT_SYMBOL(__hybrid_spin_unlock); + +void __hybrid_spin_lock_irq(struct raw_spinlock *rlock) +{ + struct hybrid_spinlock *lock; + unsigned long __flags; + + __flags = hard_local_irq_save(); + + if (running_inband()) { + stall_inband(); + trace_hardirqs_off(); + preempt_disable(); + } + + hard_lock_acquire(rlock, 0, _RET_IP_); + lock = container_of(rlock, struct hybrid_spinlock, rlock); + lock->hwflags = __flags; +} +EXPORT_SYMBOL(__hybrid_spin_lock_irq); + +void __hybrid_spin_unlock_irq(struct raw_spinlock *rlock) +{ + struct hybrid_spinlock *lock; + unsigned long __flags; + + /* Pick the flags before releasing the lock. */ + lock = container_of(rlock, struct hybrid_spinlock, rlock); + __flags = lock->hwflags; + hard_lock_release(rlock, _RET_IP_); + + if (running_inband()) { + trace_hardirqs_on(); + unstall_inband_nocheck(); + hard_local_irq_restore(__flags); + preempt_enable(); + return; + } + + hard_local_irq_restore(__flags); +} +EXPORT_SYMBOL(__hybrid_spin_unlock_irq); + +unsigned long __hybrid_spin_lock_irqsave(struct raw_spinlock *rlock) +{ + struct hybrid_spinlock *lock; + unsigned long __flags, flags; + + __flags = flags = hard_local_irq_save(); + + if (running_inband()) { + flags = test_and_stall_inband(); + trace_hardirqs_off(); + preempt_disable(); + } + + hard_lock_acquire(rlock, 0, _RET_IP_); + lock = container_of(rlock, struct hybrid_spinlock, rlock); + lock->hwflags = __flags; + + return flags; +} +EXPORT_SYMBOL(__hybrid_spin_lock_irqsave); + +void __hybrid_spin_unlock_irqrestore(struct raw_spinlock *rlock, + unsigned long flags) +{ + struct hybrid_spinlock *lock; + unsigned long __flags; + + /* Pick the flags before releasing the lock. 
*/ + lock = container_of(rlock, struct hybrid_spinlock, rlock); + __flags = lock->hwflags; + hard_lock_release(rlock, _RET_IP_); + + if (running_inband()) { + if (!flags) { + trace_hardirqs_on(); + unstall_inband_nocheck(); + } + hard_local_irq_restore(__flags); + preempt_enable(); + return; + } + + hard_local_irq_restore(__flags); +} +EXPORT_SYMBOL(__hybrid_spin_unlock_irqrestore); + +int __hybrid_spin_trylock(struct raw_spinlock *rlock) +{ + struct hybrid_spinlock *lock; + unsigned long __flags; + + if (running_inband()) + preempt_disable(); + + lock = container_of(rlock, struct hybrid_spinlock, rlock); + __flags = hard_local_irq_save(); + + hard_spin_trylock_prepare(rlock); + if (do_raw_spin_trylock(rlock)) { + lock->hwflags = __flags; + hard_trylock_acquire(rlock, 1, _RET_IP_); + return 1; + } + + hard_spin_trylock_fail(rlock); + hard_local_irq_restore(__flags); + + if (running_inband()) + preempt_enable(); + + return 0; +} +EXPORT_SYMBOL(__hybrid_spin_trylock); + +int __hybrid_spin_trylock_irqsave(struct raw_spinlock *rlock, + unsigned long *flags) +{ + struct hybrid_spinlock *lock; + unsigned long __flags; + bool inband; + + inband = running_inband(); + + __flags = *flags = hard_local_irq_save(); + + lock = container_of(rlock, struct hybrid_spinlock, rlock); + if (inband) { + *flags = test_and_stall_inband(); + trace_hardirqs_off(); + preempt_disable(); + } + + hard_spin_trylock_prepare(rlock); + if (do_raw_spin_trylock(rlock)) { + hard_trylock_acquire(rlock, 1, _RET_IP_); + lock->hwflags = __flags; + return 1; + } + + hard_spin_trylock_fail(rlock); + + if (inband && !*flags) { + trace_hardirqs_on(); + unstall_inband_nocheck(); + } + + hard_local_irq_restore(__flags); + + if (inband) + preempt_enable(); + + return 0; +} +EXPORT_SYMBOL(__hybrid_spin_trylock_irqsave); diff --git a/kernel/kernel/locking/spinlock_debug.c b/kernel/kernel/locking/spinlock_debug.c index b9d9308..0bcc39f 100644 --- a/kernel/kernel/locking/spinlock_debug.c +++ b/kernel/kernel/locking/spinlock_debug.c @@ -114,6 +114,7 @@ mmiowb_spin_lock(); debug_spin_lock_after(lock); } +EXPORT_SYMBOL_GPL(do_raw_spin_lock); int do_raw_spin_trylock(raw_spinlock_t *lock) { @@ -131,6 +132,7 @@ #endif return ret; } +EXPORT_SYMBOL_GPL(do_raw_spin_trylock); void do_raw_spin_unlock(raw_spinlock_t *lock) { @@ -138,6 +140,7 @@ debug_spin_unlock(lock); arch_spin_unlock(&lock->raw_lock); } +EXPORT_SYMBOL_GPL(do_raw_spin_unlock); static void rwlock_bug(rwlock_t *lock, const char *msg) { diff --git a/kernel/kernel/notifier.c b/kernel/kernel/notifier.c index 1b019cb..b116e14 100644 --- a/kernel/kernel/notifier.c +++ b/kernel/kernel/notifier.c @@ -213,6 +213,9 @@ { int ret; + if (!running_inband()) + return notifier_call_chain(&nh->head, val, v, -1, NULL); + rcu_read_lock(); ret = notifier_call_chain(&nh->head, val, v, -1, NULL); rcu_read_unlock(); diff --git a/kernel/kernel/panic.c b/kernel/kernel/panic.c index 332736a..f4a05dd 100644 --- a/kernel/kernel/panic.c +++ b/kernel/kernel/panic.c @@ -27,6 +27,7 @@ #include <linux/sysrq.h> #include <linux/init.h> #include <linux/nmi.h> +#include <linux/irq_pipeline.h> #include <linux/console.h> #include <linux/bug.h> #include <linux/ratelimit.h> @@ -49,7 +50,7 @@ IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? 
(1 << TAINT_RANDSTRUCT) : 0; static int pause_on_oops; static int pause_on_oops_flag; -static DEFINE_SPINLOCK(pause_on_oops_lock); +static DEFINE_HARD_SPINLOCK(pause_on_oops_lock); bool crash_kexec_post_notifiers; int panic_on_warn __read_mostly; unsigned long panic_on_taint; @@ -189,8 +190,9 @@ * there is nothing to prevent an interrupt handler (that runs * after setting panic_cpu) from invoking panic() again. */ - local_irq_disable(); + hard_local_irq_disable(); preempt_disable_notrace(); + irq_pipeline_oops(); /* * It's possible to come here directly from a panic-assertion and @@ -267,9 +269,12 @@ /* * Run any panic handlers, including those that might need to - * add information to the kmsg dump output. + * add information to the kmsg dump output. Skip panic + * handlers if running over the oob stage, as they would most + * likely break. */ - atomic_notifier_call_chain(&panic_notifier_list, 0, buf); + if (running_inband()) + atomic_notifier_call_chain(&panic_notifier_list, 0, buf); /* Call flush even twice. It tries harder with a single online CPU */ printk_safe_flush_on_panic(); @@ -474,7 +479,7 @@ if (!pause_on_oops) return; - spin_lock_irqsave(&pause_on_oops_lock, flags); + raw_spin_lock_irqsave(&pause_on_oops_lock, flags); if (pause_on_oops_flag == 0) { /* This CPU may now print the oops message */ pause_on_oops_flag = 1; @@ -484,21 +489,21 @@ /* This CPU gets to do the counting */ spin_counter = pause_on_oops; do { - spin_unlock(&pause_on_oops_lock); + raw_spin_unlock(&pause_on_oops_lock); spin_msec(MSEC_PER_SEC); - spin_lock(&pause_on_oops_lock); + raw_spin_lock(&pause_on_oops_lock); } while (--spin_counter); pause_on_oops_flag = 0; } else { /* This CPU waits for a different one */ while (spin_counter) { - spin_unlock(&pause_on_oops_lock); + raw_spin_unlock(&pause_on_oops_lock); spin_msec(1); - spin_lock(&pause_on_oops_lock); + raw_spin_lock(&pause_on_oops_lock); } } } - spin_unlock_irqrestore(&pause_on_oops_lock, flags); + raw_spin_unlock_irqrestore(&pause_on_oops_lock, flags); } /* @@ -528,6 +533,7 @@ { tracing_off(); /* can't trust the integrity of the kernel anymore: */ + irq_pipeline_oops(); debug_locks_off(); do_oops_enter_exit(); diff --git a/kernel/kernel/power/Makefile b/kernel/kernel/power/Makefile index 9770575..bab1221 100644 --- a/kernel/kernel/power/Makefile +++ b/kernel/kernel/power/Makefile @@ -17,5 +17,7 @@ obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o +ifndef CONFIG_DOVETAIL obj-$(CONFIG_SUSPEND) += wakeup_reason.o +endif obj-$(CONFIG_ENERGY_MODEL) += energy_model.o diff --git a/kernel/kernel/power/hibernate.c b/kernel/kernel/power/hibernate.c index b6875eb..bc68e0f 100644 --- a/kernel/kernel/power/hibernate.c +++ b/kernel/kernel/power/hibernate.c @@ -302,6 +302,7 @@ goto Enable_cpus; local_irq_disable(); + hard_cond_local_irq_disable(); system_state = SYSTEM_SUSPEND; @@ -469,6 +470,7 @@ local_irq_disable(); system_state = SYSTEM_SUSPEND; + hard_cond_local_irq_disable(); error = syscore_suspend(); if (error) @@ -590,6 +592,7 @@ local_irq_disable(); system_state = SYSTEM_SUSPEND; + hard_cond_local_irq_disable(); syscore_suspend(); if (pm_wakeup_pending()) { error = -EAGAIN; diff --git a/kernel/kernel/printk/printk.c b/kernel/kernel/printk/printk.c index e253475..db67ef3 100644 --- a/kernel/kernel/printk/printk.c +++ b/kernel/kernel/printk/printk.c @@ -47,6 +47,7 @@ #include <linux/sched/clock.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> +#include <linux/irqstage.h> #include <linux/uaccess.h> #include <asm/sections.h> @@ -2188,6 +2189,73 @@ } 
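As a usage illustration of the locking pattern this series relies on: the __hybrid_spin_*() entry points above and the DEFINE_HARD_SPINLOCK() conversion of pause_on_oops_lock both yield raw_spinlock_t semantics that keep hard interrupts off while the lock is held, so the protected data may be shared between the in-band and out-of-band stages. A minimal sketch under that assumption, mirroring the pause_on_oops_lock usage in the hunk above; the sample_* names are hypothetical:

#include <linux/spinlock.h>

/* Data shared between in-band code and out-of-band handlers. */
static DEFINE_HARD_SPINLOCK(sample_lock);
static u64 sample_events;

/*
 * Safe to call from either stage: hard irqs remain disabled for
 * as long as the lock is held, so neither a regular IRQ nor an
 * out-of-band IRQ can preempt the critical section.
 */
static void sample_log_event(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sample_lock, flags);
	sample_events++;
	raw_spin_unlock_irqrestore(&sample_lock, flags);
}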
#endif +#ifdef CONFIG_RAW_PRINTK +static struct console *raw_console; +static DEFINE_HARD_SPINLOCK(raw_console_lock); + +void raw_puts(const char *s, size_t len) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&raw_console_lock, flags); + if (raw_console) + raw_console->write_raw(raw_console, s, len); + raw_spin_unlock_irqrestore(&raw_console_lock, flags); +} +EXPORT_SYMBOL(raw_puts); + +void raw_vprintk(const char *fmt, va_list ap) +{ + char buf[256]; + size_t n; + + if (raw_console == NULL || console_suspended) + return; + + touch_nmi_watchdog(); + n = vscnprintf(buf, sizeof(buf), fmt, ap); + raw_puts(buf, n); +} +EXPORT_SYMBOL(raw_vprintk); + +asmlinkage __visible void raw_printk(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + raw_vprintk(fmt, ap); + va_end(ap); +} +EXPORT_SYMBOL(raw_printk); + +static inline void register_raw_console(struct console *newcon) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&raw_console_lock, flags); + if (newcon->write_raw) + raw_console = newcon; + raw_spin_unlock_irqrestore(&raw_console_lock, flags); +} + +static inline void unregister_raw_console(struct console *oldcon) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&raw_console_lock, flags); + if (oldcon == raw_console) + raw_console = NULL; + raw_spin_unlock_irqrestore(&raw_console_lock, flags); +} + +#else + +static inline void register_raw_console(struct console *newcon) { } + +static inline void unregister_raw_console(struct console *oldcon) { } + +#endif + static int __add_preferred_console(char *name, int idx, char *options, char *brl_options, bool user_specified) { @@ -2854,6 +2922,9 @@ if (err || newcon->flags & CON_BRL) return; + /* The latest raw console to register is current. */ + register_raw_console(newcon); + /* * If we have a bootconsole, and are switching to a real console, * don't print everything out again, since when the boot console, and @@ -2938,6 +3009,8 @@ (console->flags & CON_BOOT) ? "boot" : "" , console->name, console->index); + unregister_raw_console(console); + res = _braille_unregister_console(console); if (res < 0) return res; diff --git a/kernel/kernel/printk/printk_safe.c b/kernel/kernel/printk/printk_safe.c index 2e9e3ed..9c065e4 100644 --- a/kernel/kernel/printk/printk_safe.c +++ b/kernel/kernel/printk/printk_safe.c @@ -9,6 +9,7 @@ #include <linux/kdb.h> #include <linux/smp.h> #include <linux/cpumask.h> +#include <linux/irq_pipeline.h> #include <linux/irq_work.h> #include <linux/printk.h> #include <linux/kprobes.h> @@ -374,6 +375,8 @@ if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0)) return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args); #endif + if (inband_unsafe()) + return vprintk_nmi(fmt, args); /* * Try to use the main logbuf even in NMI. 
But avoid calling console diff --git a/kernel/kernel/ptrace.c b/kernel/kernel/ptrace.c index aab480e..18f26ae 100644 --- a/kernel/kernel/ptrace.c +++ b/kernel/kernel/ptrace.c @@ -854,10 +854,12 @@ if (unlikely(!arch_has_block_step())) return -EIO; user_enable_block_step(child); + inband_ptstep_notify(child); } else if (is_singlestep(request) || is_sysemu_singlestep(request)) { if (unlikely(!arch_has_single_step())) return -EIO; user_enable_single_step(child); + inband_ptstep_notify(child); } else { user_disable_single_step(child); } diff --git a/kernel/kernel/rcu/tree.c b/kernel/kernel/rcu/tree.c index b10d6bc..7e71bc8 100644 --- a/kernel/kernel/rcu/tree.c +++ b/kernel/kernel/rcu/tree.c @@ -232,6 +232,11 @@ return 0; } +static inline bool rcu_in_nonmaskable(void) +{ + return on_pipeline_entry() || in_nmi(); +} + void rcu_softirq_qs(void) { rcu_qs(); @@ -710,6 +715,7 @@ struct rcu_data *rdp = this_cpu_ptr(&rcu_data); instrumentation_begin(); + /* * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. * (We are exiting an NMI handler, so RCU better be paying attention @@ -735,7 +741,7 @@ trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ - if (!in_nmi()) + if (!rcu_in_nonmaskable()) rcu_prepare_for_idle(); // instrumentation for the noinstr rcu_dynticks_eqs_enter() @@ -746,7 +752,7 @@ rcu_dynticks_eqs_enter(); // ... but is no longer watching here. - if (!in_nmi()) + if (!rcu_in_nonmaskable()) rcu_dynticks_task_enter(); } @@ -935,7 +941,7 @@ struct rcu_data *rdp = this_cpu_ptr(&rcu_data); // If we're here from NMI there's nothing to do. - if (in_nmi()) + if (rcu_in_nonmaskable()) return; RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(), @@ -996,14 +1002,14 @@ */ if (rcu_dynticks_curr_cpu_in_eqs()) { - if (!in_nmi()) + if (!rcu_in_nonmaskable()) rcu_dynticks_task_exit(); // RCU is not watching here ... rcu_dynticks_eqs_exit(); // ... but is watching here. - if (!in_nmi()) { + if (!rcu_in_nonmaskable()) { instrumentation_begin(); rcu_cleanup_after_idle(); instrumentation_end(); @@ -1016,7 +1022,7 @@ instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); incby = 1; - } else if (!in_nmi()) { + } else if (!rcu_in_nonmaskable()) { instrumentation_begin(); rcu_irq_enter_check_tick(); } else { @@ -1094,10 +1100,10 @@ /** * rcu_is_watching - see if RCU thinks that the current CPU is not idle * - * Return true if RCU is watching the running CPU, which means that this - * CPU can safely enter RCU read-side critical sections. In other words, - * if the current CPU is not in its idle loop or is in an interrupt or - * NMI handler, return true. + * Return true if RCU is watching the running CPU, which means that + * this CPU can safely enter RCU read-side critical sections. In + * other words, if the current CPU is not in its idle loop or is in an + * interrupt or NMI handler, return true. * * Make notrace because it can be called by the internal functions of * ftrace, and making this notrace removes unnecessary recursion calls. 
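The CONFIG_RAW_PRINTK block added to kernel/printk/printk.c earlier in this patch lets drivers emit diagnostics without going through the regular, stage-unaware printk machinery: a console provides a ->write_raw() method, the most recently registered one becomes the raw console, and raw_printk() funnels into it under a hard spinlock. A sketch of both sides, with the ->write_raw() signature inferred from the call site in raw_puts(); the sample_* names are hypothetical:

#include <linux/console.h>
#include <linux/printk.h>

/* Hypothetical: spin on the TX-ready bit, then write c. */
static inline void sample_uart_poll_putc(char c)
{
}

/*
 * Must be callable with hard irqs off from any stage: poll the
 * device directly, take no locks beyond what the caller holds.
 */
static void sample_uart_write_raw(struct console *con,
				  const char *s, size_t len)
{
	while (len--)
		sample_uart_poll_putc(*s++);
}

static struct console sample_uart_console = {
	.name = "sampleuart",
	.write_raw = sample_uart_write_raw,	/* opts into raw mode */
	.flags = CON_PRINTBUFFER,
	.index = -1,
};

Once such a console is registered through register_console(), raw_printk() becomes usable from out-of-band or NMI-like contexts where the regular printk path would be unsafe.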
@@ -1106,6 +1112,9 @@ { bool ret; + if (on_pipeline_entry()) + return true; + preempt_disable_notrace(); ret = !rcu_dynticks_curr_cpu_in_eqs(); preempt_enable_notrace(); @@ -1152,7 +1161,7 @@ struct rcu_node *rnp; bool ret = false; - if (in_nmi() || !rcu_scheduler_fully_active) + if (rcu_in_nonmaskable() || !rcu_scheduler_fully_active) return true; preempt_disable_notrace(); rdp = this_cpu_ptr(&rcu_data); diff --git a/kernel/kernel/rcu/tree_plugin.h b/kernel/kernel/rcu/tree_plugin.h index f5ba074..7d9ec7a 100644 --- a/kernel/kernel/rcu/tree_plugin.h +++ b/kernel/kernel/rcu/tree_plugin.h @@ -790,7 +790,7 @@ struct rcu_data *rdp; if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) || - irqs_disabled() || preempt_count() || !rcu_state.gp_kthread) + on_pipeline_entry() || running_oob() || irqs_disabled() || preempt_count() || !rcu_state.gp_kthread) return; rdp = this_cpu_ptr(&rcu_data); rcu_report_qs_rdp(rdp); diff --git a/kernel/kernel/rcu/update.c b/kernel/kernel/rcu/update.c index 849f0aa9..8e4b48b 100644 --- a/kernel/kernel/rcu/update.c +++ b/kernel/kernel/rcu/update.c @@ -99,6 +99,11 @@ */ static bool rcu_read_lock_held_common(bool *ret) { + if (irqs_pipelined() && + (hard_irqs_disabled() || running_oob())) { + *ret = 1; + return true; + } if (!debug_lockdep_rcu_enabled()) { *ret = true; return true; @@ -209,6 +214,32 @@ #endif /* #ifndef CONFIG_TINY_RCU */ +#ifdef CONFIG_IRQ_PIPELINE + +/* + * Prepare for taking the RCU read lock when running out-of-band. Nop + * otherwise. + */ +void rcu_oob_prepare_lock(void) +{ + if (!on_pipeline_entry() && running_oob()) + rcu_nmi_enter(); +} +EXPORT_SYMBOL_GPL(rcu_oob_prepare_lock); + +/* + * Converse to rcu_oob_prepare_lock(), after dropping the RCU read + * lock. + */ +void rcu_oob_finish_lock(void) +{ + if (!on_pipeline_entry() && running_oob()) + rcu_nmi_exit(); +} +EXPORT_SYMBOL_GPL(rcu_oob_finish_lock); + +#endif /* CONFIG_IRQ_PIPELINE */ + /* * Test each non-SRCU synchronous grace-period wait API. This is * useful just after a change in mode for these primitives, and diff --git a/kernel/kernel/sched/core.c b/kernel/kernel/sched/core.c index 7359375..b14a6fb 100644 --- a/kernel/kernel/sched/core.c +++ b/kernel/kernel/sched/core.c @@ -2045,6 +2045,7 @@ if (cpumask_test_cpu(task_cpu(p), new_mask)) goto out; + inband_migration_notify(p, dest_cpu); if (task_running(rq, p) || p->state == TASK_WAKING) { struct migration_arg arg = { p, dest_cpu }; /* Need help from migration thread: drop lock and wait. */ @@ -3065,7 +3066,7 @@ * - we're serialized against set_special_state() by virtue of * it disabling IRQs (this allows not taking ->pi_lock). */ - if (!(p->state & state)) + if (!(p->state & state) || task_is_off_stage(p)) goto out; success = 1; @@ -3083,7 +3084,7 @@ */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); - if (!(p->state & state)) + if (!(p->state & state) || task_is_off_stage(p)) goto unlock; #ifdef CONFIG_FREEZER @@ -3348,6 +3349,9 @@ init_numa_balancing(clone_flags, p); #ifdef CONFIG_SMP p->wake_entry.u_flags = CSD_TYPE_TTWU; +#endif +#ifdef CONFIG_IRQ_PIPELINE + init_task_stall_bits(p); #endif } @@ -3816,6 +3820,13 @@ rseq_preempt(prev); fire_sched_out_preempt_notifiers(prev, next); prepare_task(next); + prepare_inband_switch(next); + /* + * Do not fold the following hard irqs disabling into + * prepare_inband_switch(), this is required when pipelining + * interrupts, not only by alternate scheduling. 
+ */ + hard_cond_local_irq_disable(); prepare_arch_switch(next); } @@ -3973,8 +3984,19 @@ * finish_task_switch() will drop rq->lock() and lower preempt_count * and the preempt_enable() will end up enabling preemption (on * PREEMPT_COUNT kernels). + * + * If interrupts are pipelined, we may enable hard irqs since + * the in-band stage is stalled. If dovetailing is enabled + * too, schedule_tail() is the place where transitions of + * tasks from the in-band to the oob stage complete. The + * companion core is notified that 'prev' is now suspended in + * the in-band stage, and can be safely resumed in the oob + * stage. */ + WARN_ON_ONCE(irq_pipeline_debug() && !irqs_disabled()); + hard_cond_local_irq_enable(); + oob_trampoline(); rq = finish_task_switch(prev); balance_callback(rq); preempt_enable(); @@ -4028,6 +4050,20 @@ */ switch_mm_irqs_off(prev->active_mm, next->mm, next); + /* + * If dovetail is enabled, insert a short window of + * opportunity for preemption by out-of-band IRQs + * before finalizing the context switch. + * dovetail_context_switch() can deal with preempting + * partially switched in-band contexts. + */ + if (dovetailing()) { + struct mm_struct *oldmm = prev->active_mm; + prev->active_mm = next->mm; + hard_local_irq_sync(); + prev->active_mm = oldmm; + } + if (!prev->mm) { // from kernel /* will mmdrop() in finish_task_switch(). */ rq->prev_mm = prev->active_mm; @@ -4042,6 +4078,15 @@ /* Here we just switch the register state and the stack. */ switch_to(prev, next, prev); barrier(); + + /* + * If 'next' is on its way to the oob stage, don't run the + * context switch epilogue just yet. We will do that at some + * point later, when the task switches back to the in-band + * stage. + */ + if (unlikely(inband_switch_tail())) + return NULL; return finish_task_switch(prev); } @@ -4557,6 +4602,8 @@ panic("corrupted shadow stack detected inside scheduler\n"); #endif + check_inband_stage(); + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP if (!preempt && prev->state && prev->non_block_count) { printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", @@ -4682,7 +4729,7 @@ * * WARNING: must be called with preemption disabled! */ -static void __sched notrace __schedule(bool preempt) +static int __sched notrace __schedule(bool preempt) { struct task_struct *prev, *next; unsigned long *switch_count; @@ -4802,12 +4849,17 @@ /* Also unlocks the rq: */ rq = context_switch(rq, prev, next, &rf); + if (dovetailing() && rq == NULL) + /* Task moved to the oob stage. */ + return 1; } else { rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); rq_unlock_irq(rq, &rf); } balance_callback(rq); + + return 0; } void __noreturn do_task_dead(void) @@ -4879,7 +4931,8 @@ sched_submit_work(tsk); do { preempt_disable(); - __schedule(false); + if (__schedule(false)) + return; sched_preempt_enable_no_resched(); } while (need_resched()); sched_update_worker(tsk); @@ -4960,7 +5013,8 @@ */ preempt_disable_notrace(); preempt_latency_start(1); - __schedule(true); + if (__schedule(true)) + return; preempt_latency_stop(1); preempt_enable_no_resched_notrace(); @@ -4982,7 +5036,7 @@ * If there is a non-zero preempt_count or interrupts are disabled, * we do not want to preempt the current task. Just return..
*/ - if (likely(!preemptible())) + if (likely(!running_inband() || !preemptible())) return; preempt_schedule_common(); @@ -5008,7 +5062,7 @@ { enum ctx_state prev_ctx; - if (likely(!preemptible())) + if (likely(!running_inband() || !preemptible())) return; do { @@ -5049,23 +5103,41 @@ * off of irq context. * Note, that this is called and return with irqs disabled. This will * protect us against recursive calling from irq. + * + * IRQ pipeline: we are called with hard irqs off, synchronize the + * pipeline then return the same way, so that the in-band log is + * guaranteed empty and further interrupt delivery is postponed by the + * hardware until we have exited the kernel. */ asmlinkage __visible void __sched preempt_schedule_irq(void) { enum ctx_state prev_state; + + if (irq_pipeline_debug()) { + /* Catch any weirdness in pipelined entry code. */ + if (WARN_ON_ONCE(!running_inband())) + return; + WARN_ON_ONCE(!hard_irqs_disabled()); + } + + hard_cond_local_irq_enable(); /* Catch callers which need to be fixed */ BUG_ON(preempt_count() || !irqs_disabled()); prev_state = exception_enter(); - do { + for (;;) { preempt_disable(); local_irq_enable(); __schedule(true); + sync_inband_irqs(); local_irq_disable(); sched_preempt_enable_no_resched(); - } while (need_resched()); + if (!need_resched()) + break; + hard_cond_local_irq_enable(); + } exception_exit(prev_state); } @@ -8892,6 +8964,233 @@ #endif /* CONFIG_CGROUP_SCHED */ +#ifdef CONFIG_DOVETAIL + +int dovetail_leave_inband(void) +{ + struct task_struct *p = current; + struct irq_pipeline_data *pd; + unsigned long flags; + + preempt_disable(); + + pd = raw_cpu_ptr(&irq_pipeline); + + if (WARN_ON_ONCE(dovetail_debug() && pd->task_inflight)) + goto out; /* Paranoid. */ + + raw_spin_lock_irqsave(&p->pi_lock, flags); + pd->task_inflight = p; + /* + * The scope of the off-stage state is broader than _TLF_OOB, + * in that it includes the transition path from the in-band + * context to the oob stage. + */ + set_thread_local_flags(_TLF_OFFSTAGE); + set_current_state(TASK_INTERRUPTIBLE); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + sched_submit_work(p); + /* + * The current task is scheduled out from the inband stage, + * before resuming on the oob stage. Since this code stands + * for the scheduling tail of the oob scheduler, + * arch_dovetail_switch_finish() is called to perform + * architecture-specific fixups (e.g. fpu context reload).
+ */ + if (likely(__schedule(false))) { + arch_dovetail_switch_finish(false); + return 0; + } + + clear_thread_local_flags(_TLF_OFFSTAGE); + pd->task_inflight = NULL; +out: + preempt_enable(); + + return -ERESTARTSYS; +} +EXPORT_SYMBOL_GPL(dovetail_leave_inband); + +void dovetail_resume_inband(void) +{ + struct task_struct *p; + struct rq *rq; + + p = __this_cpu_read(irq_pipeline.rqlock_owner); + if (WARN_ON_ONCE(dovetail_debug() && p == NULL)) + return; + + if (WARN_ON_ONCE(dovetail_debug() && (preempt_count() & STAGE_MASK))) + return; + + rq = finish_task_switch(p); + balance_callback(rq); + preempt_enable(); + oob_trampoline(); +} +EXPORT_SYMBOL_GPL(dovetail_resume_inband); + +#ifdef CONFIG_KVM + +#include <linux/kvm_host.h> + +static inline void notify_guest_preempt(void) +{ + struct kvm_oob_notifier *nfy; + struct irq_pipeline_data *p; + + p = raw_cpu_ptr(&irq_pipeline); + nfy = p->vcpu_notify; + if (unlikely(nfy)) + nfy->handler(nfy); +} +#else +static inline void notify_guest_preempt(void) +{ } +#endif + +bool dovetail_context_switch(struct dovetail_altsched_context *out, + struct dovetail_altsched_context *in, + bool leave_inband) +{ + unsigned long pc __maybe_unused, lockdep_irqs; + struct task_struct *next, *prev, *last; + struct mm_struct *prev_mm, *next_mm; + bool inband_tail = false; + + WARN_ON_ONCE(dovetail_debug() && on_pipeline_entry()); + + if (leave_inband) { + struct task_struct *tsk = current; + /* + * We are about to leave the current inband context + * for switching to an out-of-band task, save the + * preempted context information. + */ + out->task = tsk; + out->active_mm = tsk->active_mm; + /* + * Switching out-of-band may require some housekeeping + * from a kernel VM which might currently run guest + * code, notify it about the upcoming preemption. + */ + notify_guest_preempt(); + } + + arch_dovetail_switch_prepare(leave_inband); + + next = in->task; + prev = out->task; + prev_mm = out->active_mm; + next_mm = in->active_mm; + + if (next_mm == NULL) { + in->active_mm = prev_mm; + in->borrowed_mm = true; + enter_lazy_tlb(prev_mm, next); + } else { + switch_oob_mm(prev_mm, next_mm, next); + /* + * We might be switching back to the inband context + * which we preempted earlier, shortly after "current" + * dropped its mm context in the do_exit() path + * (next->mm == NULL). In such a case, a lazy TLB + * state is expected when leaving the mm. + */ + if (next->mm == NULL) + enter_lazy_tlb(prev_mm, next); + } + + if (out->borrowed_mm) { + out->borrowed_mm = false; + out->active_mm = NULL; + } + + /* + * Tasks running out-of-band may alter the (in-band) + * preemption count as long as they don't trigger an in-band + * rescheduling, which Dovetail properly blocks. + * + * If the preemption count is not stack-based but a global + * per-cpu variable instead, changing it has a globally + * visible side-effect though, which is a problem if the + * out-of-band task is preempted and schedules away before the + * change is rolled back: this may cause the in-band context + * to later resume with a broken preemption count. + * + * For this reason, the preemption count of any context which + * blocks from the out-of-band stage is carried over and + * restored across switches, emulating a stack-based + * storage. + * + * Eventually, the count is reset to FORK_PREEMPT_COUNT upon + * transition from out-of-band to in-band stage, reinstating + * the value in effect when the converse transition happened + * at some point before. 
+ */ + if (IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT)) + pc = preempt_count(); + + /* + * Like the preemption count and for the same reason, the irq + * state maintained by lockdep must be preserved across + * switches. + */ + lockdep_irqs = lockdep_read_irqs_state(); + + switch_to(prev, next, last); + barrier(); + + if (check_hard_irqs_disabled()) + hard_local_irq_disable(); + + /* + * If we entered this routine for switching to an out-of-band + * task but don't have _TLF_OOB set for the current context + * when resuming, this portion of code is the switch tail of + * the inband schedule() routine, finalizing a transition to + * the inband stage for the current task. Update the stage + * level as/if required. + */ + if (unlikely(!leave_inband && !test_thread_local_flags(_TLF_OOB))) { + if (IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT)) + preempt_count_set(FORK_PREEMPT_COUNT); + else if (unlikely(dovetail_debug() && + !(preempt_count() & STAGE_MASK))) + WARN_ON_ONCE(1); + else + preempt_count_sub(STAGE_OFFSET); + + lockdep_write_irqs_state(lockdep_irqs); + + /* + * Fixup the interrupt state conversely to what + * inband_switch_tail() does for the opposite stage + * switching direction. + */ + stall_inband(); + trace_hardirqs_off(); + inband_tail = true; + } else { + if (IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT)) + preempt_count_set(pc); + + lockdep_write_irqs_state(lockdep_irqs); + } + + arch_dovetail_switch_finish(leave_inband); + + /* + * inband_tail is true whenever we are finalizing a transition + * to the inband stage from the oob context for current. See + * above. + */ + return inband_tail; +} +EXPORT_SYMBOL_GPL(dovetail_context_switch); + +#endif /* CONFIG_DOVETAIL */ + void dump_cpu_task(int cpu) { pr_info("Task dump for CPU %d:\n", cpu); diff --git a/kernel/kernel/sched/idle.c b/kernel/kernel/sched/idle.c index 6dc7d9a..cc2710e 100644 --- a/kernel/kernel/sched/idle.c +++ b/kernel/kernel/sched/idle.c @@ -80,6 +80,7 @@ void __weak arch_cpu_idle(void) { cpu_idle_force_poll = 1; + hard_local_irq_enable(); raw_local_irq_enable(); } @@ -87,13 +88,18 @@ * default_idle_call - Default CPU idle routine. * * To use when the cpuidle framework cannot be used. + * + * When interrupts are pipelined, this call is entered with hard irqs + * on and the in-band stage is stalled. Returns with hard irqs on, + * in-band stage stalled. irq_cpuidle_enter() then turns off hard irqs + * before synchronizing irqs, making sure we have no event lingering + * in the interrupt log as we go for a nap. */ void __cpuidle default_idle_call(void) { if (current_clr_polling_and_test()) { - local_irq_enable(); - } else { - + local_irq_enable_full(); + } else if (irq_cpuidle_enter(NULL, NULL)) { /* hard irqs off now */ trace_cpu_idle(1, smp_processor_id()); stop_critical_timings(); @@ -127,6 +133,8 @@ start_critical_timings(); trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); + } else { + local_irq_enable_full(); } } @@ -249,6 +257,13 @@ __current_set_polling(); /* + * Catch mishandling of the CPU's interrupt disable flag when + * pipelining IRQs. 
+ */ + if (WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled())) + hard_local_irq_enable(); + + /* * It is up to the idle functions to reenable local interrupts */ if (WARN_ON_ONCE(irqs_disabled())) @@ -300,6 +315,7 @@ cpu_idle_poll(); } else { cpuidle_idle_call(); + WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()); } arch_cpu_idle_exit(); } diff --git a/kernel/kernel/sched/sched.h b/kernel/kernel/sched/sched.h index 9e798c5..2f1c1ab 100644 --- a/kernel/kernel/sched/sched.h +++ b/kernel/kernel/sched/sched.h @@ -52,6 +52,8 @@ #include <linux/membarrier.h> #include <linux/migrate.h> #include <linux/mmu_context.h> +#include <linux/irq_pipeline.h> +#include <linux/dovetail.h> #include <linux/nmi.h> #include <linux/proc_fs.h> #include <linux/prefetch.h> diff --git a/kernel/kernel/sched/wait.c b/kernel/kernel/sched/wait.c index c4f324a..c3a42d9 100644 --- a/kernel/kernel/sched/wait.c +++ b/kernel/kernel/sched/wait.c @@ -71,6 +71,8 @@ wait_queue_entry_t *curr, *next; int cnt = 0; + check_inband_stage(); + lockdep_assert_held(&wq_head->lock); if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) { diff --git a/kernel/kernel/signal.c b/kernel/kernel/signal.c index f6ecd01..afadec2 100644 --- a/kernel/kernel/signal.c +++ b/kernel/kernel/signal.c @@ -763,6 +763,10 @@ void signal_wake_up_state(struct task_struct *t, unsigned int state) { set_tsk_thread_flag(t, TIF_SIGPENDING); + + /* TIF_SIGPENDING must be set prior to notifying. */ + inband_signal_notify(t); + /* * TASK_WAKEKILL also means wake it up in the stopped/traced/killable * case. We don't check t->state here because there is a race with it @@ -984,8 +988,11 @@ if (sig == SIGKILL) return true; - if (task_is_stopped_or_traced(p)) + if (task_is_stopped_or_traced(p)) { + if (!signal_pending(p)) + inband_signal_notify(p); return false; + } return task_curr(p) || !task_sigpending(p); } @@ -2145,6 +2152,7 @@ * schedule() will not sleep if there is a pending signal that * can awaken the task. */ + inband_ptstop_notify(); set_special_state(TASK_TRACED); /* @@ -2238,6 +2246,8 @@ read_unlock(&tasklist_lock); } + inband_ptcont_notify(); + /* * We are back. 
Now reacquire the siglock before touching * last_siginfo, so that we are sure to have synchronized with diff --git a/kernel/kernel/stop_machine.c b/kernel/kernel/stop_machine.c index c65cfb7..49c00e4 100644 --- a/kernel/kernel/stop_machine.c +++ b/kernel/kernel/stop_machine.c @@ -207,8 +207,8 @@ curstate = newstate; switch (curstate) { case MULTI_STOP_DISABLE_IRQ: - local_irq_disable(); hard_irq_disable(); + local_irq_disable(); break; case MULTI_STOP_RUN: if (is_active) @@ -229,6 +229,7 @@ rcu_momentary_dyntick_idle(); } while (curstate != MULTI_STOP_EXIT); + hard_irq_enable(); local_irq_restore(flags); return err; } @@ -629,6 +630,7 @@ local_irq_save(flags); hard_irq_disable(); ret = (*fn)(data); + hard_irq_enable(); local_irq_restore(flags); return ret; diff --git a/kernel/kernel/time/Makefile b/kernel/kernel/time/Makefile index c8f0016..14cb45c 100644 --- a/kernel/kernel/time/Makefile +++ b/kernel/kernel/time/Makefile @@ -16,6 +16,7 @@ endif obj-$(CONFIG_GENERIC_SCHED_CLOCK) += sched_clock.o obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o +obj-$(CONFIG_IRQ_PIPELINE) += tick-proxy.o obj-$(CONFIG_HAVE_GENERIC_VDSO) += vsyscall.o obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o obj-$(CONFIG_TEST_UDELAY) += test_udelay.o diff --git a/kernel/kernel/time/clockevents.c b/kernel/kernel/time/clockevents.c index f549022..da6735d 100644 --- a/kernel/kernel/time/clockevents.c +++ b/kernel/kernel/time/clockevents.c @@ -97,6 +97,7 @@ /* Transition with new state-specific callbacks */ switch (state) { case CLOCK_EVT_STATE_DETACHED: + case CLOCK_EVT_STATE_RESERVED: /* The clockevent device is getting replaced. Shut it down. */ case CLOCK_EVT_STATE_SHUTDOWN: @@ -437,6 +438,69 @@ } EXPORT_SYMBOL_GPL(clockevents_unbind_device); +#ifdef CONFIG_IRQ_PIPELINE + +/** + * clockevents_register_proxy - register a proxy device on the current CPU + * @dev: proxy to register + */ +int clockevents_register_proxy(struct clock_proxy_device *dev) +{ + struct clock_event_device *proxy_dev, *real_dev; + unsigned long flags; + u32 freq; + int ret; + + raw_spin_lock_irqsave(&clockevents_lock, flags); + + ret = tick_setup_proxy(dev); + if (ret) { + raw_spin_unlock_irqrestore(&clockevents_lock, flags); + return ret; + } + + proxy_dev = &dev->proxy_device; + clockevent_set_state(proxy_dev, CLOCK_EVT_STATE_DETACHED); + + list_add(&proxy_dev->list, &clockevent_devices); + tick_check_new_device(proxy_dev); + clockevents_notify_released(); + + raw_spin_unlock_irqrestore(&clockevents_lock, flags); + + real_dev = dev->real_device; + freq = (1000000000ULL * real_dev->mult) >> real_dev->shift; + printk(KERN_INFO "CPU%d: proxy tick device registered (%u.%02uMHz)\n", + smp_processor_id(), freq / 1000000, (freq / 10000) % 100); + + return ret; +} + +void clockevents_unregister_proxy(struct clock_proxy_device *dev) +{ + unsigned long flags; + int ret; + + clockevents_register_device(dev->real_device); + clockevents_switch_state(dev->real_device, CLOCK_EVT_STATE_DETACHED); + + /* + * Pop the proxy device, do not give it back to the + * framework. 
+ */ + raw_spin_lock_irqsave(&clockevents_lock, flags); + ret = clockevents_replace(&dev->proxy_device); + raw_spin_unlock_irqrestore(&clockevents_lock, flags); + + if (WARN_ON(ret)) + return; + + printk(KERN_INFO "CPU%d: proxy tick device unregistered\n", + smp_processor_id()); +} + +#endif + /** * clockevents_register_device - register a clock event device * @dev: device to register @@ -575,9 +639,18 @@ */ if (old) { module_put(old->owner); - clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED); + /* + * Do not move the device backing a proxy tick device + * to the release list, keep it around but mark it as + * reserved. + */ list_del(&old->list); - list_add(&old->list, &clockevents_released); + if (tick_check_is_proxy(new)) { + clockevents_switch_state(old, CLOCK_EVT_STATE_RESERVED); + } else { + clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED); + list_add(&old->list, &clockevents_released); + } } if (new) { diff --git a/kernel/kernel/time/clocksource.c b/kernel/kernel/time/clocksource.c index 74492f0..2921e18 100644 --- a/kernel/kernel/time/clocksource.c +++ b/kernel/kernel/time/clocksource.c @@ -1007,8 +1007,8 @@ clocksource_update_max_deferment(cs); - pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n", - cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns); + pr_info("%s: freq: %Lu Hz, mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n", + cs->name, (u64)freq * scale, cs->mask, cs->max_cycles, cs->max_idle_ns); } EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale); @@ -1275,10 +1275,36 @@ } static DEVICE_ATTR_RO(available_clocksource); +/** + * vdso_clocksource_show - sysfs interface for vDSO type of + * current clocksource + * @dev: unused + * @attr: unused + * @buf: char buffer to be filled with vDSO type + */ +static ssize_t vdso_clocksource_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t count = 0, type; + + mutex_lock(&clocksource_mutex); + type = curr_clocksource->vdso_type; + count = snprintf(buf, PAGE_SIZE, "%s\n", + type == CLOCKSOURCE_VDSO_NONE ? "none" : + type == CLOCKSOURCE_VDSO_ARCHITECTED ? "architected" : + "mmio"); + mutex_unlock(&clocksource_mutex); + + return count; +} +static DEVICE_ATTR_RO(vdso_clocksource); + static struct attribute *clocksource_attrs[] = { &dev_attr_current_clocksource.attr, &dev_attr_unbind_clocksource.attr, &dev_attr_available_clocksource.attr, + &dev_attr_vdso_clocksource.attr, NULL }; ATTRIBUTE_GROUPS(clocksource); diff --git a/kernel/kernel/time/hrtimer.c b/kernel/kernel/time/hrtimer.c index 544ce87..e52623c 100644 --- a/kernel/kernel/time/hrtimer.c +++ b/kernel/kernel/time/hrtimer.c @@ -873,6 +873,7 @@ on_each_cpu(retrigger_next_event, NULL, 1); #endif timerfd_clock_was_set(); + inband_clock_was_set(); } static void clock_was_set_work(struct work_struct *work) diff --git a/kernel/kernel/time/tick-broadcast.c b/kernel/kernel/time/tick-broadcast.c index 086d36b..e3d15b3 100644 --- a/kernel/kernel/time/tick-broadcast.c +++ b/kernel/kernel/time/tick-broadcast.c @@ -796,6 +796,23 @@ int ret = 0; ktime_t now; + /* + * If there is no broadcast device, tell the caller not to go + * into deep idle. + */ + if (!tick_broadcast_device.evtdev) + return -EBUSY; + + dev = this_cpu_ptr(&tick_cpu_device)->evtdev; + + /* + * If proxying the hardware timer for high-precision tick + * delivery to the out-of-band stage, the whole broadcast + * dance is a no-go. Deny entering deep idle. 
+ */ + if (dev->features & CLOCK_EVT_FEAT_PROXY) + return -EBUSY; + raw_spin_lock(&tick_broadcast_lock); bc = tick_broadcast_device.evtdev; diff --git a/kernel/kernel/time/tick-common.c b/kernel/kernel/time/tick-common.c index 572b4c0..3f4b7ec 100644 --- a/kernel/kernel/time/tick-common.c +++ b/kernel/kernel/time/tick-common.c @@ -248,7 +248,8 @@ } else { handler = td->evtdev->event_handler; next_event = td->evtdev->next_event; - td->evtdev->event_handler = clockevents_handle_noop; + if (!clockevent_state_reserved(td->evtdev)) + td->evtdev->event_handler = clockevents_handle_noop; } td->evtdev = newdev; @@ -330,6 +331,12 @@ bool tick_check_replacement(struct clock_event_device *curdev, struct clock_event_device *newdev) { + /* + * Never replace an active proxy except when unregistering it. + */ + if (tick_check_is_proxy(curdev)) + return false; + if (!tick_check_percpu(curdev, newdev, smp_processor_id())) return false; @@ -350,6 +357,9 @@ td = &per_cpu(tick_cpu_device, cpu); curdev = td->evtdev; + if (tick_check_is_proxy(curdev)) + goto out_bc; + /* cpu local device ? */ if (!tick_check_percpu(curdev, newdev, cpu)) goto out_bc; @@ -367,7 +377,12 @@ * not give it back to the clockevents layer ! */ if (tick_is_broadcast_device(curdev)) { - clockevents_shutdown(curdev); + if (tick_check_is_proxy(newdev)) { + list_del(&curdev->list); + clockevents_switch_state(curdev, CLOCK_EVT_STATE_RESERVED); + } else { + clockevents_shutdown(curdev); + } curdev = NULL; } clockevents_exchange_device(curdev, newdev); diff --git a/kernel/kernel/time/tick-internal.h b/kernel/kernel/time/tick-internal.h index ab9cb68..1fc4bfc 100644 --- a/kernel/kernel/time/tick-internal.h +++ b/kernel/kernel/time/tick-internal.h @@ -48,15 +48,26 @@ dev->state_use_accessors = state; } +static inline bool tick_check_is_proxy(struct clock_event_device *dev) +{ + if (!irqs_pipelined()) + return false; + + return dev && dev->features & CLOCK_EVT_FEAT_PROXY; +} + extern void clockevents_shutdown(struct clock_event_device *dev); extern void clockevents_exchange_device(struct clock_event_device *old, struct clock_event_device *new); -extern void clockevents_switch_state(struct clock_event_device *dev, - enum clock_event_state state); extern int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, bool force); extern void clockevents_handle_noop(struct clock_event_device *dev); extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq); +#ifdef CONFIG_IRQ_PIPELINE +int clockevents_register_proxy(struct clock_proxy_device *dev); +extern void clockevents_unregister_proxy(struct clock_proxy_device *dev); +int tick_setup_proxy(struct clock_proxy_device *dev); +#endif extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt); /* Broadcasting support */ diff --git a/kernel/kernel/time/tick-proxy.c b/kernel/kernel/time/tick-proxy.c new file mode 100644 index 0000000..5a87798 --- /dev/null +++ b/kernel/kernel/time/tick-proxy.c @@ -0,0 +1,466 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>. 
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/delay.h> +#include <linux/smp.h> +#include <linux/err.h> +#include <linux/cpumask.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irq_pipeline.h> +#include <linux/stop_machine.h> +#include <linux/slab.h> +#include "tick-internal.h" + +static unsigned int proxy_tick_irq; + +static DEFINE_MUTEX(proxy_mutex); + +static DEFINE_PER_CPU(struct clock_proxy_device, proxy_tick_device); + +static inline struct clock_event_device * +get_real_tick_device(struct clock_event_device *proxy_dev) +{ + return container_of(proxy_dev, struct clock_proxy_device, proxy_device)->real_device; +} + +static void proxy_event_handler(struct clock_event_device *real_dev) +{ + struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device); + struct clock_event_device *proxy_dev = &dev->proxy_device; + + proxy_dev->event_handler(proxy_dev); +} + +static int proxy_set_state_oneshot(struct clock_event_device *dev) +{ + struct clock_event_device *real_dev = get_real_tick_device(dev); + unsigned long flags; + int ret; + + flags = hard_local_irq_save(); + ret = real_dev->set_state_oneshot(real_dev); + hard_local_irq_restore(flags); + + return ret; +} + +static int proxy_set_state_periodic(struct clock_event_device *dev) +{ + struct clock_event_device *real_dev = get_real_tick_device(dev); + unsigned long flags; + int ret; + + flags = hard_local_irq_save(); + ret = real_dev->set_state_periodic(real_dev); + hard_local_irq_restore(flags); + + return ret; +} + +static int proxy_set_state_oneshot_stopped(struct clock_event_device *dev) +{ + struct clock_event_device *real_dev = get_real_tick_device(dev); + unsigned long flags; + int ret; + + flags = hard_local_irq_save(); + ret = real_dev->set_state_oneshot_stopped(real_dev); + hard_local_irq_restore(flags); + + return ret; +} + +static int proxy_set_state_shutdown(struct clock_event_device *dev) +{ + struct clock_event_device *real_dev = get_real_tick_device(dev); + unsigned long flags; + int ret; + + flags = hard_local_irq_save(); + ret = real_dev->set_state_shutdown(real_dev); + hard_local_irq_restore(flags); + + return ret; +} + +static void proxy_suspend(struct clock_event_device *dev) +{ + struct clock_event_device *real_dev = get_real_tick_device(dev); + unsigned long flags; + + flags = hard_local_irq_save(); + real_dev->suspend(real_dev); + hard_local_irq_restore(flags); +} + +static void proxy_resume(struct clock_event_device *dev) +{ + struct clock_event_device *real_dev = get_real_tick_device(dev); + unsigned long flags; + + flags = hard_local_irq_save(); + real_dev->resume(real_dev); + hard_local_irq_restore(flags); +} + +static int proxy_tick_resume(struct clock_event_device *dev) +{ + struct clock_event_device *real_dev = get_real_tick_device(dev); + unsigned long flags; + int ret; + + flags = hard_local_irq_save(); + ret = real_dev->tick_resume(real_dev); + hard_local_irq_restore(flags); + + return ret; +} + +static void proxy_broadcast(const struct cpumask *mask) +{ + struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device); + struct clock_event_device *real_dev = dev->real_device; + unsigned long flags; + + flags = hard_local_irq_save(); + real_dev->broadcast(mask); + hard_local_irq_restore(flags); +} + +static int proxy_set_next_event(unsigned long delay, + struct clock_event_device *dev) +{ + struct clock_event_device *real_dev = get_real_tick_device(dev); + unsigned long flags; + int ret; + + 
flags = hard_local_irq_save(); + ret = real_dev->set_next_event(delay, real_dev); + hard_local_irq_restore(flags); + + return ret; +} + +static int proxy_set_next_ktime(ktime_t expires, + struct clock_event_device *dev) +{ + struct clock_event_device *real_dev = get_real_tick_device(dev); + unsigned long flags; + int ret; + + flags = hard_local_irq_save(); + ret = real_dev->set_next_ktime(expires, real_dev); + hard_local_irq_restore(flags); + + return ret; +} + +static irqreturn_t proxy_irq_handler(int sirq, void *dev_id) +{ + struct clock_event_device *evt; + + /* + * Tricky: we may end up running this in-band IRQ handler + * because tick_notify_proxy() was posted either: + * + * - from the out-of-band stage via ->handle_oob_event() for + * emulating an in-band tick. In this case, the active tick + * device for the in-band timing core is the proxy device, + * whose event handler is still the same as the real tick + * device's. + * + * - directly by the clock chip driver on the local CPU via + * clockevents_handle_event(), for propagating a tick to the + * in-band stage nobody from the out-of-band stage is + * interested in, i.e. no proxy device was registered on the + * receiving CPU, which was excluded from @cpumask in the call + * to tick_install_proxy(). In this case, the active tick + * device for the in-band timing core is a real clock event + * device. + * + * In both cases, we are running on the in-band stage, and we + * should fire the event handler of the currently active tick + * device for the in-band timing core. + */ + evt = raw_cpu_ptr(&tick_cpu_device)->evtdev; + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +#define interpose_proxy_handler(__proxy, __real, __h) \ + do { \ + if ((__real)->__h) \ + (__proxy)->__h = proxy_ ## __h; \ + } while (0) + +/* + * Setup a proxy which is about to override the tick device on the + * current CPU. Called with clockevents_lock held and irqs off so that + * the tick device does not change under our feet. + */ +int tick_setup_proxy(struct clock_proxy_device *dev) +{ + struct clock_event_device *proxy_dev, *real_dev; + + real_dev = raw_cpu_ptr(&tick_cpu_device)->evtdev; + if ((real_dev->features & + (CLOCK_EVT_FEAT_PIPELINE|CLOCK_EVT_FEAT_ONESHOT)) + != (CLOCK_EVT_FEAT_PIPELINE|CLOCK_EVT_FEAT_ONESHOT)) { + WARN(1, "cannot use clockevent device %s in proxy mode!", + real_dev->name); + return -ENODEV; + } + + /* + * The assumption is that neither we nor clockevents_register_proxy() + * can fail afterwards, so it is ok to advertise the new proxy as + * built by setting dev->real_device early. + */ + dev->real_device = real_dev; + dev->__original_handler = real_dev->event_handler; + + /* + * Inherit the feature bits since the proxy device has the + * same capabilities as the real one we are overriding + * (including CLOCK_EVT_FEAT_C3STOP if present).
+ */ + proxy_dev = &dev->proxy_device; + memset(proxy_dev, 0, sizeof(*proxy_dev)); + proxy_dev->features = real_dev->features | + CLOCK_EVT_FEAT_PERCPU | CLOCK_EVT_FEAT_PROXY; + proxy_dev->name = "proxy"; + proxy_dev->irq = real_dev->irq; + proxy_dev->bound_on = -1; + proxy_dev->cpumask = cpumask_of(smp_processor_id()); + proxy_dev->rating = real_dev->rating + 1; + proxy_dev->mult = real_dev->mult; + proxy_dev->shift = real_dev->shift; + proxy_dev->max_delta_ticks = real_dev->max_delta_ticks; + proxy_dev->min_delta_ticks = real_dev->min_delta_ticks; + proxy_dev->max_delta_ns = real_dev->max_delta_ns; + proxy_dev->min_delta_ns = real_dev->min_delta_ns; + /* + * Interpose default handlers which are safe wrt preemption by + * the out-of-band stage. + */ + interpose_proxy_handler(proxy_dev, real_dev, set_state_oneshot); + interpose_proxy_handler(proxy_dev, real_dev, set_state_oneshot_stopped); + interpose_proxy_handler(proxy_dev, real_dev, set_state_periodic); + interpose_proxy_handler(proxy_dev, real_dev, set_state_shutdown); + interpose_proxy_handler(proxy_dev, real_dev, suspend); + interpose_proxy_handler(proxy_dev, real_dev, resume); + interpose_proxy_handler(proxy_dev, real_dev, tick_resume); + interpose_proxy_handler(proxy_dev, real_dev, broadcast); + interpose_proxy_handler(proxy_dev, real_dev, set_next_event); + interpose_proxy_handler(proxy_dev, real_dev, set_next_ktime); + + dev->__setup_handler(dev); + + return 0; +} + +static int enable_oob_timer(void *arg) /* hard_irqs_disabled() */ +{ + struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device); + struct clock_event_device *real_dev; + + /* + * Install the out-of-band handler on this CPU's real clock + * device, then turn on out-of-band mode for the associated + * IRQ (duplicates are silently ignored if the IRQ is common + * to multiple CPUs). + */ + real_dev = dev->real_device; + real_dev->event_handler = dev->handle_oob_event; + real_dev->features |= CLOCK_EVT_FEAT_OOB; + barrier(); + + /* + * irq_switch_oob() grabs the IRQ descriptor lock which is + * hybrid, so it is fine to invoke this routine with hard + * IRQs off. + */ + irq_switch_oob(real_dev->irq, true); + + return 0; +} + +struct proxy_install_arg { + void (*setup_proxy)(struct clock_proxy_device *dev); + int result; +}; + +static void register_proxy_device(void *arg) /* irqs_disabled() */ +{ + struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device); + struct proxy_install_arg *req = arg; + int ret; + + dev->__setup_handler = req->setup_proxy; + ret = clockevents_register_proxy(dev); + if (ret) { + if (!req->result) + req->result = ret; + } else { + dev->real_device->event_handler = proxy_event_handler; + } +} + +int tick_install_proxy(void (*setup_proxy)(struct clock_proxy_device *dev), + const struct cpumask *cpumask) +{ + struct proxy_install_arg arg; + int ret, sirq; + + mutex_lock(&proxy_mutex); + + ret = -EAGAIN; + if (proxy_tick_irq) + goto out; + + sirq = irq_create_direct_mapping(synthetic_irq_domain); + if (WARN_ON(sirq == 0)) + goto out; + + ret = __request_percpu_irq(sirq, proxy_irq_handler, + IRQF_NO_THREAD, /* no IRQF_TIMER here. */ + "proxy tick", + &proxy_tick_device); + if (WARN_ON(ret)) { + irq_dispose_mapping(sirq); + goto out; + } + + proxy_tick_irq = sirq; + barrier(); + + /* + * Install a proxy tick device on each CPU. As the proxy + * device is picked, the previous (real) tick device is + * switched to reserved state by the clockevent core.
+ * Immediately after, the proxy device starts controlling the + * real device under the hood to carry out the timing requests + * it receives. + * + * For a short period of time, after the proxy device is + * installed, and until the real device IRQ is switched to + * out-of-band mode, the flow is as follows: + * + * [inband timing request] + * proxy_dev->set_next_event(proxy_dev) + * oob_program_event(proxy_dev) + * real_dev->set_next_event(real_dev) + * ... + * <tick event> + * original_timer_handler() [in-band stage] + * clockevents_handle_event(real_dev) + * proxy_event_handler(real_dev) + * inband_event_handler(proxy_dev) + * + * Eventually, we substitute the original (in-band) clock + * event handler with the out-of-band handler for the real + * clock event device, then turn on out-of-band mode for the + * timer IRQ associated to the latter. These two steps are + * performed over a stop_machine() context, so that no tick + * can race with this code while we swap handlers. + * + * Once the hand over is complete, the flow is as follows: + * + * [inband timing request] + * proxy_dev->set_next_event(proxy_dev) + * oob_program_event(proxy_dev) + * real_dev->set_next_event(real_dev) + * ... + * <tick event> + * inband_event_handler() [out-of-band stage] + * clockevents_handle_event(real_dev) + * handle_oob_event(proxy_dev) + * ...(inband tick emulation)... + * tick_notify_proxy() + * ... + * proxy_irq_handler(proxy_dev) [in-band stage] + * clockevents_handle_event(proxy_dev) + * inband_event_handler(proxy_dev) + */ + arg.setup_proxy = setup_proxy; + arg.result = 0; + on_each_cpu_mask(cpumask, register_proxy_device, &arg, true); + if (arg.result) { + tick_uninstall_proxy(cpumask); + return arg.result; + } + + /* + * Start ticking from the out-of-band interrupt stage upon + * receipt of out-of-band timer events. + */ + stop_machine(enable_oob_timer, NULL, cpumask); +out: + mutex_unlock(&proxy_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(tick_install_proxy); + +static int disable_oob_timer(void *arg) /* hard_irqs_disabled() */ +{ + struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device); + struct clock_event_device *real_dev; + + dev = raw_cpu_ptr(&proxy_tick_device); + real_dev = dev->real_device; + real_dev->event_handler = dev->__original_handler; + real_dev->features &= ~CLOCK_EVT_FEAT_OOB; + barrier(); + + irq_switch_oob(real_dev->irq, false); + + return 0; +} + +static void unregister_proxy_device(void *arg) /* irqs_disabled() */ +{ + struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device); + + if (dev->real_device) { + clockevents_unregister_proxy(dev); + dev->real_device = NULL; + } +} + +void tick_uninstall_proxy(const struct cpumask *cpumask) +{ + /* + * Undo all we did in tick_install_proxy(), handing over + * control of the tick device back to the inband code. + */ + mutex_lock(&proxy_mutex); + stop_machine(disable_oob_timer, NULL, cpu_online_mask); + on_each_cpu_mask(cpumask, unregister_proxy_device, NULL, true); + free_percpu_irq(proxy_tick_irq, &proxy_tick_device); + irq_dispose_mapping(proxy_tick_irq); + proxy_tick_irq = 0; + mutex_unlock(&proxy_mutex); +} +EXPORT_SYMBOL_GPL(tick_uninstall_proxy); + +void tick_notify_proxy(void) +{ + /* + * Schedule a tick on the proxy device to occur from the + * in-band stage, which will trigger proxy_irq_handler() at + * some point (i.e. when the in-band stage is back in control + * and not stalled). Note that we might be called from the + * in-band stage in some cases (see proxy_irq_handler()). 
+ */ + irq_post_inband(proxy_tick_irq); +} +EXPORT_SYMBOL_GPL(tick_notify_proxy); diff --git a/kernel/kernel/time/vsyscall.c b/kernel/kernel/time/vsyscall.c index 88e6b8e..2b9b786 100644 --- a/kernel/kernel/time/vsyscall.c +++ b/kernel/kernel/time/vsyscall.c @@ -69,15 +69,41 @@ vdso_ts->nsec = tk->tkr_mono.xtime_nsec; } +static void update_generic_mmio(struct vdso_data *vdata, struct timekeeper *tk) +{ +#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO + const struct clocksource *cs = tk->tkr_mono.clock; + u16 seq; + + if (cs->vdso_type == (vdata->cs_type_seq >> 16)) + return; + + seq = vdata->cs_type_seq; + if (++seq == 0) + seq = 1; + + vdata->cs_type_seq = cs->vdso_type << 16 | seq; + + if (cs->vdso_type >= CLOCKSOURCE_VDSO_MMIO) + snprintf(vdata->cs_mmdev, sizeof(vdata->cs_mmdev), + "/dev/ucs/%u", cs->vdso_type - CLOCKSOURCE_VDSO_MMIO); +#endif +} + void update_vsyscall(struct timekeeper *tk) { struct vdso_data *vdata = __arch_get_k_vdso_data(); struct vdso_timestamp *vdso_ts; + unsigned long flags; s32 clock_mode; u64 nsec; + flags = hard_cond_local_irq_save(); + /* copy vsyscall data */ vdso_write_begin(vdata); + + update_generic_mmio(vdata, tk); clock_mode = tk->tkr_mono.clock->vdso_clock_mode; vdata[CS_HRES_COARSE].clock_mode = clock_mode; @@ -110,13 +136,16 @@ * If the current clocksource is not VDSO capable, then spare the * update of the high reolution parts. */ - if (clock_mode != VDSO_CLOCKMODE_NONE) + if (IS_ENABLED(CONFIG_GENERIC_CLOCKSOURCE_VDSO) || + clock_mode != VDSO_CLOCKMODE_NONE) update_vdso_data(vdata, tk); __arch_update_vsyscall(vdata, tk); vdso_write_end(vdata); + hard_cond_local_irq_restore(flags); + __arch_sync_vdso_data(vdata); } diff --git a/kernel/kernel/trace/ftrace.c b/kernel/kernel/trace/ftrace.c index d97c189..1e39489 100644 --- a/kernel/kernel/trace/ftrace.c +++ b/kernel/kernel/trace/ftrace.c @@ -6271,10 +6271,10 @@ * reason to cause large interrupt latencies while we do it. */ if (!mod) - local_irq_save(flags); + flags = hard_local_irq_save(); ftrace_update_code(mod, start_pg); if (!mod) - local_irq_restore(flags); + hard_local_irq_restore(flags); ret = 0; out: mutex_unlock(&ftrace_lock); @@ -6865,9 +6865,9 @@ unsigned long count, flags; int ret; - local_irq_save(flags); + flags = hard_local_irq_save(); ret = ftrace_dyn_arch_init(); - local_irq_restore(flags); + hard_local_irq_restore(flags); if (ret) goto failed; @@ -7022,7 +7022,15 @@ } } while_for_each_ftrace_op(op); out: - preempt_enable_notrace(); + if (irqs_pipelined() && (hard_irqs_disabled() || !running_inband())) + /* + * Nothing urgent to schedule here. At latest the + * timer tick will pick up whatever the tracing + * functions kicked off. + */ + preempt_enable_no_resched_notrace(); + else + preempt_enable_notrace(); trace_clear_recursion(bit); } diff --git a/kernel/kernel/trace/ring_buffer.c b/kernel/kernel/trace/ring_buffer.c index 49ebb8c..391151f 100644 --- a/kernel/kernel/trace/ring_buffer.c +++ b/kernel/kernel/trace/ring_buffer.c @@ -3165,8 +3165,8 @@ static __always_inline int trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) { - unsigned int val = cpu_buffer->current_context; - unsigned long pc = preempt_count(); + unsigned int val; + unsigned long pc = preempt_count(), flags; int bit; if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) @@ -3175,6 +3175,10 @@ bit = pc & NMI_MASK ? RB_CTX_NMI : pc & HARDIRQ_MASK ? 
RB_CTX_IRQ : RB_CTX_SOFTIRQ; + flags = hard_cond_local_irq_save(); + + val = cpu_buffer->current_context; + if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { /* * It is possible that this was called by transitioning @@ -3182,12 +3186,16 @@ * been updated yet. In this case, use the TRANSITION bit. */ bit = RB_CTX_TRANSITION; - if (val & (1 << (bit + cpu_buffer->nest))) + if (val & (1 << (bit + cpu_buffer->nest))) { + hard_cond_local_irq_restore(flags); return 1; + } } val |= (1 << (bit + cpu_buffer->nest)); cpu_buffer->current_context = val; + + hard_cond_local_irq_restore(flags); return 0; } @@ -3195,8 +3203,12 @@ static __always_inline void trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) { + unsigned long flags; + + flags = hard_cond_local_irq_save(); cpu_buffer->current_context &= cpu_buffer->current_context - (1 << cpu_buffer->nest); + hard_cond_local_irq_restore(flags); } /* The recursive locking above uses 5 bits */ diff --git a/kernel/kernel/trace/trace.c b/kernel/kernel/trace/trace.c index 8b1f74e..f339a56 100644 --- a/kernel/kernel/trace/trace.c +++ b/kernel/kernel/trace/trace.c @@ -1129,9 +1129,9 @@ return; } - local_irq_save(flags); + flags = hard_local_irq_save(); update_max_tr(tr, current, smp_processor_id(), cond_data); - local_irq_restore(flags); + hard_local_irq_restore(flags); } void tracing_snapshot_instance(struct trace_array *tr) @@ -1822,7 +1822,7 @@ if (tr->stop_count) return; - WARN_ON_ONCE(!irqs_disabled()); + WARN_ON_ONCE(!hard_irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ @@ -1866,7 +1866,7 @@ if (tr->stop_count) return; - WARN_ON_ONCE(!irqs_disabled()); + WARN_ON_ONCE(!hard_irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); @@ -2626,12 +2626,14 @@ entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | -#else + (hard_irqs_disabled() ? TRACE_FLAG_IRQS_HARDOFF : 0) | +#elif !defined(CONFIG_IRQ_PIPELINE) TRACE_FLAG_IRQS_NOSUPPORT | #endif ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | + (running_oob() ? TRACE_FLAG_OOB_STAGE : 0) | (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); } @@ -7085,13 +7087,13 @@ ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) break; - local_irq_disable(); + hard_local_irq_disable(); /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) update_max_tr(tr, current, smp_processor_id(), NULL); else update_max_tr_single(tr, current, iter->cpu_file); - local_irq_enable(); + hard_local_irq_enable(); break; default: if (tr->allocated_snapshot) { diff --git a/kernel/kernel/trace/trace.h b/kernel/kernel/trace/trace.h index 8d67f7f..c838a90 100644 --- a/kernel/kernel/trace/trace.h +++ b/kernel/kernel/trace/trace.h @@ -139,11 +139,14 @@ /* * trace_flag_type is an enumeration that holds different * states when a trace occurs. 
These are: - * IRQS_OFF - interrupts were disabled + * IRQS_OFF - interrupts were off (only virtually if pipelining) * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags * NEED_RESCHED - reschedule is requested * HARDIRQ - inside an interrupt handler * SOFTIRQ - inside a softirq handler + * IRQS_HARDOFF - interrupts were hard disabled + * OOB_STAGE - running over the oob stage (assume IRQ tracing + * support is always available w/ pipelining). */ enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, @@ -153,6 +156,8 @@ TRACE_FLAG_SOFTIRQ = 0x10, TRACE_FLAG_PREEMPT_RESCHED = 0x20, TRACE_FLAG_NMI = 0x40, + TRACE_FLAG_IRQS_HARDOFF = 0x80, + TRACE_FLAG_OOB_STAGE = TRACE_FLAG_IRQS_NOSUPPORT, }; #define TRACE_BUF_SIZE 1024 diff --git a/kernel/kernel/trace/trace_branch.c b/kernel/kernel/trace/trace_branch.c index eff0991..e9e754f 100644 --- a/kernel/kernel/trace/trace_branch.c +++ b/kernel/kernel/trace/trace_branch.c @@ -53,7 +53,7 @@ if (unlikely(!tr)) return; - raw_local_irq_save(flags); + flags = hard_local_irq_save(); current->trace_recursion |= TRACE_BRANCH_BIT; data = this_cpu_ptr(tr->array_buffer.data); if (atomic_read(&data->disabled)) @@ -87,7 +87,7 @@ out: current->trace_recursion &= ~TRACE_BRANCH_BIT; - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); } static inline diff --git a/kernel/kernel/trace/trace_clock.c b/kernel/kernel/trace/trace_clock.c index 4702efb..79a4cc1 100644 --- a/kernel/kernel/trace/trace_clock.c +++ b/kernel/kernel/trace/trace_clock.c @@ -97,7 +97,7 @@ int this_cpu; u64 now, prev_time; - raw_local_irq_save(flags); + flags = hard_local_irq_save(); this_cpu = raw_smp_processor_id(); @@ -139,7 +139,7 @@ arch_spin_unlock(&trace_clock_struct.lock); } out: - raw_local_irq_restore(flags); + hard_local_irq_restore(flags); return now; } diff --git a/kernel/kernel/trace/trace_functions.c b/kernel/kernel/trace/trace_functions.c index 93e20ed..c4d0338 100644 --- a/kernel/kernel/trace/trace_functions.c +++ b/kernel/kernel/trace/trace_functions.c @@ -196,7 +196,7 @@ * Need to use raw, since this must be called before the * recursive protection is performed. 
*/ - local_irq_save(flags); + flags = hard_local_irq_save(); cpu = raw_smp_processor_id(); data = per_cpu_ptr(tr->array_buffer.data, cpu); disabled = atomic_inc_return(&data->disabled); @@ -208,7 +208,7 @@ } atomic_dec(&data->disabled); - local_irq_restore(flags); + hard_local_irq_restore(flags); } static struct tracer_opt func_opts[] = { diff --git a/kernel/kernel/trace/trace_functions_graph.c b/kernel/kernel/trace/trace_functions_graph.c index 60d6627..8a3d156 100644 --- a/kernel/kernel/trace/trace_functions_graph.c +++ b/kernel/kernel/trace/trace_functions_graph.c @@ -169,7 +169,7 @@ if (tracing_thresh) return 1; - local_irq_save(flags); + flags = hard_local_irq_save(); cpu = raw_smp_processor_id(); data = per_cpu_ptr(tr->array_buffer.data, cpu); disabled = atomic_inc_return(&data->disabled); @@ -181,7 +181,7 @@ } atomic_dec(&data->disabled); - local_irq_restore(flags); + hard_local_irq_restore(flags); return ret; } @@ -250,7 +250,7 @@ return; } - local_irq_save(flags); + flags = hard_local_irq_save(); cpu = raw_smp_processor_id(); data = per_cpu_ptr(tr->array_buffer.data, cpu); disabled = atomic_inc_return(&data->disabled); @@ -259,7 +259,7 @@ __trace_graph_return(tr, trace, flags, pc); } atomic_dec(&data->disabled); - local_irq_restore(flags); + hard_local_irq_restore(flags); } void set_graph_array(struct trace_array *tr) diff --git a/kernel/kernel/trace/trace_irqsoff.c b/kernel/kernel/trace/trace_irqsoff.c index ee4571b..92a816d 100644 --- a/kernel/kernel/trace/trace_irqsoff.c +++ b/kernel/kernel/trace/trace_irqsoff.c @@ -14,6 +14,7 @@ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> +#include <linux/irqstage.h> #include <linux/kprobes.h> #include "trace.h" @@ -26,7 +27,7 @@ static DEFINE_PER_CPU(int, tracing_cpu); -static DEFINE_RAW_SPINLOCK(max_trace_lock); +static DEFINE_HARD_SPINLOCK(max_trace_lock); enum { TRACER_IRQS_OFF = (1 << 1), @@ -44,7 +45,7 @@ static inline int preempt_trace(int pc) { - return ((trace_type & TRACER_PREEMPT_OFF) && pc); + return (running_inband() && (trace_type & TRACER_PREEMPT_OFF) && pc); } #else # define preempt_trace(pc) (0) @@ -55,7 +56,7 @@ irq_trace(void) { return ((trace_type & TRACER_IRQS_OFF) && - irqs_disabled()); + (hard_irqs_disabled() || (running_inband() && irqs_disabled()))); } #else # define irq_trace() (0) @@ -393,7 +394,7 @@ data->preempt_timestamp = ftrace_now(cpu); data->critical_start = parent_ip ? : ip; - local_save_flags(flags); + stage_save_flags(flags); __trace_function(tr, ip, parent_ip, flags, pc); @@ -428,7 +429,7 @@ atomic_inc(&data->disabled); - local_save_flags(flags); + stage_save_flags(flags); __trace_function(tr, ip, parent_ip, flags, pc); check_critical_timing(tr, data, parent_ip ? : ip, cpu); data->critical_start = 0; diff --git a/kernel/kernel/trace/trace_output.c b/kernel/kernel/trace/trace_output.c index 000e9dc..3858934 100644 --- a/kernel/kernel/trace/trace_output.c +++ b/kernel/kernel/trace/trace_output.c @@ -445,14 +445,19 @@ int hardirq; int softirq; int nmi; + int oob; nmi = entry->flags & TRACE_FLAG_NMI; hardirq = entry->flags & TRACE_FLAG_HARDIRQ; softirq = entry->flags & TRACE_FLAG_SOFTIRQ; + oob = irqs_pipelined() && (entry->flags & TRACE_FLAG_OOB_STAGE); irqs_off = + (entry->flags & (TRACE_FLAG_IRQS_OFF|TRACE_FLAG_IRQS_HARDOFF)) == + (TRACE_FLAG_IRQS_OFF|TRACE_FLAG_IRQS_HARDOFF) ? '*' : + (entry->flags & TRACE_FLAG_IRQS_HARDOFF) ? 'D' : (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : - (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 
'X' : + !irqs_pipelined() && (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.'; switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | @@ -472,6 +477,8 @@ } hardsoft_irq = + (nmi && oob) ? '#' : + oob ? '~' : (nmi && hardirq) ? 'Z' : nmi ? 'z' : (hardirq && softirq) ? 'H' : diff --git a/kernel/kernel/trace/trace_preemptirq.c b/kernel/kernel/trace/trace_preemptirq.c index 4593f16..42b1790 100644 --- a/kernel/kernel/trace/trace_preemptirq.c +++ b/kernel/kernel/trace/trace_preemptirq.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/ftrace.h> #include <linux/kprobes.h> +#include <linux/irq_pipeline.h> #include "trace.h" #define CREATE_TRACE_POINTS @@ -133,6 +134,57 @@ } EXPORT_SYMBOL(trace_hardirqs_off_caller); NOKPROBE_SYMBOL(trace_hardirqs_off_caller); + +#ifdef CONFIG_IRQ_PIPELINE + +void trace_hardirqs_off_pipelined(void) +{ + WARN_ON(irq_pipeline_debug() && !hard_irqs_disabled()); + + if (running_inband()) + trace_hardirqs_off(); +} +EXPORT_SYMBOL(trace_hardirqs_off_pipelined); +NOKPROBE_SYMBOL(trace_hardirqs_off_pipelined); + +void trace_hardirqs_on_pipelined(void) +{ + WARN_ON(irq_pipeline_debug() && !hard_irqs_disabled()); + + /* + * If the in-band stage of the kernel is current but the IRQ + * was not delivered because the latter is stalled, keep the + * tracing logic unaware of the receipt, so that no false + * positive is triggered in lockdep (e.g. IN-HARDIRQ-W -> + * HARDIRQ-ON-W). + */ + if (running_inband() && !irqs_disabled()) { + stall_inband(); + trace_hardirqs_on(); + unstall_inband_nocheck(); + } +} +EXPORT_SYMBOL(trace_hardirqs_on_pipelined); +NOKPROBE_SYMBOL(trace_hardirqs_on_pipelined); + +#else + +void trace_hardirqs_off_pipelined(void) +{ + trace_hardirqs_off(); +} +EXPORT_SYMBOL(trace_hardirqs_off_pipelined); +NOKPROBE_SYMBOL(trace_hardirqs_off_pipelined); + +void trace_hardirqs_on_pipelined(void) +{ + trace_hardirqs_on(); +} +EXPORT_SYMBOL(trace_hardirqs_on_pipelined); +NOKPROBE_SYMBOL(trace_hardirqs_on_pipelined); + +#endif + #endif /* CONFIG_TRACE_IRQFLAGS */ #ifdef CONFIG_TRACE_PREEMPT_TOGGLE diff --git a/kernel/kernel/trace/trace_sched_wakeup.c b/kernel/kernel/trace/trace_sched_wakeup.c index 97b10bb..f7637f9 100644 --- a/kernel/kernel/trace/trace_sched_wakeup.c +++ b/kernel/kernel/trace/trace_sched_wakeup.c @@ -486,7 +486,9 @@ if (likely(!is_tracing_stopped())) { wakeup_trace->max_latency = delta; + hard_local_irq_disable(); update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL); + hard_local_irq_enable(); } out_unlock: diff --git a/kernel/kernel/trace/trace_stack.c b/kernel/kernel/trace/trace_stack.c index c408423..16392f4 100644 --- a/kernel/kernel/trace/trace_stack.c +++ b/kernel/kernel/trace/trace_stack.c @@ -171,8 +171,9 @@ if (!object_is_on_stack(stack)) return; - /* Can't do this from NMI context (can cause deadlocks) */ - if (in_nmi()) + /* Can't do this from NMI or oob stage contexts (can cause + deadlocks) */ + if (in_nmi() || !running_inband()) return; local_irq_save(flags); diff --git a/kernel/kernel/xenomai/Kconfig b/kernel/kernel/xenomai/Kconfig new file mode 120000 index 0000000..e5347bf --- /dev/null +++ b/kernel/kernel/xenomai/Kconfig @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig \ No newline at end of file diff --git a/kernel/kernel/xenomai/Makefile b/kernel/kernel/xenomai/Makefile new file mode 120000 index 0000000..07e1a0e --- /dev/null +++ b/kernel/kernel/xenomai/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/Makefile \ No newline at end of file 
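A reading aid for the trace_output.c hunk above: TRACE_FLAG_OOB_STAGE reuses the TRACE_FLAG_IRQS_NOSUPPORT bit (see the trace.h hunk), which is why the 'X' state is now gated on !irqs_pipelined(); pipelined kernels always provide IRQ-state tracing, so that bit is free to mean "oob stage" there. The resulting legend, distilled from the code in this patch:

/*
 * Latency-format irqs-off column after this patch:
 *   '*' - IRQs both virtually (in-band stall) and hard disabled
 *   'D' - IRQs hard disabled only
 *   'd' - IRQs virtually disabled only
 *   'X' - arch lacks irqs_disabled_flags (non-pipelined kernels only)
 *   '.' - IRQs enabled
 * hardirq/softirq column, new states ahead of the existing ones:
 *   '#' - NMI taken while running on the oob stage
 *   '~' - running on the oob stage
 */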
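The trace_hardirqs_{off,on}_pipelined() helpers added to trace_preemptirq.c are meant to be called from the arch entry/exit glue, which runs with hard interrupts disabled on pipelined kernels. A minimal sketch of the intended call pattern follows; handle_arch_irq_sketch and pipeline_dispatch_irq are hypothetical placeholder names, not definitions from this patch:

/* Sketch only: bracketing IRQ delivery with the pipelined helpers. */
void handle_arch_irq_sketch(unsigned int irq)
{
	/*
	 * Hard IRQs are off on entry (the helper WARNs otherwise under
	 * CONFIG_DEBUG_IRQ_PIPELINE). It forwards to trace_hardirqs_off()
	 * only when the in-band stage is current.
	 */
	trace_hardirqs_off_pipelined();

	pipeline_dispatch_irq(irq);	/* oob handlers may run first */

	/*
	 * Marks IRQs back on only for an unstalled in-band context, so
	 * lockdep never records a spurious HARDIRQ-ON while the in-band
	 * stage still has the event pending.
	 */
	trace_hardirqs_on_pipelined();
}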
diff --git a/kernel/kernel/xenomai/arith.c b/kernel/kernel/xenomai/arith.c new file mode 120000 index 0000000..f4ad084 --- /dev/null +++ b/kernel/kernel/xenomai/arith.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arith.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/bufd.c b/kernel/kernel/xenomai/bufd.c new file mode 120000 index 0000000..4237c0f --- /dev/null +++ b/kernel/kernel/xenomai/bufd.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/clock.c b/kernel/kernel/xenomai/clock.c new file mode 120000 index 0000000..1f5852f --- /dev/null +++ b/kernel/kernel/xenomai/clock.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/clock.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/debug.c b/kernel/kernel/xenomai/debug.c new file mode 120000 index 0000000..ecb182b --- /dev/null +++ b/kernel/kernel/xenomai/debug.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/debug.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/debug.h b/kernel/kernel/xenomai/debug.h new file mode 120000 index 0000000..50a1185 --- /dev/null +++ b/kernel/kernel/xenomai/debug.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/debug.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/heap.c b/kernel/kernel/xenomai/heap.c new file mode 120000 index 0000000..5295d18 --- /dev/null +++ b/kernel/kernel/xenomai/heap.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/heap.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/init.c b/kernel/kernel/xenomai/init.c new file mode 120000 index 0000000..32bd592 --- /dev/null +++ b/kernel/kernel/xenomai/init.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/init.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/lock.c b/kernel/kernel/xenomai/lock.c new file mode 120000 index 0000000..7a952d9 --- /dev/null +++ b/kernel/kernel/xenomai/lock.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/lock.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/map.c b/kernel/kernel/xenomai/map.c new file mode 120000 index 0000000..4f5c1ee --- /dev/null +++ b/kernel/kernel/xenomai/map.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/map.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/pipe.c b/kernel/kernel/xenomai/pipe.c new file mode 120000 index 0000000..bf8638c --- /dev/null +++ b/kernel/kernel/xenomai/pipe.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/pipeline/Makefile b/kernel/kernel/xenomai/pipeline/Makefile new file mode 120000 index 0000000..d77990a --- /dev/null +++ b/kernel/kernel/xenomai/pipeline/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile \ No newline at end of file diff --git a/kernel/kernel/xenomai/pipeline/init.c b/kernel/kernel/xenomai/pipeline/init.c new file mode 120000 index 0000000..fb9680a --- /dev/null +++ b/kernel/kernel/xenomai/pipeline/init.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/pipeline/intr.c b/kernel/kernel/xenomai/pipeline/intr.c new file mode 120000 
index 0000000..b461d69 --- /dev/null +++ b/kernel/kernel/xenomai/pipeline/intr.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/pipeline/kevents.c b/kernel/kernel/xenomai/pipeline/kevents.c new file mode 120000 index 0000000..ea6afff --- /dev/null +++ b/kernel/kernel/xenomai/pipeline/kevents.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/pipeline/sched.c b/kernel/kernel/xenomai/pipeline/sched.c new file mode 120000 index 0000000..e64ea1a --- /dev/null +++ b/kernel/kernel/xenomai/pipeline/sched.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/pipeline/syscall.c b/kernel/kernel/xenomai/pipeline/syscall.c new file mode 120000 index 0000000..0969e5b --- /dev/null +++ b/kernel/kernel/xenomai/pipeline/syscall.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/pipeline/tick.c b/kernel/kernel/xenomai/pipeline/tick.c new file mode 120000 index 0000000..a008287 --- /dev/null +++ b/kernel/kernel/xenomai/pipeline/tick.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/Makefile b/kernel/kernel/xenomai/posix/Makefile new file mode 120000 index 0000000..b251962 --- /dev/null +++ b/kernel/kernel/xenomai/posix/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/clock.c b/kernel/kernel/xenomai/posix/clock.c new file mode 120000 index 0000000..a519dae --- /dev/null +++ b/kernel/kernel/xenomai/posix/clock.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/clock.h b/kernel/kernel/xenomai/posix/clock.h new file mode 120000 index 0000000..c22aef0 --- /dev/null +++ b/kernel/kernel/xenomai/posix/clock.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/compat.c b/kernel/kernel/xenomai/posix/compat.c new file mode 120000 index 0000000..11292df --- /dev/null +++ b/kernel/kernel/xenomai/posix/compat.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/cond.c b/kernel/kernel/xenomai/posix/cond.c new file mode 120000 index 0000000..50eef82 --- /dev/null +++ b/kernel/kernel/xenomai/posix/cond.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/cond.h b/kernel/kernel/xenomai/posix/cond.h new file mode 120000 index 0000000..cfb1e6e --- /dev/null +++ b/kernel/kernel/xenomai/posix/cond.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/corectl.c b/kernel/kernel/xenomai/posix/corectl.c new file mode 120000 index 0000000..0daec86 --- /dev/null +++ b/kernel/kernel/xenomai/posix/corectl.c @@ -0,0 +1 @@ 
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/corectl.h b/kernel/kernel/xenomai/posix/corectl.h new file mode 120000 index 0000000..798a76d --- /dev/null +++ b/kernel/kernel/xenomai/posix/corectl.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/event.c b/kernel/kernel/xenomai/posix/event.c new file mode 120000 index 0000000..0dbd0e8 --- /dev/null +++ b/kernel/kernel/xenomai/posix/event.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/event.h b/kernel/kernel/xenomai/posix/event.h new file mode 120000 index 0000000..fd94213 --- /dev/null +++ b/kernel/kernel/xenomai/posix/event.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/extension.h b/kernel/kernel/xenomai/posix/extension.h new file mode 120000 index 0000000..12fa756 --- /dev/null +++ b/kernel/kernel/xenomai/posix/extension.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/gen-syscall-entries.sh b/kernel/kernel/xenomai/posix/gen-syscall-entries.sh new file mode 120000 index 0000000..51c613a --- /dev/null +++ b/kernel/kernel/xenomai/posix/gen-syscall-entries.sh @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/internal.h b/kernel/kernel/xenomai/posix/internal.h new file mode 120000 index 0000000..90e2524 --- /dev/null +++ b/kernel/kernel/xenomai/posix/internal.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/io.c b/kernel/kernel/xenomai/posix/io.c new file mode 120000 index 0000000..55b863b --- /dev/null +++ b/kernel/kernel/xenomai/posix/io.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/io.h b/kernel/kernel/xenomai/posix/io.h new file mode 120000 index 0000000..2bafb42 --- /dev/null +++ b/kernel/kernel/xenomai/posix/io.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/memory.c b/kernel/kernel/xenomai/posix/memory.c new file mode 120000 index 0000000..5f70b5d --- /dev/null +++ b/kernel/kernel/xenomai/posix/memory.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/memory.h b/kernel/kernel/xenomai/posix/memory.h new file mode 120000 index 0000000..1799ae1 --- /dev/null +++ b/kernel/kernel/xenomai/posix/memory.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/monitor.c b/kernel/kernel/xenomai/posix/monitor.c new file mode 120000 index 0000000..7ea9e29 --- /dev/null +++ b/kernel/kernel/xenomai/posix/monitor.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c \ No 
newline at end of file diff --git a/kernel/kernel/xenomai/posix/monitor.h b/kernel/kernel/xenomai/posix/monitor.h new file mode 120000 index 0000000..5504660 --- /dev/null +++ b/kernel/kernel/xenomai/posix/monitor.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/mqueue.c b/kernel/kernel/xenomai/posix/mqueue.c new file mode 120000 index 0000000..bde04fe --- /dev/null +++ b/kernel/kernel/xenomai/posix/mqueue.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/mqueue.h b/kernel/kernel/xenomai/posix/mqueue.h new file mode 120000 index 0000000..ed5ae91 --- /dev/null +++ b/kernel/kernel/xenomai/posix/mqueue.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/mutex.c b/kernel/kernel/xenomai/posix/mutex.c new file mode 120000 index 0000000..24f63a2 --- /dev/null +++ b/kernel/kernel/xenomai/posix/mutex.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/mutex.h b/kernel/kernel/xenomai/posix/mutex.h new file mode 120000 index 0000000..996eab0 --- /dev/null +++ b/kernel/kernel/xenomai/posix/mutex.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/nsem.c b/kernel/kernel/xenomai/posix/nsem.c new file mode 120000 index 0000000..8f07ded --- /dev/null +++ b/kernel/kernel/xenomai/posix/nsem.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/process.c b/kernel/kernel/xenomai/posix/process.c new file mode 120000 index 0000000..9ce4ea4 --- /dev/null +++ b/kernel/kernel/xenomai/posix/process.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/process.h b/kernel/kernel/xenomai/posix/process.h new file mode 120000 index 0000000..ed01222 --- /dev/null +++ b/kernel/kernel/xenomai/posix/process.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/sched.c b/kernel/kernel/xenomai/posix/sched.c new file mode 120000 index 0000000..389173c --- /dev/null +++ b/kernel/kernel/xenomai/posix/sched.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/sched.h b/kernel/kernel/xenomai/posix/sched.h new file mode 120000 index 0000000..cd48370 --- /dev/null +++ b/kernel/kernel/xenomai/posix/sched.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/sem.c b/kernel/kernel/xenomai/posix/sem.c new file mode 120000 index 0000000..1540be1 --- /dev/null +++ b/kernel/kernel/xenomai/posix/sem.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/sem.h b/kernel/kernel/xenomai/posix/sem.h new file mode 120000 index 0000000..134f807 --- 
/dev/null +++ b/kernel/kernel/xenomai/posix/sem.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/signal.c b/kernel/kernel/xenomai/posix/signal.c new file mode 120000 index 0000000..c3a7793 --- /dev/null +++ b/kernel/kernel/xenomai/posix/signal.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/signal.h b/kernel/kernel/xenomai/posix/signal.h new file mode 120000 index 0000000..6dfed5d --- /dev/null +++ b/kernel/kernel/xenomai/posix/signal.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/syscall.c b/kernel/kernel/xenomai/posix/syscall.c new file mode 120000 index 0000000..5eab046 --- /dev/null +++ b/kernel/kernel/xenomai/posix/syscall.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/syscall.h b/kernel/kernel/xenomai/posix/syscall.h new file mode 120000 index 0000000..8760e9b --- /dev/null +++ b/kernel/kernel/xenomai/posix/syscall.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/syscall32.c b/kernel/kernel/xenomai/posix/syscall32.c new file mode 120000 index 0000000..036ba80 --- /dev/null +++ b/kernel/kernel/xenomai/posix/syscall32.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/syscall32.h b/kernel/kernel/xenomai/posix/syscall32.h new file mode 120000 index 0000000..7202c21 --- /dev/null +++ b/kernel/kernel/xenomai/posix/syscall32.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/syscall_entries.h b/kernel/kernel/xenomai/posix/syscall_entries.h new file mode 100644 index 0000000..8572390 --- /dev/null +++ b/kernel/kernel/xenomai/posix/syscall_entries.h @@ -0,0 +1,232 @@ +#define __COBALT_CALL_ENTRIES \ + __COBALT_CALL_ENTRY(migrate) \ + __COBALT_CALL_ENTRY(trace) \ + __COBALT_CALL_ENTRY(ftrace_puts) \ + __COBALT_CALL_ENTRY(archcall) \ + __COBALT_CALL_ENTRY(get_current) \ + __COBALT_CALL_ENTRY(backtrace) \ + __COBALT_CALL_ENTRY(serialdbg) \ + __COBALT_CALL_ENTRY(bind) \ + __COBALT_CALL_ENTRY(extend) \ + __COBALT_CALL_ENTRY(sched_minprio) \ + __COBALT_CALL_ENTRY(sched_maxprio) \ + __COBALT_CALL_ENTRY(sched_yield) \ + __COBALT_CALL_ENTRY(sched_setconfig_np) \ + __COBALT_CALL_ENTRY(sched_getconfig_np) \ + __COBALT_CALL_ENTRY(sched_weightprio) \ + __COBALT_CALL_ENTRY(sched_setscheduler_ex) \ + __COBALT_CALL_ENTRY(sched_getscheduler_ex) \ + __COBALT_CALL_ENTRY(timerfd_create) \ + __COBALT_CALL_ENTRY(timerfd_settime) \ + __COBALT_CALL_ENTRY(timerfd_gettime) \ + __COBALT_CALL_ENTRY(open) \ + __COBALT_CALL_ENTRY(socket) \ + __COBALT_CALL_ENTRY(close) \ + __COBALT_CALL_ENTRY(fcntl) \ + __COBALT_CALL_ENTRY(ioctl) \ + __COBALT_CALL_ENTRY(read) \ + __COBALT_CALL_ENTRY(write) \ + __COBALT_CALL_ENTRY(recvmsg) \ + __COBALT_CALL_ENTRY(recvmmsg) \ + __COBALT_CALL_ENTRY(recvmmsg64) \ + __COBALT_CALL_ENTRY(sendmsg) \ + __COBALT_CALL_ENTRY(sendmmsg) \ + __COBALT_CALL_ENTRY(mmap) \ + __COBALT_CALL_ENTRY(select) \ + 
__COBALT_CALL_ENTRY(sem_init) \ + __COBALT_CALL_ENTRY(sem_post) \ + __COBALT_CALL_ENTRY(sem_wait) \ + __COBALT_CALL_ENTRY(sem_timedwait) \ + __COBALT_CALL_ENTRY(sem_timedwait64) \ + __COBALT_CALL_ENTRY(sem_trywait) \ + __COBALT_CALL_ENTRY(sem_getvalue) \ + __COBALT_CALL_ENTRY(sem_destroy) \ + __COBALT_CALL_ENTRY(sem_broadcast_np) \ + __COBALT_CALL_ENTRY(sem_inquire) \ + __COBALT_CALL_ENTRY(mutex_check_init) \ + __COBALT_CALL_ENTRY(mutex_init) \ + __COBALT_CALL_ENTRY(mutex_destroy) \ + __COBALT_CALL_ENTRY(mutex_trylock) \ + __COBALT_CALL_ENTRY(mutex_lock) \ + __COBALT_CALL_ENTRY(mutex_timedlock) \ + __COBALT_CALL_ENTRY(mutex_timedlock64) \ + __COBALT_CALL_ENTRY(mutex_unlock) \ + __COBALT_CALL_ENTRY(event_init) \ + __COBALT_CALL_ENTRY(event_wait) \ + __COBALT_CALL_ENTRY(event_wait64) \ + __COBALT_CALL_ENTRY(event_sync) \ + __COBALT_CALL_ENTRY(event_destroy) \ + __COBALT_CALL_ENTRY(event_inquire) \ + __COBALT_CALL_ENTRY(mq_notify) \ + __COBALT_CALL_ENTRY(mq_open) \ + __COBALT_CALL_ENTRY(mq_close) \ + __COBALT_CALL_ENTRY(mq_unlink) \ + __COBALT_CALL_ENTRY(mq_getattr) \ + __COBALT_CALL_ENTRY(mq_timedsend) \ + __COBALT_CALL_ENTRY(mq_timedsend64) \ + __COBALT_CALL_ENTRY(mq_timedreceive) \ + __COBALT_CALL_ENTRY(mq_timedreceive64) \ + __COBALT_CALL_ENTRY(sigwait) \ + __COBALT_CALL_ENTRY(sigtimedwait) \ + __COBALT_CALL_ENTRY(sigtimedwait64) \ + __COBALT_CALL_ENTRY(sigwaitinfo) \ + __COBALT_CALL_ENTRY(sigpending) \ + __COBALT_CALL_ENTRY(kill) \ + __COBALT_CALL_ENTRY(sigqueue) \ + __COBALT_CALL_ENTRY(corectl) \ + __COBALT_CALL_ENTRY(cond_init) \ + __COBALT_CALL_ENTRY(cond_destroy) \ + __COBALT_CALL_ENTRY(cond_wait_prologue) \ + __COBALT_CALL_ENTRY(cond_wait_epilogue) \ + __COBALT_CALL_ENTRY(sem_open) \ + __COBALT_CALL_ENTRY(sem_close) \ + __COBALT_CALL_ENTRY(sem_unlink) \ + __COBALT_CALL_ENTRY(monitor_init) \ + __COBALT_CALL_ENTRY(monitor_enter) \ + __COBALT_CALL_ENTRY(monitor_wait) \ + __COBALT_CALL_ENTRY(monitor_wait64) \ + __COBALT_CALL_ENTRY(monitor_sync) \ + __COBALT_CALL_ENTRY(monitor_exit) \ + __COBALT_CALL_ENTRY(monitor_destroy) \ + __COBALT_CALL_ENTRY(clock_getres) \ + __COBALT_CALL_ENTRY(clock_getres64) \ + __COBALT_CALL_ENTRY(clock_gettime) \ + __COBALT_CALL_ENTRY(clock_gettime64) \ + __COBALT_CALL_ENTRY(clock_settime) \ + __COBALT_CALL_ENTRY(clock_settime64) \ + __COBALT_CALL_ENTRY(clock_adjtime) \ + __COBALT_CALL_ENTRY(clock_adjtime64) \ + __COBALT_CALL_ENTRY(clock_nanosleep) \ + __COBALT_CALL_ENTRY(clock_nanosleep64) \ + __COBALT_CALL_ENTRY(thread_setschedparam_ex) \ + __COBALT_CALL_ENTRY(thread_getschedparam_ex) \ + __COBALT_CALL_ENTRY(thread_setschedprio) \ + __COBALT_CALL_ENTRY(thread_create) \ + __COBALT_CALL_ENTRY(thread_setmode) \ + __COBALT_CALL_ENTRY(thread_setname) \ + __COBALT_CALL_ENTRY(thread_kill) \ + __COBALT_CALL_ENTRY(thread_join) \ + __COBALT_CALL_ENTRY(thread_getpid) \ + __COBALT_CALL_ENTRY(thread_getstat) \ + __COBALT_CALL_ENTRY(timer_delete) \ + __COBALT_CALL_ENTRY(timer_create) \ + __COBALT_CALL_ENTRY(timer_settime) \ + __COBALT_CALL_ENTRY(timer_gettime) \ + __COBALT_CALL_ENTRY(timer_getoverrun) \ + /* end */ +#define __COBALT_CALL_MODES \ + __COBALT_MODE(migrate, current) \ + __COBALT_MODE(trace, current) \ + __COBALT_MODE(ftrace_puts, current) \ + __COBALT_MODE(archcall, current) \ + __COBALT_MODE(get_current, current) \ + __COBALT_MODE(backtrace, lostage) \ + __COBALT_MODE(serialdbg, current) \ + __COBALT_MODE(bind, lostage) \ + __COBALT_MODE(extend, lostage) \ + __COBALT_MODE(sched_minprio, current) \ + __COBALT_MODE(sched_maxprio, current) \ + 
__COBALT_MODE(sched_yield, primary) \ + __COBALT_MODE(sched_setconfig_np, conforming) \ + __COBALT_MODE(sched_getconfig_np, conforming) \ + __COBALT_MODE(sched_weightprio, current) \ + __COBALT_MODE(sched_setscheduler_ex, conforming) \ + __COBALT_MODE(sched_getscheduler_ex, current) \ + __COBALT_MODE(timerfd_create, lostage) \ + __COBALT_MODE(timerfd_settime, primary) \ + __COBALT_MODE(timerfd_gettime, current) \ + __COBALT_MODE(open, lostage) \ + __COBALT_MODE(socket, lostage) \ + __COBALT_MODE(close, lostage) \ + __COBALT_MODE(fcntl, current) \ + __COBALT_MODE(ioctl, handover) \ + __COBALT_MODE(read, handover) \ + __COBALT_MODE(write, handover) \ + __COBALT_MODE(recvmsg, handover) \ + __COBALT_MODE(recvmmsg, primary) \ + __COBALT_MODE(recvmmsg64, primary) \ + __COBALT_MODE(sendmsg, handover) \ + __COBALT_MODE(sendmmsg, primary) \ + __COBALT_MODE(mmap, lostage) \ + __COBALT_MODE(select, primary) \ + __COBALT_MODE(sem_init, current) \ + __COBALT_MODE(sem_post, current) \ + __COBALT_MODE(sem_wait, primary) \ + __COBALT_MODE(sem_timedwait, primary) \ + __COBALT_MODE(sem_timedwait64, primary) \ + __COBALT_MODE(sem_trywait, primary) \ + __COBALT_MODE(sem_getvalue, current) \ + __COBALT_MODE(sem_destroy, current) \ + __COBALT_MODE(sem_broadcast_np, current) \ + __COBALT_MODE(sem_inquire, current) \ + __COBALT_MODE(mutex_check_init, current) \ + __COBALT_MODE(mutex_init, current) \ + __COBALT_MODE(mutex_destroy, current) \ + __COBALT_MODE(mutex_trylock, primary) \ + __COBALT_MODE(mutex_lock, primary) \ + __COBALT_MODE(mutex_timedlock, primary) \ + __COBALT_MODE(mutex_timedlock64, primary) \ + __COBALT_MODE(mutex_unlock, nonrestartable) \ + __COBALT_MODE(event_init, current) \ + __COBALT_MODE(event_wait, primary) \ + __COBALT_MODE(event_wait64, primary) \ + __COBALT_MODE(event_sync, current) \ + __COBALT_MODE(event_destroy, current) \ + __COBALT_MODE(event_inquire, current) \ + __COBALT_MODE(mq_notify, primary) \ + __COBALT_MODE(mq_open, lostage) \ + __COBALT_MODE(mq_close, lostage) \ + __COBALT_MODE(mq_unlink, lostage) \ + __COBALT_MODE(mq_getattr, current) \ + __COBALT_MODE(mq_timedsend, primary) \ + __COBALT_MODE(mq_timedsend64, primary) \ + __COBALT_MODE(mq_timedreceive, primary) \ + __COBALT_MODE(mq_timedreceive64, primary) \ + __COBALT_MODE(sigwait, primary) \ + __COBALT_MODE(sigtimedwait, nonrestartable) \ + __COBALT_MODE(sigtimedwait64, nonrestartable) \ + __COBALT_MODE(sigwaitinfo, nonrestartable) \ + __COBALT_MODE(sigpending, primary) \ + __COBALT_MODE(kill, conforming) \ + __COBALT_MODE(sigqueue, conforming) \ + __COBALT_MODE(corectl, probing) \ + __COBALT_MODE(cond_init, current) \ + __COBALT_MODE(cond_destroy, current) \ + __COBALT_MODE(cond_wait_prologue, nonrestartable) \ + __COBALT_MODE(cond_wait_epilogue, primary) \ + __COBALT_MODE(sem_open, lostage) \ + __COBALT_MODE(sem_close, lostage) \ + __COBALT_MODE(sem_unlink, lostage) \ + __COBALT_MODE(monitor_init, current) \ + __COBALT_MODE(monitor_enter, primary) \ + __COBALT_MODE(monitor_wait, nonrestartable) \ + __COBALT_MODE(monitor_wait64, nonrestartable) \ + __COBALT_MODE(monitor_sync, nonrestartable) \ + __COBALT_MODE(monitor_exit, primary) \ + __COBALT_MODE(monitor_destroy, primary) \ + __COBALT_MODE(clock_getres, current) \ + __COBALT_MODE(clock_getres64, current) \ + __COBALT_MODE(clock_gettime, current) \ + __COBALT_MODE(clock_gettime64, current) \ + __COBALT_MODE(clock_settime, current) \ + __COBALT_MODE(clock_settime64, current) \ + __COBALT_MODE(clock_adjtime, current) \ + __COBALT_MODE(clock_adjtime64, current) \ + 
__COBALT_MODE(clock_nanosleep, primary) \ + __COBALT_MODE(clock_nanosleep64, primary) \ + __COBALT_MODE(thread_setschedparam_ex, conforming) \ + __COBALT_MODE(thread_getschedparam_ex, current) \ + __COBALT_MODE(thread_setschedprio, conforming) \ + __COBALT_MODE(thread_create, init) \ + __COBALT_MODE(thread_setmode, primary) \ + __COBALT_MODE(thread_setname, current) \ + __COBALT_MODE(thread_kill, conforming) \ + __COBALT_MODE(thread_join, primary) \ + __COBALT_MODE(thread_getpid, current) \ + __COBALT_MODE(thread_getstat, current) \ + __COBALT_MODE(timer_delete, current) \ + __COBALT_MODE(timer_create, current) \ + __COBALT_MODE(timer_settime, primary) \ + __COBALT_MODE(timer_gettime, current) \ + __COBALT_MODE(timer_getoverrun, current) \ + /* end */ diff --git a/kernel/kernel/xenomai/posix/thread.c b/kernel/kernel/xenomai/posix/thread.c new file mode 120000 index 0000000..b163b92 --- /dev/null +++ b/kernel/kernel/xenomai/posix/thread.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/thread.h b/kernel/kernel/xenomai/posix/thread.h new file mode 120000 index 0000000..e887d4f --- /dev/null +++ b/kernel/kernel/xenomai/posix/thread.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/timer.c b/kernel/kernel/xenomai/posix/timer.c new file mode 120000 index 0000000..237cc02 --- /dev/null +++ b/kernel/kernel/xenomai/posix/timer.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/timer.h b/kernel/kernel/xenomai/posix/timer.h new file mode 120000 index 0000000..dd06406 --- /dev/null +++ b/kernel/kernel/xenomai/posix/timer.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/timerfd.c b/kernel/kernel/xenomai/posix/timerfd.c new file mode 120000 index 0000000..f76a00a --- /dev/null +++ b/kernel/kernel/xenomai/posix/timerfd.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/posix/timerfd.h b/kernel/kernel/xenomai/posix/timerfd.h new file mode 120000 index 0000000..7d95e73 --- /dev/null +++ b/kernel/kernel/xenomai/posix/timerfd.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/procfs.c b/kernel/kernel/xenomai/procfs.c new file mode 120000 index 0000000..4056fd1 --- /dev/null +++ b/kernel/kernel/xenomai/procfs.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/procfs.h b/kernel/kernel/xenomai/procfs.h new file mode 120000 index 0000000..fd1d435 --- /dev/null +++ b/kernel/kernel/xenomai/procfs.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/registry.c b/kernel/kernel/xenomai/registry.c new file mode 120000 index 0000000..d33780e --- /dev/null +++ b/kernel/kernel/xenomai/registry.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/registry.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/rtdm/Makefile 
b/kernel/kernel/xenomai/rtdm/Makefile new file mode 120000 index 0000000..ba00841 --- /dev/null +++ b/kernel/kernel/xenomai/rtdm/Makefile @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile \ No newline at end of file diff --git a/kernel/kernel/xenomai/rtdm/core.c b/kernel/kernel/xenomai/rtdm/core.c new file mode 120000 index 0000000..bb434a7 --- /dev/null +++ b/kernel/kernel/xenomai/rtdm/core.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/rtdm/device.c b/kernel/kernel/xenomai/rtdm/device.c new file mode 120000 index 0000000..4fc6518 --- /dev/null +++ b/kernel/kernel/xenomai/rtdm/device.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/rtdm/drvlib.c b/kernel/kernel/xenomai/rtdm/drvlib.c new file mode 120000 index 0000000..2583b7b --- /dev/null +++ b/kernel/kernel/xenomai/rtdm/drvlib.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/rtdm/fd.c b/kernel/kernel/xenomai/rtdm/fd.c new file mode 120000 index 0000000..3ea703a --- /dev/null +++ b/kernel/kernel/xenomai/rtdm/fd.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/rtdm/internal.h b/kernel/kernel/xenomai/rtdm/internal.h new file mode 120000 index 0000000..7a64daa --- /dev/null +++ b/kernel/kernel/xenomai/rtdm/internal.h @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h \ No newline at end of file diff --git a/kernel/kernel/xenomai/rtdm/wrappers.c b/kernel/kernel/xenomai/rtdm/wrappers.c new file mode 120000 index 0000000..75b2d7a --- /dev/null +++ b/kernel/kernel/xenomai/rtdm/wrappers.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/sched-idle.c b/kernel/kernel/xenomai/sched-idle.c new file mode 120000 index 0000000..696b902 --- /dev/null +++ b/kernel/kernel/xenomai/sched-idle.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/sched-quota.c b/kernel/kernel/xenomai/sched-quota.c new file mode 120000 index 0000000..7069986 --- /dev/null +++ b/kernel/kernel/xenomai/sched-quota.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/sched-rt.c b/kernel/kernel/xenomai/sched-rt.c new file mode 120000 index 0000000..598d169 --- /dev/null +++ b/kernel/kernel/xenomai/sched-rt.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/sched-sporadic.c b/kernel/kernel/xenomai/sched-sporadic.c new file mode 120000 index 0000000..e9c647e --- /dev/null +++ b/kernel/kernel/xenomai/sched-sporadic.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/sched-tp.c b/kernel/kernel/xenomai/sched-tp.c new file mode 120000 index 0000000..63f2fd3 --- /dev/null +++ b/kernel/kernel/xenomai/sched-tp.c @@ -0,0 +1 @@ 
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/sched-weak.c b/kernel/kernel/xenomai/sched-weak.c new file mode 120000 index 0000000..795eb2b --- /dev/null +++ b/kernel/kernel/xenomai/sched-weak.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/sched.c b/kernel/kernel/xenomai/sched.c new file mode 120000 index 0000000..501961b --- /dev/null +++ b/kernel/kernel/xenomai/sched.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/select.c b/kernel/kernel/xenomai/select.c new file mode 120000 index 0000000..df93574 --- /dev/null +++ b/kernel/kernel/xenomai/select.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/select.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/synch.c b/kernel/kernel/xenomai/synch.c new file mode 120000 index 0000000..9ac2df7 --- /dev/null +++ b/kernel/kernel/xenomai/synch.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/synch.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/thread.c b/kernel/kernel/xenomai/thread.c new file mode 120000 index 0000000..339f26e --- /dev/null +++ b/kernel/kernel/xenomai/thread.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/thread.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/time.c b/kernel/kernel/xenomai/time.c new file mode 120000 index 0000000..c414ee1 --- /dev/null +++ b/kernel/kernel/xenomai/time.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/time.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/timer.c b/kernel/kernel/xenomai/timer.c new file mode 120000 index 0000000..2aad719 --- /dev/null +++ b/kernel/kernel/xenomai/timer.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/tree.c b/kernel/kernel/xenomai/tree.c new file mode 120000 index 0000000..64e1e4e --- /dev/null +++ b/kernel/kernel/xenomai/tree.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/tree.c \ No newline at end of file diff --git a/kernel/kernel/xenomai/vfile.c b/kernel/kernel/xenomai/vfile.c new file mode 120000 index 0000000..4d992e0 --- /dev/null +++ b/kernel/kernel/xenomai/vfile.c @@ -0,0 +1 @@ +/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c \ No newline at end of file diff --git a/kernel/lib/Kconfig.debug b/kernel/lib/Kconfig.debug index 8608332..77c03b3 100644 --- a/kernel/lib/Kconfig.debug +++ b/kernel/lib/Kconfig.debug @@ -897,6 +897,38 @@ is currently disabled). Drivers need to handle this correctly. Some don't and need to be caught. +config DEBUG_IRQ_PIPELINE + bool "Debug IRQ pipeline" + depends on IRQ_PIPELINE && DEBUG_KERNEL + help + Turn on this option for enabling debug checks related to + interrupt pipelining, like interrupt state consistency and + proper context isolation between the in-band and oob stages. + + If unsure, say N. + +config IRQ_PIPELINE_TORTURE_TEST + bool "Torture tests for IRQ pipeline" + depends on DEBUG_IRQ_PIPELINE + select TORTURE_TEST + default n + help + This option provides a kernel module that runs torture tests + on the IRQ pipeline mechanism. 
+ + Say Y here if you want the IRQ pipeline torture tests to run + when the kernel starts. Say N if you are unsure. + +config DEBUG_DOVETAIL + bool "Debug Dovetail interface" + depends on DOVETAIL && DEBUG_KERNEL + select DEBUG_IRQ_PIPELINE + help + Turn on this option for enabling debug checks related to + running a dual kernel configuration, aka dovetailing. This + option implicitly enables the interrupt pipeline debugging + features. + menu "Debug Oops, Lockups and Hangs" config PANIC_ON_OOPS @@ -1315,6 +1347,27 @@ spin_lock_init()/mutex_init()/etc., or whether there is any lock held during task exit. +config DEBUG_HARD_LOCKS + bool "Debug hard spinlocks" + depends on DEBUG_IRQ_PIPELINE && LOCKDEP && EXPERT + help + Turn on this option for enabling LOCKDEP for hard spinlock + types used in interrupt pipelining. + + Keep in mind that enabling this feature will ruin the + latency figures for any out-of-band code; it is merely + useful for proving the correctness of the locking scheme of + such code without any consideration for real-time + guarantees. You have been warned. + + If unsure, say N. + +if DEBUG_HARD_LOCKS +comment "WARNING! DEBUG_HARD_LOCKS induces **massive** latency" +comment "overhead for the code running on the out-of-band" +comment "interrupt stage." +endif + config LOCKDEP bool depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT diff --git a/kernel/lib/atomic64.c b/kernel/lib/atomic64.c index e98c85a..bf7d040 100644 --- a/kernel/lib/atomic64.c +++ b/kernel/lib/atomic64.c @@ -25,15 +25,15 @@ * Ensure each lock is in a separate cacheline. */ static union { - raw_spinlock_t lock; + hard_spinlock_t lock; char pad[L1_CACHE_BYTES]; } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = { [0 ... (NR_LOCKS - 1)] = { - .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock), + .lock = __HARD_SPIN_LOCK_INITIALIZER(atomic64_lock.lock), }, }; -static inline raw_spinlock_t *lock_addr(const atomic64_t *v) +static inline hard_spinlock_t *lock_addr(const atomic64_t *v) { unsigned long addr = (unsigned long) v; @@ -45,7 +45,7 @@ s64 atomic64_read(const atomic64_t *v) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + hard_spinlock_t *lock = lock_addr(v); s64 val; raw_spin_lock_irqsave(lock, flags); @@ -58,7 +58,7 @@ void atomic64_set(atomic64_t *v, s64 i) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + hard_spinlock_t *lock = lock_addr(v); raw_spin_lock_irqsave(lock, flags); v->counter = i; @@ -70,7 +70,7 @@ void atomic64_##op(s64 a, atomic64_t *v) \ { \ unsigned long flags; \ - raw_spinlock_t *lock = lock_addr(v); \ + hard_spinlock_t *lock = lock_addr(v); \ \ raw_spin_lock_irqsave(lock, flags); \ v->counter c_op a; \ @@ -82,7 +82,7 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v) \ { \ unsigned long flags; \ - raw_spinlock_t *lock = lock_addr(v); \ + hard_spinlock_t *lock = lock_addr(v); \ s64 val; \ \ raw_spin_lock_irqsave(lock, flags); \ @@ -96,7 +96,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \ { \ unsigned long flags; \ - raw_spinlock_t *lock = lock_addr(v); \ + hard_spinlock_t *lock = lock_addr(v); \ s64 val; \ \ raw_spin_lock_irqsave(lock, flags); \ @@ -133,7 +133,7 @@ s64 atomic64_dec_if_positive(atomic64_t *v) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + hard_spinlock_t *lock = lock_addr(v); s64 val; raw_spin_lock_irqsave(lock, flags); @@ -148,7 +148,7 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + hard_spinlock_t *lock = lock_addr(v); s64 val;
raw_spin_lock_irqsave(lock, flags); @@ -163,7 +163,7 @@ s64 atomic64_xchg(atomic64_t *v, s64 new) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + hard_spinlock_t *lock = lock_addr(v); s64 val; raw_spin_lock_irqsave(lock, flags); @@ -177,7 +177,7 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + hard_spinlock_t *lock = lock_addr(v); s64 val; raw_spin_lock_irqsave(lock, flags); diff --git a/kernel/lib/dump_stack.c b/kernel/lib/dump_stack.c index b9acd9c..aed74e3 100644 --- a/kernel/lib/dump_stack.c +++ b/kernel/lib/dump_stack.c @@ -9,9 +9,11 @@ #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/smp.h> +#include <linux/irqstage.h> #include <linux/atomic.h> #include <linux/kexec.h> #include <linux/utsname.h> +#include <linux/hardirq.h> static char dump_stack_arch_desc_str[128]; @@ -56,6 +58,11 @@ printk("%sHardware name: %s\n", log_lvl, dump_stack_arch_desc_str); +#ifdef CONFIG_IRQ_PIPELINE + printk("%sIRQ stage: %s\n", + log_lvl, current_irq_stage->name); +#endif + print_worker_info(log_lvl, current); } @@ -85,6 +92,29 @@ #ifdef CONFIG_SMP static atomic_t dump_lock = ATOMIC_INIT(-1); +static unsigned long disable_local_irqs(void) +{ + unsigned long flags = 0; /* only to trick the UMR detection */ + + /* + * We neither need nor want to disable in-band IRQs over the + * oob stage, where CPU migration can't happen. Conversely, we + * neither need nor want to disable hard IRQs from the oob + * stage, so that latency won't skyrocket as a result of + * dumping the stack backtrace. + */ + if (running_inband() && !on_pipeline_entry()) + local_irq_save(flags); + + return flags; +} + +static void restore_local_irqs(unsigned long flags) +{ + if (running_inband() && !on_pipeline_entry()) + local_irq_restore(flags); +} + asmlinkage __visible void dump_stack_lvl(const char *log_lvl) { unsigned long flags; @@ -97,7 +127,7 @@ * against other CPUs */ retry: - local_irq_save(flags); + flags = disable_local_irqs(); cpu = smp_processor_id(); old = atomic_cmpxchg(&dump_lock, -1, cpu); if (old == -1) { @@ -105,7 +135,7 @@ } else if (old == cpu) { was_locked = 1; } else { - local_irq_restore(flags); + restore_local_irqs(flags); /* * Wait for the lock to release before jumping to * atomic_cmpxchg() in order to mitigate the thundering herd @@ -120,7 +150,7 @@ if (!was_locked) atomic_set(&dump_lock, -1); - local_irq_restore(flags); + restore_local_irqs(flags); } #else asmlinkage __visible void dump_stack_lvl(const char *log_lvl) diff --git a/kernel/lib/smp_processor_id.c b/kernel/lib/smp_processor_id.c index 2916606..952e2ad 100644 --- a/kernel/lib/smp_processor_id.c +++ b/kernel/lib/smp_processor_id.c @@ -7,12 +7,16 @@ #include <linux/export.h> #include <linux/kprobes.h> #include <linux/sched.h> +#include <linux/irqstage.h> noinstr static unsigned int check_preemption_disabled(const char *what1, const char *what2) { int this_cpu = raw_smp_processor_id(); + if (hard_irqs_disabled() || !running_inband()) + goto out; + if (likely(preempt_count())) goto out; diff --git a/kernel/lib/vdso/Kconfig b/kernel/lib/vdso/Kconfig index d883ac2..7b327e1 100644 --- a/kernel/lib/vdso/Kconfig +++ b/kernel/lib/vdso/Kconfig @@ -30,4 +30,12 @@ Selected by architectures which support time namespaces in the VDSO +config GENERIC_CLOCKSOURCE_VDSO + depends on ARM || ARM64 + select CLKSRC_MMIO + bool + help + Enables access to clocksources via the vDSO based on + generic MMIO operations. 
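The conversions just above follow one rule of dual-kernel locking: data that out-of-band code may touch must be guarded by a hard spinlock, because a raw_spinlock_t only virtually disables interrupts on the in-band stage and would not stop an oob handler from preempting the section. Hard locks keep the regular raw_spin_lock_* API, as the kasan and trace_irqsoff hunks elsewhere in this patch show. A short sketch of the pattern; every name below is hypothetical, for illustration only:

/* Hypothetical lock shared between an oob IRQ handler and in-band code. */
static DEFINE_HARD_SPINLOCK(sample_lock);

/* Runs on the oob stage: hard IRQs are already off here. */
static irqreturn_t sample_oob_handler(int irq, void *dev_id)
{
	raw_spin_lock(&sample_lock);
	/* ... update state shared with the in-band side ... */
	raw_spin_unlock(&sample_lock);
	return IRQ_HANDLED;
}

/*
 * Runs in-band: irqsave on a hard lock disables IRQs in the CPU,
 * not just virtually, so the oob handler cannot preempt the section.
 */
static void sample_inband_update(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sample_lock, flags);
	/* ... */
	raw_spin_unlock_irqrestore(&sample_lock, flags);
}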
+ endif diff --git a/kernel/lib/vdso/gettimeofday.c b/kernel/lib/vdso/gettimeofday.c index c6f6dee..57a5627 100644 --- a/kernel/lib/vdso/gettimeofday.c +++ b/kernel/lib/vdso/gettimeofday.c @@ -5,6 +5,245 @@ #include <vdso/datapage.h> #include <vdso/helpers.h> +static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, + struct __kernel_timespec *ts); + +#ifndef vdso_clocksource_ok +static inline bool vdso_clocksource_ok(const struct vdso_data *vd) +{ + return vd->clock_mode != VDSO_CLOCKMODE_NONE; +} +#endif + +#ifndef vdso_cycles_ok +static inline bool vdso_cycles_ok(u64 cycles) +{ + return true; +} +#endif + +#if defined(CONFIG_GENERIC_CLOCKSOURCE_VDSO) && !defined(BUILD_VDSO32) + +#include <linux/fcntl.h> +#include <linux/io.h> +#include <linux/ioctl.h> +#include <uapi/linux/clocksource.h> + +static notrace u64 readl_mmio_up(const struct clksrc_info *vinfo) +{ + const struct clksrc_user_mmio_info *info = &vinfo->mmio; + return readl_relaxed(info->reg_lower); +} + +static notrace u64 readl_mmio_down(const struct clksrc_info *vinfo) +{ + const struct clksrc_user_mmio_info *info = &vinfo->mmio; + return ~(u64)readl_relaxed(info->reg_lower) & info->mask_lower; +} + +static notrace u64 readw_mmio_up(const struct clksrc_info *vinfo) +{ + const struct clksrc_user_mmio_info *info = &vinfo->mmio; + return readw_relaxed(info->reg_lower); +} + +static notrace u64 readw_mmio_down(const struct clksrc_info *vinfo) +{ + const struct clksrc_user_mmio_info *info = &vinfo->mmio; + return ~(u64)readw_relaxed(info->reg_lower) & info->mask_lower; +} + +static notrace u64 readl_dmmio_up(const struct clksrc_info *vinfo) +{ + const struct clksrc_user_mmio_info *info = &vinfo->mmio; + void __iomem *reg_lower, *reg_upper; + u32 upper, old_upper, lower; + + reg_lower = info->reg_lower; + reg_upper = info->reg_upper; + + upper = readl_relaxed(reg_upper); + do { + old_upper = upper; + lower = readl_relaxed(reg_lower); + upper = readl_relaxed(reg_upper); + } while (upper != old_upper); + + return (((u64)upper) << info->bits_lower) | lower; +} + +static notrace u64 readw_dmmio_up(const struct clksrc_info *vinfo) +{ + const struct clksrc_user_mmio_info *info = &vinfo->mmio; + void __iomem *reg_lower, *reg_upper; + u16 upper, old_upper, lower; + + reg_lower = info->reg_lower; + reg_upper = info->reg_upper; + + upper = readw_relaxed(reg_upper); + do { + old_upper = upper; + lower = readw_relaxed(reg_lower); + upper = readw_relaxed(reg_upper); + } while (upper != old_upper); + + return (((u64)upper) << info->bits_lower) | lower; +} + +static notrace __cold vdso_read_cycles_t *get_mmio_read_cycles(unsigned int type) +{ + switch (type) { + case CLKSRC_MMIO_L_UP: + return &readl_mmio_up; + case CLKSRC_MMIO_L_DOWN: + return &readl_mmio_down; + case CLKSRC_MMIO_W_UP: + return &readw_mmio_up; + case CLKSRC_MMIO_W_DOWN: + return &readw_mmio_down; + case CLKSRC_DMMIO_L_UP: + return &readl_dmmio_up; + case CLKSRC_DMMIO_W_UP: + return &readw_dmmio_up; + default: + return NULL; + } +} + +static __always_inline u16 to_cs_type(u32 cs_type_seq) +{ + return cs_type_seq >> 16; +} + +static __always_inline u16 to_seq(u32 cs_type_seq) +{ + return cs_type_seq; +} + +static __always_inline u32 to_cs_type_seq(u16 type, u16 seq) +{ + return (u32)type << 16U | seq; +} + +static notrace noinline __cold +void map_clocksource(const struct vdso_data *vd, struct vdso_priv *vp, + u32 seq, u32 new_cs_type_seq) +{ + vdso_read_cycles_t *read_cycles = NULL; + u32 new_cs_seq, new_cs_type; + struct clksrc_info *info; + int fd, ret; + 
new_cs_seq = to_seq(new_cs_type_seq); + new_cs_type = to_cs_type(new_cs_type_seq); + info = &vp->clksrc_info[new_cs_type]; + + if (new_cs_type < CLOCKSOURCE_VDSO_MMIO) + goto done; + + fd = clock_open_device(vd->cs_mmdev, O_RDONLY); + if (fd < 0) + goto fallback_to_syscall; + + if (vdso_read_retry(vd, seq)) { + vdso_read_begin(vd); + if (to_seq(vd->cs_type_seq) != new_cs_seq) { + /* + * cs_mmdev no longer corresponds to + * vd->cs_type_seq. + */ + clock_close_device(fd); + return; + } + } + + ret = clock_ioctl_device(fd, CLKSRC_USER_MMIO_MAP, (long)&info->mmio); + clock_close_device(fd); + if (ret < 0) + goto fallback_to_syscall; + + read_cycles = get_mmio_read_cycles(info->mmio.type); + if (read_cycles == NULL) /* Mmhf, misconfigured. */ + goto fallback_to_syscall; +done: + info->read_cycles = read_cycles; + smp_wmb(); + new_cs_type_seq = to_cs_type_seq(new_cs_type, new_cs_seq); + WRITE_ONCE(vp->current_cs_type_seq, new_cs_type_seq); + + return; + +fallback_to_syscall: + new_cs_type = CLOCKSOURCE_VDSO_NONE; + info = &vp->clksrc_info[new_cs_type]; + goto done; +} + +static inline notrace +bool get_hw_counter(const struct vdso_data *vd, u32 *r_seq, u64 *cycles) +{ + const struct clksrc_info *info; + struct vdso_priv *vp; + u32 seq, cs_type_seq; + unsigned int cs; + + vp = __arch_get_vdso_priv(); + + for (;;) { + seq = vdso_read_begin(vd); + cs_type_seq = READ_ONCE(vp->current_cs_type_seq); + if (likely(to_seq(cs_type_seq) == to_seq(vd->cs_type_seq))) + break; + + map_clocksource(vd, vp, seq, vd->cs_type_seq); + } + + switch (to_cs_type(cs_type_seq)) { + case CLOCKSOURCE_VDSO_NONE: + return false; /* Use fallback. */ + case CLOCKSOURCE_VDSO_ARCHITECTED: + if (unlikely(!vdso_clocksource_ok(vd))) + return false; + *cycles = __arch_get_hw_counter(vd->clock_mode, vd); + if (unlikely(!vdso_cycles_ok(*cycles))) + return false; + break; + default: + cs = to_cs_type(READ_ONCE(cs_type_seq)); + info = &vp->clksrc_info[cs]; + *cycles = info->read_cycles(info); + break; + } + + *r_seq = seq; + + return true; +} + +#else + +static inline notrace +bool get_hw_counter(const struct vdso_data *vd, u32 *r_seq, u64 *cycles) +{ + *r_seq = vdso_read_begin(vd); + + /* + * CAUTION: checking the clocksource mode must happen inside + * the seqlocked section. + */ + if (unlikely(!vdso_clocksource_ok(vd))) + return false; + + *cycles = __arch_get_hw_counter(vd->clock_mode, vd); + if (unlikely(!vdso_cycles_ok(*cycles))) + return false; + + return true; +} + +#endif /* CONFIG_GENERIC_CLOCKSOURCE_VDSO */ + #ifndef vdso_calc_delta /* * Default implementation which works for all sane clocksources. That @@ -31,20 +270,6 @@ } #endif -#ifndef vdso_clocksource_ok -static inline bool vdso_clocksource_ok(const struct vdso_data *vd) -{ - return vd->clock_mode != VDSO_CLOCKMODE_NONE; -} -#endif - -#ifndef vdso_cycles_ok -static inline bool vdso_cycles_ok(u64 cycles) -{ - return true; -} -#endif - #ifdef CONFIG_TIME_NS static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, struct __kernel_timespec *ts) @@ -63,13 +288,7 @@ vdso_ts = &vd->basetime[clk]; do { - seq = vdso_read_begin(vd); - - if (unlikely(!vdso_clocksource_ok(vd))) - return -1; - - cycles = __arch_get_hw_counter(vd->clock_mode, vd); - if (unlikely(!vdso_cycles_ok(cycles))) + if (!get_hw_counter(vd, &seq, &cycles)) return -1; ns = vdso_ts->nsec; last = vd->cycle_last; @@ -117,30 +336,29 @@ do { /* - * Open coded to handle VDSO_CLOCKMODE_TIMENS. 
Time namespace - * enabled tasks have a special VVAR page installed which - * has vd->seq set to 1 and vd->clock_mode set to - * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks - * this does not affect performance because if vd->seq is - * odd, i.e. a concurrent update is in progress the extra + * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time + * namespace enabled tasks have a special VVAR page + * installed which has vd->seq set to 1 and + * vd->clock_mode set to VDSO_CLOCKMODE_TIMENS. For + * non time namespace affected tasks this does not + * affect performance because if vd->seq is odd, + * i.e. a concurrent update is in progress the extra * check for vd->clock_mode is just a few extra - * instructions while spin waiting for vd->seq to become - * even again. + * instructions while spin waiting for vd->seq to + * become even again. */ while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) { if (IS_ENABLED(CONFIG_TIME_NS) && - vd->clock_mode == VDSO_CLOCKMODE_TIMENS) + vd->clock_mode == VDSO_CLOCKMODE_TIMENS) return do_hres_timens(vd, clk, ts); cpu_relax(); } + smp_rmb(); - if (unlikely(!vdso_clocksource_ok(vd))) + if (!get_hw_counter(vd, &seq, &cycles)) return -1; - cycles = __arch_get_hw_counter(vd->clock_mode, vd); - if (unlikely(!vdso_cycles_ok(cycles))) - return -1; ns = vdso_ts->nsec; last = vd->cycle_last; ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); diff --git a/kernel/mm/ioremap.c b/kernel/mm/ioremap.c index 5fa1ab4..4071aa6 100644 --- a/kernel/mm/ioremap.c +++ b/kernel/mm/ioremap.c @@ -241,6 +241,7 @@ break; } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); + arch_advertise_page_mapping(start, end); flush_cache_vmap(start, end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) diff --git a/kernel/mm/kasan/report.c b/kernel/mm/kasan/report.c index 8fff182..ea779a4 100644 --- a/kernel/mm/kasan/report.c +++ b/kernel/mm/kasan/report.c @@ -73,7 +73,7 @@ info->access_addr, current->comm, task_pid_nr(current)); } -static DEFINE_SPINLOCK(report_lock); +static DEFINE_HARD_SPINLOCK(report_lock); static void start_report(unsigned long *flags) { @@ -81,7 +81,7 @@ * Make sure we don't end up in loop. */ kasan_disable_current(); - spin_lock_irqsave(&report_lock, *flags); + raw_spin_lock_irqsave(&report_lock, *flags); pr_err("==================================================================\n"); } @@ -91,7 +91,7 @@ trace_error_report_end(ERROR_DETECTOR_KASAN, addr); pr_err("==================================================================\n"); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); - spin_unlock_irqrestore(&report_lock, *flags); + raw_spin_unlock_irqrestore(&report_lock, *flags); if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) { /* * This thread may hit another WARN() in the panic path. diff --git a/kernel/mm/memory.c b/kernel/mm/memory.c index 834078a..51ec122 100644 --- a/kernel/mm/memory.c +++ b/kernel/mm/memory.c @@ -845,6 +845,14 @@ return 1; /* + * If the source mm belongs to a Dovetail-enabled process, we + * don't want to impose the COW-induced latency on it: make + * sure the child gets its own copy of the page. + */ + if (dovetailing() && test_bit(MMF_DOVETAILED, &src_mm->flags)) + goto do_copy; + + /* * What we want to do is to check whether this page may * have been pinned by the parent process. 
If so, * instead of wrprotect the pte on both sides, we copy @@ -862,6 +870,7 @@ if (likely(!page_maybe_dma_pinned(page))) return 1; +do_copy: /* * The vma->anon_vma of the child process may be NULL * because the entire vma does not contain anonymous pages. @@ -5696,6 +5705,15 @@ void __might_fault(const char *file, int line) { /* + * When running over the oob stage (e.g. some co-kernel's own + * thread), we should only make sure to run with hw IRQs + * enabled before accessing the memory. + */ + if (running_oob()) { + WARN_ON_ONCE(hard_irqs_disabled()); + return; + } + /* * Some code (nfs/sunrpc) uses socket ops on kernel memory while * holding the mmap_lock, this is safe because kernel memory doesn't * get paged out, therefore we'll never actually fault, and the diff --git a/kernel/mm/mprotect.c b/kernel/mm/mprotect.c index c1c3315..2015be9 100644 --- a/kernel/mm/mprotect.c +++ b/kernel/mm/mprotect.c @@ -41,7 +41,7 @@ { pte_t *pte, oldpte; spinlock_t *ptl; - unsigned long pages = 0; + unsigned long pages = 0, flags; int target_node = NUMA_NO_NODE; bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT; bool prot_numa = cp_flags & MM_CP_PROT_NUMA; @@ -113,6 +113,7 @@ continue; } + flags = hard_local_irq_save(); oldpte = ptep_modify_prot_start(vma, addr, pte); ptent = pte_modify(oldpte, newprot); if (preserve_write) @@ -138,6 +139,7 @@ ptent = pte_mkwrite(ptent); } ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); + hard_local_irq_restore(flags); pages++; } else if (is_swap_pte(oldpte)) { swp_entry_t entry = pte_to_swp_entry(oldpte); diff --git a/kernel/mm/vmalloc.c b/kernel/mm/vmalloc.c index 3b56c30..7fccdee 100644 --- a/kernel/mm/vmalloc.c +++ b/kernel/mm/vmalloc.c @@ -272,6 +272,10 @@ return 0; } +void __weak arch_advertise_page_mapping(unsigned long start, unsigned long end) +{ +} + /** * map_kernel_range_noflush - map kernel VM area with the specified pages * @addr: start of the VM area to map @@ -315,6 +319,8 @@ if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); + arch_advertise_page_mapping(start, end); + return 0; } diff --git a/kernel/modules-only.symvers b/kernel/modules-only.symvers index 59b0cfa..c3863bf 100644 --- a/kernel/modules-only.symvers +++ b/kernel/modules-only.symvers @@ -1,18 +1,15 @@ 0x00000000 stv0288_attach drivers/media/dvb-frontends/stv0288 EXPORT_SYMBOL 0x00000000 cx24123_attach drivers/media/dvb-frontends/cx24123 EXPORT_SYMBOL -0x00000000 rtkm_kzalloc drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm EXPORT_SYMBOL 0x00000000 au8522_led_ctrl drivers/media/dvb-frontends/au8522_common EXPORT_SYMBOL 0x00000000 dib0070_wbd_offset drivers/media/dvb-frontends/dib0070 EXPORT_SYMBOL 0x00000000 stv0910_attach drivers/media/dvb-frontends/stv0910 EXPORT_SYMBOL_GPL 0x00000000 stv6110_attach drivers/media/dvb-frontends/stv6110 EXPORT_SYMBOL 0x00000000 lnbp22_attach drivers/media/dvb-frontends/lnbp22 EXPORT_SYMBOL 0x00000000 s921_attach drivers/media/dvb-frontends/s921 EXPORT_SYMBOL -0x00000000 dhd_wlan_mem_prealloc drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_static_buf EXPORT_SYMBOL 0x00000000 zl10036_attach drivers/media/dvb-frontends/zl10036 EXPORT_SYMBOL 0x00000000 s5h1411_attach drivers/media/dvb-frontends/s5h1411 EXPORT_SYMBOL 0x00000000 zd1301_demod_get_dvb_frontend drivers/media/dvb-frontends/zd1301_demod EXPORT_SYMBOL 0x00000000 dib3000mc_pid_parse drivers/media/dvb-frontends/dib3000mc EXPORT_SYMBOL -0x00000000 rtkm_kmalloc drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm EXPORT_SYMBOL 0x00000000 m88ds3103_attach 
drivers/media/dvb-frontends/m88ds3103 EXPORT_SYMBOL 0x00000000 dib9000_set_gpio drivers/media/dvb-frontends/dib9000 EXPORT_SYMBOL 0x00000000 xc4000_attach drivers/media/tuners/xc4000 EXPORT_SYMBOL @@ -20,7 +17,6 @@ 0x00000000 au8522_release_state drivers/media/dvb-frontends/au8522_common EXPORT_SYMBOL 0x00000000 lgs8gl5_attach drivers/media/dvb-frontends/lgs8gl5 EXPORT_SYMBOL 0x00000000 dib0090_update_rframp_7090 drivers/media/dvb-frontends/dib0090 EXPORT_SYMBOL -0x00000000 rtkm_prealloc_destroy drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm EXPORT_SYMBOL 0x00000000 cxd2820r_attach drivers/media/dvb-frontends/cxd2820r EXPORT_SYMBOL 0x00000000 zl10039_attach drivers/media/dvb-frontends/zl10039 EXPORT_SYMBOL 0x00000000 dib7000p_attach drivers/media/dvb-frontends/dib7000p EXPORT_SYMBOL @@ -98,7 +94,6 @@ 0x00000000 dib9000_set_slave_frontend drivers/media/dvb-frontends/dib9000 EXPORT_SYMBOL 0x00000000 ds3000_attach drivers/media/dvb-frontends/ds3000 EXPORT_SYMBOL 0x00000000 dib0070_ctrl_agc_filter drivers/media/dvb-frontends/dib0070 EXPORT_SYMBOL -0x00000000 rtkm_dump_mstatus drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm EXPORT_SYMBOL 0x00000000 dib0090_set_tune_state drivers/media/dvb-frontends/dib0090 EXPORT_SYMBOL 0x00000000 dib0090_get_tune_state drivers/media/dvb-frontends/dib0090 EXPORT_SYMBOL 0x00000000 stv0297_attach drivers/media/dvb-frontends/stv0297 EXPORT_SYMBOL @@ -138,7 +133,6 @@ 0x00000000 tda8083_attach drivers/media/dvb-frontends/tda8083 EXPORT_SYMBOL 0x00000000 dib0090_get_wbd_target drivers/media/dvb-frontends/dib0090 EXPORT_SYMBOL 0x00000000 lgs8gxx_attach drivers/media/dvb-frontends/lgs8gxx EXPORT_SYMBOL -0x00000000 rtkm_kfree drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm EXPORT_SYMBOL 0x00000000 nxt200x_attach drivers/media/dvb-frontends/nxt200x EXPORT_SYMBOL 0x00000000 tda826x_attach drivers/media/dvb-frontends/tda826x EXPORT_SYMBOL 0x00000000 dib9000_fw_pid_filter drivers/media/dvb-frontends/dib9000 EXPORT_SYMBOL @@ -147,7 +141,6 @@ 0x00000000 au8522_readreg drivers/media/dvb-frontends/au8522_common EXPORT_SYMBOL 0x00000000 stv6111_attach drivers/media/dvb-frontends/stv6111 EXPORT_SYMBOL_GPL 0x00000000 or51132_attach drivers/media/dvb-frontends/or51132 EXPORT_SYMBOL -0x00000000 rtkm_prealloc_init drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm EXPORT_SYMBOL 0x00000000 dib9000_get_tuner_interface drivers/media/dvb-frontends/dib9000 EXPORT_SYMBOL 0x00000000 dibx000_reset_i2c_master drivers/media/dvb-frontends/dibx000_common EXPORT_SYMBOL 0x00000000 cx24110_attach drivers/media/dvb-frontends/cx24110 EXPORT_SYMBOL @@ -163,7 +156,6 @@ 0x00000000 dib0090_register drivers/media/dvb-frontends/dib0090 EXPORT_SYMBOL 0x00000000 cx24113_attach drivers/media/dvb-frontends/cx24113 EXPORT_SYMBOL 0x00000000 fc0013_rc_cal_reset drivers/media/tuners/fc0013 EXPORT_SYMBOL -0x00000000 rtkm_set_trace drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm EXPORT_SYMBOL 0x00000000 stv0900_attach drivers/media/dvb-frontends/stv0900 EXPORT_SYMBOL 0x00000000 stb6000_attach drivers/media/dvb-frontends/stb6000 EXPORT_SYMBOL 0x00000000 lnbp21_attach drivers/media/dvb-frontends/lnbp21 EXPORT_SYMBOL diff --git a/kernel/modules.builtin.modinfo b/kernel/modules.builtin.modinfo index 9e0d13e..7f461d2 100644 --- a/kernel/modules.builtin.modinfo +++ b/kernel/modules.builtin.modinfo Binary files differ diff --git a/kernel/net/Kconfig b/kernel/net/Kconfig index d656716..0d39d1f 100644 --- a/kernel/net/Kconfig +++ b/kernel/net/Kconfig @@ -58,6 +58,9 @@ config SKB_EXTENSIONS bool +config NET_OOB + bool 
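The kasan/report.c and mm/mprotect.c hunks above follow a single conversion pattern: in-band-only synchronization (a regular spinlock, plain PTE updates) is made pipeline-safe by masking hardware IRQs, so the protected section can no longer be preempted by the out-of-band stage. A minimal sketch of that pattern follows; it assumes only the primitives those hunks themselves use (DEFINE_HARD_SPINLOCK paired with the raw_spin_* API, and hard_local_irq_save/restore), and every my_* name is hypothetical.

/*
 * Illustrative sketch, not part of the patch: mirrors the
 * kasan/mprotect conversions above. A hard spinlock keeps hardware
 * IRQs off while held, so neither an in-band preemption nor the
 * out-of-band stage can interrupt the critical section.
 */
#include <linux/spinlock.h>
#include <linux/printk.h>

static DEFINE_HARD_SPINLOCK(my_log_lock);	/* was: DEFINE_SPINLOCK() */

void my_log_event(const char *msg)
{
	unsigned long flags;

	/* Hard locks pair with the raw_spin_* API, as in report.c above. */
	raw_spin_lock_irqsave(&my_log_lock, flags);
	pr_warn("%s\n", msg);
	raw_spin_unlock_irqrestore(&my_log_lock, flags);
}

/* Bare hard IRQ masking, as added around ptep_modify_prot_*() above. */
void my_update_shared_word(unsigned long *word, unsigned long val)
{
	unsigned long flags;

	flags = hard_local_irq_save();
	WRITE_ONCE(*word, val);
	hard_local_irq_restore(flags);
}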
+ menu "Networking options" source "net/packet/Kconfig" diff --git a/kernel/net/core/dev.c b/kernel/net/core/dev.c index bc5dcf5..01d2396 100644 --- a/kernel/net/core/dev.c +++ b/kernel/net/core/dev.c @@ -3111,6 +3111,10 @@ } else if (likely(!refcount_dec_and_test(&skb->users))) { return; } + + if (recycle_oob_skb(skb)) + return; + get_kfree_skb_cb(skb)->reason = reason; local_irq_save(flags); skb->next = __this_cpu_read(softnet_data.completion_queue); @@ -3584,7 +3588,12 @@ unsigned int len; int rc; - if (dev_nit_active(dev)) + /* + * Clone-relay outgoing packet to listening taps. Network taps + * interested in out-of-band traffic should be handled by the + * companion core. + */ + if (dev_nit_active(dev) && !skb_is_oob(skb)) dev_queue_xmit_nit(skb, dev); len = skb->len; @@ -4797,6 +4806,81 @@ } EXPORT_SYMBOL_GPL(do_xdp_generic); +#ifdef CONFIG_NET_OOB + +__weak bool netif_oob_deliver(struct sk_buff *skb) +{ + return false; +} + +__weak int netif_xmit_oob(struct sk_buff *skb) +{ + return NET_XMIT_DROP; +} + +static bool netif_receive_oob(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + + if (dev && netif_oob_diversion(dev)) + return netif_oob_deliver(skb); + + return false; +} + +static bool netif_receive_oob_list(struct list_head *head) +{ + struct sk_buff *skb, *next; + struct net_device *dev; + + if (list_empty(head)) + return false; + + dev = list_first_entry(head, struct sk_buff, list)->dev; + if (!dev || !netif_oob_diversion(dev)) + return false; + + /* Callee dequeues every skb it consumes. */ + list_for_each_entry_safe(skb, next, head, list) + netif_oob_deliver(skb); + + return list_empty(head); +} + +__weak void netif_oob_run(struct net_device *dev) +{ } + +static void napi_complete_oob(struct napi_struct *n) +{ + struct net_device *dev = n->dev; + + if (netif_oob_diversion(dev)) + netif_oob_run(dev); +} + +__weak void skb_inband_xmit_backlog(void) +{ } + +#else + +static inline bool netif_receive_oob(struct sk_buff *skb) +{ + return false; +} + +static inline bool netif_receive_oob_list(struct list_head *head) +{ + return false; +} + +static inline void napi_complete_oob(struct napi_struct *n) +{ } + +static inline void skb_inband_xmit_backlog(void) +{ } + +#endif + static int netif_rx_internal(struct sk_buff *skb) { int ret; @@ -4895,6 +4979,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); + + skb_inband_xmit_backlog(); if (sd->completion_queue) { struct sk_buff *clist; @@ -5639,6 +5725,9 @@ { int ret; + if (netif_receive_oob(skb)) + return NET_RX_SUCCESS; + trace_netif_receive_skb_entry(skb); ret = netif_receive_skb_internal(skb); @@ -5662,6 +5751,8 @@ { struct sk_buff *skb; + if (netif_receive_oob_list(head)) + return; if (list_empty(head)) return; if (trace_netif_receive_skb_list_entry_enabled()) { @@ -6152,6 +6243,9 @@ { gro_result_t ret; + if (netif_receive_oob(skb)) + return GRO_NORMAL; + skb_mark_napi_id(skb, napi); trace_napi_gro_receive_entry(skb); @@ -6489,6 +6583,8 @@ unsigned long flags, val, new, timeout = 0; bool ret = true; + napi_complete_oob(n); + /* * 1) Don't let napi dequeue from the cpu poll list * just in case its running on a different cpu. 
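The net/core/dev.c hunk above connects the in-band stack to a companion core exclusively through __weak hooks (netif_oob_deliver, netif_xmit_oob, netif_oob_run) whose defaults do nothing, so the core kernel builds and behaves unchanged until a companion core supplies strong definitions. A hedged sketch of such an override follows; only the hook names and signatures come from the patch, while every my_* helper, the ethertype value, and the (omitted) locking model are hypothetical placeholders.

/*
 * Illustrative sketch, not part of the patch: a companion core
 * overriding the __weak NET_OOB hooks declared in net/core/dev.c.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff_head my_oob_rxq;	/* assume initialized at attach */

bool netif_oob_deliver(struct sk_buff *skb)
{
	/* Hypothetical filter: divert only our ethertype. */
	if (skb->protocol != htons(0x9021))
		return false;	/* Not consumed, in-band path proceeds. */

	/* Returning true tells netif_receive_oob() the skb is ours. */
	skb_queue_tail(&my_oob_rxq, skb);
	return true;
}

void netif_oob_run(struct net_device *dev)
{
	struct sk_buff *skb;

	/* Called from napi_complete_oob() while diversion is active. */
	while ((skb = skb_dequeue(&my_oob_rxq)) != NULL)
		dev_kfree_skb(skb);	/* A real core would process it here. */
}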
diff --git a/kernel/net/core/net-sysfs.c b/kernel/net/core/net-sysfs.c
index 989b3f7..a467fca 100644
--- a/kernel/net/core/net-sysfs.c
+++ b/kernel/net/core/net-sysfs.c
@@ -386,6 +386,54 @@
 }
 NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);
 
+#ifdef CONFIG_NET_OOB
+
+__weak int netif_oob_switch_port(struct net_device *dev, bool enabled)
+{
+	return 0;
+}
+
+__weak bool netif_oob_get_port(struct net_device *dev)
+{
+	return false;
+}
+
+__weak ssize_t netif_oob_query_pool(struct net_device *dev, char *buf)
+{
+	return -EIO;
+}
+
+static int switch_oob_port(struct net_device *dev, unsigned long enable)
+{
+	return netif_oob_switch_port(dev, (bool)enable);
+}
+
+static ssize_t oob_port_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t len)
+{
+	return netdev_store(dev, attr, buf, len, switch_oob_port);
+}
+
+static ssize_t oob_port_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+
+	return sprintf(buf, fmt_dec, netif_oob_get_port(netdev));
+}
+static DEVICE_ATTR_RW(oob_port);
+
+static ssize_t oob_pool_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+
+	return netif_oob_query_pool(netdev, buf);
+}
+static DEVICE_ATTR_RO(oob_pool);
+
+#endif
+
 static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
 {
 	WRITE_ONCE(dev->gro_flush_timeout, val);
@@ -619,6 +667,10 @@
 	&dev_attr_proto_down.attr,
 	&dev_attr_carrier_up_count.attr,
 	&dev_attr_carrier_down_count.attr,
+#ifdef CONFIG_NET_OOB
+	&dev_attr_oob_port.attr,
+	&dev_attr_oob_pool.attr,
+#endif
 	NULL,
 };
 ATTRIBUTE_GROUPS(net_class);
diff --git a/kernel/net/core/skbuff.c b/kernel/net/core/skbuff.c
index 382dbdc..ba8222f 100644
--- a/kernel/net/core/skbuff.c
+++ b/kernel/net/core/skbuff.c
@@ -291,6 +291,108 @@
 	return skb;
 }
 
+#ifdef CONFIG_NET_OOB
+
+struct sk_buff *__netdev_alloc_oob_skb(struct net_device *dev, size_t len,
+				       size_t headroom, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	headroom = ALIGN(NET_SKB_PAD + headroom, NET_SKB_PAD);
+	skb = __alloc_skb(len + headroom, gfp_mask,
+			  SKB_ALLOC_RX, NUMA_NO_NODE);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, headroom);
+	skb->dev = dev;
+	skb->oob = true;
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(__netdev_alloc_oob_skb);
+
+void __netdev_free_oob_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	skb->oob = false;
+	skb->oob_clone = false;
+	dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(__netdev_free_oob_skb);
+
+void netdev_reset_oob_skb(struct net_device *dev, struct sk_buff *skb,
+			  size_t headroom)
+{
+	unsigned char *data = skb->head; /* Always from kmalloc_reserve(). */
+
+	if (WARN_ON_ONCE(!skb->oob || skb->oob_clone))
+		return;
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	__build_skb_around(skb, data, 0);
+	headroom = ALIGN(NET_SKB_PAD + headroom, NET_SKB_PAD);
+	skb_reserve(skb, headroom);
+	skb->oob = true;
+	skb->dev = dev;
+}
+EXPORT_SYMBOL_GPL(netdev_reset_oob_skb);
+
+struct sk_buff *skb_alloc_oob_head(gfp_t gfp_mask)
+{
+	struct sk_buff *skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+
+	if (!skb)
+		return NULL;
+
+	/*
+	 * skb heads allocated for out-of-band traffic should be
+	 * reserved for clones, so memset is extraneous in the sense
+	 * that skb_morph_oob_skb() should follow the allocation.
+ */ + memset(skb, 0, offsetof(struct sk_buff, tail)); + refcount_set(&skb->users, 1); + skb->oob_clone = true; + skb_set_kcov_handle(skb, kcov_common_handle()); + + return skb; +} +EXPORT_SYMBOL_GPL(skb_alloc_oob_head); + +static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb); + +void skb_morph_oob_skb(struct sk_buff *n, struct sk_buff *skb) +{ + __skb_clone(n, skb); + n->oob = true; + n->oob_clone = true; + skb->oob_cloned = true; +} +EXPORT_SYMBOL_GPL(skb_morph_oob_skb); + +bool skb_release_oob_skb(struct sk_buff *skb, int *dref) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + + if (!skb_unref(skb)) + return false; + + /* + * ->nohdr is never set for oob shells, so we always refcount + * the full data (header + payload) when cloned. + */ + *dref = skb->cloned ? atomic_sub_return(1, &shinfo->dataref) : 0; + + return true; +} +EXPORT_SYMBOL_GPL(skb_release_oob_skb); + +__weak bool skb_oob_recycle(struct sk_buff *skb) +{ + return false; +} + +#endif /* CONFIG_NET_OOB */ + /** * __build_skb - build a network buffer * @data: data buffer provided by caller @@ -691,6 +793,9 @@ void __kfree_skb(struct sk_buff *skb) { + if (recycle_oob_skb(skb)) + return; + skb_release_all(skb); kfree_skbmem(skb); } @@ -884,6 +989,9 @@ struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); /* drop skb->head and call any destructors for packet */ + if (recycle_oob_skb(skb)) + return; + skb_release_all(skb); /* record skb to CPU local list */ @@ -903,6 +1011,9 @@ } void __kfree_skb_defer(struct sk_buff *skb) { + if (recycle_oob_skb(skb)) + return; + _kfree_skb_defer(skb); } @@ -926,6 +1037,9 @@ return; } + if (recycle_oob_skb(skb)) + return; + _kfree_skb_defer(skb); } EXPORT_SYMBOL(napi_consume_skb); @@ -946,6 +1060,7 @@ skb_dst_copy(new, old); __skb_ext_copy(new, old); __nf_copy(new, old, false); + __skb_oob_copy(new, old); /* Note : this field could be in headers_start/headers_end section * It is not yet because we do not want to have a 16 bit hole diff --git a/kernel/net/packet/af_packet.c b/kernel/net/packet/af_packet.c index eaa030e..9a19b4a 100644 --- a/kernel/net/packet/af_packet.c +++ b/kernel/net/packet/af_packet.c @@ -3309,6 +3309,7 @@ po = pkt_sk(sk); init_completion(&po->skb_completion); sk->sk_family = PF_PACKET; + sk->sk_protocol = protocol; po->num = proto; po->xmit = dev_queue_xmit; diff --git a/kernel/net/sched/Kconfig b/kernel/net/sched/Kconfig index bc4e5da..1bcf1b9 100644 --- a/kernel/net/sched/Kconfig +++ b/kernel/net/sched/Kconfig @@ -117,6 +117,29 @@ To compile this code as a module, choose M here: the module will be called sch_multiq. +config NET_SCH_OOB + tristate "Out-of-band packet queuing (OOB)" + depends on NET_OOB + help + Say Y here if you want to use a Dovetail-aware packet + scheduler for prioritizing egress traffic between the + regular (in-band) network stack and a companion core. This + scheduler helps in two cases: + + - for sending high priority packets originating from the + out-of-band stage to NICs which cannot handle outgoing + packets from that stage directly. In this case, these + packets take precedence over regular traffic for + transmission. + + - for sharing an out-of-band capable interface between the + in-band and out-of-band network stacks, proxying regular + traffic originating from the in-band stage to NICs which + will be processing all packets from the out-of-band stage. + + To compile this code as a module, choose M here: the + module will be called sch_oob. 
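The skbuff.c helpers above describe a complete buffer lifecycle for a companion core: preallocate oob-capable skbs up front, hand them to the driver, then recycle each consumed buffer with netdev_reset_oob_skb() rather than releasing it to the slab, which keeps allocation off the hot out-of-band path. A hedged sketch of that lifecycle follows, using only the helpers this patch exports; the pool size, buffer length, and my_* names are illustrative assumptions.

/*
 * Illustrative sketch, not part of the patch: preallocating and
 * recycling oob skbs with the helpers added to net/core/skbuff.c.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_OOB_POOL_SIZE	128	/* hypothetical */
#define MY_OOB_BUF_LEN		1600	/* hypothetical, fits one frame */

static struct sk_buff_head my_oob_pool;

int my_oob_pool_init(struct net_device *dev)
{
	struct sk_buff *skb;
	int n;

	skb_queue_head_init(&my_oob_pool);

	for (n = 0; n < MY_OOB_POOL_SIZE; n++) {
		/* The allocator sets skb->oob for us (see above). */
		skb = __netdev_alloc_oob_skb(dev, MY_OOB_BUF_LEN,
					     NET_SKB_PAD, GFP_KERNEL);
		if (skb == NULL)
			return -ENOMEM;
		skb_queue_tail(&my_oob_pool, skb);
	}

	return 0;
}

/* Recycle a consumed buffer instead of freeing it to the slab. */
void my_oob_pool_recycle(struct net_device *dev, struct sk_buff *skb)
{
	netdev_reset_oob_skb(dev, skb, NET_SKB_PAD);
	skb_queue_tail(&my_oob_pool, skb);
}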
+
 config NET_SCH_RED
 	tristate "Random Early Detection (RED)"
 	help
diff --git a/kernel/net/sched/Makefile b/kernel/net/sched/Makefile
index 66bbf9a..20fc082 100644
--- a/kernel/net/sched/Makefile
+++ b/kernel/net/sched/Makefile
@@ -45,6 +45,7 @@
 obj-$(CONFIG_NET_SCH_TEQL)	+= sch_teql.o
 obj-$(CONFIG_NET_SCH_PRIO)	+= sch_prio.o
 obj-$(CONFIG_NET_SCH_MULTIQ)	+= sch_multiq.o
+obj-$(CONFIG_NET_SCH_OOB)	+= sch_oob.o
 obj-$(CONFIG_NET_SCH_ATM)	+= sch_atm.o
 obj-$(CONFIG_NET_SCH_NETEM)	+= sch_netem.o
 obj-$(CONFIG_NET_SCH_DRR)	+= sch_drr.o
diff --git a/kernel/net/sched/sch_oob.c b/kernel/net/sched/sch_oob.c
new file mode 100644
index 0000000..22373e8
--- /dev/null
+++ b/kernel/net/sched/sch_oob.c
@@ -0,0 +1,294 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+
+/*
+ * With Qdisc[2], 0=oob_fallback and 1=inband. The user can graft
+ * whatever qdisc on these slots; slot 0 is preset to pfifo, slot 1
+ * to pfifo_fast. skb->oob is checked to determine which qdisc
+ * should handle the packet eventually.
+ */
+
+struct oob_qdisc_priv {
+	struct Qdisc *qdisc[2];	/* 0=oob_fallback, 1=in-band */
+	struct tcf_proto __rcu *filter_list;
+	struct tcf_block *block;
+};
+
+static int oob_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		       struct sk_buff **to_free)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+	struct net_device *dev = skb->dev;
+	struct Qdisc *qdisc;
+	int ret;
+
+	/*
+	 * If the device accepts oob traffic and can handle it
+	 * directly from the oob stage, pass the outgoing packet to
+	 * the transmit handler of the oob stack. This makes sure that
+	 * all traffic, including the in-band one, flows through the
+	 * oob stack which may implement its own queuing discipline.
+	 *
+	 * netif_xmit_oob() might fail handling the packet, in which
+	 * case we leave it to the in-band packet scheduler, applying
+	 * a best-effort strategy by giving higher priority to oob
+	 * packets over mere in-band traffic.
+	 */
+	if (dev && netif_oob_diversion(dev) && netdev_is_oob_capable(dev)) {
+		ret = netif_xmit_oob(skb);
+		if (ret == NET_XMIT_SUCCESS)
+			return NET_XMIT_SUCCESS;
+	}
+
+	/*
+	 * Out-of-band fast lane is closed. Best effort: use a special
+	 * 'high priority' queue for oob packets we handle from
+	 * in-band context the usual way through the common stack.
+	 */
+	qdisc = skb->oob ? p->qdisc[0] : p->qdisc[1];
+	ret = qdisc_enqueue(skb, qdisc, to_free);
+	if (ret == NET_XMIT_SUCCESS) {
+		sch->q.qlen++;
+		return NET_XMIT_SUCCESS;
+	}
+
+	if (net_xmit_drop_count(ret))
+		qdisc_qstats_drop(sch);
+
+	return ret;
+}
+
+static struct sk_buff *oob_dequeue(struct Qdisc *sch)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+	struct sk_buff *skb;
+	struct Qdisc *qdisc;
+	int band;
+
+	/*
+	 * First try to dequeue pending out-of-band packets. If none,
+	 * then check for in-band traffic.
+ */ + for (band = 0; band < 2; band++) { + qdisc = p->qdisc[band]; + skb = qdisc->dequeue(qdisc); + if (skb) { + qdisc_bstats_update(sch, skb); + sch->q.qlen--; + return skb; + } + } + + return NULL; +} + +static struct sk_buff *oob_peek(struct Qdisc *sch) +{ + struct oob_qdisc_priv *p = qdisc_priv(sch); + struct sk_buff *skb; + struct Qdisc *qdisc; + int band; + + for (band = 0; band < 2; band++) { + qdisc = p->qdisc[band]; + skb = qdisc->ops->peek(qdisc); + if (skb) + return skb; + } + + return NULL; +} + +static int oob_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct oob_qdisc_priv *p = qdisc_priv(sch); + int ret; + + ret = tcf_block_get(&p->block, &p->filter_list, sch, extack); + if (ret) + return ret; + + p->qdisc[0] = qdisc_create_dflt(sch->dev_queue, + &pfifo_qdisc_ops, sch->handle, + extack); + p->qdisc[1] = qdisc_create_dflt(sch->dev_queue, + &pfifo_fast_ops, sch->handle, + extack); + + return 0; +} + +static void oob_reset(struct Qdisc *sch) +{ + struct oob_qdisc_priv *p = qdisc_priv(sch); + + qdisc_reset(p->qdisc[0]); + qdisc_reset(p->qdisc[1]); + sch->q.qlen = 0; +} + +static void oob_destroy(struct Qdisc *sch) +{ + struct oob_qdisc_priv *p = qdisc_priv(sch); + + tcf_block_put(p->block); + qdisc_put(p->qdisc[0]); + qdisc_put(p->qdisc[1]); +} + +static int oob_tune(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + return 0; +} + +static int oob_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + return skb->len; +} + +static int oob_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, + struct Qdisc **old, struct netlink_ext_ack *extack) +{ + struct oob_qdisc_priv *p = qdisc_priv(sch); + unsigned long band = arg - 1; + + if (new == NULL) + new = &noop_qdisc; + + *old = qdisc_replace(sch, new, &p->qdisc[band]); + + return 0; +} + +static struct Qdisc * +oob_leaf(struct Qdisc *sch, unsigned long arg) +{ + struct oob_qdisc_priv *p = qdisc_priv(sch); + unsigned long band = arg - 1; + + return p->qdisc[band]; +} + +static unsigned long oob_find(struct Qdisc *sch, u32 classid) +{ + unsigned long band = TC_H_MIN(classid); + + return band - 1 >= 2 ? 
0 : band; +} + +static int oob_dump_class(struct Qdisc *sch, unsigned long cl, + struct sk_buff *skb, struct tcmsg *tcm) +{ + struct oob_qdisc_priv *p = qdisc_priv(sch); + + tcm->tcm_handle |= TC_H_MIN(cl); + tcm->tcm_info = p->qdisc[cl - 1]->handle; + + return 0; +} + +static int oob_dump_class_stats(struct Qdisc *sch, unsigned long cl, + struct gnet_dump *d) +{ + struct oob_qdisc_priv *p = qdisc_priv(sch); + struct Qdisc *cl_q = p->qdisc[cl - 1]; + + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), + d, cl_q->cpu_bstats, &cl_q->bstats) < 0 || + qdisc_qstats_copy(d, cl_q) < 0) + return -1; + + return 0; +} + +static void oob_walk(struct Qdisc *sch, struct qdisc_walker *arg) +{ + int band; + + if (arg->stop) + return; + + for (band = 0; band < 2; band++) { + if (arg->count < arg->skip) { + arg->count++; + continue; + } + if (arg->fn(sch, band + 1, arg) < 0) { + arg->stop = 1; + break; + } + arg->count++; + } +} + +static unsigned long oob_tcf_bind(struct Qdisc *sch, unsigned long parent, + u32 classid) +{ + return oob_find(sch, classid); +} + +static void oob_tcf_unbind(struct Qdisc *q, unsigned long cl) +{ +} + +static struct tcf_block *oob_tcf_block(struct Qdisc *sch, unsigned long cl, + struct netlink_ext_ack *extack) +{ + struct oob_qdisc_priv *p = qdisc_priv(sch); + + if (cl) + return NULL; + + return p->block; +} + +static const struct Qdisc_class_ops oob_class_ops = { + .graft = oob_graft, + .leaf = oob_leaf, + .find = oob_find, + .walk = oob_walk, + .dump = oob_dump_class, + .dump_stats = oob_dump_class_stats, + .tcf_block = oob_tcf_block, + .bind_tcf = oob_tcf_bind, + .unbind_tcf = oob_tcf_unbind, +}; + +static struct Qdisc_ops oob_qdisc_ops __read_mostly = { + .cl_ops = &oob_class_ops, + .id = "oob", + .priv_size = sizeof(struct oob_qdisc_priv), + .enqueue = oob_enqueue, + .dequeue = oob_dequeue, + .peek = oob_peek, + .init = oob_init, + .reset = oob_reset, + .destroy = oob_destroy, + .change = oob_tune, + .dump = oob_dump, + .owner = THIS_MODULE, +}; + +static int __init oob_module_init(void) +{ + return register_qdisc(&oob_qdisc_ops); +} + +static void __exit oob_module_exit(void) +{ + unregister_qdisc(&oob_qdisc_ops); +} + +module_init(oob_module_init) +module_exit(oob_module_exit) + +MODULE_LICENSE("GPL"); diff --git a/kernel/net/socket.c b/kernel/net/socket.c index 938ab3a..ac19dee 100644 --- a/kernel/net/socket.c +++ b/kernel/net/socket.c @@ -141,6 +141,95 @@ #define sock_show_fdinfo NULL #endif +#ifdef CONFIG_NET_OOB + +static inline bool sock_oob_capable(struct socket *sock) +{ + return sock->sk && sock->sk->oob_data; +} + +int __weak sock_oob_attach(struct socket *sock) +{ + return 0; +} + +void __weak sock_oob_detach(struct socket *sock) +{ +} + +int __weak sock_oob_bind(struct socket *sock, struct sockaddr *addr, int len) +{ + return 0; +} + +long __weak sock_inband_ioctl_redirect(struct socket *sock, + unsigned int cmd, unsigned long arg) +{ + return -ENOTTY; +} + +long __weak sock_oob_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return -ENOTTY; +} + +ssize_t __weak sock_oob_write(struct file *filp, + const char __user *u_buf, size_t count) +{ + return -EOPNOTSUPP; +} + +ssize_t __weak sock_oob_read(struct file *filp, + char __user *u_buf, size_t count) +{ + return -EOPNOTSUPP; +} + +__poll_t __weak sock_oob_poll(struct file *filp, + struct oob_poll_wait *wait) +{ + return -EOPNOTSUPP; +} + +#define compat_sock_oob_ioctl compat_ptr_oob_ioctl + +#else /* !CONFIG_NET_OOB */ + +static inline bool sock_oob_capable(struct socket *sock) +{ 
+ return false; +} + +static inline int sock_oob_attach(struct socket *sock) +{ + return 0; +} + +static inline void sock_oob_detach(struct socket *sock) +{ +} + +static int sock_oob_bind(struct socket *sock, + struct sockaddr *addr, int len) +{ + return 0; +} + +static inline long sock_inband_ioctl_redirect(struct socket *sock, + unsigned int cmd, unsigned long arg) +{ + return -ENOTTY; +} + +#define sock_oob_ioctl NULL +#define sock_oob_write NULL +#define sock_oob_read NULL +#define sock_oob_poll NULL +#define compat_sock_oob_ioctl NULL + +#endif /* !CONFIG_NET_OOB */ + /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear * in the operation structures but are done directly via the socketcall() multiplexor. @@ -153,8 +242,13 @@ .write_iter = sock_write_iter, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, + .oob_ioctl = sock_oob_ioctl, + .oob_write = sock_oob_write, + .oob_read = sock_oob_read, + .oob_poll = sock_oob_poll, #ifdef CONFIG_COMPAT .compat_ioctl = compat_sock_ioctl, + .compat_oob_ioctl = compat_sock_oob_ioctl, #endif .mmap = sock_mmap, .release = sock_close, @@ -427,7 +521,7 @@ static int sock_map_fd(struct socket *sock, int flags) { struct file *newfile; - int fd = get_unused_fd_flags(flags); + int fd = get_unused_fd_flags(flags), ret; if (unlikely(fd < 0)) { sock_release(sock); return fd; @@ -435,6 +529,14 @@ newfile = sock_alloc_file(sock, flags, NULL); if (!IS_ERR(newfile)) { + if (IS_ENABLED(CONFIG_NET_OOB) && (flags & SOCK_OOB)) { + ret = sock_oob_attach(sock); + if (ret < 0) { + put_unused_fd(fd); + sock_release(sock); + return ret; + } + } fd_install(fd, newfile); return fd; } @@ -589,6 +691,9 @@ static void __sock_release(struct socket *sock, struct inode *inode) { + if (sock_oob_capable(sock)) + sock_oob_detach(sock); + if (sock->ops) { struct module *owner = sock->ops->owner; @@ -1185,6 +1290,11 @@ false); break; default: + if (sock_oob_capable(sock)) { + err = sock_inband_ioctl_redirect(sock, cmd, arg); + if (!err || err != -ENOIOCTLCMD) + break; + } err = sock_do_ioctl(net, sock, cmd, arg); break; } @@ -1498,10 +1608,18 @@ BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK); + BUILD_BUG_ON(SOCK_OOB & SOCK_TYPE_MASK); flags = type & ~SOCK_TYPE_MASK; - if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) + if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK | SOCK_OOB)) return -EINVAL; + /* + * Not every protocol family supports out-of-band operations, + * however PF_OOB certainly does: force SOCK_OOB in, so that + * sock_oob_attach() runs for this socket. 
+ */ + if (IS_ENABLED(CONFIG_NET_OOB) && family == AF_OOB) + flags |= SOCK_OOB; type &= SOCK_TYPE_MASK; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) @@ -1511,7 +1629,7 @@ if (retval < 0) return retval; - return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK)); + return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK | O_OOB)); } SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) @@ -1642,6 +1760,9 @@ err = security_socket_bind(sock, (struct sockaddr *)&address, addrlen); + if (sock_oob_capable(sock) && !err) + err = sock_oob_bind(sock, (struct sockaddr *) + &address, addrlen); if (!err) err = sock->ops->bind(sock, (struct sockaddr *) diff --git a/kernel/scripts/mkcompile_h b/kernel/scripts/mkcompile_h index a72b154..5a34fe4 100755 --- a/kernel/scripts/mkcompile_h +++ b/kernel/scripts/mkcompile_h @@ -6,8 +6,9 @@ SMP=$3 PREEMPT=$4 PREEMPT_RT=$5 -CC_VERSION="$6" -LD=$7 +IRQPIPE=$6 +CC_VERSION="$7" +LD=$8 vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } @@ -45,6 +46,7 @@ if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi +if [ -n "$IRQPIPE" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS IRQPIPE"; fi # Truncate to maximum length UTS_LEN=64 diff --git a/kernel/security/selinux/hooks.c b/kernel/security/selinux/hooks.c index 1235854..765efd0 100644 --- a/kernel/security/selinux/hooks.c +++ b/kernel/security/selinux/hooks.c @@ -1280,7 +1280,9 @@ return SECCLASS_SMC_SOCKET; case PF_XDP: return SECCLASS_XDP_SOCKET; -#if PF_MAX > 45 + case PF_OOB: + return SECCLASS_OOB_SOCKET; +#if PF_MAX > 46 #error New address family defined, please update this function. #endif } diff --git a/kernel/security/selinux/include/classmap.h b/kernel/security/selinux/include/classmap.h index 955e8c8..79ea017 100644 --- a/kernel/security/selinux/include/classmap.h +++ b/kernel/security/selinux/include/classmap.h @@ -247,6 +247,8 @@ NULL } }, { "xdp_socket", { COMMON_SOCK_PERMS, NULL } }, + { "oob_socket", + { COMMON_SOCK_PERMS, NULL } }, { "perf_event", { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } }, { "anon_inode", @@ -254,6 +256,6 @@ { NULL } }; -#if PF_MAX > 45 +#if PF_MAX > 46 #error New address family defined, please update secclass_map. #endif diff --git a/kernel/tools/perf/trace/beauty/include/linux/socket.h b/kernel/tools/perf/trace/beauty/include/linux/socket.h index 9aa530d..93b104e 100644 --- a/kernel/tools/perf/trace/beauty/include/linux/socket.h +++ b/kernel/tools/perf/trace/beauty/include/linux/socket.h @@ -223,8 +223,9 @@ * reuses AF_INET address family */ #define AF_XDP 44 /* XDP sockets */ +#define AF_OOB 45 /* Out-of-band domain sockets */ -#define AF_MAX 45 /* For now.. */ +#define AF_MAX 46 /* For now.. */ /* Protocol families, same as address families. 
*/ #define PF_UNSPEC AF_UNSPEC diff --git a/kernel/vmlinux.symvers b/kernel/vmlinux.symvers index 62cce50..e37a20b 100644 --- a/kernel/vmlinux.symvers +++ b/kernel/vmlinux.symvers @@ -46,6 +46,7 @@ 0x00000000 ZSTD_initCStream_usingCDict vmlinux EXPORT_SYMBOL 0x00000000 __kfifo_alloc vmlinux EXPORT_SYMBOL 0x00000000 reclaim_shmem_address_space vmlinux EXPORT_SYMBOL_GPL +0x00000000 irq_inject_pipeline vmlinux EXPORT_SYMBOL_GPL 0x00000000 nf_log_register vmlinux EXPORT_SYMBOL 0x00000000 serial8250_do_shutdown vmlinux EXPORT_SYMBOL_GPL 0x00000000 phy_reset vmlinux EXPORT_SYMBOL_GPL @@ -90,6 +91,7 @@ 0x00000000 fb_validate_mode vmlinux EXPORT_SYMBOL 0x00000000 errseq_sample vmlinux EXPORT_SYMBOL 0x00000000 __unregister_chrdev vmlinux EXPORT_SYMBOL +0x00000000 xntimer_get_date vmlinux EXPORT_SYMBOL_GPL 0x00000000 tracing_alloc_snapshot vmlinux EXPORT_SYMBOL_GPL 0x00000000 srcu_notifier_chain_register vmlinux EXPORT_SYMBOL_GPL 0x00000000 g_token_size vmlinux EXPORT_SYMBOL_GPL @@ -98,6 +100,7 @@ 0x00000000 __traceiter_vb2_buf_done vmlinux EXPORT_SYMBOL_GPL 0x00000000 badblocks_store vmlinux EXPORT_SYMBOL_GPL 0x00000000 filp_open_block vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnheap_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 raw_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL 0x00000000 nf_ct_delete vmlinux EXPORT_SYMBOL_GPL 0x00000000 __dev_direct_xmit vmlinux EXPORT_SYMBOL @@ -170,6 +173,7 @@ 0x00000000 tty_register_device vmlinux EXPORT_SYMBOL 0x00000000 simple_write_to_buffer vmlinux EXPORT_SYMBOL 0x00000000 __srcu_read_unlock vmlinux EXPORT_SYMBOL_GPL +0x00000000 __hybrid_spin_unlock vmlinux EXPORT_SYMBOL 0x00000000 reset_devices vmlinux EXPORT_SYMBOL 0x00000000 of_find_all_nodes vmlinux EXPORT_SYMBOL 0x00000000 mmc_app_cmd vmlinux EXPORT_SYMBOL_GPL @@ -225,6 +229,7 @@ 0x00000000 scatterwalk_copychunks vmlinux EXPORT_SYMBOL_GPL 0x00000000 pin_user_pages_unlocked vmlinux EXPORT_SYMBOL 0x00000000 ktime_get_real_fast_ns vmlinux EXPORT_SYMBOL_GPL +0x00000000 stage_disabled vmlinux EXPORT_SYMBOL_GPL 0x00000000 devm_irq_alloc_generic_chip vmlinux EXPORT_SYMBOL_GPL 0x00000000 check_preempt_curr vmlinux EXPORT_SYMBOL_GPL 0x00000000 rpc_free vmlinux EXPORT_SYMBOL_GPL @@ -280,6 +285,7 @@ 0x00000000 __kfifo_dma_in_prepare_r vmlinux EXPORT_SYMBOL 0x00000000 blk_mq_stop_hw_queues vmlinux EXPORT_SYMBOL 0x00000000 __fat_fs_error vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_toseq_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 bpf_preload_ops vmlinux EXPORT_SYMBOL_GPL 0x00000000 prof_on vmlinux EXPORT_SYMBOL_GPL 0x00000000 devm_free_irq vmlinux EXPORT_SYMBOL @@ -308,6 +314,7 @@ 0x00000000 devm_ioremap_resource vmlinux EXPORT_SYMBOL 0x00000000 sg_miter_start vmlinux EXPORT_SYMBOL 0x00000000 disk_has_partitions vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnthread_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 freq_qos_remove_request vmlinux EXPORT_SYMBOL_GPL 0x00000000 udp_encap_disable vmlinux EXPORT_SYMBOL 0x00000000 tso_build_data vmlinux EXPORT_SYMBOL @@ -315,7 +322,6 @@ 0x00000000 snd_card_free vmlinux EXPORT_SYMBOL 0x00000000 dm_tm_issue_prefetches vmlinux EXPORT_SYMBOL_GPL 0x00000000 v4l2_m2m_decoder_cmd vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_disable_interrupts vmlinux EXPORT_SYMBOL 0x00000000 dma_buf_map_attachment vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_mali_pm_status vmlinux EXPORT_SYMBOL_GPL 0x00000000 tty_mode_ioctl vmlinux EXPORT_SYMBOL_GPL @@ -324,11 +330,11 @@ 0x00000000 debugfs_lookup_and_remove vmlinux EXPORT_SYMBOL_GPL 0x00000000 load_nls vmlinux EXPORT_SYMBOL 
VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 nfs_show_stats vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnvfile_get_blob vmlinux EXPORT_SYMBOL_GPL 0x00000000 rfkill_set_hw_state vmlinux EXPORT_SYMBOL 0x00000000 xfrm_flush_gc vmlinux EXPORT_SYMBOL 0x00000000 rtc_year_days vmlinux EXPORT_SYMBOL 0x00000000 ehci_cf_port_reset_rwsem vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath_hw_keysetmac vmlinux EXPORT_SYMBOL 0x00000000 ata_host_suspend vmlinux EXPORT_SYMBOL_GPL 0x00000000 ata_link_offline vmlinux EXPORT_SYMBOL_GPL 0x00000000 scsi_normalize_sense vmlinux EXPORT_SYMBOL @@ -368,6 +374,7 @@ 0x00000000 seq_escape_mem_ascii vmlinux EXPORT_SYMBOL 0x00000000 vmf_insert_mixed_prot vmlinux EXPORT_SYMBOL 0x00000000 __SCK__tp_func_rpm_suspend vmlinux EXPORT_SYMBOL_GPL +0x00000000 __hybrid_spin_lock vmlinux EXPORT_SYMBOL 0x00000000 xfrm_state_add vmlinux EXPORT_SYMBOL 0x00000000 ipt_unregister_table_exit vmlinux EXPORT_SYMBOL 0x00000000 ping_rcv vmlinux EXPORT_SYMBOL_GPL @@ -375,11 +382,11 @@ 0x00000000 of_property_read_u64_index vmlinux EXPORT_SYMBOL_GPL 0x00000000 of_property_read_u32_index vmlinux EXPORT_SYMBOL_GPL 0x00000000 v4l2_ctrl_notify vmlinux EXPORT_SYMBOL -0x00000000 ar9003_mci_send_wlan_channels vmlinux EXPORT_SYMBOL 0x00000000 cn_del_callback vmlinux EXPORT_SYMBOL_GPL 0x00000000 regulator_is_enabled vmlinux EXPORT_SYMBOL_GPL 0x00000000 iov_iter_bvec vmlinux EXPORT_SYMBOL 0x00000000 iov_iter_kvec vmlinux EXPORT_SYMBOL +0x00000000 __xnselect_signal vmlinux EXPORT_SYMBOL_GPL 0x00000000 rt5640_sel_asrc_clk_src vmlinux EXPORT_SYMBOL_GPL 0x00000000 led_get_default_pattern vmlinux EXPORT_SYMBOL_GPL 0x00000000 dvb_register_device vmlinux EXPORT_SYMBOL @@ -444,11 +451,11 @@ 0x00000000 drm_dp_atomic_release_vcpi_slots vmlinux EXPORT_SYMBOL 0x00000000 pci_bus_write_config_dword vmlinux EXPORT_SYMBOL 0x00000000 net_prio_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL +0x00000000 __hybrid_spin_trylock vmlinux EXPORT_SYMBOL 0x00000000 dst_cache_get_ip6 vmlinux EXPORT_SYMBOL_GPL 0x00000000 netdev_lower_state_changed vmlinux EXPORT_SYMBOL 0x00000000 snd_pcm_lib_default_mmap vmlinux EXPORT_SYMBOL_GPL 0x00000000 iio_buffer_get vmlinux EXPORT_SYMBOL_GPL -0x00000000 ar9003_mci_cleanup vmlinux EXPORT_SYMBOL 0x00000000 drm_dp_aux_init vmlinux EXPORT_SYMBOL 0x00000000 drm_atomic_helper_prepare_planes vmlinux EXPORT_SYMBOL 0x00000000 tty_hung_up_p vmlinux EXPORT_SYMBOL @@ -461,7 +468,6 @@ 0x00000000 dev_pm_opp_get_voltage vmlinux EXPORT_SYMBOL_GPL 0x00000000 v4l2_clk_put vmlinux EXPORT_SYMBOL 0x00000000 v4l2_subdev_get_fwnode_pad_1_to_1 vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_setmcastfilter vmlinux EXPORT_SYMBOL 0x00000000 scsi_dev_info_list_add_keyed vmlinux EXPORT_SYMBOL 0x00000000 syscore_suspend vmlinux EXPORT_SYMBOL_GPL 0x00000000 pcim_enable_device vmlinux EXPORT_SYMBOL @@ -475,6 +481,8 @@ 0x00000000 proc_mkdir_data vmlinux EXPORT_SYMBOL_GPL 0x00000000 mpage_writepages vmlinux EXPORT_SYMBOL 0x00000000 write_one_page vmlinux EXPORT_SYMBOL +0x00000000 xnthread_harden vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnsynch_try_acquire vmlinux EXPORT_SYMBOL_GPL 0x00000000 async_synchronize_cookie vmlinux EXPORT_SYMBOL_GPL 0x00000000 cfg80211_notify_new_peer_candidate vmlinux EXPORT_SYMBOL 0x00000000 udp6_set_csum vmlinux EXPORT_SYMBOL @@ -557,7 +565,6 @@ 0x00000000 clocksource_mmio_readl_up vmlinux EXPORT_SYMBOL_GPL 0x00000000 mmc_can_discard vmlinux EXPORT_SYMBOL 0x00000000 rk628_control_assert vmlinux EXPORT_SYMBOL -0x00000000 ath_is_mybeacon vmlinux EXPORT_SYMBOL 0x00000000 scsi_flush_work 
vmlinux EXPORT_SYMBOL_GPL 0x00000000 idr_destroy vmlinux EXPORT_SYMBOL 0x00000000 ida_destroy vmlinux EXPORT_SYMBOL @@ -626,6 +633,7 @@ 0x00000000 idr_alloc vmlinux EXPORT_SYMBOL_GPL 0x00000000 jbd2_journal_update_sb_errno vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 vfs_unlink vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver +0x00000000 xnsched_set_policy vmlinux EXPORT_SYMBOL_GPL 0x00000000 get_compat_sigset vmlinux EXPORT_SYMBOL_GPL 0x00000000 get_task_exe_file vmlinux EXPORT_SYMBOL 0x00000000 ieee80211_stop_tx_ba_cb_irqsafe vmlinux EXPORT_SYMBOL @@ -638,7 +646,6 @@ 0x00000000 snd_dmaengine_pcm_open_request_chan vmlinux EXPORT_SYMBOL_GPL 0x00000000 iio_trigger_generic_data_rdy_poll vmlinux EXPORT_SYMBOL 0x00000000 of_get_child_by_name vmlinux EXPORT_SYMBOL -0x00000000 dt_init_idle_driver vmlinux EXPORT_SYMBOL_GPL 0x00000000 cpufreq_frequency_table_get_index vmlinux EXPORT_SYMBOL_GPL 0x00000000 phy_aneg_done vmlinux EXPORT_SYMBOL 0x00000000 key_revoke vmlinux EXPORT_SYMBOL @@ -666,8 +673,6 @@ 0x00000000 ieee80211_sched_scan_stopped vmlinux EXPORT_SYMBOL 0x00000000 dst_cache_set_ip6 vmlinux EXPORT_SYMBOL_GPL 0x00000000 rtnl_lock vmlinux EXPORT_SYMBOL -0x00000000 rockchip_dmcfreq_vop_bandwidth_update vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_addrxbuf_edma vmlinux EXPORT_SYMBOL 0x00000000 ata_scsi_slave_destroy vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_crtc_vblank_restore vmlinux EXPORT_SYMBOL 0x00000000 __drm_atomic_helper_connector_reset vmlinux EXPORT_SYMBOL @@ -685,7 +690,6 @@ 0x00000000 udp_push_pending_frames vmlinux EXPORT_SYMBOL 0x00000000 llc_add_pack vmlinux EXPORT_SYMBOL 0x00000000 hidinput_connect vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_deinit vmlinux EXPORT_SYMBOL 0x00000000 __platform_driver_probe vmlinux EXPORT_SYMBOL_GPL 0x00000000 kbase_csf_find_queue_group vmlinux EXPORT_SYMBOL 0x00000000 clk_unregister_fixed_factor vmlinux EXPORT_SYMBOL_GPL @@ -709,12 +713,13 @@ 0x00000000 ieee80211_nan_func_terminated vmlinux EXPORT_SYMBOL 0x00000000 dev_get_by_index vmlinux EXPORT_SYMBOL 0x00000000 hid_compare_device_paths vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_abortpcurecv vmlinux EXPORT_SYMBOL 0x00000000 dma_buf_unmap_attachment vmlinux EXPORT_SYMBOL_GPL 0x00000000 serial8250_rpm_get_tx vmlinux EXPORT_SYMBOL_GPL 0x00000000 xa_delete_node vmlinux EXPORT_SYMBOL_GPL 0x00000000 match_strlcpy vmlinux EXPORT_SYMBOL +0x00000000 rtdm_nrtsig_pend vmlinux EXPORT_SYMBOL_GPL 0x00000000 __bpf_call_base vmlinux EXPORT_SYMBOL_GPL +0x00000000 tick_uninstall_proxy vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_freq_khz_to_channel vmlinux EXPORT_SYMBOL 0x00000000 inet6_hash vmlinux EXPORT_SYMBOL_GPL 0x00000000 init_net vmlinux EXPORT_SYMBOL @@ -737,7 +742,6 @@ 0x00000000 swake_up_one vmlinux EXPORT_SYMBOL 0x00000000 snd_soc_calc_bclk vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_interval_ranges vmlinux EXPORT_SYMBOL -0x00000000 cpuidle_register_governor vmlinux EXPORT_SYMBOL_GPL 0x00000000 dma_fence_remove_callback vmlinux EXPORT_SYMBOL 0x00000000 drm_atomic_helper_connector_tv_reset vmlinux EXPORT_SYMBOL 0x00000000 gpiod_direction_output_raw vmlinux EXPORT_SYMBOL_GPL @@ -832,6 +836,8 @@ 0x00000000 cdc_ncm_tx_fixup vmlinux EXPORT_SYMBOL_GPL 0x00000000 nanddev_bbt_set_block_status vmlinux EXPORT_SYMBOL_GPL 0x00000000 nanddev_bbt_get_block_status vmlinux EXPORT_SYMBOL_GPL +0x00000000 at24_mac3_read vmlinux EXPORT_SYMBOL +0x00000000 at24_mac2_read vmlinux EXPORT_SYMBOL 0x00000000 at24_mac1_read vmlinux EXPORT_SYMBOL 
0x00000000 dev_pm_qos_update_request vmlinux EXPORT_SYMBOL_GPL 0x00000000 generic_device_group vmlinux EXPORT_SYMBOL_GPL @@ -972,7 +978,6 @@ 0x00000000 mmc_cqe_post_req vmlinux EXPORT_SYMBOL 0x00000000 ptp_schedule_worker vmlinux EXPORT_SYMBOL 0x00000000 ohci_hub_status_data vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_check_alive vmlinux EXPORT_SYMBOL 0x00000000 dma_buf_cache_map_attachment vmlinux EXPORT_SYMBOL 0x00000000 devm_device_add_group vmlinux EXPORT_SYMBOL_GPL 0x00000000 kbase_context_mmap vmlinux EXPORT_SYMBOL @@ -996,8 +1001,6 @@ 0x00000000 register_fib_notifier vmlinux EXPORT_SYMBOL 0x00000000 rt5645_sel_asrc_clk_src vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_soc_component_nc_pin_unlocked vmlinux EXPORT_SYMBOL_GPL -0x00000000 cpuidle_disable_device vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_setopmode vmlinux EXPORT_SYMBOL 0x00000000 drm_client_init vmlinux EXPORT_SYMBOL 0x00000000 drm_atomic_helper_check vmlinux EXPORT_SYMBOL 0x00000000 serial8250_resume_port vmlinux EXPORT_SYMBOL @@ -1026,6 +1029,7 @@ 0x00000000 LZ4_setStreamDecode vmlinux EXPORT_SYMBOL 0x00000000 del_gendisk vmlinux EXPORT_SYMBOL 0x00000000 block_write_full_page vmlinux EXPORT_SYMBOL +0x00000000 xntimer_set_gravity vmlinux EXPORT_SYMBOL_GPL 0x00000000 cpufreq_this_cpu_can_update vmlinux EXPORT_SYMBOL_GPL 0x00000000 xfrm_register_type vmlinux EXPORT_SYMBOL 0x00000000 nf_ct_invert_tuple vmlinux EXPORT_SYMBOL_GPL @@ -1056,6 +1060,7 @@ 0x00000000 drm_property_create_bool vmlinux EXPORT_SYMBOL 0x00000000 regulator_map_voltage_iterate vmlinux EXPORT_SYMBOL_GPL 0x00000000 list_lru_isolate vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnheap_alloc vmlinux EXPORT_SYMBOL_GPL 0x00000000 bpf_prog_alloc vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_tdls_oper_request vmlinux EXPORT_SYMBOL 0x00000000 km_new_mapping vmlinux EXPORT_SYMBOL @@ -1075,6 +1080,7 @@ 0x00000000 prandom_seed_full_state vmlinux EXPORT_SYMBOL 0x00000000 blk_queue_write_cache vmlinux EXPORT_SYMBOL_GPL 0x00000000 generic_block_bmap vmlinux EXPORT_SYMBOL +0x00000000 rtdm_fd_recvmsg vmlinux EXPORT_SYMBOL_GPL 0x00000000 __symbol_get vmlinux EXPORT_SYMBOL_GPL 0x00000000 ktime_get_boot_fast_ns vmlinux EXPORT_SYMBOL_GPL 0x00000000 cpuhp_tasks_frozen vmlinux EXPORT_SYMBOL_GPL @@ -1119,6 +1125,7 @@ 0x00000000 xa_destroy vmlinux EXPORT_SYMBOL 0x00000000 bdev_read_only vmlinux EXPORT_SYMBOL 0x00000000 put_pages_list vmlinux EXPORT_SYMBOL +0x00000000 cobalt_cpu_affinity vmlinux EXPORT_SYMBOL_GPL 0x00000000 from_kprojid vmlinux EXPORT_SYMBOL 0x00000000 skb_copy_datagram_iter vmlinux EXPORT_SYMBOL 0x00000000 sock_wfree vmlinux EXPORT_SYMBOL @@ -1130,6 +1137,7 @@ 0x00000000 utf8nfdicf vmlinux EXPORT_SYMBOL 0x00000000 utf8version_is_supported vmlinux EXPORT_SYMBOL 0x00000000 __posix_acl_create vmlinux EXPORT_SYMBOL +0x00000000 xnthread_resume vmlinux EXPORT_SYMBOL_GPL 0x00000000 round_jiffies vmlinux EXPORT_SYMBOL_GPL 0x00000000 __task_rq_lock vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet_proto_csum_replace16 vmlinux EXPORT_SYMBOL @@ -1156,7 +1164,6 @@ 0x00000000 nf_confirm vmlinux EXPORT_SYMBOL_GPL 0x00000000 mm_account_pinned_pages vmlinux EXPORT_SYMBOL_GPL 0x00000000 devfreq_unregister_opp_notifier vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_stop_dma_queue vmlinux EXPORT_SYMBOL 0x00000000 rockchip_rkvdec_driver vmlinux EXPORT_SYMBOL 0x00000000 no_hash_pointers vmlinux EXPORT_SYMBOL_GPL 0x00000000 strpbrk vmlinux EXPORT_SYMBOL @@ -1191,11 +1198,11 @@ 0x00000000 regulator_set_mode vmlinux EXPORT_SYMBOL_GPL 0x00000000 lzo1x_1_compress vmlinux EXPORT_SYMBOL_GPL 0x00000000 
set_disk_ro vmlinux EXPORT_SYMBOL +0x00000000 __hybrid_spin_lock_nested vmlinux EXPORT_SYMBOL 0x00000000 svc_alien_sock vmlinux EXPORT_SYMBOL_GPL 0x00000000 xfrm6_rcv_tnl vmlinux EXPORT_SYMBOL 0x00000000 nf_ct_zone_dflt vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_soc_limit_volume vmlinux EXPORT_SYMBOL_GPL -0x00000000 rockchip_dmcfreq_lock vmlinux EXPORT_SYMBOL 0x00000000 dm_read_arg_group vmlinux EXPORT_SYMBOL 0x00000000 rkcif_subdev_driver vmlinux EXPORT_SYMBOL 0x00000000 bus_find_device vmlinux EXPORT_SYMBOL_GPL @@ -1225,15 +1232,14 @@ 0x00000000 iio_device_attach_buffer vmlinux EXPORT_SYMBOL_GPL 0x00000000 cec_notifier_set_phys_addr vmlinux EXPORT_SYMBOL_GPL 0x00000000 typec_set_data_role vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_btcoex_init_2wire vmlinux EXPORT_SYMBOL 0x00000000 kbase_mem_pool_set_max_size vmlinux EXPORT_SYMBOL 0x00000000 drm_connector_list_iter_begin vmlinux EXPORT_SYMBOL 0x00000000 drm_helper_mode_fill_fb_struct vmlinux EXPORT_SYMBOL 0x00000000 regulator_bulk_enable vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_block_rq_insert vmlinux EXPORT_SYMBOL_GPL 0x00000000 fuse_conn_put vmlinux EXPORT_SYMBOL_GPL +0x00000000 __xntimer_stop vmlinux EXPORT_SYMBOL_GPL 0x00000000 devm_irq_setup_generic_chip vmlinux EXPORT_SYMBOL_GPL -0x00000000 irq_get_irq_data vmlinux EXPORT_SYMBOL_GPL 0x00000000 atomic_notifier_call_chain_robust vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet_frag_pull_head vmlinux EXPORT_SYMBOL 0x00000000 tcp_seq_next vmlinux EXPORT_SYMBOL @@ -1267,6 +1273,7 @@ 0x00000000 __kernel_write vmlinux EXPORT_SYMBOL_GPL 0x00000000 cma_get_name vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_kmalloc_node vmlinux EXPORT_SYMBOL +0x00000000 xnsynch_flush vmlinux EXPORT_SYMBOL_GPL 0x00000000 kthread_cancel_work_sync vmlinux EXPORT_SYMBOL_GPL 0x00000000 cfg80211_update_owe_info_event vmlinux EXPORT_SYMBOL 0x00000000 svc_fill_write_vector vmlinux EXPORT_SYMBOL_GPL @@ -1274,19 +1281,15 @@ 0x00000000 release_sock vmlinux EXPORT_SYMBOL 0x00000000 dev_pm_opp_disable vmlinux EXPORT_SYMBOL_GPL 0x00000000 __i2c_first_dynamic_bus_num vmlinux EXPORT_SYMBOL_GPL -0x00000000 ar9003_mci_set_bt_version vmlinux EXPORT_SYMBOL -0x00000000 ar9003_is_paprd_enabled vmlinux EXPORT_SYMBOL 0x00000000 phy_validate_pause vmlinux EXPORT_SYMBOL 0x00000000 ipvlan_link_setup vmlinux EXPORT_SYMBOL_GPL 0x00000000 pm_clk_suspend vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_i2c_encoder_init vmlinux EXPORT_SYMBOL -0x00000000 log_threaded_irq_wakeup_reason vmlinux EXPORT_SYMBOL_GPL 0x00000000 __netlink_ns_capable vmlinux EXPORT_SYMBOL 0x00000000 __sk_receive_skb vmlinux EXPORT_SYMBOL 0x00000000 __snd_rawmidi_transmit_peek vmlinux EXPORT_SYMBOL 0x00000000 snd_pcm_hw_constraint_msbits vmlinux EXPORT_SYMBOL 0x00000000 usbnet_status_stop vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_btcoex_set_concur_txprio vmlinux EXPORT_SYMBOL 0x00000000 mdiobus_setup_mdiodev_from_board_info vmlinux EXPORT_SYMBOL 0x00000000 clk_set_rate_range vmlinux EXPORT_SYMBOL_GPL 0x00000000 sync_inode vmlinux EXPORT_SYMBOL @@ -1303,6 +1306,7 @@ 0x00000000 crypto_inst_setname vmlinux EXPORT_SYMBOL_GPL 0x00000000 __invalidate_device vmlinux EXPORT_SYMBOL 0x00000000 seq_put_decimal_ull vmlinux EXPORT_SYMBOL +0x00000000 cobalt_remove_config_chain vmlinux EXPORT_SYMBOL_GPL 0x00000000 rcu_barrier_tasks_rude vmlinux EXPORT_SYMBOL_GPL 0x00000000 nf_ct_remove_expect vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_midi_event_reset_encode vmlinux EXPORT_SYMBOL @@ -1357,6 +1361,7 @@ 0x00000000 sata_async_notification vmlinux EXPORT_SYMBOL_GPL 0x00000000 __bforget 
vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 generic_file_write_iter vmlinux EXPORT_SYMBOL +0x00000000 cobalt_vfroot vmlinux EXPORT_SYMBOL_GPL 0x00000000 param_set_ullong vmlinux EXPORT_SYMBOL 0x00000000 xdr_reserve_space vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet6_add_protocol vmlinux EXPORT_SYMBOL @@ -1380,6 +1385,7 @@ 0x00000000 get_random_bytes vmlinux EXPORT_SYMBOL 0x00000000 gic_pmr_sync vmlinux EXPORT_SYMBOL 0x00000000 ZSTD_findFrameCompressedSize vmlinux EXPORT_SYMBOL +0x00000000 xnthread_prepare_wait vmlinux EXPORT_SYMBOL_GPL 0x00000000 wakeme_after_rcu vmlinux EXPORT_SYMBOL_GPL 0x00000000 probe_irq_off vmlinux EXPORT_SYMBOL 0x00000000 ieee80211_alloc_hw_nm vmlinux EXPORT_SYMBOL @@ -1431,6 +1437,7 @@ 0x00000000 __arch_copy_from_user vmlinux EXPORT_SYMBOL 0x00000000 __page_mapcount vmlinux EXPORT_SYMBOL_GPL 0x00000000 mempool_kfree vmlinux EXPORT_SYMBOL +0x00000000 __rtdm_dev_open vmlinux EXPORT_SYMBOL_GPL 0x00000000 nsecs_to_jiffies64 vmlinux EXPORT_SYMBOL 0x00000000 of_graph_get_remote_port_parent vmlinux EXPORT_SYMBOL 0x00000000 usb_wakeup_enabled_descendants vmlinux EXPORT_SYMBOL_GPL @@ -1452,7 +1459,6 @@ 0x00000000 iio_enum_read vmlinux EXPORT_SYMBOL_GPL 0x00000000 vb2_wait_for_all_buffers vmlinux EXPORT_SYMBOL_GPL 0x00000000 v4l_disable_media_source vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_rxprocdesc vmlinux EXPORT_SYMBOL 0x00000000 phy_register_fixup vmlinux EXPORT_SYMBOL 0x00000000 device_bind_driver vmlinux EXPORT_SYMBOL_GPL 0x00000000 device_store_ulong vmlinux EXPORT_SYMBOL_GPL @@ -1460,6 +1466,7 @@ 0x00000000 radix_tree_maybe_preload vmlinux EXPORT_SYMBOL 0x00000000 iomap_writepage vmlinux EXPORT_SYMBOL_GPL 0x00000000 file_update_time vmlinux EXPORT_SYMBOL +0x00000000 __rtdm_dev_socket vmlinux EXPORT_SYMBOL_GPL 0x00000000 kthread_bind_mask vmlinux EXPORT_SYMBOL_GPL 0x00000000 regulatory_set_wiphy_regd_sync_rtnl vmlinux EXPORT_SYMBOL 0x00000000 nf_logger_put vmlinux EXPORT_SYMBOL_GPL @@ -1479,6 +1486,7 @@ 0x00000000 perf_trace_run_bpf_submit vmlinux EXPORT_SYMBOL_GPL 0x00000000 relay_file_operations vmlinux EXPORT_SYMBOL_GPL 0x00000000 init_timer_key vmlinux EXPORT_SYMBOL +0x00000000 irq_pipeline_oopsing vmlinux EXPORT_SYMBOL_GPL 0x00000000 _raw_read_unlock vmlinux EXPORT_SYMBOL 0x00000000 rpc_wake_up_status vmlinux EXPORT_SYMBOL_GPL 0x00000000 dev_set_mac_address_user vmlinux EXPORT_SYMBOL @@ -1714,6 +1722,7 @@ 0x00000000 mark_mounts_for_expiry vmlinux EXPORT_SYMBOL_GPL 0x00000000 bdi_put vmlinux EXPORT_SYMBOL 0x00000000 file_check_and_advance_wb_err vmlinux EXPORT_SYMBOL +0x00000000 cobalt_thread_find vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_ipi_raise vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet_csk_reqsk_queue_drop_and_put vmlinux EXPORT_SYMBOL 0x00000000 page_pool_update_nid vmlinux EXPORT_SYMBOL @@ -1764,7 +1773,6 @@ 0x00000000 skb_coalesce_rx_frag vmlinux EXPORT_SYMBOL 0x00000000 rk_cryptodev_unregister_dev vmlinux EXPORT_SYMBOL_GPL 0x00000000 power_supply_am_i_supplied vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_reset_tsf vmlinux EXPORT_SYMBOL 0x00000000 regcache_mark_dirty vmlinux EXPORT_SYMBOL_GPL 0x00000000 transport_configure_device vmlinux EXPORT_SYMBOL_GPL 0x00000000 nla_put vmlinux EXPORT_SYMBOL @@ -1793,8 +1801,6 @@ 0x00000000 netdev_walk_all_lower_dev_rcu vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_vb2_v4l2_buf_done vmlinux EXPORT_SYMBOL_GPL 0x00000000 alloc_ep_req vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_setrxfilter vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_getrxfilter vmlinux 
EXPORT_SYMBOL
 0x00000000 dev_attr_unload_heads vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_modeset_backoff vmlinux EXPORT_SYMBOL
 0x00000000 rockchip_get_iommu_base vmlinux EXPORT_SYMBOL_GPL
@@ -1803,6 +1809,8 @@
 0x00000000 locks_end_grace vmlinux EXPORT_SYMBOL_GPL
 0x00000000 file_modified vmlinux EXPORT_SYMBOL
 0x00000000 kernel_write vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000 __vfile_hostlock_put vmlinux EXPORT_SYMBOL_GPL
+0x00000000 __vfile_hostlock_get vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __devm_irq_alloc_descs vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_put_sb_net vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rtnl_af_unregister vmlinux EXPORT_SYMBOL_GPL
@@ -1841,6 +1849,7 @@
 0x00000000 inode_set_bytes vmlinux EXPORT_SYMBOL
 0x00000000 inode_sub_bytes vmlinux EXPORT_SYMBOL
 0x00000000 unpin_user_page vmlinux EXPORT_SYMBOL
+0x00000000 do_raw_spin_unlock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cfg80211_iter_combinations vmlinux EXPORT_SYMBOL
 0x00000000 svc_unreg_xprt_class vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_prepare_reply_pages vmlinux EXPORT_SYMBOL_GPL
@@ -1853,14 +1862,13 @@
 0x00000000 pps_register_source vmlinux EXPORT_SYMBOL
 0x00000000 v4l2_ctrl_new_std vmlinux EXPORT_SYMBOL
 0x00000000 usb_serial_generic_write vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ar9003_paprd_is_done vmlinux EXPORT_SYMBOL
-0x00000000 ar9003_get_pll_sqsum_dvc vmlinux EXPORT_SYMBOL
 0x00000000 kbase_remove_va_region vmlinux EXPORT_SYMBOL
 0x00000000 regulator_desc_list_voltage_linear_range vmlinux EXPORT_SYMBOL_GPL
 0x00000000 clkdev_hw_create vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pci_release_selected_regions vmlinux EXPORT_SYMBOL
 0x00000000 crypto_comp_decompress vmlinux EXPORT_SYMBOL_GPL
 0x00000000 debugfs_real_fops vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_sem_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 _raw_read_unlock_irqrestore vmlinux EXPORT_SYMBOL
 0x00000000 qword_add vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_max_bc_payload vmlinux EXPORT_SYMBOL_GPL
@@ -1945,7 +1953,6 @@
 0x00000000 of_property_count_elems_of_size vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sdhci_enable_v4_mode vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_wwan_close vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_getnf vmlinux EXPORT_SYMBOL
 0x00000000 scsi_rescan_device vmlinux EXPORT_SYMBOL
 0x00000000 drm_modeset_lock_init vmlinux EXPORT_SYMBOL
 0x00000000 drm_gem_put_pages vmlinux EXPORT_SYMBOL
@@ -1958,6 +1965,7 @@
 0x00000000 forget_cached_acl vmlinux EXPORT_SYMBOL
 0x00000000 bd_unlink_disk_holder vmlinux EXPORT_SYMBOL_GPL
 0x00000000 task_handoff_unregister vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rcu_oob_finish_lock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 down_killable vmlinux EXPORT_SYMBOL
 0x00000000 netdev_master_upper_dev_get_rcu vmlinux EXPORT_SYMBOL
 0x00000000 napi_gro_receive vmlinux EXPORT_SYMBOL
@@ -2049,7 +2057,6 @@
 0x00000000 __netdev_alloc_skb vmlinux EXPORT_SYMBOL
 0x00000000 _snd_pcm_lib_alloc_vmalloc_buffer vmlinux EXPORT_SYMBOL
 0x00000000 fsg_show_file vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_computetxtime vmlinux EXPORT_SYMBOL
 0x00000000 pm_clk_resume vmlinux EXPORT_SYMBOL_GPL
 0x00000000 kbase_reset_gpu_wait vmlinux EXPORT_SYMBOL
 0x00000000 pcie_capability_read_word vmlinux EXPORT_SYMBOL
@@ -2062,7 +2069,6 @@
 0x00000000 rkcif_rockit_pause_stream vmlinux EXPORT_SYMBOL
 0x00000000 cec_s_phys_addr_from_edid vmlinux EXPORT_SYMBOL_GPL
 0x00000000 v4l2_s_ext_ctrls vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_init_btcoex_hw vmlinux EXPORT_SYMBOL
 0x00000000 nvme_sync_queues vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dw_hdmi_set_plugged_cb vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tty_kref_put vmlinux EXPORT_SYMBOL
@@ -2081,7 +2087,6 @@
 0x00000000 snd_dmaengine_pcm_open vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fill_inquiry_response vmlinux EXPORT_SYMBOL_GPL USB_STORAGE
 0x00000000 usb_match_one_id vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ar9003_paprd_create_curve vmlinux EXPORT_SYMBOL
 0x00000000 spi_delay_exec vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sata_deb_timing_long vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dma_buf_move_notify vmlinux EXPORT_SYMBOL_GPL
@@ -2093,7 +2098,6 @@
 0x00000000 ip6_route_output_flags_noref vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sk_free vmlinux EXPORT_SYMBOL
 0x00000000 asoc_simple_init_jack vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_gpio_get vmlinux EXPORT_SYMBOL
 0x00000000 scsi_is_sdev_device vmlinux EXPORT_SYMBOL
 0x00000000 regmap_check_range_table vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fwnode_graph_get_port_parent vmlinux EXPORT_SYMBOL_GPL
@@ -2142,6 +2146,7 @@
 0x00000000 devm_drm_panel_bridge_add_typed vmlinux EXPORT_SYMBOL
 0x00000000 rockchip_register_system_status_notifier vmlinux EXPORT_SYMBOL
 0x00000000 gpiochip_disable_irq vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnsynch_release vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xfrm_policy_insert vmlinux EXPORT_SYMBOL
 0x00000000 skb_segment vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dm_suspended vmlinux EXPORT_SYMBOL_GPL
@@ -2288,6 +2293,8 @@
 0x00000000 debugfs_file_put vmlinux EXPORT_SYMBOL_GPL
 0x00000000 debugfs_file_get vmlinux EXPORT_SYMBOL_GPL
 0x00000000 d_find_any_alias vmlinux EXPORT_SYMBOL
+0x00000000 __xntimer_get_timeout vmlinux EXPORT_SYMBOL_GPL
+0x00000000 dovetail_stop_altsched vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sk_detach_filter vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sk_attach_filter vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_rawmidi_transmit_empty vmlinux EXPORT_SYMBOL
@@ -2337,8 +2344,6 @@
 0x00000000 rpc_run_task vmlinux EXPORT_SYMBOL_GPL
 0x00000000 skb_flow_dissect_tunnel_info vmlinux EXPORT_SYMBOL
 0x00000000 efivar_entry_set_safe vmlinux EXPORT_SYMBOL_GPL
-0x00000000 cpufreq_dbs_governor_exit vmlinux EXPORT_SYMBOL_GPL
-0x00000000 cpufreq_dbs_governor_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dm_get_dev_t vmlinux EXPORT_SYMBOL_GPL
 0x00000000 v4l2_i2c_subdev_set_name vmlinux EXPORT_SYMBOL_GPL
 0x00000000 typec_altmode_update_active vmlinux EXPORT_SYMBOL_GPL
@@ -2467,19 +2472,20 @@
 0x00000000 pci_disable_rom vmlinux EXPORT_SYMBOL_GPL
 0x00000000 of_phy_put vmlinux EXPORT_SYMBOL_GPL
 0x00000000 of_phy_get vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnbufd_copy_from_kmem vmlinux EXPORT_SYMBOL_GPL
 0x00000000 svc_wake_up vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xprt_write_space vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nf_ct_helper_expectfn_unregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sip_smc_lastlog_request vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fsg_ss_bulk_out_comp_desc vmlinux EXPORT_SYMBOL_GPL
 0x00000000 devm_usb_get_phy vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_setpower vmlinux EXPORT_SYMBOL
 0x00000000 phy_do_ioctl_running vmlinux EXPORT_SYMBOL
 0x00000000 mtd_blktrans_cease_background vmlinux EXPORT_SYMBOL_GPL
 0x00000000 scsi_mode_sense vmlinux EXPORT_SYMBOL
 0x00000000 kbase_kcpu_fence_signal_init vmlinux EXPORT_SYMBOL
 0x00000000 fb_mode_is_equal vmlinux EXPORT_SYMBOL
 0x00000000 fs_param_is_blockdev vmlinux EXPORT_SYMBOL
+0x00000000 ___xnlock_put vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rcu_gp_is_normal vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __init_waitqueue_head vmlinux EXPORT_SYMBOL
 0x00000000 ns_capable_setid vmlinux EXPORT_SYMBOL
@@ -2502,6 +2508,7 @@
 0x00000000 gpiochip_reqres_irq vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ip_compute_csum vmlinux EXPORT_SYMBOL
 0x00000000 blk_mq_kick_requeue_list vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_fd_unlock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rcu_all_qs vmlinux EXPORT_SYMBOL_GPL
 0x00000000 suspend_set_ops vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dst_alloc vmlinux EXPORT_SYMBOL
@@ -2515,6 +2522,7 @@
 0x00000000 proc_create_mount_point vmlinux EXPORT_SYMBOL
 0x00000000 single_release vmlinux EXPORT_SYMBOL
 0x00000000 get_user_pages vmlinux EXPORT_SYMBOL
+0x00000000 enable_oob_stage vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pm_suspend_global_flags vmlinux EXPORT_SYMBOL_GPL
 0x00000000 housekeeping_affine vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ip6_append_data vmlinux EXPORT_SYMBOL_GPL
@@ -2544,6 +2552,7 @@
 0x00000000 cfb_copyarea vmlinux EXPORT_SYMBOL
 0x00000000 __bitmap_xor vmlinux EXPORT_SYMBOL
 0x00000000 mnt_want_write vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnselect_bind vmlinux EXPORT_SYMBOL_GPL
 0x00000000 panic_reboot_mode vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __dev_remove_pack vmlinux EXPORT_SYMBOL
 0x00000000 secure_ipv6_port_ephemeral vmlinux EXPORT_SYMBOL
@@ -2600,6 +2609,7 @@
 0x00000000 invalidate_bh_lrus vmlinux EXPORT_SYMBOL_GPL
 0x00000000 vfs_iter_read vmlinux EXPORT_SYMBOL
 0x00000000 unregister_trace_event vmlinux EXPORT_SYMBOL_GPL
+0x00000000 irq_pipeline_nmi_enter vmlinux EXPORT_SYMBOL
 0x00000000 can_rx_register vmlinux EXPORT_SYMBOL
 0x00000000 xt_check_match vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nf_nat_helper_register vmlinux EXPORT_SYMBOL_GPL
@@ -2734,6 +2744,7 @@
 0x00000000 pcim_iomap_regions_request_all vmlinux EXPORT_SYMBOL
 0x00000000 blk_mq_rq_cpu vmlinux EXPORT_SYMBOL
 0x00000000 dentry_open vmlinux EXPORT_SYMBOL
+0x00000000 ___xnsched_run vmlinux EXPORT_SYMBOL_GPL
 0x00000000 freq_qos_add_notifier vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ieee80211_sched_scan_results vmlinux EXPORT_SYMBOL
 0x00000000 unregister_netdevice_queue vmlinux EXPORT_SYMBOL
@@ -2750,9 +2761,7 @@
 0x00000000 nf_nat_setup_info vmlinux EXPORT_SYMBOL
 0x00000000 ethtool_convert_legacy_u32_to_link_mode vmlinux EXPORT_SYMBOL
 0x00000000 led_trigger_set_default vmlinux EXPORT_SYMBOL_GPL
-0x00000000 cpuidle_pause_and_lock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 thermal_zone_of_sensor_unregister vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_setup_statusring vmlinux EXPORT_SYMBOL
 0x00000000 sata_link_debounce vmlinux EXPORT_SYMBOL_GPL
 0x00000000 amba_bustype vmlinux EXPORT_SYMBOL_GPL
 0x00000000 jbd2_journal_get_undo_access vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
@@ -2785,6 +2794,7 @@
 0x00000000 fs_umode_to_ftype vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fiemap_fill_next_extent vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000 swap_alloc_cluster vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnselector_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 trace_event_ignore_this_pid vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dev_queue_xmit_accel vmlinux EXPORT_SYMBOL
 0x00000000 dm_tm_unlock vmlinux EXPORT_SYMBOL_GPL
@@ -2797,6 +2807,7 @@
 0x00000000 crypto_ahash_digest vmlinux EXPORT_SYMBOL_GPL
 0x00000000 crypto_remove_final vmlinux EXPORT_SYMBOL_GPL
 0x00000000 get_user_pages_unlocked vmlinux EXPORT_SYMBOL
+0x00000000 xnsched_lock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 svc_authenticate vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rtnl_set_sk_err vmlinux EXPORT_SYMBOL
 0x00000000 __sock_queue_rcv_skb vmlinux EXPORT_SYMBOL
@@ -2804,7 +2815,6 @@
 0x00000000 snd_pcm_create_iec958_consumer_default vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_cdc_wdm_register vmlinux EXPORT_SYMBOL
 0x00000000 usb_hcd_pci_remove vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_set_tx_filter vmlinux EXPORT_SYMBOL
 0x00000000 phy_device_remove vmlinux EXPORT_SYMBOL
 0x00000000 class_remove_file_ns vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_atomic_helper_wait_for_vblanks vmlinux EXPORT_SYMBOL
@@ -2870,6 +2880,7 @@
 0x00000000 clkdev_drop vmlinux EXPORT_SYMBOL
 0x00000000 kblockd_schedule_work vmlinux EXPORT_SYMBOL
 0x00000000 mem_section vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_task_join vmlinux EXPORT_SYMBOL_GPL
 0x00000000 send_sig_info vmlinux EXPORT_SYMBOL
 0x00000000 iounmap vmlinux EXPORT_SYMBOL
 0x00000000 cache_seq_next_rcu vmlinux EXPORT_SYMBOL_GPL
@@ -2940,11 +2951,11 @@
 0x00000000 xa_load vmlinux EXPORT_SYMBOL
 0x00000000 ucs2_as_utf8 vmlinux EXPORT_SYMBOL
 0x00000000 set_cached_acl vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_mmap_to_user vmlinux EXPORT_SYMBOL_GPL
 0x00000000 param_ops_ulong vmlinux EXPORT_SYMBOL
 0x00000000 v4l2_ctrl_find vmlinux EXPORT_SYMBOL
 0x00000000 v4l2_ctrl_fill vmlinux EXPORT_SYMBOL
 0x00000000 v4l_printk_ioctl vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_cmn_init_crypto vmlinux EXPORT_SYMBOL
 0x00000000 phy_package_join vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_i2c_encoder_commit vmlinux EXPORT_SYMBOL
 0x00000000 tty_buffer_set_limit vmlinux EXPORT_SYMBOL_GPL
@@ -3010,6 +3021,7 @@
 0x00000000 blk_mq_flush_busy_ctxs vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dcookie_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 locks_mandatory_area vmlinux EXPORT_SYMBOL
+0x00000000 xnsynch_sleep_on vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xt_table_unlock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sock_alloc_send_pskb vmlinux EXPORT_SYMBOL
 0x00000000 snd_soc_link_compr_shutdown vmlinux EXPORT_SYMBOL_GPL
@@ -3067,13 +3079,13 @@
 0x00000000 crypto_shash_tfm_digest vmlinux EXPORT_SYMBOL_GPL
 0x00000000 flow_rule_match_basic vmlinux EXPORT_SYMBOL
 0x00000000 call_fib_notifiers vmlinux EXPORT_SYMBOL
-0x00000000 dfs_pattern_detector_init vmlinux EXPORT_SYMBOL
 0x00000000 kbase_free_phy_pages_helper_locked vmlinux EXPORT_SYMBOL
 0x00000000 __drm_atomic_helper_connector_destroy_state vmlinux EXPORT_SYMBOL
 0x00000000 devm_reset_controller_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 regulator_bulk_force_disable vmlinux EXPORT_SYMBOL_GPL
 0x00000000 kstrdup_quotable_cmdline vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __kfifo_out_peek_r vmlinux EXPORT_SYMBOL
+0x00000000 xnintr_affinity vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ip6_frag_next vmlinux EXPORT_SYMBOL
 0x00000000 netlink_ns_capable vmlinux EXPORT_SYMBOL
 0x00000000 eth_commit_mac_addr_change vmlinux EXPORT_SYMBOL
@@ -3166,6 +3178,7 @@
 0x00000000 fuse_dev_install vmlinux EXPORT_SYMBOL_GPL
 0x00000000 get_acl vmlinux EXPORT_SYMBOL
 0x00000000 set_page_dirty_lock vmlinux EXPORT_SYMBOL
+0x00000000 irq_pipeline_nmi_exit vmlinux EXPORT_SYMBOL
 0x00000000 cache_unregister_net vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcp_v4_conn_request vmlinux EXPORT_SYMBOL
 0x00000000 tcp_sock_set_keepidle vmlinux EXPORT_SYMBOL
@@ -3186,7 +3199,6 @@
 0x00000000 tcp_v4_syn_recv_sock vmlinux EXPORT_SYMBOL
 0x00000000 __SCK__tp_func_vb2_v4l2_dqbuf vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_put_dev vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_loadnf vmlinux EXPORT_SYMBOL
 0x00000000 dw_hdmi_qp_set_sample_rate vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_panel_init vmlinux EXPORT_SYMBOL
 0x00000000 drm_dp_link_rate_to_bw_code vmlinux EXPORT_SYMBOL
@@ -3219,13 +3231,13 @@
 0x00000000 fd_install vmlinux EXPORT_SYMBOL
 0x00000000 _raw_read_lock_bh vmlinux EXPORT_SYMBOL
 0x00000000 nf_nat_packet vmlinux EXPORT_SYMBOL_GPL
-0x00000000 rockchip_dmcfreq_lock_nested vmlinux EXPORT_SYMBOL
 0x00000000 dm_bufio_get_block_data vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __phy_resume vmlinux EXPORT_SYMBOL
 0x00000000 spi_mem_get_name vmlinux EXPORT_SYMBOL_GPL
 0x00000000 bio_devname vmlinux EXPORT_SYMBOL
 0x00000000 vfs_fsync_range vmlinux EXPORT_SYMBOL
 0x00000000 memory_cgrp_subsys vmlinux EXPORT_SYMBOL
+0x00000000 cobalt_signal_send_pid vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_suspend_resume vmlinux EXPORT_SYMBOL_GPL
 0x00000000 disable_irq vmlinux EXPORT_SYMBOL
 0x00000000 bit_wait_io_timeout vmlinux EXPORT_SYMBOL_GPL
@@ -3280,6 +3292,9 @@
 0x00000000 __sg_alloc_table_from_pages vmlinux EXPORT_SYMBOL
 0x00000000 blk_finish_plug vmlinux EXPORT_SYMBOL
 0x00000000 vmalloc_32_user vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_drv_set_sysclass vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnthread_set_periodic vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnclock_core_read_monotonic vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rfkill_get_wifi_power_state vmlinux EXPORT_SYMBOL
 0x00000000 ping_recvmsg vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dev_pm_opp_unregister_set_opp_helper vmlinux EXPORT_SYMBOL_GPL
@@ -3319,8 +3334,6 @@
 0x00000000 rkcif_sditf_disconnect vmlinux EXPORT_SYMBOL
 0x00000000 media_device_pci_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usbnet_open vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_putrxbuf vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_puttxbuf vmlinux EXPORT_SYMBOL
 0x00000000 pci_test_config_bits vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_atomic_add_encoder_bridges vmlinux EXPORT_SYMBOL
 0x00000000 drm_dp_dsc_sink_max_slice_count vmlinux EXPORT_SYMBOL
@@ -3362,6 +3375,7 @@
 0x00000000 drm_atomic_set_crtc_for_plane vmlinux EXPORT_SYMBOL
 0x00000000 splice_direct_to_actor vmlinux EXPORT_SYMBOL
 0x00000000 do_traversal_all_lruvec vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnsynch_acquire vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_release_client vmlinux EXPORT_SYMBOL_GPL
 0x00000000 br_fdb_find_port vmlinux EXPORT_SYMBOL_GPL
 0x00000000 genl_lock vmlinux EXPORT_SYMBOL
@@ -3378,10 +3392,8 @@
 0x00000000 unlock_buffer vmlinux EXPORT_SYMBOL
 0x00000000 simple_transaction_release vmlinux EXPORT_SYMBOL
 0x00000000 rcu_read_unlock_strict vmlinux EXPORT_SYMBOL_GPL
-0x00000000 iwe_stream_add_value vmlinux EXPORT_SYMBOL
 0x00000000 extcon_find_edev_by_node vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mmc_abort_tuning vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath_regd_find_country_by_name vmlinux EXPORT_SYMBOL
 0x00000000 regmap_irq_get_domain vmlinux EXPORT_SYMBOL_GPL
 0x00000000 subsys_find_device_by_id vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __devm_drm_dev_alloc vmlinux EXPORT_SYMBOL
@@ -3446,7 +3458,6 @@
 0x00000000 sip_fiq_debugger_enable_fiq vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mmc_erase vmlinux EXPORT_SYMBOL
 0x00000000 cpufreq_quick_get vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_setantenna vmlinux EXPORT_SYMBOL
 0x00000000 can_dlc2len vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_mode_equal vmlinux EXPORT_SYMBOL
 0x00000000 percpu_counter_set vmlinux EXPORT_SYMBOL
@@ -3467,6 +3478,8 @@
 0x00000000 radix_tree_gang_lookup_tag vmlinux EXPORT_SYMBOL
 0x00000000 klist_add_head vmlinux EXPORT_SYMBOL_GPL
 0x00000000 uuid_gen vmlinux EXPORT_SYMBOL_GPL
+0x00000000 cobalt_timer_handler vmlinux EXPORT_SYMBOL_GPL
+0x00000000 dovetail_context_switch vmlinux EXPORT_SYMBOL_GPL
 0x00000000 flush_dcache_page vmlinux EXPORT_SYMBOL
 0x00000000 snd_soc_suspend vmlinux EXPORT_SYMBOL_GPL
 0x00000000 v4l2_querymenu vmlinux EXPORT_SYMBOL
@@ -3491,7 +3504,6 @@
 0x00000000 dma_supported vmlinux EXPORT_SYMBOL
 0x00000000 v4l2_ctrl_sub_ev_ops vmlinux EXPORT_SYMBOL
 0x00000000 usb_alloc_urb vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_releasetxqueue vmlinux EXPORT_SYMBOL
 0x00000000 phy_driver_is_genphy vmlinux EXPORT_SYMBOL_GPL
 0x00000000 spi_setup vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rockchip_connector_update_vfp_for_vrr vmlinux EXPORT_SYMBOL
@@ -3499,6 +3511,7 @@
 0x00000000 serial8250_get_port vmlinux EXPORT_SYMBOL_GPL
 0x00000000 phy_optional_get vmlinux EXPORT_SYMBOL_GPL
 0x00000000 inode_init_once vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000 __xnclock_ratelimit vmlinux EXPORT_SYMBOL_GPL
 0x00000000 uprobe_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tracing_snapshot vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ring_buffer_change_overwrite vmlinux EXPORT_SYMBOL_GPL
@@ -3516,6 +3529,7 @@
 0x00000000 raid6_datap_recov vmlinux EXPORT_SYMBOL_GPL
 0x00000000 scsi_cmd_ioctl vmlinux EXPORT_SYMBOL
 0x00000000 page_cache_async_ra vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnthread_signal vmlinux EXPORT_SYMBOL_GPL
 0x00000000 arm64_mm_context_put vmlinux EXPORT_SYMBOL_GPL
 0x00000000 arm64_mm_context_get vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cfg80211_tx_mlme_mgmt vmlinux EXPORT_SYMBOL
@@ -3545,13 +3559,13 @@
 0x00000000 clk_bulk_prepare vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fb_prepare_logo vmlinux EXPORT_SYMBOL
 0x00000000 memset32 vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_fd_ioctl vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_sched_overutilized_tp vmlinux EXPORT_SYMBOL_GPL
 0x00000000 blocking_notifier_chain_unregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cfg80211_is_element_inherited vmlinux EXPORT_SYMBOL
 0x00000000 neigh_table_init vmlinux EXPORT_SYMBOL
 0x00000000 rk_emmc_transfer vmlinux EXPORT_SYMBOL
 0x00000000 mmc_register_driver vmlinux EXPORT_SYMBOL
-0x00000000 cpuidle_register_device vmlinux EXPORT_SYMBOL_GPL
 0x00000000 input_ff_effect_from_user vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcpm_cc_change vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mfd_cell_enable vmlinux EXPORT_SYMBOL
@@ -3564,6 +3578,7 @@
 0x00000000 __traceiter_block_rq_issue vmlinux EXPORT_SYMBOL_GPL
 0x00000000 acomp_request_alloc vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sysfs_create_link vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_get_iov_flatlen vmlinux EXPORT_SYMBOL_GPL
 0x00000000 task_active_pid_ns vmlinux EXPORT_SYMBOL_GPL
 0x00000000 svc_set_client vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcp_fastopen_defer_connect vmlinux EXPORT_SYMBOL
@@ -3595,6 +3610,7 @@
 0x00000000 mb_cache_entry_delete vmlinux EXPORT_SYMBOL
 0x00000000 seq_release vmlinux EXPORT_SYMBOL
 0x00000000 insert_inode_locked vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_mmap_vmem vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ring_buffer_size vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ring_buffer_free vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tick_broadcast_control vmlinux EXPORT_SYMBOL_GPL
@@ -3630,6 +3646,8 @@
 0x00000000 pstore_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fs_context_for_submount vmlinux EXPORT_SYMBOL
 0x00000000 kfree_link vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_mutex_unlock vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xenomai_personality vmlinux EXPORT_SYMBOL_GPL
 0x00000000 up_write vmlinux EXPORT_SYMBOL
 0x00000000 __SCK__tp_func_pelt_cfs_tp vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cpu_all_bits vmlinux EXPORT_SYMBOL
@@ -3660,6 +3678,7 @@
 0x00000000 jbd2_log_start_commit vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000 remove_proc_subtree vmlinux EXPORT_SYMBOL
 0x00000000 rcu_is_watching vmlinux EXPORT_SYMBOL_GPL
+0x00000000 __hybrid_spin_lock_irq vmlinux EXPORT_SYMBOL
 0x00000000 asoc_simple_startup vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_soc_component_enable_pin vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_soc_of_parse_card_name vmlinux EXPORT_SYMBOL_GPL
@@ -3687,7 +3706,6 @@
 0x00000000 neigh_lookup_nodev vmlinux EXPORT_SYMBOL
 0x00000000 v4l2_m2m_get_curr_priv vmlinux EXPORT_SYMBOL
 0x00000000 usb_stor_disconnect vmlinux EXPORT_SYMBOL_GPL USB_STORAGE
-0x00000000 ath_is_world_regd vmlinux EXPORT_SYMBOL
 0x00000000 scsi_change_queue_depth vmlinux EXPORT_SYMBOL
 0x00000000 fwnode_get_mac_address vmlinux EXPORT_SYMBOL
 0x00000000 dw_pcie_read vmlinux EXPORT_SYMBOL_GPL
@@ -3695,7 +3713,6 @@
 0x00000000 mpi_read_raw_from_sgl vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mpi_ec_deinit vmlinux EXPORT_SYMBOL_GPL
 0x00000000 crc16 vmlinux EXPORT_SYMBOL
-0x00000000 log_suspend_abort_reason vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sched_show_task vmlinux EXPORT_SYMBOL_GPL
 0x00000000 work_on_cpu vmlinux EXPORT_SYMBOL_GPL
 0x00000000 devm_nvmem_cell_put vmlinux EXPORT_SYMBOL
@@ -3709,6 +3726,7 @@
 0x00000000 rhltable_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 bsearch vmlinux EXPORT_SYMBOL
 0x00000000 blk_put_request vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_munmap vmlinux EXPORT_SYMBOL_GPL
 0x00000000 async_synchronize_full_domain vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dst_blackhole_update_pmtu vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sock_zerocopy_put_abort vmlinux EXPORT_SYMBOL_GPL
@@ -3716,7 +3734,6 @@
 0x00000000 input_get_timestamp vmlinux EXPORT_SYMBOL
 0x00000000 usb_serial_generic_throttle vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cdrom_mode_select vmlinux EXPORT_SYMBOL
-0x00000000 ar9003_paprd_enable vmlinux EXPORT_SYMBOL
 0x00000000 drm_syncobj_create vmlinux EXPORT_SYMBOL
 0x00000000 ZSTD_compress_usingCDict vmlinux EXPORT_SYMBOL
 0x00000000 __tracepoint_android_fs_datawrite_end vmlinux EXPORT_SYMBOL
@@ -3738,17 +3755,18 @@
 0x00000000 rhashtable_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 blk_limits_io_min vmlinux EXPORT_SYMBOL
 0x00000000 sysfs_remove_files vmlinux EXPORT_SYMBOL_GPL
+0x00000000 dovetail_start_altsched vmlinux EXPORT_SYMBOL_GPL
 0x00000000 percpu_up_write vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_setbufsize vmlinux EXPORT_SYMBOL_GPL
 0x00000000 unix_table_lock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usbnet_purge_paused_rxq vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_set_gpio vmlinux EXPORT_SYMBOL
 0x00000000 scsi_internal_device_unblock_nowait vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_hdmi_avi_infoframe_bars vmlinux EXPORT_SYMBOL
 0x00000000 drm_dp_downstream_444_to_420_conversion vmlinux EXPORT_SYMBOL
 0x00000000 color_table vmlinux EXPORT_SYMBOL
 0x00000000 devm_clk_hw_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 put_disk vmlinux EXPORT_SYMBOL
+0x00000000 xnheap_vmalloc vmlinux EXPORT_SYMBOL_GPL
 0x00000000 add_timer vmlinux EXPORT_SYMBOL
 0x00000000 __SCK__tp_func_irq_handler_entry vmlinux EXPORT_SYMBOL_GPL
 0x00000000 can_proto_register vmlinux EXPORT_SYMBOL
@@ -3839,6 +3857,7 @@
 0x00000000 fuse_dequeue_forget vmlinux EXPORT_SYMBOL
 0x00000000 __traceiter_android_fs_dataread_start vmlinux EXPORT_SYMBOL
 0x00000000 swapcache_free_entries vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_event_select vmlinux EXPORT_SYMBOL_GPL
 0x00000000 clockevents_register_device vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cache_check vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fib_rules_seq_read vmlinux EXPORT_SYMBOL_GPL
@@ -3896,6 +3915,7 @@
 0x00000000 jbd2_journal_unlock_updates vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000 sysctl_vfs_cache_pressure vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_mm_vmscan_direct_reclaim_end vmlinux EXPORT_SYMBOL_GPL
+0x00000000 cobalt_unregister_personality vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dma_sync_single_for_cpu vmlinux EXPORT_SYMBOL
 0x00000000 panic vmlinux EXPORT_SYMBOL
 0x00000000 xprt_wake_up_backlog vmlinux EXPORT_SYMBOL_GPL
@@ -3911,12 +3931,12 @@
 0x00000000 __traceiter_nfs_fsync_enter vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __kmalloc vmlinux EXPORT_SYMBOL
 0x00000000 __vmalloc vmlinux EXPORT_SYMBOL
+0x00000000 xnintr_destroy vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xdr_truncate_encode vmlinux EXPORT_SYMBOL
 0x00000000 tcp_set_keepalive vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dev_pm_opp_of_get_sharing_cpus vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ohci_suspend vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usbnet_get_ethernet_addr vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_btcoex_deinit vmlinux EXPORT_SYMBOL
 0x00000000 ata_sas_port_start vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_property_replace_blob vmlinux EXPORT_SYMBOL
 0x00000000 drm_atomic_bridge_chain_enable vmlinux EXPORT_SYMBOL
@@ -3939,6 +3959,7 @@
 0x00000000 pci_find_ht_capability vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pci_find_next_ht_capability vmlinux EXPORT_SYMBOL_GPL
 0x00000000 gf128mul_init_64k_bbe vmlinux EXPORT_SYMBOL
+0x00000000 __xntimer_migrate vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cfg80211_find_vendor_elem vmlinux EXPORT_SYMBOL
 0x00000000 ieee80211_ie_split_ric vmlinux EXPORT_SYMBOL
 0x00000000 xt_find_match vmlinux EXPORT_SYMBOL
@@ -3978,7 +3999,6 @@
 0x00000000 of_translate_address vmlinux EXPORT_SYMBOL
 0x00000000 power_supply_property_is_writeable vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cec_unregister_adapter vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ar9003_hw_disable_phy_restart vmlinux EXPORT_SYMBOL
 0x00000000 drm_encoder_cleanup vmlinux EXPORT_SYMBOL
 0x00000000 rockchip_vdpu2_driver vmlinux EXPORT_SYMBOL
 0x00000000 rockchip_vdpu1_driver vmlinux EXPORT_SYMBOL
@@ -4093,6 +4113,7 @@
 0x00000000 pwmchip_remove vmlinux EXPORT_SYMBOL_GPL
 0x00000000 simple_nosetlease vmlinux EXPORT_SYMBOL
 0x00000000 mount_nodev vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_irq_request vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xfrm_find_acq vmlinux EXPORT_SYMBOL
 0x00000000 nf_nat_ipv6_unregister_fn vmlinux EXPORT_SYMBOL_GPL
 0x00000000 eth_mac_addr vmlinux EXPORT_SYMBOL
@@ -4113,7 +4134,6 @@
 0x00000000 allocate_resource vmlinux EXPORT_SYMBOL
 0x00000000 ieee80211_channel_to_freq_khz vmlinux EXPORT_SYMBOL
 0x00000000 inet6addr_notifier_call_chain vmlinux EXPORT_SYMBOL
-0x00000000 cpuidle_unregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 phy_10gbit_features vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pm_schedule_suspend vmlinux EXPORT_SYMBOL_GPL
 0x00000000 clk_bulk_get_all vmlinux EXPORT_SYMBOL
@@ -4204,8 +4224,6 @@
 0x00000000 mbox_client_peek_data vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cec_s_log_addrs vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rndis_set_host_mac vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath_regd_init vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_wait vmlinux EXPORT_SYMBOL
 0x00000000 unregister_mtd_chip_driver vmlinux EXPORT_SYMBOL
 0x00000000 regulator_has_full_constraints vmlinux EXPORT_SYMBOL_GPL
 0x00000000 gpiod_get_raw_array_value_cansleep vmlinux EXPORT_SYMBOL_GPL
@@ -4277,6 +4295,8 @@
 0x00000000 crypto_unregister_acomps vmlinux EXPORT_SYMBOL_GPL
 0x00000000 key_link vmlinux EXPORT_SYMBOL
 0x00000000 read_cache_page vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_fd_put_iovec vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_fd_get_iovec vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tick_broadcast_oneshot_control vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rcu_unexpedite_gp vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __tracepoint_pelt_irq_tp vmlinux EXPORT_SYMBOL_GPL
@@ -4302,13 +4322,13 @@
 0x00000000 sbitmap_queue_wake_all vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nla_put_nohdr vmlinux EXPORT_SYMBOL
 0x00000000 copy_to_user_nofault vmlinux EXPORT_SYMBOL_GPL
+0x00000000 __rtdm_synch_flush vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pid_nr_ns vmlinux EXPORT_SYMBOL_GPL
 0x00000000 bt_sock_unregister vmlinux EXPORT_SYMBOL
 0x00000000 napi_gro_flush vmlinux EXPORT_SYMBOL
 0x00000000 sip_fiq_control vmlinux EXPORT_SYMBOL_GPL
 0x00000000 power_supply_temp2resist_simple vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rkcif_plat_drv vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_setuptxqueue vmlinux EXPORT_SYMBOL
 0x00000000 nanddev_bbt_in_flash_update vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ahci_platform_suspend_host vmlinux EXPORT_SYMBOL_GPL
 0x00000000 scsi_set_medium_removal vmlinux EXPORT_SYMBOL
@@ -4343,7 +4363,6 @@
 0x00000000 nf_ct_helper_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sdhci_pltfm_unregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 vb2_ops_wait_finish vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_set_sta_beacon_timers vmlinux EXPORT_SYMBOL
 0x00000000 midgard__mali_profiling_control vmlinux EXPORT_SYMBOL
 0x00000000 tpm_is_tpm2 vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sysrq_mask vmlinux EXPORT_SYMBOL_GPL
@@ -4362,6 +4381,8 @@
 0x00000000 add_device_randomness vmlinux EXPORT_SYMBOL
 0x00000000 fb_find_nearest_mode vmlinux EXPORT_SYMBOL
 0x00000000 gpiod_get_array vmlinux EXPORT_SYMBOL_GPL
+0x00000000 cobalt_clock_register vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnheap_check_block vmlinux EXPORT_SYMBOL_GPL
 0x00000000 trace_put_event_file vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xfrm_count_pfkey_auth_supported vmlinux EXPORT_SYMBOL_GPL
 0x00000000 secure_tcpv6_seq vmlinux EXPORT_SYMBOL
@@ -4435,7 +4456,6 @@
 0x00000000 snd_pcm_stop_xrun vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_hwdep_new vmlinux EXPORT_SYMBOL
 0x00000000 ffs_lock vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_cmn_rx_accept vmlinux EXPORT_SYMBOL
 0x00000000 kbase_csf_scheduler_pm_suspend vmlinux EXPORT_SYMBOL
 0x00000000 regulator_suspend_disable vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pci_bus_resource_n vmlinux EXPORT_SYMBOL_GPL
@@ -4444,6 +4464,7 @@
 0x00000000 bio_free_pages vmlinux EXPORT_SYMBOL
 0x00000000 __ksize vmlinux EXPORT_SYMBOL
 0x00000000 down_interruptible vmlinux EXPORT_SYMBOL
+0x00000000 dovetail_resume_inband vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcp_abort vmlinux EXPORT_SYMBOL_GPL
 0x00000000 netif_carrier_off vmlinux EXPORT_SYMBOL
 0x00000000 mmc_request_done vmlinux EXPORT_SYMBOL
@@ -4478,7 +4499,6 @@
 0x00000000 inet_csk_prepare_forced_close vmlinux EXPORT_SYMBOL
 0x00000000 cqhci_pltfm_init vmlinux EXPORT_SYMBOL
 0x00000000 __SCK__tp_func_vb2_v4l2_buf_done vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_btcoex_set_weight vmlinux EXPORT_SYMBOL
 0x00000000 __SCK__tp_func_mali_page_fault_insert_pages vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dw_hdmi_phy_setup_hpd vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __rb_erase_color vmlinux EXPORT_SYMBOL
@@ -4518,6 +4538,7 @@
 0x00000000 filp_open vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000 check_cache_active vmlinux EXPORT_SYMBOL_GPL
 0x00000000 trace_clock_jiffies vmlinux EXPORT_SYMBOL_GPL
+0x00000000 inband_irq_disable vmlinux EXPORT_SYMBOL
 0x00000000 smpboot_register_percpu_thread vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xfrm_calg_get_byname vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ip_vs_tcp_conn_listen vmlinux EXPORT_SYMBOL
@@ -4630,7 +4651,6 @@
 0x00000000 sdio_register_driver vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_kill_urb vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_get_intf vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_stopdmarecv vmlinux EXPORT_SYMBOL
 0x00000000 dev_pm_genpd_set_performance_state vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_crtc_vblank_reset vmlinux EXPORT_SYMBOL
 0x00000000 regulator_get_voltage_rdev vmlinux EXPORT_SYMBOL_GPL
@@ -4664,6 +4684,7 @@
 0x00000000 skcipher_walk_complete vmlinux EXPORT_SYMBOL_GPL
 0x00000000 crypto_remove_spawns vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ring_buffer_empty_cpu vmlinux EXPORT_SYMBOL_GPL
+0x00000000 irq_post_stage vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ip6_input vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ip6tun_encaps vmlinux EXPORT_SYMBOL
 0x00000000 xt_replace_table vmlinux EXPORT_SYMBOL_GPL
@@ -4674,6 +4695,7 @@
 0x00000000 pci_free_host_bridge vmlinux EXPORT_SYMBOL
 0x00000000 __percpu_counter_init vmlinux EXPORT_SYMBOL
 0x00000000 get_user_pages_remote vmlinux EXPORT_SYMBOL
+0x00000000 __cobalt_sigqueue vmlinux EXPORT_SYMBOL_GPL
 0x00000000 irq_get_domain_generic_chip vmlinux EXPORT_SYMBOL_GPL
 0x00000000 irq_set_affinity_notifier vmlinux EXPORT_SYMBOL_GPL
 0x00000000 inet_hashinfo_init vmlinux EXPORT_SYMBOL_GPL
@@ -4707,6 +4729,7 @@
 0x00000000 aes_encrypt vmlinux EXPORT_SYMBOL
 0x00000000 sg_miter_next vmlinux EXPORT_SYMBOL
 0x00000000 zero_pfn vmlinux EXPORT_SYMBOL
+0x00000000 xnintr_disable vmlinux EXPORT_SYMBOL_GPL
 0x00000000 param_get_ushort vmlinux EXPORT_SYMBOL
 0x00000000 svc_sock_update_bufs vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcp_sock_set_cork vmlinux EXPORT_SYMBOL
@@ -4791,6 +4814,7 @@
 0x00000000 hid_setup_resolution_multiplier vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ata_do_dev_read_id vmlinux EXPORT_SYMBOL_GPL
 0x00000000 kbase_csf_firmware_csg_output vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_irq_request_affine vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __srcu_read_lock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 irq_domain_alloc_irqs_parent vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ip_sock_set_pktinfo vmlinux EXPORT_SYMBOL
@@ -4799,7 +4823,6 @@
 0x00000000 nvmem_add_cell_table vmlinux EXPORT_SYMBOL_GPL
 0x00000000 v4l2_clk_get vmlinux EXPORT_SYMBOL
 0x00000000 usb_function_unregister vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ar9003_mci_send_message vmlinux EXPORT_SYMBOL
 0x00000000 dma_fence_init vmlinux EXPORT_SYMBOL
 0x00000000 __drm_atomic_helper_set_config vmlinux EXPORT_SYMBOL
 0x00000000 __SCK__tp_func_block_split vmlinux EXPORT_SYMBOL_GPL
@@ -4810,7 +4833,6 @@
 0x00000000 mr_vif_seq_idx vmlinux EXPORT_SYMBOL
 0x00000000 dev_mc_add_excl vmlinux EXPORT_SYMBOL
 0x00000000 dev_uc_add_excl vmlinux EXPORT_SYMBOL
-0x00000000 rockchip_dmcfreq_unlock vmlinux EXPORT_SYMBOL
 0x00000000 media_request_object_put vmlinux EXPORT_SYMBOL_GPL
 0x00000000 i2c_bus_type vmlinux EXPORT_SYMBOL_GPL
 0x00000000 spi_mem_adjust_op_size vmlinux EXPORT_SYMBOL_GPL
@@ -4861,6 +4883,7 @@
 0x00000000 blk_queue_io_min vmlinux EXPORT_SYMBOL
 0x00000000 nfs_writeback_update_inode vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nobh_truncate_page vmlinux EXPORT_SYMBOL
+0x00000000 __rtdm_nrtsig_execute vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ieee80211_generic_frame_duration vmlinux EXPORT_SYMBOL
 0x00000000 xprt_lock_connect vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_soc_unregister_dai vmlinux EXPORT_SYMBOL_GPL
@@ -4909,7 +4932,6 @@
 0x00000000 __skb_free_datagram_locked vmlinux EXPORT_SYMBOL
 0x00000000 lock_sock_fast vmlinux EXPORT_SYMBOL
 0x00000000 sdhci_resume_host vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_disable_mib_counters vmlinux EXPORT_SYMBOL
 0x00000000 cn_add_callback vmlinux EXPORT_SYMBOL_GPL
 0x00000000 alloc_iova_fast vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ilookup vmlinux EXPORT_SYMBOL
@@ -4999,8 +5021,10 @@
 0x00000000 cpufreq_boost_enabled vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_get_hcd vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_ht_find_item vmlinux EXPORT_SYMBOL
+0x00000000 fbcon_set_bitops vmlinux EXPORT_SYMBOL
 0x00000000 blk_update_request vmlinux EXPORT_SYMBOL_GPL
 0x00000000 jbd2_journal_put_journal_head vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000 __oob_irq_restore vmlinux EXPORT_SYMBOL
 0x00000000 __wake_up_bit vmlinux EXPORT_SYMBOL
 0x00000000 get_task_cred vmlinux EXPORT_SYMBOL
 0x00000000 xdr_encode_array2 vmlinux EXPORT_SYMBOL_GPL
@@ -5071,7 +5095,6 @@
 0x00000000 udp_tunnel6_xmit_skb vmlinux EXPORT_SYMBOL_GPL
 0x00000000 flow_block_cb_alloc vmlinux EXPORT_SYMBOL
 0x00000000 sysctl_max_skb_frags vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_phy_disable vmlinux EXPORT_SYMBOL
 0x00000000 mtd_lock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dma_resv_test_signaled_rcu vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_gem_object_free vmlinux EXPORT_SYMBOL
@@ -5169,7 +5192,6 @@
 0x00000000 param_get_int vmlinux EXPORT_SYMBOL
 0x00000000 skb_ext_add vmlinux EXPORT_SYMBOL
 0x00000000 typec_set_mode vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_process_rxdesc_edma vmlinux EXPORT_SYMBOL
 0x00000000 vxlan_dev_create vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dma_buf_dynamic_attach vmlinux EXPORT_SYMBOL_GPL
 0x00000000 start_tty vmlinux EXPORT_SYMBOL
@@ -5178,6 +5200,7 @@
 0x00000000 shash_ahash_finup vmlinux EXPORT_SYMBOL_GPL
 0x00000000 user_update vmlinux EXPORT_SYMBOL_GPL
 0x00000000 configfs_unregister_group vmlinux EXPORT_SYMBOL
+0x00000000 xnvfile_init_regular vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ktime_get_with_offset vmlinux EXPORT_SYMBOL_GPL
 0x00000000 resched_curr vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nf_ct_get_tuplepr vmlinux EXPORT_SYMBOL_GPL
@@ -5235,6 +5258,7 @@
 0x00000000 radix_tree_gang_lookup_tag_slot vmlinux EXPORT_SYMBOL
 0x00000000 fuse_file_poll vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mnt_want_write_file vmlinux EXPORT_SYMBOL_GPL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000 rtdm_sem_destroy vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pause_cpus vmlinux EXPORT_SYMBOL_GPL
 0x00000000 hci_register_cb vmlinux EXPORT_SYMBOL
 0x00000000 ip_ct_attach vmlinux EXPORT_SYMBOL
@@ -5252,7 +5276,6 @@
 0x00000000 snd_pcm_fill_iec958_consumer vmlinux EXPORT_SYMBOL_GPL
 0x00000000 of_fwnode_ops vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dev_pm_opp_get_opp_count vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_gettsf32 vmlinux EXPORT_SYMBOL
 0x00000000 kbase_pm_disable_interrupts vmlinux EXPORT_SYMBOL
 0x00000000 regulator_set_soft_start_regmap vmlinux EXPORT_SYMBOL_GPL
 0x00000000 regulator_get_error_flags vmlinux EXPORT_SYMBOL_GPL
@@ -5261,10 +5284,8 @@
 0x00000000 svc_seq_show vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_br_fdb_update vmlinux EXPORT_SYMBOL_GPL
 0x00000000 asoc_simple_canonicalize_platform vmlinux EXPORT_SYMBOL_GPL
-0x00000000 od_unregister_powersave_bias_handler vmlinux EXPORT_SYMBOL_GPL
 0x00000000 vb2_core_queue_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_amd_prefetch_quirk vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath_key_delete vmlinux EXPORT_SYMBOL
 0x00000000 scsi_free_host_dev vmlinux EXPORT_SYMBOL
 0x00000000 pm_print_active_wakeup_sources vmlinux EXPORT_SYMBOL_GPL
 0x00000000 driver_register vmlinux EXPORT_SYMBOL_GPL
@@ -5288,7 +5309,6 @@
 0x00000000 of_dma_is_coherent vmlinux EXPORT_SYMBOL_GPL
 0x00000000 input_ff_destroy vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_wwan_chars_in_buffer vmlinux EXPORT_SYMBOL
-0x00000000 ath_is_49ghz_allowed vmlinux EXPORT_SYMBOL
 0x00000000 mfd_cell_disable vmlinux EXPORT_SYMBOL
 0x00000000 platform_get_irq_byname vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ZSTD_decompressContinue vmlinux EXPORT_SYMBOL
@@ -5309,6 +5329,7 @@
 0x00000000 scsi_verify_blk_ioctl vmlinux EXPORT_SYMBOL
 0x00000000 configfs_register_group vmlinux EXPORT_SYMBOL
 0x00000000 pagecache_isize_extended vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_fd_read vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __warn_printk vmlinux EXPORT_SYMBOL
 0x00000000 ieee80211_beacon_get_tim vmlinux EXPORT_SYMBOL
 0x00000000 ipv6_dup_options vmlinux EXPORT_SYMBOL_GPL
@@ -5324,7 +5345,6 @@
 0x00000000 ieee80211_tkip_add_iv vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nf_conntrack_locks vmlinux EXPORT_SYMBOL_GPL
 0x00000000 iio_read_channel_offset vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath_hw_setbssidmask vmlinux EXPORT_SYMBOL
 0x00000000 configfs_remove_default_groups vmlinux EXPORT_SYMBOL
 0x00000000 zap_vma_ptes vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_pelt_rt_tp vmlinux EXPORT_SYMBOL_GPL
@@ -5349,8 +5369,6 @@
 0x00000000 rpc_init_pipe_dir_object vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ip_mc_leave_group vmlinux EXPORT_SYMBOL
 0x00000000 netif_skb_features vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_settsf64 vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_gettsf64 vmlinux EXPORT_SYMBOL
 0x00000000 pm_runtime_set_memalloc_noio vmlinux EXPORT_SYMBOL_GPL
 0x00000000 iommu_fwspec_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 devm_gpiod_get_array vmlinux EXPORT_SYMBOL_GPL
@@ -5410,7 +5428,6 @@
 0x00000000 dev_addr_flush vmlinux EXPORT_SYMBOL
 0x00000000 nvmem_device_write vmlinux EXPORT_SYMBOL_GPL
 0x00000000 thermal_notify_framework vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_init_global_settings vmlinux EXPORT_SYMBOL
 0x00000000 alloc_can_skb vmlinux EXPORT_SYMBOL_GPL
 0x00000000 recover_lost_locks vmlinux EXPORT_SYMBOL_GPL
 0x00000000 register_nfs_version vmlinux EXPORT_SYMBOL_GPL
@@ -5472,13 +5489,13 @@
 0x00000000 usb_wwan_write vmlinux EXPORT_SYMBOL
 0x00000000 usb_unlink_anchored_urbs vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usbnet_write_cmd vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_getchan_noise vmlinux EXPORT_SYMBOL
 0x00000000 xas_nomem vmlinux EXPORT_SYMBOL_GPL
 0x00000000 zlib_inflateEnd vmlinux EXPORT_SYMBOL
 0x00000000 linear_range_values_in_range_array vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __blkdev_issue_discard vmlinux EXPORT_SYMBOL
 0x00000000 __tracepoint_task_rename vmlinux EXPORT_SYMBOL_GPL
 0x00000000 contig_page_data vmlinux EXPORT_SYMBOL
+0x00000000 xnselect_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_num_bc_slots vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xt_check_proc_name vmlinux EXPORT_SYMBOL
 0x00000000 xt_unregister_target vmlinux EXPORT_SYMBOL
@@ -5511,6 +5528,7 @@
 0x00000000 part_end_io_acct vmlinux EXPORT_SYMBOL_GPL
 0x00000000 simple_transaction_read vmlinux EXPORT_SYMBOL
 0x00000000 mempool_free_slab vmlinux EXPORT_SYMBOL
+0x00000000 xnthread_suspend vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tick_nohz_get_sleep_length vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __memset_io vmlinux EXPORT_SYMBOL
 0x00000000 snd_ctl_register_ioctl_compat vmlinux EXPORT_SYMBOL
@@ -5551,7 +5569,6 @@
 0x00000000 snd_dmaengine_pcm_close vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dm_table_event vmlinux EXPORT_SYMBOL
 0x00000000 devm_watchdog_register_device vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_abort_tx_dma vmlinux EXPORT_SYMBOL
 0x00000000 spi_controller_dma_map_mem_op_data vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ata_pack_xfermask vmlinux EXPORT_SYMBOL_GPL
 0x00000000 class_dev_iter_next vmlinux EXPORT_SYMBOL_GPL
@@ -5596,6 +5613,7 @@
 0x00000000 noop_direct_IO vmlinux EXPORT_SYMBOL_GPL
 0x00000000 vm_unmapped_area vmlinux EXPORT_SYMBOL_GPL
 0x00000000 shmem_file_setup_with_mnt vmlinux EXPORT_SYMBOL_GPL
+0x00000000 dovetail_init_altsched vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rcu_force_quiescent_state vmlinux EXPORT_SYMBOL_GPL
 0x00000000 irq_domain_push_irq vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcp_make_synack vmlinux EXPORT_SYMBOL
@@ -5670,6 +5688,7 @@
 0x00000000 __tracepoint_block_rq_remap vmlinux EXPORT_SYMBOL_GPL
 0x00000000 proc_mkdir_mode vmlinux EXPORT_SYMBOL
 0x00000000 tag_pages_for_writeback vmlinux EXPORT_SYMBOL
+0x00000000 xnregistry_bind vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cfg80211_check_station_change vmlinux EXPORT_SYMBOL
 0x00000000 rpcauth_destroy_credcache vmlinux EXPORT_SYMBOL_GPL
 0x00000000 flow_indr_dev_register vmlinux EXPORT_SYMBOL
@@ -5690,7 +5709,6 @@
 0x00000000 cfg80211_assoc_timeout vmlinux EXPORT_SYMBOL
 0x00000000 __napi_alloc_skb vmlinux EXPORT_SYMBOL
 0x00000000 usb_disable_xhci_ports vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath_hw_get_listen_time vmlinux EXPORT_SYMBOL
 0x00000000 scsi_report_opcode vmlinux EXPORT_SYMBOL
 0x00000000 pm_clk_create vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_bridge_chain_enable vmlinux EXPORT_SYMBOL
@@ -5713,6 +5731,7 @@
 0x00000000 nfs_pageio_init_read vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sysfs_unbreak_active_protection vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __mnt_is_readonly vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_task_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __tracepoint_cpu_idle vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcp_close vmlinux EXPORT_SYMBOL
 0x00000000 inet_add_offload vmlinux EXPORT_SYMBOL
@@ -5723,7 +5742,6 @@
 0x00000000 get_thermal_instance vmlinux EXPORT_SYMBOL
 0x00000000 config_ep_by_speed_and_alt vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_register_dev vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ar9003_mci_get_interrupt vmlinux EXPORT_SYMBOL
 0x00000000 scsi_dma_map vmlinux EXPORT_SYMBOL
 0x00000000 bprintf vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xz_dec_init vmlinux EXPORT_SYMBOL
@@ -5738,7 +5756,6 @@
 0x00000000 ping_getfrag vmlinux EXPORT_SYMBOL_GPL
 0x00000000 skb_csum_hwoffload_help vmlinux EXPORT_SYMBOL
 0x00000000 of_root vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_cmn_get_channel vmlinux EXPORT_SYMBOL
 0x00000000 __tracepoint_mali_pm_status vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_edid_duplicate vmlinux EXPORT_SYMBOL
 0x00000000 __drm_mm_interval_first vmlinux EXPORT_SYMBOL
@@ -5748,6 +5765,8 @@
 0x00000000 register_framebuffer vmlinux EXPORT_SYMBOL
 0x00000000 crypto_unregister_templates vmlinux EXPORT_SYMBOL_GPL
 0x00000000 migrate_page_states vmlinux EXPORT_SYMBOL
+0x00000000 xnsynch_init vmlinux EXPORT_SYMBOL_GPL
+0x00000000 ___xnlock_get vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __traceiter_pelt_irq_tp vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __traceiter_pelt_cfs_tp vmlinux EXPORT_SYMBOL_GPL
 0x00000000 bpf_sk_storage_diag_put vmlinux EXPORT_SYMBOL_GPL
@@ -5780,7 +5799,6 @@
 0x00000000 nfs_debug vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xfrm_replay_seqhi vmlinux EXPORT_SYMBOL
 0x00000000 tcp_enter_cwr vmlinux EXPORT_SYMBOL
-0x00000000 rockchip_dmcfreq_vop_bandwidth_init vmlinux EXPORT_SYMBOL
 0x00000000 hidraw_report_event vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dm_bitset_del vmlinux EXPORT_SYMBOL_GPL
 0x00000000 v4l2_dv_timings_aspect_ratio vmlinux EXPORT_SYMBOL_GPL
@@ -5790,6 +5808,7 @@
 0x00000000 mtd_unlock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nvme_put_ns vmlinux EXPORT_SYMBOL_GPL NVME_TARGET_PASSTHRU
 0x00000000 sbitmap_del_wait_queue vmlinux EXPORT_SYMBOL_GPL
+0x00000000 cobalt_thread_find_local vmlinux EXPORT_SYMBOL_GPL
 0x00000000 memunmap vmlinux EXPORT_SYMBOL
 0x00000000 memremap vmlinux EXPORT_SYMBOL
 0x00000000 call_rcu vmlinux EXPORT_SYMBOL_GPL
@@ -5827,6 +5846,7 @@
 0x00000000 drm_client_framebuffer_delete vmlinux EXPORT_SYMBOL
 0x00000000 find_next_zero_bit vmlinux EXPORT_SYMBOL
 0x00000000 inc_nlink vmlinux EXPORT_SYMBOL
+0x00000000 xnthread_relax vmlinux EXPORT_SYMBOL_GPL
 0x00000000 smpboot_unregister_percpu_thread vmlinux EXPORT_SYMBOL_GPL
 0x00000000 wiphy_free vmlinux EXPORT_SYMBOL
 0x00000000 __netlink_dump_start vmlinux EXPORT_SYMBOL
@@ -5840,6 +5860,7 @@
 0x00000000 mnt_drop_write vmlinux EXPORT_SYMBOL_GPL
 0x00000000 vmf_insert_pfn_prot vmlinux EXPORT_SYMBOL
 0x00000000 __alloc_percpu_gfp vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_fd_get_setsockaddr_args vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_uaddr2sockaddr vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xt_compat_unlock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cpufreq_freq_transition_begin vmlinux EXPORT_SYMBOL_GPL
@@ -5858,7 +5879,6 @@
 0x00000000 perf_event_pause vmlinux EXPORT_SYMBOL_GPL
 0x00000000 synchronize_srcu_expedited vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcp_cong_avoid_ai vmlinux EXPORT_SYMBOL_GPL
-0x00000000 cpuidle_resume_and_unlock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __traceiter_vb2_qbuf vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_altnum_to_altsetting vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pm_clk_runtime_resume vmlinux EXPORT_SYMBOL_GPL
@@ -5869,6 +5889,7 @@
 0x00000000 nfs_close_context vmlinux EXPORT_SYMBOL_GPL
 0x00000000 follow_down vmlinux EXPORT_SYMBOL
 0x00000000 filemap_write_and_wait_range vmlinux EXPORT_SYMBOL
+0x00000000 xnintr_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_rpm_resume vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __traceiter_rpm_idle vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __traceiter_cpu_idle vmlinux EXPORT_SYMBOL_GPL
@@ -5915,7 +5936,6 @@
 0x00000000 sip_smc_request_share_mem vmlinux EXPORT_SYMBOL_GPL
 0x00000000 thermal_cooling_device_unregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __traceiter_dwc3_complete_trb vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath_gen_timer_free vmlinux EXPORT_SYMBOL
 0x00000000 drm_framebuffer_plane_width vmlinux EXPORT_SYMBOL
 0x00000000 drm_gem_dmabuf_vunmap vmlinux EXPORT_SYMBOL
 0x00000000 drm_atomic_helper_connector_reset vmlinux EXPORT_SYMBOL
@@ -5934,6 +5954,7 @@
 0x00000000 pcie_update_link_speed vmlinux EXPORT_SYMBOL_GPL
 0x00000000 blk_mq_start_request vmlinux EXPORT_SYMBOL
 0x00000000 nfs_init_cinfo vmlinux EXPORT_SYMBOL_GPL
+0x00000000 cobalt_clock_deregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 hci_suspend_dev vmlinux EXPORT_SYMBOL
 0x00000000 tcp_v4_send_check vmlinux EXPORT_SYMBOL
 0x00000000 nf_ct_expect_find_get vmlinux EXPORT_SYMBOL_GPL
@@ -5988,7 +6009,6 @@
 0x00000000 rockchip_cpufreq_opp_set_rate vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dev_pm_opp_set_clkname vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_usb_gadget_connect vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_set_interrupts vmlinux EXPORT_SYMBOL
 0x00000000 __tracepoint_spi_transfer_start vmlinux EXPORT_SYMBOL
 0x00000000 ahci_reset_controller vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dma_resv_get_singleton vmlinux EXPORT_SYMBOL_GPL
@@ -6062,7 +6082,6 @@
 0x00000000 perf_aux_output_end vmlinux EXPORT_SYMBOL_GPL
 0x00000000 bpf_prog_select_runtime vmlinux EXPORT_SYMBOL_GPL
 0x00000000 kmsg_dump_unregister vmlinux EXPORT_SYMBOL_GPL
-0x00000000 cpu_latency_qos_add_request vmlinux EXPORT_SYMBOL_GPL
 0x00000000 override_creds vmlinux EXPORT_SYMBOL
 0x00000000 system_long_wq vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __ip_queue_xmit vmlinux EXPORT_SYMBOL
@@ -6095,6 +6114,8 @@
 0x00000000 __bio_clone_fast vmlinux EXPORT_SYMBOL
 0x00000000 kern_unmount vmlinux EXPORT_SYMBOL
 0x00000000 total_swapcache_pages vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnthread_join vmlinux EXPORT_SYMBOL_GPL
+0x00000000 nksched vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ip_vs_proto_get vmlinux EXPORT_SYMBOL
 0x00000000 __traceiter_br_fdb_external_learn_add vmlinux EXPORT_SYMBOL_GPL
 0x00000000 v4l2_ctrl_query_fill vmlinux EXPORT_SYMBOL
@@ -6138,6 +6159,7 @@
 0x00000000 nfs4_schedule_lease_recovery vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __tracepoint_android_fs_datawrite_start vmlinux EXPORT_SYMBOL
 0x00000000 kvrealloc vmlinux EXPORT_SYMBOL
+0x00000000 xnregistry_vfreg_ops vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cfg80211_sta_opmode_change_notify vmlinux EXPORT_SYMBOL
 0x00000000 __xfrm_state_destroy vmlinux EXPORT_SYMBOL
 0x00000000 snd_soc_lookup_component vmlinux EXPORT_SYMBOL_GPL
@@ -6149,6 +6171,7 @@
 0x00000000 pm_genpd_remove_subdomain vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tty_port_init vmlinux EXPORT_SYMBOL
 0x00000000 complete_request_key vmlinux EXPORT_SYMBOL
+0x00000000 cobalt_machine_cpudata vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __traceiter_sched_util_est_cfs_tp vmlinux EXPORT_SYMBOL_GPL
 0x00000000 panic_timeout vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nf_queue vmlinux EXPORT_SYMBOL_GPL
@@ -6156,7 +6179,6 @@
 0x00000000 extcon_dev_unregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 extcon_set_state_sync vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_hcd_resume_root_hub vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ar9003_paprd_init_table vmlinux EXPORT_SYMBOL
 0x00000000 ZSTD_initDStream vmlinux EXPORT_SYMBOL
 0x00000000 ZSTD_initCStream vmlinux EXPORT_SYMBOL
 0x00000000 aead_init_geniv vmlinux EXPORT_SYMBOL_GPL
@@ -6164,7 +6186,6 @@
 0x00000000 jbd2_journal_grab_journal_head vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000 noop_fsync vmlinux EXPORT_SYMBOL
 0x00000000 list_lru_destroy vmlinux EXPORT_SYMBOL_GPL
-0x00000000 cpu_latency_qos_update_request vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cfg80211_ibss_joined vmlinux EXPORT_SYMBOL
 0x00000000 rpc_count_iostats vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_destroy_pipe_data vmlinux EXPORT_SYMBOL_GPL
@@ -6178,6 +6199,7 @@
 0x00000000 crypto_hash_walk_first vmlinux EXPORT_SYMBOL_GPL
 0x00000000 alloc_anon_inode vmlinux EXPORT_SYMBOL
 0x00000000 vfs_mkobj vmlinux EXPORT_SYMBOL
+0x00000000 tick_notify_proxy vmlinux EXPORT_SYMBOL_GPL
 0x00000000 inet_sendpage vmlinux EXPORT_SYMBOL
 0x00000000 dev_set_allmulti vmlinux EXPORT_SYMBOL
 0x00000000 dev_get_by_index_rcu vmlinux EXPORT_SYMBOL
@@ -6191,6 +6213,8 @@
 0x00000000 xxh32_digest vmlinux EXPORT_SYMBOL
 0x00000000 __bitmap_intersects vmlinux EXPORT_SYMBOL
 0x00000000 ksize vmlinux EXPORT_SYMBOL
+0x00000000 xnvfile_get_string vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnthread_killall vmlinux EXPORT_SYMBOL_GPL
 0x00000000 kstat vmlinux EXPORT_SYMBOL
 0x00000000 reg_query_regdb_wmm vmlinux EXPORT_SYMBOL
 0x00000000 hci_conn_security vmlinux EXPORT_SYMBOL
@@ -6275,6 +6299,7 @@
 0x00000000 sg_free_table vmlinux EXPORT_SYMBOL
 0x00000000 gf128mul_free_64k vmlinux EXPORT_SYMBOL
 0x00000000 locks_remove_posix vmlinux EXPORT_SYMBOL
+0x00000000 registry_obj_slots vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ring_buffer_iter_reset vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __skb_recv_udp vmlinux EXPORT_SYMBOL
 0x00000000 __phy_read_mmd vmlinux EXPORT_SYMBOL
@@ -6346,6 +6371,7 @@
 0x00000000 key_instantiate_and_link vmlinux EXPORT_SYMBOL
 0x00000000 nfs_check_flags vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dentry_path_raw vmlinux EXPORT_SYMBOL
+0x00000000 __hybrid_spin_lock_irqsave vmlinux EXPORT_SYMBOL
 0x00000000 ipv6_sock_mc_drop vmlinux EXPORT_SYMBOL
 0x00000000 xfrm_sad_getinfo vmlinux EXPORT_SYMBOL
 0x00000000 nf_logger_find_get vmlinux EXPORT_SYMBOL_GPL
@@ -6361,6 +6387,7 @@
 0x00000000 simple_open vmlinux EXPORT_SYMBOL
 0x00000000 single_open vmlinux EXPORT_SYMBOL
 0x00000000 __page_file_mapping vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnclock_apply_offset vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xfrm6_find_1stfragopt vmlinux EXPORT_SYMBOL
 0x00000000 snd_request_card vmlinux EXPORT_SYMBOL
 0x00000000 of_graph_get_port_parent vmlinux EXPORT_SYMBOL
@@ -6407,6 +6434,8 @@
 0x00000000 rockchip_nvmem_cell_read_u8 vmlinux EXPORT_SYMBOL
 0x00000000 dmaengine_desc_set_metadata_len vmlinux EXPORT_SYMBOL_GPL
 0x00000000 utf32_to_utf8 vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_mutex_destroy vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_task_busy_sleep vmlinux EXPORT_SYMBOL_GPL
 0x00000000 blk_trace_remove vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cleanup_srcu_struct vmlinux EXPORT_SYMBOL_GPL
 0x00000000 groups_free vmlinux EXPORT_SYMBOL
@@ -6421,17 +6450,18 @@
 0x00000000 gpiod_set_transitory vmlinux EXPORT_SYMBOL_GPL
 0x00000000 strreplace vmlinux EXPORT_SYMBOL
 0x00000000 gf128mul_64k_bbe vmlinux EXPORT_SYMBOL
+0x00000000 cobalt_add_state_chain vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ieee80211_stop_rx_ba_session vmlinux EXPORT_SYMBOL
 0x00000000 inet6addr_validator_notifier_call_chain vmlinux EXPORT_SYMBOL
 0x00000000 ip_sock_set_recverr vmlinux EXPORT_SYMBOL
 0x00000000 ip_vs_scheduler_err vmlinux EXPORT_SYMBOL
 0x00000000 snd_soc_link_compr_startup vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_soc_dai_set_bclk_ratio vmlinux EXPORT_SYMBOL_GPL
+0x00000000 cobalt_push_personality vmlinux EXPORT_SYMBOL_GPL
 0x00000000 _raw_read_unlock_bh vmlinux EXPORT_SYMBOL
 0x00000000 rfkill_register vmlinux EXPORT_SYMBOL
 0x00000000 cfg80211_cqm_beacon_loss_notify vmlinux EXPORT_SYMBOL
 0x00000000 neigh_parms_alloc vmlinux EXPORT_SYMBOL
-0x00000000 cpuidle_unregister_device vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dm_btree_cursor_begin vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_os_desc_prepare_interf_dir vmlinux EXPORT_SYMBOL
 0x00000000 usb_disabled vmlinux EXPORT_SYMBOL_GPL
@@ -6453,6 +6483,7 @@
 0x00000000 iommu_uapi_sva_bind_gpasid vmlinux EXPORT_SYMBOL_GPL
 0x00000000 asymmetric_key_id_partial vmlinux EXPORT_SYMBOL_GPL
 0x00000000 bpf_prog_inc vmlinux EXPORT_SYMBOL_GPL
+0x00000000 do_raw_spin_lock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cpupri_find_fitness vmlinux EXPORT_SYMBOL_GPL
 0x00000000 io_schedule vmlinux EXPORT_SYMBOL
 0x00000000 remove_cpu vmlinux EXPORT_SYMBOL_GPL
@@ -6466,7 +6497,6 @@
 0x00000000 tee_client_close_context vmlinux EXPORT_SYMBOL_GPL
 0x00000000 iio_device_claim_direct_mode vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sdhci_runtime_resume_host vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_beaconinit vmlinux EXPORT_SYMBOL
 0x00000000 devm_regmap_del_irq_chip vmlinux EXPORT_SYMBOL_GPL
 0x00000000 devm_regmap_add_irq_chip vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pm_wakeup_ws_event vmlinux EXPORT_SYMBOL_GPL
@@ -6541,13 +6571,13 @@
 0x00000000 fb_destroy_modedb vmlinux EXPORT_SYMBOL
 0x00000000 __sg_page_iter_start vmlinux EXPORT_SYMBOL
 0x00000000 fsstack_copy_attr_all vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnintr_enable vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xfrm_init_replay vmlinux EXPORT_SYMBOL
 0x00000000 __nf_ct_try_assign_helper vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sock_zerocopy_put vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_jack_report vmlinux EXPORT_SYMBOL
 0x00000000 sdio_signal_irq vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dev_pm_opp_get_opp_table vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath_regd_get_band_ctl vmlinux EXPORT_SYMBOL
 0x00000000 scsi_command_normalize_sense vmlinux EXPORT_SYMBOL
 0x00000000 devm_kmalloc vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __drm_atomic_helper_disable_plane vmlinux EXPORT_SYMBOL
@@ -6608,6 +6638,8 @@
 0x00000000 crypto_register_shashes vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nfs_pageio_reset_read_mds vmlinux EXPORT_SYMBOL_GPL
 0x00000000 get_anon_bdev vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_event_destroy vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnsched_class_rt vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ring_buffer_event_length vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ieee80211_gtk_rekey_notify vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sock_kfree_s vmlinux EXPORT_SYMBOL
@@ -6624,6 +6656,7 @@
 0x00000000 empty_aops vmlinux EXPORT_SYMBOL
 0x00000000 zs_malloc vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_kfree vmlinux EXPORT_SYMBOL
+0x00000000 xnregistry_vfsnap_ops vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ktime_get_coarse_real_ts64 vmlinux EXPORT_SYMBOL
 0x00000000 irq_domain_pop_irq vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __task_pid_nr_ns vmlinux EXPORT_SYMBOL
@@ -6717,6 +6750,7 @@
 0x00000000 sgl_alloc_order vmlinux EXPORT_SYMBOL
 0x00000000 aead_exit_geniv vmlinux EXPORT_SYMBOL_GPL
 0x00000000 d_move vmlinux EXPORT_SYMBOL
+0x00000000 dovetail_start vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ring_buffer_read_start vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_wake_up_first vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ping_common_sendmsg vmlinux EXPORT_SYMBOL_GPL
@@ -6725,6 +6759,7 @@
 0x00000000 touch_atime vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000 evict_inodes vmlinux EXPORT_SYMBOL_GPL
 0x00000000 get_cpu_idle_time_us vmlinux EXPORT_SYMBOL_GPL
+0x00000000 __hybrid_spin_unlock_irq vmlinux EXPORT_SYMBOL
 0x00000000 ieee80211_unregister_hw vmlinux EXPORT_SYMBOL
 0x00000000 kernel_sendmsg_locked vmlinux EXPORT_SYMBOL
 0x00000000 device_set_wakeup_capable vmlinux EXPORT_SYMBOL_GPL
@@ -6794,11 +6829,9 @@
 0x00000000 rtnl_link_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 make_flow_keys_digest vmlinux EXPORT_SYMBOL
 0x00000000 sk_set_memalloc vmlinux EXPORT_SYMBOL_GPL
-0x00000000 gov_update_cpu_data vmlinux EXPORT_SYMBOL_GPL
 0x00000000 thermal_zone_device_enable vmlinux EXPORT_SYMBOL_GPL
 0x00000000 intlog2 vmlinux EXPORT_SYMBOL
 0x00000000 usb_gadget_activate vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_setrxabort vmlinux EXPORT_SYMBOL
 0x00000000 dma_buf_export vmlinux EXPORT_SYMBOL_GPL
 0x00000000 clk_unregister_gate vmlinux EXPORT_SYMBOL_GPL
 0x00000000 platform_irqchip_probe vmlinux EXPORT_SYMBOL_GPL
@@ -6837,7 +6870,6 @@
 0x00000000 __set_page_dirty_buffers vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000 unlock_two_nondirectories vmlinux EXPORT_SYMBOL
 0x00000000 __wake_up_locked vmlinux EXPORT_SYMBOL_GPL
-0x00000000 uclamp_eff_value vmlinux EXPORT_SYMBOL_GPL
 0x00000000 bt_sock_poll vmlinux EXPORT_SYMBOL
 0x00000000 dev_pre_changeaddr_notify vmlinux EXPORT_SYMBOL
 0x00000000 dev_queue_xmit_nit vmlinux EXPORT_SYMBOL_GPL
@@ -6851,7 +6883,6 @@
 0x00000000 put_nfs_open_context vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mod_zone_page_state vmlinux EXPORT_SYMBOL
 0x00000000 perf_event_refresh vmlinux EXPORT_SYMBOL_GPL
-0x00000000 sched_uclamp_used vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mini_qdisc_pair_swap vmlinux EXPORT_SYMBOL
 0x00000000 netdev_reset_tc vmlinux EXPORT_SYMBOL
 0x00000000 of_property_read_string_helper vmlinux EXPORT_SYMBOL_GPL
@@ -6977,6 +7008,7 @@
 0x00000000 gpiod_put_array vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __traceiter_kmalloc vmlinux EXPORT_SYMBOL
 0x00000000 pcpu_base_addr vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnthread_get_timeout vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nf_conntrack_helper_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_seq_root vmlinux EXPORT_SYMBOL
 0x00000000 media_graph_walk_start vmlinux EXPORT_SYMBOL_GPL
@@ -7074,7 +7106,6 @@
 0x00000000 sock_no_sendpage vmlinux EXPORT_SYMBOL
 0x00000000 iio_read_channel_raw vmlinux EXPORT_SYMBOL_GPL
 0x00000000 led_put vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_startpcureceive vmlinux EXPORT_SYMBOL
 0x00000000 __ctzdi2 vmlinux EXPORT_SYMBOL
 0x00000000 __clzdi2 vmlinux EXPORT_SYMBOL
 0x00000000 blk_pre_runtime_suspend vmlinux EXPORT_SYMBOL
@@ -7139,6 +7170,7 @@
 0x00000000 dm_bio_prison_destroy_v2 vmlinux EXPORT_SYMBOL_GPL
 0x00000000 video_device_alloc vmlinux EXPORT_SYMBOL
 0x00000000 kbase_pm_context_active vmlinux EXPORT_SYMBOL
+0x00000000 xntimer_get_overruns vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cpus_read_trylock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 udp_disconnect vmlinux EXPORT_SYMBOL
 0x00000000 ethnl_cable_test_pulse vmlinux EXPORT_SYMBOL_GPL
@@ -7148,7 +7180,6 @@
 0x00000000 devm_nvmem_cell_get vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_choose_configuration vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_free_streams vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_btcoex_init_scheme vmlinux EXPORT_SYMBOL
 0x00000000 ubi_do_get_device_info vmlinux EXPORT_SYMBOL_GPL
 0x00000000 regcache_cache_only vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pinctrl_gpio_set_config vmlinux EXPORT_SYMBOL_GPL
@@ -7156,7 +7187,6 @@
 0x00000000 addrconf_prefix_rcv_add_addr vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xt_compat_lock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 input_set_timestamp vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_gen_timer_stop vmlinux EXPORT_SYMBOL
 0x00000000 phy_set_asym_pause vmlinux EXPORT_SYMBOL
 0x00000000 mtd_add_partition vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fwnode_graph_get_remote_port vmlinux EXPORT_SYMBOL_GPL
@@ -7183,7 +7213,6 @@
 0x00000000 svc_max_payload vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_rawmidi_new vmlinux EXPORT_SYMBOL
 0x00000000 devm_of_platform_populate vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_updatetxtriglevel vmlinux EXPORT_SYMBOL
 0x00000000 pci_pci_problems vmlinux EXPORT_SYMBOL
 0x00000000 pcie_capability_read_dword vmlinux EXPORT_SYMBOL
 0x00000000 drop_super_exclusive vmlinux EXPORT_SYMBOL
@@ -7206,6 +7235,9 @@
 0x00000000 regulator_set_current_limit_regmap vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xas_find_conflict vmlinux EXPORT_SYMBOL_GPL
 0x00000000 proc_set_size vmlinux EXPORT_SYMBOL
+0x00000000 cobalt_get_context vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_sem_up vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnvfile_destroy vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dma_alloc_pages vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cfg80211_sched_scan_stopped vmlinux EXPORT_SYMBOL
 0x00000000 netif_rx_any_context vmlinux EXPORT_SYMBOL
@@ -7257,6 +7289,7 @@
 0x00000000 ZSTD_findDecompressedSize vmlinux EXPORT_SYMBOL
 0x00000000 bio_list_copy_data vmlinux EXPORT_SYMBOL
 0x00000000 nfs_commitdata_release vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_schedule_nrt_work vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tracing_on vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sock_gen_put vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tee_client_open_session vmlinux EXPORT_SYMBOL_GPL
@@ -7293,6 +7326,7 @@
 0x00000000 blk_queue_update_readahead vmlinux EXPORT_SYMBOL_GPL
 0x00000000 always_delete_dentry vmlinux EXPORT_SYMBOL
 0x00000000 setattr_copy vmlinux EXPORT_SYMBOL
+0x00000000 xnthread_set_schedparam vmlinux EXPORT_SYMBOL_GPL
 0x00000000 em_dev_unregister_perf_domain vmlinux EXPORT_SYMBOL_GPL
 0x00000000 cfg80211_probe_status vmlinux EXPORT_SYMBOL
 0x00000000 br_multicast_list_adjacent vmlinux EXPORT_SYMBOL_GPL
@@ -7379,6 +7413,7 @@
 0x00000000 mipi_dsi_packet_format_is_long vmlinux EXPORT_SYMBOL
 0x00000000 pcie_has_flr vmlinux EXPORT_SYMBOL_GPL
 0x00000000 utf8s_to_utf16s vmlinux EXPORT_SYMBOL
+0x00000000 xnsched_unlock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pvclock_gtod_register_notifier vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ns_capable vmlinux EXPORT_SYMBOL
 0x00000000 cfg80211_report_obss_beacon_khz vmlinux EXPORT_SYMBOL
@@ -7388,12 +7423,12 @@
 0x00000000 dm_cell_error vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dm_bufio_read vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_control_msg_recv vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath_bus_type_strings vmlinux EXPORT_SYMBOL
 0x00000000 clk_hw_register_clkdev vmlinux EXPORT_SYMBOL
 0x00000000 fb_deferred_io_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 debugfs_attr_read vmlinux EXPORT_SYMBOL_GPL
 0x00000000 slash_name vmlinux EXPORT_SYMBOL
 0x00000000 memblock_end_of_DRAM vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnthread_cancel vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ktime_get_coarse_with_offset vmlinux EXPORT_SYMBOL_GPL
 0x00000000 sched_trace_cfs_rq_avg vmlinux EXPORT_SYMBOL_GPL
 0x00000000 kernel_sigaction vmlinux EXPORT_SYMBOL
@@ -7433,14 +7468,13 @@
 0x00000000 v4l2_i2c_tuner_addrs vmlinux EXPORT_SYMBOL_GPL
 0x00000000 typec_altmode_get_plug vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_dwc3_event vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_cmn_init_channels_rates vmlinux EXPORT_SYMBOL
 0x00000000 mdio_device_reset vmlinux EXPORT_SYMBOL
 0x00000000 spi_statistics_add_transfer_stats vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drmm_kfree vmlinux EXPORT_SYMBOL
 0x00000000 drm_dp_channel_eq_ok vmlinux EXPORT_SYMBOL
 0x00000000 fb_sys_write vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fuse_dev_alloc_install vmlinux EXPORT_SYMBOL_GPL
-0x00000000 cpu_latency_qos_remove_request vmlinux EXPORT_SYMBOL_GPL
+0x00000000 xnsynch_peek_pendq vmlinux EXPORT_SYMBOL_GPL
 0x00000000 param_ops_long vmlinux EXPORT_SYMBOL
 0x00000000 rpc_add_pipe_dir_object vmlinux EXPORT_SYMBOL_GPL
 0x00000000 inet_csk_update_pmtu vmlinux EXPORT_SYMBOL_GPL
@@ -7472,7 +7506,6 @@
 0x00000000 dev_pm_opp_free_cpufreq_table vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dev_pm_opp_enable vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_string_ids_n vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ar9003_hw_bb_watchdog_check vmlinux EXPORT_SYMBOL
 0x00000000 phy_save_page vmlinux EXPORT_SYMBOL_GPL
 0x00000000 pm_runtime_force_resume vmlinux EXPORT_SYMBOL_GPL
 0x00000000 devm_pci_remap_cfgspace vmlinux EXPORT_SYMBOL
@@ -7481,6 +7514,7 @@
 0x00000000 vli_cmp vmlinux EXPORT_SYMBOL
 0x00000000 iomap_page_mkwrite vmlinux EXPORT_SYMBOL_GPL
 0x00000000 user_path_create vmlinux EXPORT_SYMBOL
+0x00000000 cobalt_signal_send vmlinux EXPORT_SYMBOL_GPL
 0x00000000 kthread_bind vmlinux EXPORT_SYMBOL
 0x00000000 xprt_disconnect_done vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ipv6_setsockopt vmlinux EXPORT_SYMBOL
@@ -7497,7 +7531,6 @@
 0x00000000 inet_frag_find vmlinux EXPORT_SYMBOL
 0x00000000 inetdev_by_index vmlinux EXPORT_SYMBOL
 0x00000000 __sk_backlog_rcv vmlinux EXPORT_SYMBOL
-0x00000000 rockchip_dmcfreq_write_unlock vmlinux EXPORT_SYMBOL
 0x00000000 fwnode_usb_role_switch_get vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_poison_anchored_urbs vmlinux EXPORT_SYMBOL_GPL
 0x00000000 con_is_visible vmlinux EXPORT_SYMBOL
@@ -7550,7 +7583,6 @@
 0x00000000 __alloc_disk_node vmlinux EXPORT_SYMBOL
 0x00000000 vm_unmap_aliases vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rockchip_wifi_reset vmlinux EXPORT_SYMBOL
-0x00000000 wireless_nlevent_flush vmlinux EXPORT_SYMBOL_GPL
 0x00000000 br_ip6_fragment vmlinux EXPORT_SYMBOL_GPL
 0x00000000 xfrm_state_lookup_byspi vmlinux EXPORT_SYMBOL
 0x00000000 netlink_capable vmlinux EXPORT_SYMBOL
@@ -7594,10 +7626,10 @@
 0x00000000 dmi_get_bios_year vmlinux EXPORT_SYMBOL
 0x00000000 power_supply_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_assign_descriptors vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_btcoex_enable vmlinux EXPORT_SYMBOL
 0x00000000 serial8250_register_8250_port vmlinux EXPORT_SYMBOL
 0x00000000 __tracepoint_kmalloc vmlinux EXPORT_SYMBOL
 0x00000000 perf_event_release_kernel vmlinux EXPORT_SYMBOL_GPL
+0x00000000 inband_irqs_disabled vmlinux EXPORT_SYMBOL
 0x00000000 destroy_workqueue vmlinux EXPORT_SYMBOL_GPL
 0x00000000 idle_notifier_unregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 inet_twsk_put vmlinux EXPORT_SYMBOL_GPL
@@ -7620,6 +7652,7 @@
 0x00000000 nfs_pgio_header_free vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fat_get_dotdot_entry vmlinux EXPORT_SYMBOL_GPL
 0x00000000 writeback_inodes_sb vmlinux EXPORT_SYMBOL
+0x00000000 xnclock_deregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 trace_printk_init_buffers vmlinux EXPORT_SYMBOL_GPL
 0x00000000 param_get_uint vmlinux EXPORT_SYMBOL
 0x00000000 get_wchan vmlinux EXPORT_SYMBOL_GPL
@@ -7629,13 +7662,11 @@
 0x00000000 compat_ptr_ioctl vmlinux EXPORT_SYMBOL
 0x00000000 page_put_link vmlinux EXPORT_SYMBOL
 0x00000000 release_pages vmlinux EXPORT_SYMBOL
-0x00000000 schedutil_cpu_util vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rpc_sleep_on vmlinux EXPORT_SYMBOL_GPL
 0x00000000 bt_sock_stream_recvmsg vmlinux EXPORT_SYMBOL
 0x00000000 mmc_cqe_start_req vmlinux EXPORT_SYMBOL
 0x00000000 cec_notifier_cec_adap_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rk628_audio_fifoints_enabled vmlinux EXPORT_SYMBOL
-0x00000000 ath9k_hw_gettxbuf vmlinux EXPORT_SYMBOL
 0x00000000 fixed_phy_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ata_id_c_string vmlinux EXPORT_SYMBOL_GPL
 0x00000000 dpm_resume_start vmlinux EXPORT_SYMBOL_GPL
@@ -7686,7 +7717,6 @@
 0x00000000 vb2_thread_stop vmlinux EXPORT_SYMBOL_GPL
 0x00000000 i2c_match_id vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_gadget_ep_match_desc vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ath9k_hw_gen_timer_start vmlinux EXPORT_SYMBOL
 0x00000000 drm_dp_mst_topology_mgr_destroy vmlinux EXPORT_SYMBOL
 0x00000000 devm_hwrng_unregister vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tty_wait_until_sent vmlinux EXPORT_SYMBOL
@@ -7708,7 +7738,6 @@
 0x00000000 alarm_forward vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __netif_schedule vmlinux EXPORT_SYMBOL
 0x00000000 mmc_retune_unpause vmlinux EXPORT_SYMBOL
-0x00000000 cpuidle_get_driver vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __SCK__tp_func_attach_device_to_domain vmlinux EXPORT_SYMBOL_GPL
 0x00000000 clk_fractional_divider_ops vmlinux EXPORT_SYMBOL_GPL
 0x00000000 generic_fh_to_parent vmlinux EXPORT_SYMBOL_GPL
@@ -7717,7 +7746,6 @@
 0x00000000 param_set_invbool vmlinux EXPORT_SYMBOL
 0x00000000 __inet_hash vmlinux EXPORT_SYMBOL
 0x00000000 iio_trigger_alloc vmlinux EXPORT_SYMBOL
-0x00000000 cpuidle_unregister_driver vmlinux EXPORT_SYMBOL_GPL
 0x00000000 i2c_register_driver vmlinux EXPORT_SYMBOL
 0x00000000 __traceiter_xhci_dbg_quirks vmlinux EXPORT_SYMBOL_GPL
 0x00000000 fixed_phy_unregister vmlinux EXPORT_SYMBOL_GPL
@@ -7765,6 +7793,7 @@
 0x00000000 amba_request_regions vmlinux EXPORT_SYMBOL
 0x00000000 devm_fwnode_gpiod_get_index vmlinux EXPORT_SYMBOL_GPL
 0x00000000 inode_dio_wait vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000 rtdm_dev_register vmlinux EXPORT_SYMBOL_GPL
 0x00000000 irq_chip_request_resources_parent vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snmp_fold_field vmlinux EXPORT_SYMBOL_GPL
 0x00000000 snd_soc_register_card vmlinux EXPORT_SYMBOL_GPL
@@ -7807,6 +7836,7 @@
 0x00000000 jbd2_journal_force_commit_nested vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000 seq_pad vmlinux EXPORT_SYMBOL
 0x00000000 seq_release_private vmlinux EXPORT_SYMBOL
+0x00000000 oob_stage vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ww_mutex_lock vmlinux EXPORT_SYMBOL
 0x00000000 snd_ctl_remove vmlinux EXPORT_SYMBOL
 0x00000000 __mdiobus_modify_changed vmlinux EXPORT_SYMBOL_GPL
@@ -7832,6 +7862,7 @@
 0x00000000 drm_i2c_encoder_mode_set vmlinux EXPORT_SYMBOL
 0x00000000 pcix_get_mmrbc vmlinux EXPORT_SYMBOL
 0x00000000 jbd2_journal_check_used_features vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000 xnthread_start vmlinux EXPORT_SYMBOL_GPL
 0x00000000 system_highpri_wq vmlinux EXPORT_SYMBOL_GPL
 0x00000000 ieee80211_send_eosp_nullfunc vmlinux EXPORT_SYMBOL
 0x00000000 auth_domain_lookup vmlinux EXPORT_SYMBOL_GPL
@@ -7846,7 +7877,6 @@
 0x00000000 fb_get_mode vmlinux EXPORT_SYMBOL
 0x00000000 __SCK__tp_func_tcp_send_reset vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mmc_detect_change vmlinux EXPORT_SYMBOL
-0x00000000 gov_attr_set_init vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcpm_is_toggling vmlinux EXPORT_SYMBOL_GPL
 0x00000000 drm_debugfs_remove_files vmlinux EXPORT_SYMBOL
 0x00000000 drm_mode_convert_to_umode vmlinux EXPORT_SYMBOL_GPL
@@ -7869,6 +7899,7 @@
 0x00000000 midgard_kbase_instr_hwcnt_request_dump vmlinux EXPORT_SYMBOL
 0x00000000 nfs_fs_type vmlinux EXPORT_SYMBOL_GPL
 0x00000000 nfs_server_insert_lists vmlinux EXPORT_SYMBOL_GPL
+0x00000000 rtdm_sem_select vmlinux EXPORT_SYMBOL_GPL
 0x00000000 irq_work_queue vmlinux EXPORT_SYMBOL_GPL
 0x00000000 resume_cpus vmlinux EXPORT_SYMBOL_GPL
 0x00000000 tcp_disconnect vmlinux EXPORT_SYMBOL
@@ -7931,7 +7962,6 @@
 0x00000000 v4l2_m2m_create_bufs vmlinux EXPORT_SYMBOL_GPL
 0x00000000 rndis_borrow_net vmlinux EXPORT_SYMBOL_GPL
 0x00000000 usb_composite_setup_continue vmlinux EXPORT_SYMBOL_GPL
-0x00000000 ar9003_mci_get_next_gpm_offset vmlinux EXPORT_SYMBOL
 0x00000000 dev_attr_sw_activity vmlinux EXPORT_SYMBOL_GPL
 0x00000000 __vfs_setxattr_locked vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mangle_path vmlinux EXPORT_SYMBOL
@@ -8019,6 +8049,7 @@
 0x00000000 tcpm_pd_transmit_complete vmlinux EXPORT_SYMBOL_GPL
 0x00000000 device_init_wakeup vmlinux EXPORT_SYMBOL_GPL
 0x00000000 clkdev_hw_alloc vmlinux EXPORT_SYMBOL
+0x00000000 rtdm_mutex_timedlock vmlinux EXPORT_SYMBOL_GPL
 0x00000000 avenrun vmlinux EXPORT_SYMBOL
 0x00000000 system_wq vmlinux EXPORT_SYMBOL
 0x00000000 nf_ct_extend_register vmlinux EXPORT_SYMBOL_GPL
@@ -8030,6 +8061,7 @@
 0x00000000 drm_gem_dmabuf_mmap vmlinux EXPORT_SYMBOL
 0x00000000 rockchip_clk_of_add_provider vmlinux EXPORT_SYMBOL_GPL
 0x00000000 mark_page_accessed vmlinux EXPORT_SYMBOL
+0x00000000 xnvfile_init_snapshot vmlinux
EXPORT_SYMBOL_GPL 0x00000000 ktime_get_raw_ts64 vmlinux EXPORT_SYMBOL 0x00000000 in6addr_any vmlinux EXPORT_SYMBOL 0x00000000 inet_protos vmlinux EXPORT_SYMBOL @@ -8044,7 +8076,6 @@ 0x00000000 __snd_usbmidi_create vmlinux EXPORT_SYMBOL 0x00000000 of_get_next_available_child vmlinux EXPORT_SYMBOL 0x00000000 hid_output_report vmlinux EXPORT_SYMBOL_GPL -0x00000000 cpuidle_get_cpu_driver vmlinux EXPORT_SYMBOL_GPL 0x00000000 __usb_create_hcd vmlinux EXPORT_SYMBOL_GPL 0x00000000 analogix_dp_audio_hw_params vmlinux EXPORT_SYMBOL_GPL 0x00000000 mpi_cmpabs vmlinux EXPORT_SYMBOL_GPL @@ -8083,7 +8114,6 @@ 0x00000000 file_ns_capable vmlinux EXPORT_SYMBOL 0x00000000 iio_trigger_notify_done vmlinux EXPORT_SYMBOL 0x00000000 v4l2_m2m_streamoff vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_numtxpending vmlinux EXPORT_SYMBOL 0x00000000 ata_xfer_mode2shift vmlinux EXPORT_SYMBOL_GPL 0x00000000 of_clk_set_defaults vmlinux EXPORT_SYMBOL_GPL 0x00000000 pci_find_next_capability vmlinux EXPORT_SYMBOL_GPL @@ -8122,6 +8152,7 @@ 0x00000000 mpi_alloc vmlinux EXPORT_SYMBOL_GPL 0x00000000 af_alg_get_rsgl vmlinux EXPORT_SYMBOL_GPL 0x00000000 mempool_create_node vmlinux EXPORT_SYMBOL +0x00000000 xnthread_set_slice vmlinux EXPORT_SYMBOL_GPL 0x00000000 tracepoint_probe_register_prio_may_exist vmlinux EXPORT_SYMBOL_GPL 0x00000000 irq_gc_set_wake vmlinux EXPORT_SYMBOL_GPL 0x00000000 find_get_pid vmlinux EXPORT_SYMBOL_GPL @@ -8129,8 +8160,6 @@ 0x00000000 dev_set_promiscuity vmlinux EXPORT_SYMBOL 0x00000000 snd_pcm_hw_limit_rates vmlinux EXPORT_SYMBOL 0x00000000 usb_stor_transparent_scsi_command vmlinux EXPORT_SYMBOL_GPL USB_STORAGE -0x00000000 ath9k_hw_btcoex_init_3wire vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_gpio_request_in vmlinux EXPORT_SYMBOL 0x00000000 scsi_bios_ptable vmlinux EXPORT_SYMBOL 0x00000000 dmabuf_page_pool_free vmlinux EXPORT_SYMBOL_GPL 0x00000000 pm_generic_resume vmlinux EXPORT_SYMBOL_GPL @@ -8168,7 +8197,6 @@ 0x00000000 usb_add_function vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_ep_autoconfig_release vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_phy_roothub_calibrate vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath_key_config vmlinux EXPORT_SYMBOL 0x00000000 genphy_c45_pma_setup_forced vmlinux EXPORT_SYMBOL_GPL 0x00000000 pm_generic_poweroff vmlinux EXPORT_SYMBOL_GPL 0x00000000 analogix_dp_remove vmlinux EXPORT_SYMBOL_GPL @@ -8213,7 +8241,6 @@ 0x00000000 v4l2_src_change_event_subdev_subscribe vmlinux EXPORT_SYMBOL_GPL 0x00000000 i2c_verify_client vmlinux EXPORT_SYMBOL 0x00000000 input_allocate_device vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_set_txpowerlimit vmlinux EXPORT_SYMBOL 0x00000000 pm_runtime_barrier vmlinux EXPORT_SYMBOL_GPL 0x00000000 dev_printk vmlinux EXPORT_SYMBOL 0x00000000 drm_dp_dsc_sink_supported_input_bpcs vmlinux EXPORT_SYMBOL @@ -8224,6 +8251,7 @@ 0x00000000 bitmap_parselist vmlinux EXPORT_SYMBOL 0x00000000 __insert_inode_hash vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 kfree_const vmlinux EXPORT_SYMBOL +0x00000000 xnclock_tick vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_cpu_frequency vmlinux EXPORT_SYMBOL_GPL 0x00000000 revert_creds vmlinux EXPORT_SYMBOL 0x00000000 fs_overflowgid vmlinux EXPORT_SYMBOL @@ -8248,6 +8276,7 @@ 0x00000000 pcie_get_speed_cap vmlinux EXPORT_SYMBOL 0x00000000 debug_locks_silent vmlinux EXPORT_SYMBOL_GPL 0x00000000 nfs_client_init_is_complete vmlinux EXPORT_SYMBOL_GPL +0x00000000 cobalt_call_state_chain vmlinux EXPORT_SYMBOL_GPL 0x00000000 blocking_notifier_call_chain vmlinux EXPORT_SYMBOL_GPL 0x00000000 ping_hash 
vmlinux EXPORT_SYMBOL_GPL 0x00000000 __napi_schedule_irqoff vmlinux EXPORT_SYMBOL @@ -8332,7 +8361,6 @@ 0x00000000 skb_set_owner_w vmlinux EXPORT_SYMBOL 0x00000000 thermal_zone_of_sensor_register vmlinux EXPORT_SYMBOL_GPL 0x00000000 vb2_queue_init vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath_rxbuf_alloc vmlinux EXPORT_SYMBOL 0x00000000 dma_heap_put vmlinux EXPORT_SYMBOL_GPL 0x00000000 kbase_ipa_control_handle_gpu_sleep_enter vmlinux EXPORT_SYMBOL 0x00000000 drm_mm_scan_remove_block vmlinux EXPORT_SYMBOL @@ -8369,6 +8397,7 @@ 0x00000000 __dec_node_page_state vmlinux EXPORT_SYMBOL 0x00000000 __inc_node_page_state vmlinux EXPORT_SYMBOL 0x00000000 __mod_node_page_state vmlinux EXPORT_SYMBOL +0x00000000 xnthread_set_clock vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet_frag_rbtree_purge vmlinux EXPORT_SYMBOL 0x00000000 skb_queue_tail vmlinux EXPORT_SYMBOL 0x00000000 dm_bitset_set_bit vmlinux EXPORT_SYMBOL_GPL @@ -8381,6 +8410,8 @@ 0x00000000 __register_nls vmlinux EXPORT_SYMBOL 0x00000000 register_sysctl vmlinux EXPORT_SYMBOL 0x00000000 vfs_test_lock vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_event_signal vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnarch_generic_full_divmod64 vmlinux EXPORT_SYMBOL_GPL 0x00000000 do_exit vmlinux EXPORT_SYMBOL_GPL 0x00000000 l3mdev_master_upper_ifindex_by_index_rcu vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_sta_register_airtime vmlinux EXPORT_SYMBOL @@ -8398,7 +8429,6 @@ 0x00000000 v4l2_set_edid_phys_addr vmlinux EXPORT_SYMBOL_GPL 0x00000000 media_create_intf_link vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_xhci_urb_enqueue vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_resume_interrupts vmlinux EXPORT_SYMBOL 0x00000000 __put_mtd_device vmlinux EXPORT_SYMBOL_GPL 0x00000000 __get_mtd_device vmlinux EXPORT_SYMBOL_GPL 0x00000000 mtd_wunit_to_pairing_info vmlinux EXPORT_SYMBOL_GPL @@ -8411,6 +8441,7 @@ 0x00000000 simple_dentry_operations vmlinux EXPORT_SYMBOL 0x00000000 __traceiter_device_pm_callback_end vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_cpu_frequency vmlinux EXPORT_SYMBOL_GPL +0x00000000 irq_switch_oob vmlinux EXPORT_SYMBOL_GPL 0x00000000 oops_in_progress vmlinux EXPORT_SYMBOL 0x00000000 nf_ct_helper_log vmlinux EXPORT_SYMBOL_GPL 0x00000000 netdev_lower_dev_get_private vmlinux EXPORT_SYMBOL @@ -8436,7 +8467,6 @@ 0x00000000 vb2_dma_sg_memops vmlinux EXPORT_SYMBOL_GPL 0x00000000 vb2_cma_sg_memops vmlinux EXPORT_SYMBOL_GPL 0x00000000 create_function_device vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_cmn_beacon_config_adhoc vmlinux EXPORT_SYMBOL 0x00000000 software_node_register_node_group vmlinux EXPORT_SYMBOL_GPL 0x00000000 attribute_container_classdev_to_container vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_put_dev vmlinux EXPORT_SYMBOL @@ -8452,7 +8482,6 @@ 0x00000000 kill_pid_usb_asyncio vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_find_sta_by_ifaddr vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_soc_dapm_info_pin_switch vmlinux EXPORT_SYMBOL_GPL -0x00000000 store_sampling_rate vmlinux EXPORT_SYMBOL_GPL 0x00000000 dm_bufio_write_dirty_buffers_async vmlinux EXPORT_SYMBOL_GPL 0x00000000 typec_switch_set vmlinux EXPORT_SYMBOL_GPL 0x00000000 typec_switch_put vmlinux EXPORT_SYMBOL_GPL @@ -8474,6 +8503,7 @@ 0x00000000 __SCK__tp_func_block_rq_issue vmlinux EXPORT_SYMBOL_GPL 0x00000000 nfs_file_llseek vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_kmalloc_node vmlinux EXPORT_SYMBOL +0x00000000 rtdm_mmap_iomem vmlinux EXPORT_SYMBOL_GPL 0x00000000 trace_seq_bitmask vmlinux EXPORT_SYMBOL_GPL 0x00000000 sched_clock_register vmlinux EXPORT_SYMBOL_GPL 0x00000000 
schedule_timeout vmlinux EXPORT_SYMBOL @@ -8514,7 +8544,6 @@ 0x00000000 iio_get_debugfs_dentry vmlinux EXPORT_SYMBOL_GPL 0x00000000 hid_open_report vmlinux EXPORT_SYMBOL_GPL 0x00000000 dev_pm_opp_of_cpumask_remove_table vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_check_nav vmlinux EXPORT_SYMBOL 0x00000000 phy_advertise_supported vmlinux EXPORT_SYMBOL 0x00000000 scsi_schedule_eh vmlinux EXPORT_SYMBOL_GPL 0x00000000 regmap_attach_dev vmlinux EXPORT_SYMBOL_GPL @@ -8528,7 +8557,6 @@ 0x00000000 ping_init_sock vmlinux EXPORT_SYMBOL_GPL 0x00000000 netdev_update_features vmlinux EXPORT_SYMBOL 0x00000000 dev_get_port_parent_id vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_btcoex_disable vmlinux EXPORT_SYMBOL 0x00000000 spi_async vmlinux EXPORT_SYMBOL_GPL 0x00000000 ata_sas_port_resume vmlinux EXPORT_SYMBOL_GPL 0x00000000 regmap_mmio_detach_clk vmlinux EXPORT_SYMBOL_GPL @@ -8558,6 +8586,7 @@ 0x00000000 kfree_strarray vmlinux EXPORT_SYMBOL_GPL 0x00000000 llist_del_first vmlinux EXPORT_SYMBOL_GPL 0x00000000 dec_zone_page_state vmlinux EXPORT_SYMBOL +0x00000000 xnvfile_init_link vmlinux EXPORT_SYMBOL_GPL 0x00000000 timespec64_to_jiffies vmlinux EXPORT_SYMBOL 0x00000000 sunrpc_cache_pipe_upcall_timeout vmlinux EXPORT_SYMBOL_GPL 0x00000000 nf_conntrack_max vmlinux EXPORT_SYMBOL_GPL @@ -8586,8 +8615,6 @@ 0x00000000 video_unregister_device vmlinux EXPORT_SYMBOL 0x00000000 __i2c_smbus_xfer vmlinux EXPORT_SYMBOL 0x00000000 typec_find_power_role vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_enable_interrupts vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_write_associd vmlinux EXPORT_SYMBOL 0x00000000 dw_hdmi_phy_reset vmlinux EXPORT_SYMBOL_GPL 0x00000000 mipi_dsi_packet_format_is_short vmlinux EXPORT_SYMBOL 0x00000000 drm_atomic_helper_dirtyfb vmlinux EXPORT_SYMBOL @@ -8597,6 +8624,7 @@ 0x00000000 blk_queue_update_dma_alignment vmlinux EXPORT_SYMBOL 0x00000000 fsnotify_wait_marks_destroyed vmlinux EXPORT_SYMBOL_GPL 0x00000000 hrtimer_try_to_cancel vmlinux EXPORT_SYMBOL_GPL +0x00000000 oob_irq_enable vmlinux EXPORT_SYMBOL 0x00000000 irq_chip_get_parent_state vmlinux EXPORT_SYMBOL_GPL 0x00000000 irq_chip_set_parent_state vmlinux EXPORT_SYMBOL_GPL 0x00000000 _raw_read_lock_irq vmlinux EXPORT_SYMBOL @@ -8613,6 +8641,7 @@ 0x00000000 blk_mq_quiesce_queue vmlinux EXPORT_SYMBOL_GPL 0x00000000 register_asymmetric_key_parser vmlinux EXPORT_SYMBOL_GPL 0x00000000 mod_node_page_state vmlinux EXPORT_SYMBOL +0x00000000 __rtdm_task_sleep vmlinux EXPORT_SYMBOL_GPL 0x00000000 irq_dispose_mapping vmlinux EXPORT_SYMBOL_GPL 0x00000000 pm_suspend vmlinux EXPORT_SYMBOL 0x00000000 usermodehelper_read_unlock vmlinux EXPORT_SYMBOL_GPL @@ -8650,7 +8679,6 @@ 0x00000000 skb_seq_read vmlinux EXPORT_SYMBOL 0x00000000 v4l2_ctrl_request_setup vmlinux EXPORT_SYMBOL 0x00000000 i2c_transfer vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_beaconq_setup vmlinux EXPORT_SYMBOL 0x00000000 scsi_device_lookup vmlinux EXPORT_SYMBOL 0x00000000 dma_buf_get_flags vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_i2c_encoder_prepare vmlinux EXPORT_SYMBOL @@ -8659,6 +8687,7 @@ 0x00000000 bitmap_find_next_zero_area_off vmlinux EXPORT_SYMBOL 0x00000000 skcipher_walk_aead_decrypt vmlinux EXPORT_SYMBOL_GPL 0x00000000 wbc_detach_inode vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnheap_set_name vmlinux EXPORT_SYMBOL_GPL 0x00000000 cgroup_path_ns vmlinux EXPORT_SYMBOL_GPL 0x00000000 param_set_int vmlinux EXPORT_SYMBOL 0x00000000 rockchip_wifi_country_code vmlinux EXPORT_SYMBOL @@ -8681,10 +8710,10 @@ 0x00000000 tty_port_register_device_attr_serdev vmlinux EXPORT_SYMBOL_GPL 0x00000000 
blk_mq_init_queue vmlinux EXPORT_SYMBOL 0x00000000 rsa_parse_pub_key vmlinux EXPORT_SYMBOL_GPL +0x00000000 unlock_stage vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_sched_stat_wait vmlinux EXPORT_SYMBOL_GPL 0x00000000 param_set_short vmlinux EXPORT_SYMBOL 0x00000000 v4l2_ctrl_handler_free vmlinux EXPORT_SYMBOL -0x00000000 ath9k_cmn_beacon_config_ap vmlinux EXPORT_SYMBOL 0x00000000 can_rx_offload_queue_tail vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_connector_oob_hotplug_event vmlinux EXPORT_SYMBOL 0x00000000 drm_dsc_pps_payload_pack vmlinux EXPORT_SYMBOL @@ -8746,6 +8775,7 @@ 0x00000000 pci_dev_run_wake vmlinux EXPORT_SYMBOL_GPL 0x00000000 nfs_sync_inode vmlinux EXPORT_SYMBOL_GPL 0x00000000 generic_fadvise vmlinux EXPORT_SYMBOL +0x00000000 __hybrid_spin_unlock_irqrestore vmlinux EXPORT_SYMBOL 0x00000000 rpc_alloc_iostats vmlinux EXPORT_SYMBOL_GPL 0x00000000 __fib6_flush_trees vmlinux EXPORT_SYMBOL 0x00000000 flow_rule_match_enc_ipv6_addrs vmlinux EXPORT_SYMBOL @@ -8763,6 +8793,7 @@ 0x00000000 phy_power_off vmlinux EXPORT_SYMBOL_GPL 0x00000000 mpi_sub_ui vmlinux EXPORT_SYMBOL_GPL 0x00000000 shmem_mark_page_lazyfree vmlinux EXPORT_SYMBOL_GPL +0x00000000 hard_preempt_enable vmlinux EXPORT_SYMBOL_GPL 0x00000000 mr_mfc_seq_next vmlinux EXPORT_SYMBOL 0x00000000 tcp_prot vmlinux EXPORT_SYMBOL 0x00000000 nf_ct_tmpl_alloc vmlinux EXPORT_SYMBOL_GPL @@ -8772,7 +8803,6 @@ 0x00000000 dvb_frontend_sleep_until vmlinux EXPORT_SYMBOL 0x00000000 media_entity_get_fwnode_pad vmlinux EXPORT_SYMBOL_GPL 0x00000000 rndis_msg_parser vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_cmn_reload_chainmask vmlinux EXPORT_SYMBOL 0x00000000 root_device_unregister vmlinux EXPORT_SYMBOL_GPL 0x00000000 mipi_dsi_turn_on_peripheral vmlinux EXPORT_SYMBOL 0x00000000 fuse_direct_io vmlinux EXPORT_SYMBOL_GPL @@ -8794,7 +8824,6 @@ 0x00000000 mempool_free vmlinux EXPORT_SYMBOL 0x00000000 llc_sap_open vmlinux EXPORT_SYMBOL 0x00000000 neigh_carrier_down vmlinux EXPORT_SYMBOL -0x00000000 cpuidle_governor_latency_req vmlinux EXPORT_SYMBOL_GPL 0x00000000 typec_unregister_altmode vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_stor_Bulk_reset vmlinux EXPORT_SYMBOL_GPL USB_STORAGE 0x00000000 attribute_container_unregister vmlinux EXPORT_SYMBOL_GPL @@ -8840,7 +8869,6 @@ 0x00000000 dvb_ringbuffer_write_user vmlinux EXPORT_SYMBOL 0x00000000 i2c_smbus_write_byte_data vmlinux EXPORT_SYMBOL 0x00000000 usb_unlink_urb vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath_hw_cycle_counters_update vmlinux EXPORT_SYMBOL 0x00000000 phylink_ethtool_nway_reset vmlinux EXPORT_SYMBOL_GPL 0x00000000 dev_pm_domain_start vmlinux EXPORT_SYMBOL_GPL 0x00000000 kbase_csf_firmware_trace_buffer_update_trace_enable_bit vmlinux EXPORT_SYMBOL @@ -8912,6 +8940,7 @@ 0x00000000 seq_hlist_next_percpu vmlinux EXPORT_SYMBOL 0x00000000 kfree_sensitive vmlinux EXPORT_SYMBOL 0x00000000 clear_page_dirty_for_io vmlinux EXPORT_SYMBOL +0x00000000 xnselect_destroy vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_sched_cpu_capacity_tp vmlinux EXPORT_SYMBOL_GPL 0x00000000 srcu_init_notifier_head vmlinux EXPORT_SYMBOL_GPL 0x00000000 __memcpy_fromio vmlinux EXPORT_SYMBOL @@ -8945,6 +8974,7 @@ 0x00000000 sha224_zero_message_hash vmlinux EXPORT_SYMBOL_GPL 0x00000000 write_dirty_buffer vmlinux EXPORT_SYMBOL 0x00000000 noop_llseek vmlinux EXPORT_SYMBOL +0x00000000 rtdm_timer_stop vmlinux EXPORT_SYMBOL_GPL 0x00000000 trace_array_put vmlinux EXPORT_SYMBOL_GPL 0x00000000 ring_buffer_entries_cpu vmlinux EXPORT_SYMBOL_GPL 0x00000000 kstat_irqs_cpu vmlinux EXPORT_SYMBOL_GPL @@ -8987,6 +9017,7 @@ 0x00000000 
jbd2_wait_inode_data vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 proc_mkdir vmlinux EXPORT_SYMBOL 0x00000000 lookup_positive_unlocked vmlinux EXPORT_SYMBOL +0x00000000 xnheap_vfree vmlinux EXPORT_SYMBOL_GPL 0x00000000 dummy_irq_chip vmlinux EXPORT_SYMBOL_GPL 0x00000000 cpu_mitigations_auto_nosmt vmlinux EXPORT_SYMBOL_GPL 0x00000000 devm_usb_get_phy_by_phandle vmlinux EXPORT_SYMBOL_GPL @@ -8996,6 +9027,7 @@ 0x00000000 phy_destroy vmlinux EXPORT_SYMBOL_GPL 0x00000000 get_cached_acl_rcu vmlinux EXPORT_SYMBOL 0x00000000 get_mem_cgroup_from_mm vmlinux EXPORT_SYMBOL +0x00000000 xnthread_get_period vmlinux EXPORT_SYMBOL_GPL 0x00000000 alarm_start_relative vmlinux EXPORT_SYMBOL_GPL 0x00000000 proc_dointvec_jiffies vmlinux EXPORT_SYMBOL 0x00000000 tcp_peek_len vmlinux EXPORT_SYMBOL @@ -9014,6 +9046,7 @@ 0x00000000 kstrtos16_from_user vmlinux EXPORT_SYMBOL 0x00000000 __bitmap_or vmlinux EXPORT_SYMBOL 0x00000000 get_dcookie vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_timer_start vmlinux EXPORT_SYMBOL_GPL 0x00000000 trace_event_raw_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 relay_late_setup_files vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_sched_switch vmlinux EXPORT_SYMBOL_GPL @@ -9025,12 +9058,12 @@ 0x00000000 sata_lpm_ignore_phy_events vmlinux EXPORT_SYMBOL_GPL 0x00000000 strspn vmlinux EXPORT_SYMBOL 0x00000000 strlen vmlinux EXPORT_SYMBOL +0x00000000 xntimer_start vmlinux EXPORT_SYMBOL_GPL 0x00000000 trace_array_set_clr_event vmlinux EXPORT_SYMBOL_GPL 0x00000000 rfkill_destroy vmlinux EXPORT_SYMBOL 0x00000000 ieee80211_rx_ba_timer_expired vmlinux EXPORT_SYMBOL 0x00000000 hci_recv_frame vmlinux EXPORT_SYMBOL 0x00000000 v4l2_m2m_ctx_release vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_cmn_count_streams vmlinux EXPORT_SYMBOL 0x00000000 stmmac_init_tstamp_counter vmlinux EXPORT_SYMBOL_GPL 0x00000000 scsi_host_block vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_vma_offset_add vmlinux EXPORT_SYMBOL @@ -9094,6 +9127,7 @@ 0x00000000 drm_edid_block_valid vmlinux EXPORT_SYMBOL 0x00000000 get_options vmlinux EXPORT_SYMBOL 0x00000000 vfs_iter_write vmlinux EXPORT_SYMBOL +0x00000000 xnsynch_forget_sleeper vmlinux EXPORT_SYMBOL_GPL 0x00000000 ktime_get_coarse_ts64 vmlinux EXPORT_SYMBOL 0x00000000 udp_sock_create6 vmlinux EXPORT_SYMBOL_GPL 0x00000000 fib_new_table vmlinux EXPORT_SYMBOL_GPL @@ -9107,6 +9141,7 @@ 0x00000000 iomap_dio_iopoll vmlinux EXPORT_SYMBOL_GPL 0x00000000 bh_uptodate_or_lock vmlinux EXPORT_SYMBOL 0x00000000 mem_cgroup_from_task vmlinux EXPORT_SYMBOL +0x00000000 disable_oob_stage vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_chswitch_done vmlinux EXPORT_SYMBOL 0x00000000 xfrm_state_walk_init vmlinux EXPORT_SYMBOL 0x00000000 udp4_hwcsum vmlinux EXPORT_SYMBOL_GPL @@ -9123,11 +9158,11 @@ 0x00000000 param_get_hexint vmlinux EXPORT_SYMBOL 0x00000000 __sock_cmsg_send vmlinux EXPORT_SYMBOL 0x00000000 snd_pci_quirk_lookup vmlinux EXPORT_SYMBOL -0x00000000 ath9k_cmn_process_rate vmlinux EXPORT_SYMBOL 0x00000000 of_can_transceiver vmlinux EXPORT_SYMBOL_GPL 0x00000000 of_mdio_find_bus vmlinux EXPORT_SYMBOL 0x00000000 pci_bus_write_config_word vmlinux EXPORT_SYMBOL 0x00000000 sg_alloc_table vmlinux EXPORT_SYMBOL +0x00000000 cobalt_signal_free vmlinux EXPORT_SYMBOL_GPL 0x00000000 handle_simple_irq vmlinux EXPORT_SYMBOL_GPL 0x00000000 async_synchronize_cookie_domain vmlinux EXPORT_SYMBOL_GPL 0x00000000 call_usermodehelper_exec vmlinux EXPORT_SYMBOL @@ -9136,7 +9171,6 @@ 0x00000000 power_supply_batinfo_ocv2cap vmlinux EXPORT_SYMBOL_GPL 0x00000000 
v4l2_async_subdev_notifier_register vmlinux EXPORT_SYMBOL 0x00000000 v4l2_ctrl_handler_init_class vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_reset_calvalid vmlinux EXPORT_SYMBOL 0x00000000 drm_gem_prime_fd_to_handle vmlinux EXPORT_SYMBOL 0x00000000 drm_scdc_write vmlinux EXPORT_SYMBOL 0x00000000 pci_release_region vmlinux EXPORT_SYMBOL @@ -9156,7 +9190,10 @@ 0x00000000 __clk_get_name vmlinux EXPORT_SYMBOL_GPL 0x00000000 gen_pool_dma_alloc vmlinux EXPORT_SYMBOL 0x00000000 seq_list_start vmlinux EXPORT_SYMBOL +0x00000000 __xntimer_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 irq_find_matching_fwspec vmlinux EXPORT_SYMBOL_GPL +0x00000000 __hybrid_spin_trylock_irqsave vmlinux EXPORT_SYMBOL +0x00000000 do_raw_spin_trylock vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_sched_update_nr_running_tp vmlinux EXPORT_SYMBOL_GPL 0x00000000 kthread_queue_work vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_workqueue_execute_end vmlinux EXPORT_SYMBOL_GPL @@ -9165,9 +9202,9 @@ 0x00000000 km_state_notify vmlinux EXPORT_SYMBOL 0x00000000 register_ip_vs_scheduler vmlinux EXPORT_SYMBOL 0x00000000 snd_pcm_set_ops vmlinux EXPORT_SYMBOL -0x00000000 cpuidle_enable_device vmlinux EXPORT_SYMBOL_GPL 0x00000000 dma_run_dependencies vmlinux EXPORT_SYMBOL_GPL 0x00000000 sbitmap_prepare_to_wait vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_mmap_kmem vmlinux EXPORT_SYMBOL_GPL 0x00000000 bpf_prog_add vmlinux EXPORT_SYMBOL_GPL 0x00000000 trace_seq_putmem vmlinux EXPORT_SYMBOL_GPL 0x00000000 _raw_write_lock vmlinux EXPORT_SYMBOL @@ -9242,7 +9279,6 @@ 0x00000000 nvmem_device_read vmlinux EXPORT_SYMBOL_GPL 0x00000000 dvb_generic_release vmlinux EXPORT_SYMBOL 0x00000000 usb_clear_halt vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_setuprxdesc vmlinux EXPORT_SYMBOL 0x00000000 swphy_read_reg vmlinux EXPORT_SYMBOL_GPL 0x00000000 ubi_sync vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_bridge_hpd_notify vmlinux EXPORT_SYMBOL_GPL @@ -9336,6 +9372,8 @@ 0x00000000 pci_choose_state vmlinux EXPORT_SYMBOL 0x00000000 nfs_access_add_cache vmlinux EXPORT_SYMBOL_GPL 0x00000000 delete_from_page_cache vmlinux EXPORT_SYMBOL +0x00000000 rtdm_timer_destroy vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnvfile_init_dir vmlinux EXPORT_SYMBOL_GPL 0x00000000 ktime_get_seconds vmlinux EXPORT_SYMBOL_GPL 0x00000000 wait_for_completion_killable vmlinux EXPORT_SYMBOL 0x00000000 put_task_stack vmlinux EXPORT_SYMBOL_GPL @@ -9404,6 +9442,8 @@ 0x00000000 kbase_reg_read vmlinux EXPORT_SYMBOL 0x00000000 clk_multiplier_ops vmlinux EXPORT_SYMBOL_GPL 0x00000000 nla_put_64bit vmlinux EXPORT_SYMBOL +0x00000000 xnintr_detach vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnintr_attach vmlinux EXPORT_SYMBOL_GPL 0x00000000 unregister_ftrace_export vmlinux EXPORT_SYMBOL_GPL 0x00000000 wait_for_completion_timeout vmlinux EXPORT_SYMBOL 0x00000000 set_current_groups vmlinux EXPORT_SYMBOL @@ -9415,7 +9455,6 @@ 0x00000000 rk628_i2c_register vmlinux EXPORT_SYMBOL 0x00000000 usb_stor_probe2 vmlinux EXPORT_SYMBOL_GPL USB_STORAGE 0x00000000 usb_stor_probe1 vmlinux EXPORT_SYMBOL_GPL USB_STORAGE -0x00000000 ar9003_hw_bb_watchdog_dbg_info vmlinux EXPORT_SYMBOL 0x00000000 kbase_reg_write vmlinux EXPORT_SYMBOL 0x00000000 update_region vmlinux EXPORT_SYMBOL 0x00000000 dma_request_chan_by_mask vmlinux EXPORT_SYMBOL_GPL @@ -9423,6 +9462,7 @@ 0x00000000 public_key_signature_free vmlinux EXPORT_SYMBOL_GPL 0x00000000 nfs_show_options vmlinux EXPORT_SYMBOL_GPL 0x00000000 page_frag_free vmlinux EXPORT_SYMBOL +0x00000000 cobalt_clock_find vmlinux EXPORT_SYMBOL_GPL 0x00000000 sprint_symbol vmlinux EXPORT_SYMBOL_GPL 
0x00000000 __tracepoint_pelt_se_tp vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_pelt_dl_tp vmlinux EXPORT_SYMBOL_GPL @@ -9439,6 +9479,8 @@ 0x00000000 kstrtobool_from_user vmlinux EXPORT_SYMBOL 0x00000000 fuse_len_args vmlinux EXPORT_SYMBOL_GPL 0x00000000 shrink_dcache_parent vmlinux EXPORT_SYMBOL +0x00000000 rtdm_mutex_init vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnthread_kick vmlinux EXPORT_SYMBOL_GPL 0x00000000 trace_seq_vprintf vmlinux EXPORT_SYMBOL_GPL 0x00000000 synchronize_net vmlinux EXPORT_SYMBOL 0x00000000 netdev_adjacent_change_abort vmlinux EXPORT_SYMBOL @@ -9461,6 +9503,7 @@ 0x00000000 lease_unregister_notifier vmlinux EXPORT_SYMBOL_GPL 0x00000000 kern_path_create vmlinux EXPORT_SYMBOL 0x00000000 init_on_free vmlinux EXPORT_SYMBOL +0x00000000 cobalt_yield vmlinux EXPORT_SYMBOL_GPL 0x00000000 irq_create_mapping_affinity vmlinux EXPORT_SYMBOL_GPL 0x00000000 _raw_write_lock_bh vmlinux EXPORT_SYMBOL 0x00000000 xdr_terminate_string vmlinux EXPORT_SYMBOL_GPL @@ -9510,7 +9553,6 @@ 0x00000000 devm_power_supply_register vmlinux EXPORT_SYMBOL_GPL 0x00000000 rk628_txphy_get_bus_width vmlinux EXPORT_SYMBOL 0x00000000 rk628_txphy_set_bus_width vmlinux EXPORT_SYMBOL -0x00000000 ar9003_mci_state vmlinux EXPORT_SYMBOL 0x00000000 phy_start_machine vmlinux EXPORT_SYMBOL_GPL 0x00000000 devm_mdiobus_alloc_size vmlinux EXPORT_SYMBOL 0x00000000 __pm_runtime_suspend vmlinux EXPORT_SYMBOL_GPL @@ -9518,6 +9560,7 @@ 0x00000000 drm_atomic_helper_update_plane vmlinux EXPORT_SYMBOL 0x00000000 crc_t10dif vmlinux EXPORT_SYMBOL 0x00000000 kvasprintf_const vmlinux EXPORT_SYMBOL +0x00000000 compat_ptr_oob_ioctl vmlinux EXPORT_SYMBOL 0x00000000 param_get_charp vmlinux EXPORT_SYMBOL 0x00000000 xdp_rxq_info_unreg vmlinux EXPORT_SYMBOL_GPL 0x00000000 netdev_next_lower_dev_rcu vmlinux EXPORT_SYMBOL @@ -9547,6 +9590,7 @@ 0x00000000 sysfs_create_mount_point vmlinux EXPORT_SYMBOL_GPL 0x00000000 mount_bdev vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 can_do_mlock vmlinux EXPORT_SYMBOL +0x00000000 __xnthread_discard vmlinux EXPORT_SYMBOL_GPL 0x00000000 rcu_barrier vmlinux EXPORT_SYMBOL_GPL 0x00000000 ndisc_mc_map vmlinux EXPORT_SYMBOL 0x00000000 nf_nat_masquerade_ipv6 vmlinux EXPORT_SYMBOL_GPL @@ -9614,7 +9658,6 @@ 0x00000000 irq_do_set_affinity vmlinux EXPORT_SYMBOL_GPL 0x00000000 rpc_mkpipe_dentry vmlinux EXPORT_SYMBOL_GPL 0x00000000 ipt_unregister_table_pre_exit vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_ani_monitor vmlinux EXPORT_SYMBOL 0x00000000 ahci_stop_engine vmlinux EXPORT_SYMBOL_GPL 0x00000000 rockchip_init_opp_table vmlinux EXPORT_SYMBOL 0x00000000 pci_release_regions vmlinux EXPORT_SYMBOL @@ -9639,7 +9682,10 @@ 0x00000000 siphash_1u32 vmlinux EXPORT_SYMBOL 0x00000000 generic_check_addressable vmlinux EXPORT_SYMBOL 0x00000000 get_tree_nodev vmlinux EXPORT_SYMBOL +0x00000000 rtdm_event_init vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnbufd_copy_to_kmem vmlinux EXPORT_SYMBOL_GPL 0x00000000 perf_event_read_value vmlinux EXPORT_SYMBOL_GPL +0x00000000 dovetail_stop vmlinux EXPORT_SYMBOL_GPL 0x00000000 __refrigerator vmlinux EXPORT_SYMBOL 0x00000000 irqchip_fwnode_ops vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet6_csk_addr2sockaddr vmlinux EXPORT_SYMBOL_GPL @@ -9649,7 +9695,6 @@ 0x00000000 input_mt_init_slots vmlinux EXPORT_SYMBOL 0x00000000 usb_disable_lpm vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_disable_ltm vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath_gen_timer_alloc vmlinux EXPORT_SYMBOL 0x00000000 phy_10gbit_full_features vmlinux EXPORT_SYMBOL_GPL 0x00000000 ata_link_printk 
vmlinux EXPORT_SYMBOL 0x00000000 devres_close_group vmlinux EXPORT_SYMBOL_GPL @@ -9686,6 +9731,7 @@ 0x00000000 drm_panel_bridge_add vmlinux EXPORT_SYMBOL 0x00000000 iommu_unregister_device_fault_handler vmlinux EXPORT_SYMBOL_GPL 0x00000000 proc_symlink vmlinux EXPORT_SYMBOL +0x00000000 cobalt_kernel_ppd vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_br_fdb_external_learn_add vmlinux EXPORT_SYMBOL_GPL 0x00000000 __rtnl_link_unregister vmlinux EXPORT_SYMBOL_GPL 0x00000000 rtnl_kfree_skbs vmlinux EXPORT_SYMBOL @@ -9731,6 +9777,7 @@ 0x00000000 drm_mode_create vmlinux EXPORT_SYMBOL 0x00000000 memchr vmlinux EXPORT_SYMBOL 0x00000000 kern_unmount_array vmlinux EXPORT_SYMBOL +0x00000000 xnsynch_destroy vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet6_release vmlinux EXPORT_SYMBOL 0x00000000 nf_ct_expect_hsize vmlinux EXPORT_SYMBOL_GPL 0x00000000 of_get_parent vmlinux EXPORT_SYMBOL @@ -9785,7 +9832,6 @@ 0x00000000 tcp_mtup_init vmlinux EXPORT_SYMBOL 0x00000000 perf_pmu_name vmlinux EXPORT_SYMBOL_GPL 0x00000000 mmc_cqe_request_done vmlinux EXPORT_SYMBOL -0x00000000 cpuidle_register vmlinux EXPORT_SYMBOL_GPL 0x00000000 __media_remove_intf_links vmlinux EXPORT_SYMBOL_GPL 0x00000000 typec_match_altmode vmlinux EXPORT_SYMBOL_GPL 0x00000000 devm_clk_unregister vmlinux EXPORT_SYMBOL_GPL @@ -9800,7 +9846,6 @@ 0x00000000 snd_soc_jack_report vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_soc_dapm_put_enum_double vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_soc_dapm_get_enum_double vmlinux EXPORT_SYMBOL_GPL -0x00000000 dbs_update vmlinux EXPORT_SYMBOL_GPL 0x00000000 dm_tm_commit vmlinux EXPORT_SYMBOL_GPL 0x00000000 kbase_pm_set_policy vmlinux EXPORT_SYMBOL 0x00000000 kbase_pm_get_policy vmlinux EXPORT_SYMBOL @@ -9819,7 +9864,6 @@ 0x00000000 mmc_set_timing vmlinux EXPORT_SYMBOL_GPL 0x00000000 dm_accept_partial_bio vmlinux EXPORT_SYMBOL_GPL 0x00000000 usbnet_get_endpoints vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_set_tsfadjust vmlinux EXPORT_SYMBOL 0x00000000 phylink_set_pcs vmlinux EXPORT_SYMBOL_GPL 0x00000000 nvme_sync_io_queues vmlinux EXPORT_SYMBOL_GPL 0x00000000 pci_bus_read_dev_vendor_id vmlinux EXPORT_SYMBOL @@ -9884,6 +9928,7 @@ 0x00000000 usb_alloc_coherent vmlinux EXPORT_SYMBOL_GPL 0x00000000 get_tree_mtd vmlinux EXPORT_SYMBOL_GPL 0x00000000 crypto_larval_kill vmlinux EXPORT_SYMBOL_GPL +0x00000000 test_and_lock_stage vmlinux EXPORT_SYMBOL_GPL 0x00000000 irq_set_default_host vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet_csk_get_port vmlinux EXPORT_SYMBOL_GPL 0x00000000 netdev_is_rx_handler_busy vmlinux EXPORT_SYMBOL_GPL @@ -9926,6 +9971,7 @@ 0x00000000 jbd2_journal_inode_ranged_wait vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 __SCK__tp_func_android_fs_dataread_end vmlinux EXPORT_SYMBOL 0x00000000 freeze_super vmlinux EXPORT_SYMBOL +0x00000000 cobalt_runstate vmlinux EXPORT_SYMBOL_GPL 0x00000000 __devm_request_region vmlinux EXPORT_SYMBOL 0x00000000 l3mdev_fib_table_by_index vmlinux EXPORT_SYMBOL_GPL 0x00000000 hid_connect vmlinux EXPORT_SYMBOL_GPL @@ -9949,7 +9995,6 @@ 0x00000000 cfg80211_auth_timeout vmlinux EXPORT_SYMBOL 0x00000000 devm_snd_soc_register_dai vmlinux EXPORT_SYMBOL_GPL 0x00000000 v4l2_ctrl_new_std_menu vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_bstuck_nfcal vmlinux EXPORT_SYMBOL 0x00000000 ata_common_sdev_attrs vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_add_edid_modes vmlinux EXPORT_SYMBOL 0x00000000 fsl8250_handle_irq vmlinux EXPORT_SYMBOL_GPL @@ -9997,6 +10042,7 @@ 0x00000000 seq_hlist_start_head_rcu vmlinux EXPORT_SYMBOL 0x00000000 current_time 
vmlinux EXPORT_SYMBOL 0x00000000 user_path_at_empty vmlinux EXPORT_SYMBOL +0x00000000 rtdm_dev_unregister vmlinux EXPORT_SYMBOL_GPL 0x00000000 page_pool_release_page vmlinux EXPORT_SYMBOL 0x00000000 dev_mc_flush vmlinux EXPORT_SYMBOL 0x00000000 dev_uc_flush vmlinux EXPORT_SYMBOL @@ -10021,6 +10067,8 @@ 0x00000000 dma_fence_release vmlinux EXPORT_SYMBOL 0x00000000 dw_hdmi_qp_set_audio_infoframe vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_client_register vmlinux EXPORT_SYMBOL +0x00000000 cobalt_personality vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnsynch_wakeup_one_sleeper vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_rpm_suspend vmlinux EXPORT_SYMBOL_GPL 0x00000000 do_trace_rcu_torture_read vmlinux EXPORT_SYMBOL_GPL 0x00000000 cpufreq_update_util_data vmlinux EXPORT_SYMBOL_GPL @@ -10032,8 +10080,6 @@ 0x00000000 __tracepoint_rpm_idle vmlinux EXPORT_SYMBOL_GPL 0x00000000 bt_procfs_cleanup vmlinux EXPORT_SYMBOL 0x00000000 mr_fill_mroute vmlinux EXPORT_SYMBOL -0x00000000 gov_attr_set_put vmlinux EXPORT_SYMBOL_GPL -0x00000000 gov_attr_set_get vmlinux EXPORT_SYMBOL_GPL 0x00000000 media_device_usb_allocate vmlinux EXPORT_SYMBOL_GPL 0x00000000 wakeup_source_remove vmlinux EXPORT_SYMBOL_GPL 0x00000000 stop_tty vmlinux EXPORT_SYMBOL @@ -10047,7 +10093,6 @@ 0x00000000 nfs_pageio_reset_write_mds vmlinux EXPORT_SYMBOL_GPL 0x00000000 unlock_page_memcg vmlinux EXPORT_SYMBOL 0x00000000 si_swapinfo vmlinux EXPORT_SYMBOL_GPL -0x00000000 iwe_stream_add_point vmlinux EXPORT_SYMBOL 0x00000000 xfrm_policy_destroy vmlinux EXPORT_SYMBOL 0x00000000 inet_del_offload vmlinux EXPORT_SYMBOL 0x00000000 sysctl_fb_tunnels_only_for_init_net vmlinux EXPORT_SYMBOL @@ -10132,7 +10177,6 @@ 0x00000000 kfree_skb_list vmlinux EXPORT_SYMBOL 0x00000000 snd_soc_of_parse_audio_simple_widgets vmlinux EXPORT_SYMBOL_GPL 0x00000000 ohci_setup vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_kill_interrupts vmlinux EXPORT_SYMBOL 0x00000000 deregister_mtd_blktrans vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_dp_psr_setup_time vmlinux EXPORT_SYMBOL 0x00000000 drm_simple_encoder_init vmlinux EXPORT_SYMBOL @@ -10173,6 +10217,7 @@ 0x00000000 gpiochip_add_pin_range vmlinux EXPORT_SYMBOL_GPL 0x00000000 generate_random_uuid vmlinux EXPORT_SYMBOL 0x00000000 get_mem_cgroup_from_page vmlinux EXPORT_SYMBOL +0x00000000 xnheap_free vmlinux EXPORT_SYMBOL_GPL 0x00000000 xfrm_policy_hash_rebuild vmlinux EXPORT_SYMBOL 0x00000000 nf_ct_tcp_seqadj_set vmlinux EXPORT_SYMBOL_GPL 0x00000000 get_net_ns_by_pid vmlinux EXPORT_SYMBOL_GPL @@ -10199,7 +10244,6 @@ 0x00000000 xfrm_input_register_afinfo vmlinux EXPORT_SYMBOL 0x00000000 udp_ioctl vmlinux EXPORT_SYMBOL 0x00000000 nf_ct_acct_add vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_init vmlinux EXPORT_SYMBOL 0x00000000 __mdiobus_write vmlinux EXPORT_SYMBOL 0x00000000 genphy_loopback vmlinux EXPORT_SYMBOL 0x00000000 tty_port_default_client_ops vmlinux EXPORT_SYMBOL_GPL @@ -10219,6 +10263,7 @@ 0x00000000 pwm_set_chip_data vmlinux EXPORT_SYMBOL_GPL 0x00000000 blk_queue_max_write_same_sectors vmlinux EXPORT_SYMBOL 0x00000000 may_umount vmlinux EXPORT_SYMBOL +0x00000000 xnbufd_map_umem vmlinux EXPORT_SYMBOL_GPL 0x00000000 pids_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL 0x00000000 rpc_switch_client_transport vmlinux EXPORT_SYMBOL_GPL 0x00000000 mmc_hsq_suspend vmlinux EXPORT_SYMBOL_GPL @@ -10257,6 +10302,7 @@ 0x00000000 regulator_bulk_disable vmlinux EXPORT_SYMBOL_GPL 0x00000000 memweight vmlinux EXPORT_SYMBOL 0x00000000 jbd2_journal_ack_err vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 
+0x00000000 nkvdso vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_sched_stat_blocked vmlinux EXPORT_SYMBOL_GPL 0x00000000 emergency_restart vmlinux EXPORT_SYMBOL_GPL 0x00000000 netdev_upper_dev_link vmlinux EXPORT_SYMBOL @@ -10279,7 +10325,6 @@ 0x00000000 cpufreq_driver_target vmlinux EXPORT_SYMBOL_GPL 0x00000000 dm_post_suspending vmlinux EXPORT_SYMBOL_GPL 0x00000000 vb2_fop_read vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_get_tsf_offset vmlinux EXPORT_SYMBOL 0x00000000 mtd_panic_write vmlinux EXPORT_SYMBOL_GPL 0x00000000 soc_device_unregister vmlinux EXPORT_SYMBOL_GPL 0x00000000 tpm_transmit_cmd vmlinux EXPORT_SYMBOL_GPL @@ -10299,9 +10344,6 @@ 0x00000000 ohci_restart vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_phy_get_charger_current vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_phy_set_charger_current vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_cmn_get_hw_crypto_keytype vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_btcoex_bt_stomp vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_txstart vmlinux EXPORT_SYMBOL 0x00000000 phy_get_eee_err vmlinux EXPORT_SYMBOL 0x00000000 nvme_reset_ctrl vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_self_refresh_helper_init vmlinux EXPORT_SYMBOL @@ -10318,7 +10360,6 @@ 0x00000000 dm_shift_arg vmlinux EXPORT_SYMBOL 0x00000000 v4l2_async_unregister_subdev vmlinux EXPORT_SYMBOL 0x00000000 usb_stor_resume vmlinux EXPORT_SYMBOL_GPL USB_STORAGE -0x00000000 ath9k_hw_gpio_request_out vmlinux EXPORT_SYMBOL 0x00000000 open_candev vmlinux EXPORT_SYMBOL_GPL 0x00000000 of_mdiobus_register vmlinux EXPORT_SYMBOL 0x00000000 ubi_open_volume vmlinux EXPORT_SYMBOL_GPL @@ -10329,7 +10370,6 @@ 0x00000000 __generic_file_write_iter vmlinux EXPORT_SYMBOL 0x00000000 xfrm_lookup vmlinux EXPORT_SYMBOL 0x00000000 skb_store_bits vmlinux EXPORT_SYMBOL -0x00000000 ath_reg_notifier_apply vmlinux EXPORT_SYMBOL 0x00000000 ata_sas_scsi_ioctl vmlinux EXPORT_SYMBOL_GPL 0x00000000 dma_resv_fini vmlinux EXPORT_SYMBOL 0x00000000 plist_add vmlinux EXPORT_SYMBOL_GPL @@ -10343,7 +10383,6 @@ 0x00000000 snd_soc_put_volsw vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_soc_get_volsw vmlinux EXPORT_SYMBOL_GPL 0x00000000 devm_devfreq_register_opp_notifier vmlinux EXPORT_SYMBOL -0x00000000 cpufreq_dbs_governor_stop vmlinux EXPORT_SYMBOL_GPL 0x00000000 dm_bufio_new vmlinux EXPORT_SYMBOL_GPL 0x00000000 i2c_smbus_read_byte vmlinux EXPORT_SYMBOL 0x00000000 i2c_probe_func_quick_read vmlinux EXPORT_SYMBOL_GPL @@ -10383,7 +10422,6 @@ 0x00000000 nfs_file_fsync vmlinux EXPORT_SYMBOL_GPL 0x00000000 mark_buffer_write_io_error vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 profile_event_register vmlinux EXPORT_SYMBOL_GPL -0x00000000 cpu_latency_qos_request_active vmlinux EXPORT_SYMBOL_GPL 0x00000000 __nf_conntrack_helper_find vmlinux EXPORT_SYMBOL_GPL 0x00000000 gether_register_netdev vmlinux EXPORT_SYMBOL_GPL 0x00000000 ppp_input_error vmlinux EXPORT_SYMBOL @@ -10492,6 +10530,7 @@ 0x00000000 usbnet_manage_power vmlinux EXPORT_SYMBOL 0x00000000 kbase_mmu_report_mcu_as_fault_and_reset vmlinux EXPORT_SYMBOL 0x00000000 clk_hw_register vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnsynch_wakeup_many_sleepers vmlinux EXPORT_SYMBOL_GPL 0x00000000 disable_irq_nosync vmlinux EXPORT_SYMBOL 0x00000000 ieee80211_tx_prepare_skb vmlinux EXPORT_SYMBOL 0x00000000 cfg80211_nan_match vmlinux EXPORT_SYMBOL @@ -10515,6 +10554,7 @@ 0x00000000 extcon_unregister_notifier_all vmlinux EXPORT_SYMBOL_GPL 0x00000000 media_entity_remote_pad vmlinux EXPORT_SYMBOL_GPL 0x00000000 platform_get_resource_byname vmlinux 
EXPORT_SYMBOL_GPL +0x00000000 fbcon_update_vcs vmlinux EXPORT_SYMBOL 0x00000000 of_find_node_by_type vmlinux EXPORT_SYMBOL 0x00000000 usb_stor_adjust_quirks vmlinux EXPORT_SYMBOL_GPL USB_STORAGE 0x00000000 dwc3_stop_active_transfer vmlinux EXPORT_SYMBOL_GPL @@ -10539,17 +10579,18 @@ 0x00000000 pci_bus_write_config_byte vmlinux EXPORT_SYMBOL 0x00000000 devm_gpiod_put_array vmlinux EXPORT_SYMBOL_GPL 0x00000000 free_anon_bdev vmlinux EXPORT_SYMBOL +0x00000000 rtdm_sem_timeddown vmlinux EXPORT_SYMBOL_GPL 0x00000000 raw_hash_sk vmlinux EXPORT_SYMBOL_GPL 0x00000000 tcp_seq_start vmlinux EXPORT_SYMBOL 0x00000000 dst_cache_get vmlinux EXPORT_SYMBOL_GPL 0x00000000 rps_sock_flow_table vmlinux EXPORT_SYMBOL 0x00000000 iio_trigger_poll vmlinux EXPORT_SYMBOL 0x00000000 usb_stor_control_msg vmlinux EXPORT_SYMBOL_GPL USB_STORAGE -0x00000000 ath9k_hw_reset vmlinux EXPORT_SYMBOL 0x00000000 spi_res_free vmlinux EXPORT_SYMBOL_GPL 0x00000000 of_genpd_add_subdomain vmlinux EXPORT_SYMBOL_GPL 0x00000000 __drm_get_edid_firmware_path vmlinux EXPORT_SYMBOL 0x00000000 blk_mq_complete_request vmlinux EXPORT_SYMBOL +0x00000000 rtdm_fd_write vmlinux EXPORT_SYMBOL_GPL 0x00000000 rcu_exp_batches_completed vmlinux EXPORT_SYMBOL_GPL 0x00000000 request_any_context_irq vmlinux EXPORT_SYMBOL_GPL 0x00000000 rpc_clnt_swap_activate vmlinux EXPORT_SYMBOL_GPL @@ -10640,6 +10681,7 @@ 0x00000000 drm_modeset_lock vmlinux EXPORT_SYMBOL 0x00000000 pci_walk_bus vmlinux EXPORT_SYMBOL_GPL 0x00000000 block_is_partially_uptodate vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver +0x00000000 xnthread_wait_period vmlinux EXPORT_SYMBOL_GPL 0x00000000 ring_buffer_overruns vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_workqueue_execute_end vmlinux EXPORT_SYMBOL_GPL 0x00000000 l3mdev_table_lookup_unregister vmlinux EXPORT_SYMBOL_GPL @@ -10650,6 +10692,7 @@ 0x00000000 drm_atomic_helper_commit_hw_done vmlinux EXPORT_SYMBOL 0x00000000 uart_resume_port vmlinux EXPORT_SYMBOL 0x00000000 gpiod_get_value_cansleep vmlinux EXPORT_SYMBOL_GPL +0x00000000 __xnthread_test_cancel vmlinux EXPORT_SYMBOL_GPL 0x00000000 sysctl_rmem_max vmlinux EXPORT_SYMBOL 0x00000000 sysctl_wmem_max vmlinux EXPORT_SYMBOL 0x00000000 iio_trigger_using_own vmlinux EXPORT_SYMBOL @@ -10664,6 +10707,7 @@ 0x00000000 crypto_grab_spawn vmlinux EXPORT_SYMBOL_GPL 0x00000000 jbd2_journal_errno vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 register_vmap_purge_notifier vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnclock_divrem_billion vmlinux EXPORT_SYMBOL_GPL 0x00000000 netdev_rss_key_fill vmlinux EXPORT_SYMBOL 0x00000000 dm_bitset_cursor_next vmlinux EXPORT_SYMBOL_GPL 0x00000000 input_free_minor vmlinux EXPORT_SYMBOL @@ -10719,6 +10763,7 @@ 0x00000000 drm_mode_prune_invalid vmlinux EXPORT_SYMBOL 0x00000000 mpi_clear vmlinux EXPORT_SYMBOL_GPL 0x00000000 vfs_readlink vmlinux EXPORT_SYMBOL +0x00000000 inband_irq_enable vmlinux EXPORT_SYMBOL 0x00000000 complete_and_exit vmlinux EXPORT_SYMBOL 0x00000000 cfg80211_unregister_wdev vmlinux EXPORT_SYMBOL 0x00000000 inet6_lookup vmlinux EXPORT_SYMBOL_GPL @@ -10777,6 +10822,7 @@ 0x00000000 crypto_register_acomp vmlinux EXPORT_SYMBOL_GPL 0x00000000 nfs4_disable_idmapping vmlinux EXPORT_SYMBOL_GPL 0x00000000 d_genocide vmlinux EXPORT_SYMBOL +0x00000000 rtdm_event_clear vmlinux EXPORT_SYMBOL_GPL 0x00000000 tracing_generic_entry_update vmlinux EXPORT_SYMBOL_GPL 0x00000000 __module_put_and_exit vmlinux EXPORT_SYMBOL 0x00000000 ns_to_kernel_old_timeval vmlinux EXPORT_SYMBOL @@ -10807,7 
+10853,6 @@ 0x00000000 d_rehash vmlinux EXPORT_SYMBOL 0x00000000 d_drop vmlinux EXPORT_SYMBOL 0x00000000 file_open_root vmlinux EXPORT_SYMBOL -0x00000000 iwe_stream_add_event vmlinux EXPORT_SYMBOL 0x00000000 tso_build_hdr vmlinux EXPORT_SYMBOL 0x00000000 sock_recv_errqueue vmlinux EXPORT_SYMBOL 0x00000000 __mmc_claim_host vmlinux EXPORT_SYMBOL @@ -10817,7 +10862,6 @@ 0x00000000 pci_get_class vmlinux EXPORT_SYMBOL 0x00000000 nfs_initiate_pgio vmlinux EXPORT_SYMBOL_GPL 0x00000000 nfs_create_rpc_client vmlinux EXPORT_SYMBOL_GPL -0x00000000 log_abnormal_wakeup_reason vmlinux EXPORT_SYMBOL_GPL 0x00000000 region_intersects vmlinux EXPORT_SYMBOL_GPL 0x00000000 netif_rx vmlinux EXPORT_SYMBOL 0x00000000 snd_pcm_hw_refine vmlinux EXPORT_SYMBOL @@ -10848,6 +10892,7 @@ 0x00000000 gpiochip_get_data vmlinux EXPORT_SYMBOL_GPL 0x00000000 reciprocal_value vmlinux EXPORT_SYMBOL 0x00000000 unregister_binfmt vmlinux EXPORT_SYMBOL +0x00000000 xnregistry_vlink_ops vmlinux EXPORT_SYMBOL_GPL 0x00000000 make_kgid vmlinux EXPORT_SYMBOL 0x00000000 _raw_read_trylock vmlinux EXPORT_SYMBOL 0x00000000 housekeeping_enabled vmlinux EXPORT_SYMBOL_GPL @@ -10874,16 +10919,18 @@ 0x00000000 dw_hdmi_qp_suspend vmlinux EXPORT_SYMBOL_GPL 0x00000000 klist_del vmlinux EXPORT_SYMBOL_GPL 0x00000000 __lock_buffer vmlinux EXPORT_SYMBOL +0x00000000 rtdm_event_pulse vmlinux EXPORT_SYMBOL_GPL 0x00000000 memory_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL 0x00000000 tcp_openreq_init_rwin vmlinux EXPORT_SYMBOL 0x00000000 xdp_rxq_info_reg_mem_model vmlinux EXPORT_SYMBOL_GPL 0x00000000 devfreq_remove_governor vmlinux EXPORT_SYMBOL 0x00000000 dm_copy_name_and_uuid vmlinux EXPORT_SYMBOL_GPL -0x00000000 ar9003_paprd_populate_single_table vmlinux EXPORT_SYMBOL 0x00000000 analogix_dp_phy_test vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_detach_device_from_domain vmlinux EXPORT_SYMBOL_GPL 0x00000000 debug_locks vmlinux EXPORT_SYMBOL_GPL 0x00000000 aead_geniv_alloc vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_fd_put vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_fd_get vmlinux EXPORT_SYMBOL_GPL 0x00000000 perf_event_update_userpage vmlinux EXPORT_SYMBOL_GPL 0x00000000 trace_print_bitmask_seq vmlinux EXPORT_SYMBOL_GPL 0x00000000 fib6_check_nexthop vmlinux EXPORT_SYMBOL_GPL @@ -10931,7 +10978,6 @@ 0x00000000 __nla_validate vmlinux EXPORT_SYMBOL 0x00000000 vfs_dedupe_file_range vmlinux EXPORT_SYMBOL 0x00000000 ieee80211_proberesp_get vmlinux EXPORT_SYMBOL -0x00000000 wireless_send_event vmlinux EXPORT_SYMBOL 0x00000000 xprt_release_xprt_cong vmlinux EXPORT_SYMBOL_GPL 0x00000000 ipv6_recv_error vmlinux EXPORT_SYMBOL_GPL 0x00000000 udp_sk_rx_dst_set vmlinux EXPORT_SYMBOL @@ -11049,6 +11095,7 @@ 0x00000000 pwmchip_add vmlinux EXPORT_SYMBOL_GPL 0x00000000 blk_queue_max_discard_segments vmlinux EXPORT_SYMBOL_GPL 0x00000000 pkcs7_parse_message vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_sem_down vmlinux EXPORT_SYMBOL_GPL 0x00000000 ip_fib_metrics_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 flow_block_cb_decref vmlinux EXPORT_SYMBOL 0x00000000 of_prop_next_string vmlinux EXPORT_SYMBOL_GPL @@ -11066,13 +11113,12 @@ 0x00000000 backlight_device_set_brightness vmlinux EXPORT_SYMBOL 0x00000000 pcie_capability_write_word vmlinux EXPORT_SYMBOL 0x00000000 copy_page_to_iter vmlinux EXPORT_SYMBOL +0x00000000 xnthread_call_mayday vmlinux EXPORT_SYMBOL_GPL 0x00000000 css_next_descendant_pre vmlinux EXPORT_SYMBOL_GPL 0x00000000 ioremap_cache vmlinux EXPORT_SYMBOL 0x00000000 snd_device_get_state vmlinux EXPORT_SYMBOL_GPL 0x00000000 of_find_device_by_node vmlinux 
EXPORT_SYMBOL 0x00000000 vb2_streamoff vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_cmn_setup_ht_cap vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_gpio_free vmlinux EXPORT_SYMBOL 0x00000000 ata_port_pbar_desc vmlinux EXPORT_SYMBOL_GPL 0x00000000 __blkdev_issue_zeroout vmlinux EXPORT_SYMBOL 0x00000000 kblockd_mod_delayed_work_on vmlinux EXPORT_SYMBOL @@ -11100,7 +11146,6 @@ 0x00000000 tcp_rtx_synack vmlinux EXPORT_SYMBOL 0x00000000 skb_put vmlinux EXPORT_SYMBOL 0x00000000 v4l2_m2m_ioctl_reqbufs vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_name vmlinux EXPORT_SYMBOL 0x00000000 ubi_leb_write vmlinux EXPORT_SYMBOL_GPL 0x00000000 mtd_concat_create vmlinux EXPORT_SYMBOL 0x00000000 scsi_host_put vmlinux EXPORT_SYMBOL @@ -11118,6 +11163,7 @@ 0x00000000 pinctrl_get vmlinux EXPORT_SYMBOL_GPL 0x00000000 configfs_depend_item vmlinux EXPORT_SYMBOL 0x00000000 generic_block_fiemap vmlinux EXPORT_SYMBOL +0x00000000 rtdm_mutex_lock vmlinux EXPORT_SYMBOL_GPL 0x00000000 add_wait_queue vmlinux EXPORT_SYMBOL 0x00000000 param_ops_invbool vmlinux EXPORT_SYMBOL 0x00000000 dev_add_pack vmlinux EXPORT_SYMBOL @@ -11176,7 +11222,6 @@ 0x00000000 skb_tstamp_tx vmlinux EXPORT_SYMBOL_GPL 0x00000000 devfreq_monitor_resume vmlinux EXPORT_SYMBOL 0x00000000 sip_hdcp_config vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath_hw_keyreset vmlinux EXPORT_SYMBOL 0x00000000 mdio_xpcs_get_ops vmlinux EXPORT_SYMBOL_GPL 0x00000000 __scsi_format_command vmlinux EXPORT_SYMBOL 0x00000000 tty_set_operations vmlinux EXPORT_SYMBOL @@ -11207,7 +11252,6 @@ 0x00000000 asoc_simple_parse_clk vmlinux EXPORT_SYMBOL_GPL 0x00000000 media_graph_walk_next vmlinux EXPORT_SYMBOL_GPL 0x00000000 media_graph_walk_init vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_cmn_rx_skb_postprocess vmlinux EXPORT_SYMBOL 0x00000000 drm_mode_validate_driver vmlinux EXPORT_SYMBOL 0x00000000 fb_deferred_io_fsync vmlinux EXPORT_SYMBOL_GPL 0x00000000 pci_alloc_irq_vectors_affinity vmlinux EXPORT_SYMBOL @@ -11260,6 +11304,7 @@ 0x00000000 xas_store vmlinux EXPORT_SYMBOL_GPL 0x00000000 crypto_register_scomps vmlinux EXPORT_SYMBOL_GPL 0x00000000 sysfs_emit vmlinux EXPORT_SYMBOL_GPL +0x00000000 xntimer_format_time vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_powernv_throttle vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_get_fils_discovery_tmpl vmlinux EXPORT_SYMBOL 0x00000000 net_namespace_list vmlinux EXPORT_SYMBOL_GPL @@ -11297,7 +11342,6 @@ 0x00000000 perf_event_period vmlinux EXPORT_SYMBOL_GPL 0x00000000 rfc1042_header vmlinux EXPORT_SYMBOL 0x00000000 iio_buffer_init vmlinux EXPORT_SYMBOL -0x00000000 rockchip_dmcfreq_vop_bandwidth_request vmlinux EXPORT_SYMBOL 0x00000000 devm_devfreq_unregister_notifier vmlinux EXPORT_SYMBOL 0x00000000 sdio_get_host_pm_caps vmlinux EXPORT_SYMBOL_GPL 0x00000000 media_device_register_entity_notify vmlinux EXPORT_SYMBOL_GPL @@ -11308,6 +11352,7 @@ 0x00000000 hdmi_vendor_infoframe_pack_only vmlinux EXPORT_SYMBOL 0x00000000 rockchip_pcie_get_phys vmlinux EXPORT_SYMBOL_GPL 0x00000000 add_swap_extent vmlinux EXPORT_SYMBOL_GPL +0x00000000 cobalt_heap vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_sched_stat_iowait vmlinux EXPORT_SYMBOL_GPL 0x00000000 cfg80211_rx_mgmt_khz vmlinux EXPORT_SYMBOL 0x00000000 svc_rpcb_cleanup vmlinux EXPORT_SYMBOL_GPL @@ -11362,6 +11407,7 @@ 0x00000000 bioset_init vmlinux EXPORT_SYMBOL 0x00000000 bioset_exit vmlinux EXPORT_SYMBOL 0x00000000 nfs_sb_deactive vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnsynch_wakeup_this_sleeper vmlinux EXPORT_SYMBOL_GPL 0x00000000 irq_alloc_generic_chip vmlinux EXPORT_SYMBOL_GPL 0x00000000 
udp_destruct_sock vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet_hashinfo2_init_mod vmlinux EXPORT_SYMBOL_GPL @@ -11395,7 +11441,6 @@ 0x00000000 snd_seq_autoload_exit vmlinux EXPORT_SYMBOL 0x00000000 snd_seq_autoload_init vmlinux EXPORT_SYMBOL 0x00000000 __devm_iio_device_register vmlinux EXPORT_SYMBOL_GPL -0x00000000 rockchip_dmcfreq_write_trylock vmlinux EXPORT_SYMBOL 0x00000000 vb2_mmap vmlinux EXPORT_SYMBOL_GPL 0x00000000 rk628_rxphy_power_on vmlinux EXPORT_SYMBOL 0x00000000 ahci_do_softreset vmlinux EXPORT_SYMBOL_GPL @@ -11466,6 +11511,7 @@ 0x00000000 kbasep_find_enclosing_cpu_mapping_offset vmlinux EXPORT_SYMBOL 0x00000000 pci_root_buses vmlinux EXPORT_SYMBOL 0x00000000 blk_freeze_queue_start vmlinux EXPORT_SYMBOL_GPL +0x00000000 run_oob_call vmlinux EXPORT_SYMBOL_GPL 0x00000000 tcp_syn_ack_timeout vmlinux EXPORT_SYMBOL 0x00000000 nf_log_unregister vmlinux EXPORT_SYMBOL 0x00000000 iio_alloc_pollfunc vmlinux EXPORT_SYMBOL_GPL @@ -11478,6 +11524,7 @@ 0x00000000 gpiochip_irqchip_add_key vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_block_bio_remap vmlinux EXPORT_SYMBOL_GPL 0x00000000 d_hash_and_lookup vmlinux EXPORT_SYMBOL +0x00000000 xnthread_demote vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_irq_handler_exit vmlinux EXPORT_SYMBOL_GPL 0x00000000 cfg80211_bss_iter vmlinux EXPORT_SYMBOL 0x00000000 snd_rawmidi_drain_input vmlinux EXPORT_SYMBOL @@ -11514,6 +11561,7 @@ 0x00000000 drm_legacy_ioremap_wc vmlinux EXPORT_SYMBOL 0x00000000 nfs_reconfigure vmlinux EXPORT_SYMBOL_GPL 0x00000000 vzalloc vmlinux EXPORT_SYMBOL +0x00000000 nkclock vmlinux EXPORT_SYMBOL_GPL 0x00000000 dma_contiguous_default_area vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_pelt_thermal_tp vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_free_hw vmlinux EXPORT_SYMBOL @@ -11528,6 +11576,7 @@ 0x00000000 ZSTD_CStreamWorkspaceBound vmlinux EXPORT_SYMBOL 0x00000000 unregister_key_type vmlinux EXPORT_SYMBOL 0x00000000 nfs_commit_inode vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_put_iovec vmlinux EXPORT_SYMBOL_GPL 0x00000000 rpc_shutdown_client vmlinux EXPORT_SYMBOL_GPL 0x00000000 skb_mpls_pop vmlinux EXPORT_SYMBOL_GPL 0x00000000 iio_buffer_set_attrs vmlinux EXPORT_SYMBOL_GPL @@ -11554,6 +11603,7 @@ 0x00000000 gpiod_is_active_low vmlinux EXPORT_SYMBOL_GPL 0x00000000 __kfifo_len_r vmlinux EXPORT_SYMBOL 0x00000000 vfs_create_mount vmlinux EXPORT_SYMBOL +0x00000000 rtdm_iomap_to_user vmlinux EXPORT_SYMBOL_GPL 0x00000000 ring_buffer_iter_advance vmlinux EXPORT_SYMBOL_GPL 0x00000000 srcutorture_get_gp_data vmlinux EXPORT_SYMBOL_GPL 0x00000000 read_sanitised_ftr_reg vmlinux EXPORT_SYMBOL_GPL @@ -11579,6 +11629,7 @@ 0x00000000 block_truncate_page vmlinux EXPORT_SYMBOL 0x00000000 vfs_tmpfile vmlinux EXPORT_SYMBOL 0x00000000 generic_write_checks vmlinux EXPORT_SYMBOL +0x00000000 cobalt_pop_personality vmlinux EXPORT_SYMBOL_GPL 0x00000000 kthread_park vmlinux EXPORT_SYMBOL_GPL 0x00000000 xfrm_dev_state_flush vmlinux EXPORT_SYMBOL 0x00000000 netdev_increment_features vmlinux EXPORT_SYMBOL @@ -11672,6 +11723,7 @@ 0x00000000 drm_atomic_get_bridge_state vmlinux EXPORT_SYMBOL 0x00000000 of_phy_simple_xlate vmlinux EXPORT_SYMBOL_GPL 0x00000000 mpi_ec_get_affine vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_fd_lock vmlinux EXPORT_SYMBOL_GPL 0x00000000 dma_can_mmap vmlinux EXPORT_SYMBOL_GPL 0x00000000 wake_up_process vmlinux EXPORT_SYMBOL 0x00000000 fs_overflowuid vmlinux EXPORT_SYMBOL @@ -11704,6 +11756,7 @@ 0x00000000 blk_mq_delay_run_hw_queue vmlinux EXPORT_SYMBOL 0x00000000 crypto_alg_tested vmlinux EXPORT_SYMBOL_GPL 0x00000000 
iomap_seek_hole vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_event_timedwait vmlinux EXPORT_SYMBOL_GPL 0x00000000 sprint_symbol_no_offset vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_pelt_thermal_tp vmlinux EXPORT_SYMBOL_GPL 0x00000000 led_init_core vmlinux EXPORT_SYMBOL_GPL @@ -11793,6 +11846,7 @@ 0x00000000 pci_scan_root_bus vmlinux EXPORT_SYMBOL 0x00000000 create_empty_buffers vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 init_special_inode vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver +0x00000000 rtdm_fd_fcntl vmlinux EXPORT_SYMBOL_GPL 0x00000000 on_each_cpu_cond vmlinux EXPORT_SYMBOL 0x00000000 cfg80211_mgmt_tx_status vmlinux EXPORT_SYMBOL 0x00000000 __xfrm_dst_lookup vmlinux EXPORT_SYMBOL @@ -11956,6 +12010,8 @@ 0x00000000 gpiochip_add_pingroup_range vmlinux EXPORT_SYMBOL_GPL 0x00000000 klist_node_attached vmlinux EXPORT_SYMBOL_GPL 0x00000000 rhashtable_walk_exit vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnselect vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnheap_destroy vmlinux EXPORT_SYMBOL_GPL 0x00000000 sched_trace_rd_span vmlinux EXPORT_SYMBOL_GPL 0x00000000 flush_work vmlinux EXPORT_SYMBOL_GPL 0x00000000 udp_tunnel_notify_add_rx_port vmlinux EXPORT_SYMBOL_GPL @@ -11983,6 +12039,7 @@ 0x00000000 pci_msi_unmask_irq vmlinux EXPORT_SYMBOL_GPL 0x00000000 dump_stack vmlinux EXPORT_SYMBOL 0x00000000 mpage_writepage vmlinux EXPORT_SYMBOL +0x00000000 cobalt_pipeline vmlinux EXPORT_SYMBOL_GPL 0x00000000 ring_buffer_record_off vmlinux EXPORT_SYMBOL_GPL 0x00000000 set_security_override vmlinux EXPORT_SYMBOL 0x00000000 raw_seq_start vmlinux EXPORT_SYMBOL_GPL @@ -12005,6 +12062,7 @@ 0x00000000 crypto_unregister_shash vmlinux EXPORT_SYMBOL_GPL 0x00000000 crypto_unregister_ahash vmlinux EXPORT_SYMBOL_GPL 0x00000000 kmalloc_order_trace vmlinux EXPORT_SYMBOL +0x00000000 xnregistry_enter vmlinux EXPORT_SYMBOL_GPL 0x00000000 stop_one_cpu_nowait vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_hrtimer_expire_exit vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_sched_stat_runtime vmlinux EXPORT_SYMBOL_GPL @@ -12013,7 +12071,6 @@ 0x00000000 tcp_sock_set_user_timeout vmlinux EXPORT_SYMBOL 0x00000000 snd_ctl_find_numid vmlinux EXPORT_SYMBOL 0x00000000 devm_devfreq_add_device vmlinux EXPORT_SYMBOL -0x00000000 cpufreq_dbs_governor_start vmlinux EXPORT_SYMBOL_GPL 0x00000000 policy_has_boost_freq vmlinux EXPORT_SYMBOL_GPL 0x00000000 dm_bio_detain vmlinux EXPORT_SYMBOL_GPL 0x00000000 typec_port_register_altmodes vmlinux EXPORT_SYMBOL_GPL @@ -12080,6 +12137,8 @@ 0x00000000 gpiod_set_raw_value_cansleep vmlinux EXPORT_SYMBOL_GPL 0x00000000 __sg_alloc_table vmlinux EXPORT_SYMBOL 0x00000000 shrink_dcache_sb vmlinux EXPORT_SYMBOL +0x00000000 xnthread_set_mode vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnregistry_remove vmlinux EXPORT_SYMBOL_GPL 0x00000000 __tracepoint_xdp_bulk_tx vmlinux EXPORT_SYMBOL_GPL 0x00000000 percpu_free_rwsem vmlinux EXPORT_SYMBOL_GPL 0x00000000 param_ops_int vmlinux EXPORT_SYMBOL @@ -12096,7 +12155,6 @@ 0x00000000 page_cache_sync_ra vmlinux EXPORT_SYMBOL_GPL 0x00000000 raw_v4_hashinfo vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_soc_bytes_info_ext vmlinux EXPORT_SYMBOL_GPL -0x00000000 cpuidle_driver_state_disabled vmlinux EXPORT_SYMBOL_GPL 0x00000000 typec_register_plug vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_gem_object_init vmlinux EXPORT_SYMBOL 0x00000000 devm_clk_hw_get_clk vmlinux EXPORT_SYMBOL_GPL @@ -12190,7 +12248,6 @@ 0x00000000 nf_conntrack_helper_unregister vmlinux EXPORT_SYMBOL_GPL 0x00000000 
xdp_rxq_info_unreg_mem_model vmlinux EXPORT_SYMBOL_GPL 0x00000000 i2c_for_each_dev vmlinux EXPORT_SYMBOL_GPL -0x00000000 ar9003_mci_setup vmlinux EXPORT_SYMBOL 0x00000000 dev_driver_string vmlinux EXPORT_SYMBOL 0x00000000 corestack_driver_control vmlinux EXPORT_SYMBOL 0x00000000 pcie_get_readrq vmlinux EXPORT_SYMBOL @@ -12240,13 +12297,13 @@ 0x00000000 iunique vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 try_lookup_one_len vmlinux EXPORT_SYMBOL 0x00000000 __traceiter_kmem_cache_free vmlinux EXPORT_SYMBOL +0x00000000 cobalt_remove_state_chain vmlinux EXPORT_SYMBOL_GPL 0x00000000 unregister_die_notifier vmlinux EXPORT_SYMBOL_GPL 0x00000000 xfrm_policy_walk_done vmlinux EXPORT_SYMBOL 0x00000000 inet_dgram_ops vmlinux EXPORT_SYMBOL 0x00000000 media_pipeline_start vmlinux EXPORT_SYMBOL_GPL 0x00000000 input_release_device vmlinux EXPORT_SYMBOL 0x00000000 usb_add_gadget vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_btcoex_init_mci vmlinux EXPORT_SYMBOL 0x00000000 ata_dev_next vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_format_info vmlinux EXPORT_SYMBOL 0x00000000 __do_once_done vmlinux EXPORT_SYMBOL @@ -12269,8 +12326,6 @@ 0x00000000 hwmon_device_register vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_interface_id vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_store_new_id vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_get_txq_props vmlinux EXPORT_SYMBOL -0x00000000 ath9k_hw_set_txq_props vmlinux EXPORT_SYMBOL 0x00000000 sdev_evt_send_simple vmlinux EXPORT_SYMBOL_GPL 0x00000000 of_find_mipi_dsi_host_by_node vmlinux EXPORT_SYMBOL 0x00000000 drm_fb_helper_blank vmlinux EXPORT_SYMBOL @@ -12281,6 +12336,7 @@ 0x00000000 seq_hex_dump vmlinux EXPORT_SYMBOL 0x00000000 frame_vector_create vmlinux EXPORT_SYMBOL 0x00000000 truncate_inode_pages_final vmlinux EXPORT_SYMBOL +0x00000000 xnvfile_get_integer vmlinux EXPORT_SYMBOL_GPL 0x00000000 cpu_pm_enter vmlinux EXPORT_SYMBOL_GPL 0x00000000 freezer_cgrp_subsys vmlinux EXPORT_SYMBOL_GPL 0x00000000 __wake_up_locked_key vmlinux EXPORT_SYMBOL_GPL @@ -12342,6 +12398,8 @@ 0x00000000 sgl_free_order vmlinux EXPORT_SYMBOL 0x00000000 debugfs_create_file vmlinux EXPORT_SYMBOL_GPL 0x00000000 new_inode vmlinux EXPORT_SYMBOL +0x00000000 xnbufd_unmap_kwrite vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnbufd_unmap_uwrite vmlinux EXPORT_SYMBOL_GPL 0x00000000 rpc_restart_call_prepare vmlinux EXPORT_SYMBOL_GPL 0x00000000 build_skb_around vmlinux EXPORT_SYMBOL 0x00000000 snd_ctl_find_id vmlinux EXPORT_SYMBOL @@ -12377,6 +12435,7 @@ 0x00000000 nfs_refresh_inode vmlinux EXPORT_SYMBOL_GPL 0x00000000 generic_remap_file_range_prep vmlinux EXPORT_SYMBOL 0x00000000 wbc_attach_and_unlock_inode vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_timer_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 dynevent_create vmlinux EXPORT_SYMBOL_GPL 0x00000000 ndo_dflt_fdb_del vmlinux EXPORT_SYMBOL 0x00000000 sdio_release_host vmlinux EXPORT_SYMBOL_GPL @@ -12402,6 +12461,7 @@ 0x00000000 __tracepoint_block_split vmlinux EXPORT_SYMBOL_GPL 0x00000000 jbd2_journal_release_jbd_inode vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 wbc_account_cgroup_owner vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_fd_close vmlinux EXPORT_SYMBOL_GPL 0x00000000 stack_trace_save vmlinux EXPORT_SYMBOL_GPL 0x00000000 inet_csk_reset_keepalive_timer vmlinux EXPORT_SYMBOL 0x00000000 __traceiter_tcp_send_reset vmlinux EXPORT_SYMBOL_GPL @@ -12429,9 +12489,9 @@ 0x00000000 of_get_cpu_state_node vmlinux EXPORT_SYMBOL 0x00000000 v4l2_m2m_dqbuf vmlinux EXPORT_SYMBOL_GPL 0x00000000 
i2c_mux_alloc vmlinux EXPORT_SYMBOL_GPL -0x00000000 ar9003_paprd_setup_gain_table vmlinux EXPORT_SYMBOL 0x00000000 drm_gem_dumb_destroy vmlinux EXPORT_SYMBOL 0x00000000 drm_atomic_helper_check_plane_damage vmlinux EXPORT_SYMBOL +0x00000000 soft_cursor vmlinux EXPORT_SYMBOL 0x00000000 __rht_bucket_nested vmlinux EXPORT_SYMBOL_GPL 0x00000000 fsnotify_init_mark vmlinux EXPORT_SYMBOL_GPL 0x00000000 ping_seq_stop vmlinux EXPORT_SYMBOL_GPL @@ -12550,6 +12610,7 @@ 0x00000000 key_put vmlinux EXPORT_SYMBOL 0x00000000 bmap vmlinux EXPORT_SYMBOL 0x00000000 kmemdup vmlinux EXPORT_SYMBOL +0x00000000 nklock vmlinux EXPORT_SYMBOL_GPL 0x00000000 dma_free_pages vmlinux EXPORT_SYMBOL_GPL 0x00000000 param_set_hexint vmlinux EXPORT_SYMBOL 0x00000000 show_regs vmlinux EXPORT_SYMBOL_GPL @@ -12562,6 +12623,7 @@ 0x00000000 __drm_atomic_helper_plane_reset vmlinux EXPORT_SYMBOL 0x00000000 prandom_bytes vmlinux EXPORT_SYMBOL 0x00000000 generic_parse_monolithic vmlinux EXPORT_SYMBOL +0x00000000 inband_stage vmlinux EXPORT_SYMBOL_GPL 0x00000000 cfg80211_stop_iface vmlinux EXPORT_SYMBOL 0x00000000 tcp_gro_complete vmlinux EXPORT_SYMBOL 0x00000000 tcp_register_congestion_control vmlinux EXPORT_SYMBOL_GPL @@ -12619,7 +12681,6 @@ 0x00000000 snd_soc_close_delayed_work vmlinux EXPORT_SYMBOL_GPL 0x00000000 tee_device_unregister vmlinux EXPORT_SYMBOL_GPL 0x00000000 iio_trigger_free vmlinux EXPORT_SYMBOL -0x00000000 governor_sysfs_ops vmlinux EXPORT_SYMBOL_GPL 0x00000000 kbase_ipa_term_model vmlinux EXPORT_SYMBOL 0x00000000 drm_atomic_helper_commit_modeset_enables vmlinux EXPORT_SYMBOL 0x00000000 __tracepoint_block_rq_issue vmlinux EXPORT_SYMBOL_GPL @@ -12640,6 +12701,7 @@ 0x00000000 memory_read_from_buffer vmlinux EXPORT_SYMBOL 0x00000000 seq_read_iter vmlinux EXPORT_SYMBOL 0x00000000 alarmtimer_get_rtcdev vmlinux EXPORT_SYMBOL_GPL +0x00000000 irq_pipeline_active vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_probereq_get vmlinux EXPORT_SYMBOL 0x00000000 ieee80211_send_bar vmlinux EXPORT_SYMBOL 0x00000000 xprt_force_disconnect vmlinux EXPORT_SYMBOL_GPL @@ -12695,10 +12757,10 @@ 0x00000000 refresh_frequency_limits vmlinux EXPORT_SYMBOL 0x00000000 typec_altmode_exit vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_property_create_range vmlinux EXPORT_SYMBOL +0x00000000 tick_install_proxy vmlinux EXPORT_SYMBOL_GPL 0x00000000 getboottime64 vmlinux EXPORT_SYMBOL_GPL 0x00000000 dequeue_signal vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_role_switch_register vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_cmn_beacon_config_sta vmlinux EXPORT_SYMBOL 0x00000000 clocks_calc_mult_shift vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_iter_keys vmlinux EXPORT_SYMBOL 0x00000000 flow_rule_match_mpls vmlinux EXPORT_SYMBOL @@ -12722,6 +12784,7 @@ 0x00000000 pci_reset_function_locked vmlinux EXPORT_SYMBOL_GPL 0x00000000 pinctrl_add_gpio_ranges vmlinux EXPORT_SYMBOL_GPL 0x00000000 phy_pm_runtime_forbid vmlinux EXPORT_SYMBOL_GPL +0x00000000 synthetic_irq_domain vmlinux EXPORT_SYMBOL_GPL 0x00000000 rfkill_blocked vmlinux EXPORT_SYMBOL 0x00000000 ip6_push_pending_frames vmlinux EXPORT_SYMBOL_GPL 0x00000000 dev_change_net_namespace vmlinux EXPORT_SYMBOL_GPL @@ -12733,6 +12796,7 @@ 0x00000000 blk_queue_rq_timeout vmlinux EXPORT_SYMBOL_GPL 0x00000000 blk_queue_flag_clear vmlinux EXPORT_SYMBOL 0x00000000 nfs_path vmlinux EXPORT_SYMBOL_GPL +0x00000000 rcu_oob_prepare_lock vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_sched_stat_runtime vmlinux EXPORT_SYMBOL_GPL 0x00000000 cache_seq_stop_rcu vmlinux EXPORT_SYMBOL_GPL 0x00000000 xdr_init_decode_pages vmlinux 
EXPORT_SYMBOL_GPL @@ -12767,6 +12831,7 @@ 0x00000000 __SCK__tp_func_kmem_cache_free vmlinux EXPORT_SYMBOL 0x00000000 ring_buffer_dropped_events_cpu vmlinux EXPORT_SYMBOL_GPL 0x00000000 posix_clock_register vmlinux EXPORT_SYMBOL_GPL +0x00000000 inband_irq_save vmlinux EXPORT_SYMBOL 0x00000000 async_schedule_node_domain vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_calc_tx_airtime vmlinux EXPORT_SYMBOL_GPL 0x00000000 usbnet_read_cmd_nopm vmlinux EXPORT_SYMBOL_GPL @@ -12782,7 +12847,6 @@ 0x00000000 unix_tot_inflight vmlinux EXPORT_SYMBOL 0x00000000 xfrm_ealg_get_byid vmlinux EXPORT_SYMBOL_GPL 0x00000000 xt_copy_counters vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_set_rx_bufsize vmlinux EXPORT_SYMBOL 0x00000000 syscon_regmap_lookup_by_compatible vmlinux EXPORT_SYMBOL_GPL 0x00000000 request_partial_firmware_into_buf vmlinux EXPORT_SYMBOL 0x00000000 device_pm_wait_for_dev vmlinux EXPORT_SYMBOL_GPL @@ -12809,6 +12873,7 @@ 0x00000000 mipi_dsi_dcs_write vmlinux EXPORT_SYMBOL 0x00000000 iommu_dma_enable_best_fit_algo vmlinux EXPORT_SYMBOL 0x00000000 linear_range_get_max_value vmlinux EXPORT_SYMBOL_GPL +0x00000000 rtdm_fd_sendmsg vmlinux EXPORT_SYMBOL_GPL 0x00000000 housekeeping_overridden vmlinux EXPORT_SYMBOL_GPL 0x00000000 ip_tunnel_encap_del_ops vmlinux EXPORT_SYMBOL 0x00000000 dst_blackhole_redirect vmlinux EXPORT_SYMBOL_GPL @@ -12835,6 +12900,7 @@ 0x00000000 xa_find vmlinux EXPORT_SYMBOL 0x00000000 swap_type_to_swap_info vmlinux EXPORT_SYMBOL_GPL 0x00000000 atomic_dec_and_mutex_lock vmlinux EXPORT_SYMBOL +0x00000000 dovetail_leave_inband vmlinux EXPORT_SYMBOL_GPL 0x00000000 kthread_delayed_work_timer_fn vmlinux EXPORT_SYMBOL 0x00000000 __traceiter_irq_handler_entry vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_ap_probereq_get vmlinux EXPORT_SYMBOL @@ -12855,6 +12921,7 @@ 0x00000000 crypto_grab_ahash vmlinux EXPORT_SYMBOL_GPL 0x00000000 kernel_read_file_from_path_initns vmlinux EXPORT_SYMBOL_GPL 0x00000000 inode_set_flags vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver +0x00000000 cobalt_signal_alloc vmlinux EXPORT_SYMBOL_GPL 0x00000000 dma_mmap_attrs vmlinux EXPORT_SYMBOL 0x00000000 ieee80211_resume_disconnect vmlinux EXPORT_SYMBOL_GPL 0x00000000 bt_accept_enqueue vmlinux EXPORT_SYMBOL @@ -12934,7 +13001,6 @@ 0x00000000 flush_rcu_work vmlinux EXPORT_SYMBOL 0x00000000 __cpuhp_state_remove_instance vmlinux EXPORT_SYMBOL_GPL 0x00000000 unregister_inetaddr_notifier vmlinux EXPORT_SYMBOL -0x00000000 ath9k_cmn_update_txpow vmlinux EXPORT_SYMBOL 0x00000000 kbase_csf_firmware_global_output vmlinux EXPORT_SYMBOL 0x00000000 mipi_dsi_compression_mode vmlinux EXPORT_SYMBOL 0x00000000 amba_release_regions vmlinux EXPORT_SYMBOL @@ -12944,9 +13010,9 @@ 0x00000000 af_alg_free_sg vmlinux EXPORT_SYMBOL_GPL 0x00000000 af_alg_make_sg vmlinux EXPORT_SYMBOL_GPL 0x00000000 get_kernel_pages vmlinux EXPORT_SYMBOL_GPL +0x00000000 cobalt_register_personality vmlinux EXPORT_SYMBOL_GPL 0x00000000 perf_event_enable vmlinux EXPORT_SYMBOL_GPL 0x00000000 nexthop_free_rcu vmlinux EXPORT_SYMBOL_GPL -0x00000000 od_register_powersave_bias_handler vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_dwc3_readl vmlinux EXPORT_SYMBOL_GPL 0x00000000 phy_reset_after_clk_enable vmlinux EXPORT_SYMBOL 0x00000000 errno_to_blk_status vmlinux EXPORT_SYMBOL_GPL @@ -12958,8 +13024,6 @@ 0x00000000 nf_hook_entries_delete_raw vmlinux EXPORT_SYMBOL_GPL 0x00000000 netlink_ack vmlinux EXPORT_SYMBOL 0x00000000 sock_no_bind vmlinux EXPORT_SYMBOL -0x00000000 ath_printk vmlinux EXPORT_SYMBOL -0x00000000 
ath9k_hw_disable vmlinux EXPORT_SYMBOL 0x00000000 phy_unregister_fixup_for_uid vmlinux EXPORT_SYMBOL 0x00000000 dmaengine_unmap_put vmlinux EXPORT_SYMBOL_GPL 0x00000000 clk_bulk_unprepare vmlinux EXPORT_SYMBOL_GPL @@ -12972,7 +13036,6 @@ 0x00000000 dm_tm_create_with_sm vmlinux EXPORT_SYMBOL_GPL 0x00000000 v4l2_event_unsubscribe_all vmlinux EXPORT_SYMBOL_GPL 0x00000000 xhci_check_bandwidth vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_resettxqueue vmlinux EXPORT_SYMBOL 0x00000000 kbase_mem_pool_grow vmlinux EXPORT_SYMBOL 0x00000000 dw_hdmi_cec_wake_ops_register vmlinux EXPORT_SYMBOL_GPL 0x00000000 analogix_dp_probe vmlinux EXPORT_SYMBOL_GPL @@ -13011,7 +13074,9 @@ 0x00000000 __SCK__tp_func_device_pm_callback_end vmlinux EXPORT_SYMBOL_GPL 0x00000000 is_module_sig_enforced vmlinux EXPORT_SYMBOL 0x00000000 get_state_synchronize_rcu vmlinux EXPORT_SYMBOL_GPL +0x00000000 irq_pipeline vmlinux EXPORT_SYMBOL 0x00000000 vprintk vmlinux EXPORT_SYMBOL +0x00000000 irq_send_oob_ipi vmlinux EXPORT_SYMBOL_GPL 0x00000000 ipv6_sock_mc_join vmlinux EXPORT_SYMBOL 0x00000000 __udp4_lib_lookup vmlinux EXPORT_SYMBOL_GPL 0x00000000 tcp_poll vmlinux EXPORT_SYMBOL @@ -13036,9 +13101,9 @@ 0x00000000 pwm_request_from_chip vmlinux EXPORT_SYMBOL_GPL 0x00000000 register_key_type vmlinux EXPORT_SYMBOL 0x00000000 grab_cache_page_write_begin vmlinux EXPORT_SYMBOL +0x00000000 rtdm_fd_get_setsockopt_args vmlinux EXPORT_SYMBOL_GPL 0x00000000 lock_system_sleep vmlinux EXPORT_SYMBOL_GPL 0x00000000 rt_mutex_lock_interruptible vmlinux EXPORT_SYMBOL_GPL -0x00000000 clocksource_mmio_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 rk_get_temperature vmlinux EXPORT_SYMBOL 0x00000000 vb2_core_reqbufs vmlinux EXPORT_SYMBOL_GPL 0x00000000 nanddev_init vmlinux EXPORT_SYMBOL_GPL @@ -13062,6 +13127,8 @@ 0x00000000 simple_write_begin vmlinux EXPORT_SYMBOL 0x00000000 redirty_page_for_writepage vmlinux EXPORT_SYMBOL 0x00000000 bdi_set_max_ratio vmlinux EXPORT_SYMBOL +0x00000000 xnsynch_requeue_sleeper vmlinux EXPORT_SYMBOL_GPL +0x00000000 inband_irq_restore vmlinux EXPORT_SYMBOL 0x00000000 ieee80211_ctstoself_get vmlinux EXPORT_SYMBOL 0x00000000 nf_conntrack_htable_size vmlinux EXPORT_SYMBOL_GPL 0x00000000 netdev_emerg vmlinux EXPORT_SYMBOL @@ -13090,6 +13157,8 @@ 0x00000000 blk_check_plugged vmlinux EXPORT_SYMBOL 0x00000000 debugfs_initialized vmlinux EXPORT_SYMBOL_GPL 0x00000000 lookup_one_len_unlocked vmlinux EXPORT_SYMBOL +0x00000000 rtdm_event_wait vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnclock_set_wallclock vmlinux EXPORT_SYMBOL_GPL 0x00000000 bpf_map_put vmlinux EXPORT_SYMBOL_GPL 0x00000000 tracing_snapshot_cond_enable vmlinux EXPORT_SYMBOL_GPL 0x00000000 task_cputime_adjusted vmlinux EXPORT_SYMBOL_GPL @@ -13126,6 +13195,7 @@ 0x00000000 fb_get_buffer_offset vmlinux EXPORT_SYMBOL 0x00000000 key_payload_reserve vmlinux EXPORT_SYMBOL 0x00000000 pin_user_pages_fast_only vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnthread_unblock vmlinux EXPORT_SYMBOL_GPL 0x00000000 ftrace_set_notrace vmlinux EXPORT_SYMBOL_GPL 0x00000000 blocking_notifier_chain_register vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_get_key_rx_seq vmlinux EXPORT_SYMBOL @@ -13187,6 +13257,7 @@ 0x00000000 drm_atomic_helper_commit_planes_on_crtc vmlinux EXPORT_SYMBOL 0x00000000 regulator_list_hardware_vsel vmlinux EXPORT_SYMBOL_GPL 0x00000000 kmem_cache_size vmlinux EXPORT_SYMBOL +0x00000000 rtdm_get_iovec vmlinux EXPORT_SYMBOL_GPL 0x00000000 regulatory_hint vmlinux EXPORT_SYMBOL 0x00000000 wiphy_rfkill_start_polling vmlinux EXPORT_SYMBOL 0x00000000 inet_ioctl vmlinux EXPORT_SYMBOL @@ 
-13235,6 +13306,7 @@ 0x00000000 iomap_file_buffered_write vmlinux EXPORT_SYMBOL_GPL 0x00000000 get_cached_acl vmlinux EXPORT_SYMBOL 0x00000000 wait_for_stable_page vmlinux EXPORT_SYMBOL_GPL +0x00000000 __xntimer_set_affinity vmlinux EXPORT_SYMBOL_GPL 0x00000000 event_triggers_post_call vmlinux EXPORT_SYMBOL_GPL 0x00000000 cpu_bit_bitmap vmlinux EXPORT_SYMBOL_GPL 0x00000000 rpc_free_iostats vmlinux EXPORT_SYMBOL_GPL @@ -13250,6 +13322,7 @@ 0x00000000 crypto_register_kpp vmlinux EXPORT_SYMBOL_GPL 0x00000000 touch_buffer vmlinux EXPORT_SYMBOL 0x00000000 alloc_file_pseudo vmlinux EXPORT_SYMBOL +0x00000000 xnclock_register vmlinux EXPORT_SYMBOL_GPL 0x00000000 prepare_to_swait_event vmlinux EXPORT_SYMBOL 0x00000000 __kthread_should_park vmlinux EXPORT_SYMBOL_GPL 0x00000000 wiphy_apply_custom_regulatory vmlinux EXPORT_SYMBOL @@ -13292,6 +13365,7 @@ 0x00000000 phy_pm_runtime_put_sync vmlinux EXPORT_SYMBOL_GPL 0x00000000 fat_update_time vmlinux EXPORT_SYMBOL_GPL 0x00000000 reclaim_pages vmlinux EXPORT_SYMBOL_GPL +0x00000000 cobalt_thread_lookup vmlinux EXPORT_SYMBOL_GPL 0x00000000 rcu_inkernel_boot_has_ended vmlinux EXPORT_SYMBOL_GPL 0x00000000 try_wait_for_completion vmlinux EXPORT_SYMBOL 0x00000000 param_set_ulong vmlinux EXPORT_SYMBOL @@ -13299,7 +13373,6 @@ 0x00000000 tcp_mmap vmlinux EXPORT_SYMBOL 0x00000000 nf_ct_expect_alloc vmlinux EXPORT_SYMBOL_GPL 0x00000000 of_alias_get_alias_list vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_hw_intrpend vmlinux EXPORT_SYMBOL 0x00000000 spi_unregister_device vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_fb_helper_deferred_io vmlinux EXPORT_SYMBOL 0x00000000 drm_kms_helper_poll_init vmlinux EXPORT_SYMBOL @@ -13335,9 +13408,9 @@ 0x00000000 __SCK__tp_func_nfs_xdr_status vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_kmem_cache_alloc_node vmlinux EXPORT_SYMBOL 0x00000000 register_user_hw_breakpoint vmlinux EXPORT_SYMBOL_GPL +0x00000000 hard_preempt_disable vmlinux EXPORT_SYMBOL_GPL 0x00000000 cfg80211_rx_control_port vmlinux EXPORT_SYMBOL 0x00000000 xfrm_policy_walk_init vmlinux EXPORT_SYMBOL -0x00000000 cpufreq_dbs_governor_limits vmlinux EXPORT_SYMBOL_GPL 0x00000000 driver_attach vmlinux EXPORT_SYMBOL_GPL 0x00000000 gpiochip_irq_map vmlinux EXPORT_SYMBOL_GPL 0x00000000 ZSTD_copyDCtx vmlinux EXPORT_SYMBOL @@ -13393,6 +13466,7 @@ 0x00000000 nfs_may_open vmlinux EXPORT_SYMBOL_GPL 0x00000000 jbd2_journal_dirty_metadata vmlinux EXPORT_SYMBOL VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver 0x00000000 account_locked_vm vmlinux EXPORT_SYMBOL_GPL +0x00000000 cobalt_add_config_chain vmlinux EXPORT_SYMBOL_GPL 0x00000000 io_cgrp_subsys_enabled_key vmlinux EXPORT_SYMBOL_GPL 0x00000000 handle_fasteoi_irq vmlinux EXPORT_SYMBOL_GPL 0x00000000 task_rq_lock vmlinux EXPORT_SYMBOL_GPL @@ -13442,6 +13516,7 @@ 0x00000000 drm_vblank_work_schedule vmlinux EXPORT_SYMBOL 0x00000000 drm_display_mode_to_videomode vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_dp_downstream_420_passthrough vmlinux EXPORT_SYMBOL +0x00000000 fbcon_modechange_possible vmlinux EXPORT_SYMBOL_GPL 0x00000000 fb_get_options vmlinux EXPORT_SYMBOL 0x00000000 phy_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 kasprintf vmlinux EXPORT_SYMBOL @@ -13532,7 +13607,6 @@ 0x00000000 dev_pm_opp_of_register_em vmlinux EXPORT_SYMBOL_GPL 0x00000000 gserial_suspend vmlinux EXPORT_SYMBOL_GPL 0x00000000 usb_intf_get_dma_device vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath_gen_timer_isr vmlinux EXPORT_SYMBOL 0x00000000 dev_pm_qos_expose_latency_tolerance vmlinux EXPORT_SYMBOL_GPL 0x00000000 kbase_gator_hwcnt_init_names vmlinux 
EXPORT_SYMBOL 0x00000000 tty_put_char vmlinux EXPORT_SYMBOL_GPL @@ -13569,6 +13643,7 @@ 0x00000000 dw_pcie_find_ext_capability vmlinux EXPORT_SYMBOL_GPL 0x00000000 nfs_free_inode vmlinux EXPORT_SYMBOL_GPL 0x00000000 seq_list_next vmlinux EXPORT_SYMBOL +0x00000000 xnselector_destroy vmlinux EXPORT_SYMBOL_GPL 0x00000000 __irq_alloc_domain_generic_chips vmlinux EXPORT_SYMBOL_GPL 0x00000000 __SCK__tp_func_pelt_dl_tp vmlinux EXPORT_SYMBOL_GPL 0x00000000 unregister_inet6addr_notifier vmlinux EXPORT_SYMBOL @@ -13658,7 +13733,6 @@ 0x00000000 nvmem_register_notifier vmlinux EXPORT_SYMBOL_GPL 0x00000000 mmc_of_parse_voltage vmlinux EXPORT_SYMBOL 0x00000000 dm_array_cursor_end vmlinux EXPORT_SYMBOL_GPL -0x00000000 ath9k_cmn_process_rssi vmlinux EXPORT_SYMBOL 0x00000000 phy_mii_ioctl vmlinux EXPORT_SYMBOL 0x00000000 spi_bitbang_init vmlinux EXPORT_SYMBOL_GPL 0x00000000 drm_atomic_helper_plane_destroy_state vmlinux EXPORT_SYMBOL @@ -13713,6 +13787,8 @@ 0x00000000 unregister_asymmetric_key_parser vmlinux EXPORT_SYMBOL_GPL 0x00000000 fuse_dev_release vmlinux EXPORT_SYMBOL_GPL 0x00000000 default_llseek vmlinux EXPORT_SYMBOL +0x00000000 xnbufd_unmap_kread vmlinux EXPORT_SYMBOL_GPL +0x00000000 xnbufd_unmap_uread vmlinux EXPORT_SYMBOL_GPL 0x00000000 ring_buffer_oldest_event_ts vmlinux EXPORT_SYMBOL_GPL 0x00000000 ipi_get_hwirq vmlinux EXPORT_SYMBOL_GPL 0x00000000 svc_bind vmlinux EXPORT_SYMBOL_GPL @@ -13727,12 +13803,12 @@ 0x00000000 dw_hdmi_probe vmlinux EXPORT_SYMBOL_GPL 0x00000000 pinconf_generic_dt_subnode_to_map vmlinux EXPORT_SYMBOL_GPL 0x00000000 guid_parse vmlinux EXPORT_SYMBOL +0x00000000 xnbufd_map_kmem vmlinux EXPORT_SYMBOL_GPL 0x00000000 io_cgrp_subsys_on_dfl_key vmlinux EXPORT_SYMBOL_GPL 0x00000000 ieee80211_get_unsol_bcast_probe_resp_tmpl vmlinux EXPORT_SYMBOL 0x00000000 rpc_peeraddr vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_dmaengine_pcm_unregister vmlinux EXPORT_SYMBOL_GPL 0x00000000 snd_soc_dapm_nc_pin_unlocked vmlinux EXPORT_SYMBOL_GPL -0x00000000 cpuidle_register_driver vmlinux EXPORT_SYMBOL_GPL 0x00000000 ptp_cancel_worker_sync vmlinux EXPORT_SYMBOL 0x00000000 phylink_mii_c22_pcs_set_advertisement vmlinux EXPORT_SYMBOL_GPL 0x00000000 scsi_eh_prep_cmnd vmlinux EXPORT_SYMBOL @@ -13759,6 +13835,7 @@ 0x00000000 vfs_fsync vmlinux EXPORT_SYMBOL 0x00000000 poll_freewait vmlinux EXPORT_SYMBOL 0x00000000 si_meminfo vmlinux EXPORT_SYMBOL +0x00000000 xnbufd_invalidate vmlinux EXPORT_SYMBOL_GPL 0x00000000 kthread_create_on_node vmlinux EXPORT_SYMBOL 0x00000000 snd_timer_pause vmlinux EXPORT_SYMBOL 0x00000000 typec_cable_set_identity vmlinux EXPORT_SYMBOL_GPL @@ -13804,6 +13881,7 @@ 0x00000000 __SCK__tp_func_usb_gadget_disconnect vmlinux EXPORT_SYMBOL_GPL 0x00000000 property_entries_free vmlinux EXPORT_SYMBOL_GPL 0x00000000 gpio_to_desc vmlinux EXPORT_SYMBOL_GPL +0x00000000 xntimer_destroy vmlinux EXPORT_SYMBOL_GPL 0x00000000 bpf_offload_dev_destroy vmlinux EXPORT_SYMBOL_GPL 0x00000000 __traceiter_module_get vmlinux EXPORT_SYMBOL 0x00000000 tcp_ca_openreq_child vmlinux EXPORT_SYMBOL_GPL diff --git a/kernel/xenomai-v3.2.4/.clang-format b/kernel/xenomai-v3.2.4/.clang-format new file mode 100755 index 0000000..2ffd69a --- /dev/null +++ b/kernel/xenomai-v3.2.4/.clang-format @@ -0,0 +1,493 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# clang-format configuration file. Intended for clang-format >= 4. 
+# +# For more information, see: +# +# Documentation/process/clang-format.rst +# https://clang.llvm.org/docs/ClangFormat.html +# https://clang.llvm.org/docs/ClangFormatStyleOptions.html +# +--- +AccessModifierOffset: -4 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +#AlignEscapedNewlines: Left # Unknown to clang-format-4.0 +AlignOperands: true +AlignTrailingComments: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: None +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: false +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: true + AfterNamespace: true + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + #AfterExternBlock: false # Unknown to clang-format-5.0 + BeforeCatch: false + BeforeElse: false + IndentBraces: false + #SplitEmptyFunction: true # Unknown to clang-format-4.0 + #SplitEmptyRecord: true # Unknown to clang-format-4.0 + #SplitEmptyNamespace: true # Unknown to clang-format-4.0 +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Custom +#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0 +BreakBeforeTernaryOperators: false +BreakConstructorInitializersBeforeComma: false +#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0 +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: false +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +#CompactNamespaces: false # Unknown to clang-format-4.0 +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 8 +ContinuationIndentWidth: 8 +Cpp11BracedListStyle: false +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +#FixNamespaceComments: false # Unknown to clang-format-4.0 + +# Taken from: +# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \ +# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \ +# | sort | uniq +ForEachMacros: + - 'apei_estatus_for_each_section' + - 'ata_for_each_dev' + - 'ata_for_each_link' + - '__ata_qc_for_each' + - 'ata_qc_for_each' + - 'ata_qc_for_each_raw' + - 'ata_qc_for_each_with_internal' + - 'ax25_for_each' + - 'ax25_uid_for_each' + - '__bio_for_each_bvec' + - 'bio_for_each_bvec' + - 'bio_for_each_integrity_vec' + - '__bio_for_each_segment' + - 'bio_for_each_segment' + - 'bio_for_each_segment_all' + - 'bio_list_for_each' + - 'bip_for_each_vec' + - 'blkg_for_each_descendant_post' + - 'blkg_for_each_descendant_pre' + - 'blk_queue_for_each_rl' + - 'bond_for_each_slave' + - 'bond_for_each_slave_rcu' + - 'bpf_for_each_spilled_reg' + - 'btree_for_each_safe128' + - 'btree_for_each_safe32' + - 'btree_for_each_safe64' + - 'btree_for_each_safel' + - 'card_for_each_dev' + - 'cgroup_taskset_for_each' + - 'cgroup_taskset_for_each_leader' + - 'cpufreq_for_each_entry' + - 'cpufreq_for_each_entry_idx' + - 'cpufreq_for_each_valid_entry' + - 'cpufreq_for_each_valid_entry_idx' + - 'css_for_each_child' + - 'css_for_each_descendant_post' + - 'css_for_each_descendant_pre' + - 'device_for_each_child_node' + - 'drm_atomic_crtc_for_each_plane' + - 'drm_atomic_crtc_state_for_each_plane' + - 
'drm_atomic_crtc_state_for_each_plane_state' + - 'drm_atomic_for_each_plane_damage' + - 'drm_connector_for_each_possible_encoder' + - 'drm_for_each_connector_iter' + - 'drm_for_each_crtc' + - 'drm_for_each_encoder' + - 'drm_for_each_encoder_mask' + - 'drm_for_each_fb' + - 'drm_for_each_legacy_plane' + - 'drm_for_each_plane' + - 'drm_for_each_plane_mask' + - 'drm_for_each_privobj' + - 'drm_mm_for_each_hole' + - 'drm_mm_for_each_node' + - 'drm_mm_for_each_node_in_range' + - 'drm_mm_for_each_node_safe' + - 'flow_action_for_each' + - 'for_each_active_drhd_unit' + - 'for_each_active_iommu' + - 'for_each_available_child_of_node' + - 'for_each_bio' + - 'for_each_board_func_rsrc' + - 'for_each_bvec' + - 'for_each_card_components' + - 'for_each_card_links' + - 'for_each_card_links_safe' + - 'for_each_card_prelinks' + - 'for_each_card_rtds' + - 'for_each_card_rtds_safe' + - 'for_each_cgroup_storage_type' + - 'for_each_child_of_node' + - 'for_each_clear_bit' + - 'for_each_clear_bit_from' + - 'for_each_cmsghdr' + - 'for_each_compatible_node' + - 'for_each_component_dais' + - 'for_each_component_dais_safe' + - 'for_each_comp_order' + - 'for_each_console' + - 'for_each_cpu' + - 'for_each_cpu_and' + - 'for_each_cpu_not' + - 'for_each_cpu_wrap' + - 'for_each_dev_addr' + - 'for_each_dma_cap_mask' + - 'for_each_dpcm_be' + - 'for_each_dpcm_be_rollback' + - 'for_each_dpcm_be_safe' + - 'for_each_dpcm_fe' + - 'for_each_drhd_unit' + - 'for_each_dss_dev' + - 'for_each_efi_memory_desc' + - 'for_each_efi_memory_desc_in_map' + - 'for_each_element' + - 'for_each_element_extid' + - 'for_each_element_id' + - 'for_each_endpoint_of_node' + - 'for_each_evictable_lru' + - 'for_each_fib6_node_rt_rcu' + - 'for_each_fib6_walker_rt' + - 'for_each_free_mem_range' + - 'for_each_free_mem_range_reverse' + - 'for_each_func_rsrc' + - 'for_each_hstate' + - 'for_each_if' + - 'for_each_iommu' + - 'for_each_ip_tunnel_rcu' + - 'for_each_irq_nr' + - 'for_each_link_codecs' + - 'for_each_lru' + - 'for_each_matching_node' + - 'for_each_matching_node_and_match' + - 'for_each_memblock' + - 'for_each_memblock_type' + - 'for_each_memcg_cache_index' + - 'for_each_mem_pfn_range' + - 'for_each_mem_range' + - 'for_each_mem_range_rev' + - 'for_each_migratetype_order' + - 'for_each_msi_entry' + - 'for_each_msi_entry_safe' + - 'for_each_net' + - 'for_each_netdev' + - 'for_each_netdev_continue' + - 'for_each_netdev_continue_rcu' + - 'for_each_netdev_feature' + - 'for_each_netdev_in_bond_rcu' + - 'for_each_netdev_rcu' + - 'for_each_netdev_reverse' + - 'for_each_netdev_safe' + - 'for_each_net_rcu' + - 'for_each_new_connector_in_state' + - 'for_each_new_crtc_in_state' + - 'for_each_new_mst_mgr_in_state' + - 'for_each_new_plane_in_state' + - 'for_each_new_private_obj_in_state' + - 'for_each_node' + - 'for_each_node_by_name' + - 'for_each_node_by_type' + - 'for_each_node_mask' + - 'for_each_node_state' + - 'for_each_node_with_cpus' + - 'for_each_node_with_property' + - 'for_each_of_allnodes' + - 'for_each_of_allnodes_from' + - 'for_each_of_cpu_node' + - 'for_each_of_pci_range' + - 'for_each_old_connector_in_state' + - 'for_each_old_crtc_in_state' + - 'for_each_old_mst_mgr_in_state' + - 'for_each_oldnew_connector_in_state' + - 'for_each_oldnew_crtc_in_state' + - 'for_each_oldnew_mst_mgr_in_state' + - 'for_each_oldnew_plane_in_state' + - 'for_each_oldnew_plane_in_state_reverse' + - 'for_each_oldnew_private_obj_in_state' + - 'for_each_old_plane_in_state' + - 'for_each_old_private_obj_in_state' + - 'for_each_online_cpu' + - 'for_each_online_node' + - 
'for_each_online_pgdat' + - 'for_each_pci_bridge' + - 'for_each_pci_dev' + - 'for_each_pci_msi_entry' + - 'for_each_populated_zone' + - 'for_each_possible_cpu' + - 'for_each_present_cpu' + - 'for_each_prime_number' + - 'for_each_prime_number_from' + - 'for_each_process' + - 'for_each_process_thread' + - 'for_each_property_of_node' + - 'for_each_registered_fb' + - 'for_each_reserved_mem_region' + - 'for_each_rtd_codec_dai' + - 'for_each_rtd_codec_dai_rollback' + - 'for_each_rtdcom' + - 'for_each_rtdcom_safe' + - 'for_each_set_bit' + - 'for_each_set_bit_from' + - 'for_each_sg' + - 'for_each_sg_dma_page' + - 'for_each_sg_page' + - 'for_each_sibling_event' + - 'for_each_subelement' + - 'for_each_subelement_extid' + - 'for_each_subelement_id' + - '__for_each_thread' + - 'for_each_thread' + - 'for_each_zone' + - 'for_each_zone_zonelist' + - 'for_each_zone_zonelist_nodemask' + - 'fwnode_for_each_available_child_node' + - 'fwnode_for_each_child_node' + - 'fwnode_graph_for_each_endpoint' + - 'gadget_for_each_ep' + - 'genradix_for_each' + - 'genradix_for_each_from' + - 'hash_for_each' + - 'hash_for_each_possible' + - 'hash_for_each_possible_rcu' + - 'hash_for_each_possible_rcu_notrace' + - 'hash_for_each_possible_safe' + - 'hash_for_each_rcu' + - 'hash_for_each_safe' + - 'hctx_for_each_ctx' + - 'hlist_bl_for_each_entry' + - 'hlist_bl_for_each_entry_rcu' + - 'hlist_bl_for_each_entry_safe' + - 'hlist_for_each' + - 'hlist_for_each_entry' + - 'hlist_for_each_entry_continue' + - 'hlist_for_each_entry_continue_rcu' + - 'hlist_for_each_entry_continue_rcu_bh' + - 'hlist_for_each_entry_from' + - 'hlist_for_each_entry_from_rcu' + - 'hlist_for_each_entry_rcu' + - 'hlist_for_each_entry_rcu_bh' + - 'hlist_for_each_entry_rcu_notrace' + - 'hlist_for_each_entry_safe' + - '__hlist_for_each_rcu' + - 'hlist_for_each_safe' + - 'hlist_nulls_for_each_entry' + - 'hlist_nulls_for_each_entry_from' + - 'hlist_nulls_for_each_entry_rcu' + - 'hlist_nulls_for_each_entry_safe' + - 'i3c_bus_for_each_i2cdev' + - 'i3c_bus_for_each_i3cdev' + - 'ide_host_for_each_port' + - 'ide_port_for_each_dev' + - 'ide_port_for_each_present_dev' + - 'idr_for_each_entry' + - 'idr_for_each_entry_continue' + - 'idr_for_each_entry_ul' + - 'inet_bind_bucket_for_each' + - 'inet_lhash2_for_each_icsk_rcu' + - 'key_for_each' + - 'key_for_each_safe' + - 'klp_for_each_func' + - 'klp_for_each_func_safe' + - 'klp_for_each_func_static' + - 'klp_for_each_object' + - 'klp_for_each_object_safe' + - 'klp_for_each_object_static' + - 'kvm_for_each_memslot' + - 'kvm_for_each_vcpu' + - 'list_for_each' + - 'list_for_each_codec' + - 'list_for_each_codec_safe' + - 'list_for_each_entry' + - 'list_for_each_entry_continue' + - 'list_for_each_entry_continue_rcu' + - 'list_for_each_entry_continue_reverse' + - 'list_for_each_entry_from' + - 'list_for_each_entry_from_rcu' + - 'list_for_each_entry_from_reverse' + - 'list_for_each_entry_lockless' + - 'list_for_each_entry_rcu' + - 'list_for_each_entry_reverse' + - 'list_for_each_entry_safe' + - 'list_for_each_entry_safe_continue' + - 'list_for_each_entry_safe_from' + - 'list_for_each_entry_safe_reverse' + - 'list_for_each_prev' + - 'list_for_each_prev_safe' + - 'list_for_each_safe' + - 'llist_for_each' + - 'llist_for_each_entry' + - 'llist_for_each_entry_safe' + - 'llist_for_each_safe' + - 'media_device_for_each_entity' + - 'media_device_for_each_intf' + - 'media_device_for_each_link' + - 'media_device_for_each_pad' + - 'mp_bvec_for_each_page' + - 'mp_bvec_for_each_segment' + - 'nanddev_io_for_each_page' + - 
'netdev_for_each_lower_dev' + - 'netdev_for_each_lower_private' + - 'netdev_for_each_lower_private_rcu' + - 'netdev_for_each_mc_addr' + - 'netdev_for_each_uc_addr' + - 'netdev_for_each_upper_dev_rcu' + - 'netdev_hw_addr_list_for_each' + - 'nft_rule_for_each_expr' + - 'nla_for_each_attr' + - 'nla_for_each_nested' + - 'nlmsg_for_each_attr' + - 'nlmsg_for_each_msg' + - 'nr_neigh_for_each' + - 'nr_neigh_for_each_safe' + - 'nr_node_for_each' + - 'nr_node_for_each_safe' + - 'of_for_each_phandle' + - 'of_property_for_each_string' + - 'of_property_for_each_u32' + - 'pci_bus_for_each_resource' + - 'ping_portaddr_for_each_entry' + - 'plist_for_each' + - 'plist_for_each_continue' + - 'plist_for_each_entry' + - 'plist_for_each_entry_continue' + - 'plist_for_each_entry_safe' + - 'plist_for_each_safe' + - 'pnp_for_each_card' + - 'pnp_for_each_dev' + - 'protocol_for_each_card' + - 'protocol_for_each_dev' + - 'queue_for_each_hw_ctx' + - 'radix_tree_for_each_slot' + - 'radix_tree_for_each_tagged' + - 'rbtree_postorder_for_each_entry_safe' + - 'rdma_for_each_port' + - 'resource_list_for_each_entry' + - 'resource_list_for_each_entry_safe' + - 'rhl_for_each_entry_rcu' + - 'rhl_for_each_rcu' + - 'rht_for_each' + - 'rht_for_each_from' + - 'rht_for_each_entry' + - 'rht_for_each_entry_from' + - 'rht_for_each_entry_rcu' + - 'rht_for_each_entry_rcu_from' + - 'rht_for_each_entry_safe' + - 'rht_for_each_rcu' + - 'rht_for_each_rcu_from' + - '__rq_for_each_bio' + - 'rq_for_each_bvec' + - 'rq_for_each_segment' + - 'scsi_for_each_prot_sg' + - 'scsi_for_each_sg' + - 'sctp_for_each_hentry' + - 'sctp_skb_for_each' + - 'shdma_for_each_chan' + - '__shost_for_each_device' + - 'shost_for_each_device' + - 'sk_for_each' + - 'sk_for_each_bound' + - 'sk_for_each_entry_offset_rcu' + - 'sk_for_each_from' + - 'sk_for_each_rcu' + - 'sk_for_each_safe' + - 'sk_nulls_for_each' + - 'sk_nulls_for_each_from' + - 'sk_nulls_for_each_rcu' + - 'snd_array_for_each' + - 'snd_pcm_group_for_each_entry' + - 'snd_soc_dapm_widget_for_each_path' + - 'snd_soc_dapm_widget_for_each_path_safe' + - 'snd_soc_dapm_widget_for_each_sink_path' + - 'snd_soc_dapm_widget_for_each_source_path' + - 'tb_property_for_each' + - 'tcf_exts_for_each_action' + - 'udp_portaddr_for_each_entry' + - 'udp_portaddr_for_each_entry_rcu' + - 'usb_hub_for_each_child' + - 'v4l2_device_for_each_subdev' + - 'v4l2_m2m_for_each_dst_buf' + - 'v4l2_m2m_for_each_dst_buf_safe' + - 'v4l2_m2m_for_each_src_buf' + - 'v4l2_m2m_for_each_src_buf_safe' + - 'virtio_device_for_each_vq' + - 'xa_for_each' + - 'xa_for_each_marked' + - 'xa_for_each_start' + - 'xas_for_each' + - 'xas_for_each_conflict' + - 'xas_for_each_marked' + - 'zorro_for_each_dev' + +#IncludeBlocks: Preserve # Unknown to clang-format-5.0 +IncludeCategories: + - Regex: '.*' + Priority: 1 +IncludeIsMainRegex: '(Test)?$' +IndentCaseLabels: false +#IndentPPDirectives: None # Unknown to clang-format-5.0 +IndentWidth: 8 +IndentWrappedFunctionNames: false +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: Inner +#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0 +ObjCBlockIndentWidth: 8 +ObjCSpaceAfterProperty: true +ObjCSpaceBeforeProtocolList: true + +# Taken from git's rules +#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0 +PenaltyBreakBeforeFirstCallParameter: 30 +PenaltyBreakComment: 10 +PenaltyBreakFirstLessLess: 0 +PenaltyBreakString: 10 +PenaltyExcessCharacter: 100 
+PenaltyReturnTypeOnItsOwnLine: 60 + +PointerAlignment: Right +ReflowComments: false +SortIncludes: false +#SortUsingDeclarations: false # Unknown to clang-format-4.0 +SpaceAfterCStyleCast: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0 +#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0 +SpaceBeforeParens: ControlStatements +#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0 +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: false +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp03 +TabWidth: 8 +UseTab: Always +... diff --git a/kernel/xenomai-v3.2.4/.gitignore b/kernel/xenomai-v3.2.4/.gitignore new file mode 100644 index 0000000..b1d6820 --- /dev/null +++ b/kernel/xenomai-v3.2.4/.gitignore @@ -0,0 +1,17 @@ +Makefile.in +include/xeno_config.h.in* +config/compile +config/config.guess +config/config.sub +config/depcomp +config/install-sh +config/libtool.m4 +config/ltmain.sh +config/ltoptions.m4 +config/ltsugar.m4 +config/ltversion.m4 +config/lt~obsolete.m4 +config/missing +configure +aclocal.m4 +autom4te.cache diff --git a/kernel/xenomai-v3.2.4/CONTRIBUTING.md b/kernel/xenomai-v3.2.4/CONTRIBUTING.md new file mode 100644 index 0000000..b55949e --- /dev/null +++ b/kernel/xenomai-v3.2.4/CONTRIBUTING.md @@ -0,0 +1,118 @@ +Contributing to Xenomai +======================= + +Contributions to Xenomai are always welcome. This document explains the general +requirements on contributions and the recommended preparation steps. It also +sketches the typical integration process of patches. + + +Contribution Checklist +---------------------- + +- use git to manage your changes [*recommended*] + +- follow Linux Kernel coding style [**required**] + - see also [Linux kernel coding style](https://www.kernel.org/doc/html/latest/process/coding-style.html) + - try out the checkpatch.pl script from the Linux kernel + +- add the required copyright header to each new file introduced [**required**] + +- structure patches logically, in small steps [**required**] + - one separable functionality/fix/refactoring = one patch + - do not mix those three into a single patch (e.g. first refactor, then + add a new functionality that builds onto the refactoring) + - after each patch, the tree still has to build and work, i.e. do not add + even temporary breakages inside a patch series (helps when tracking down + bugs) + - use `git rebase -i` to restructure a patch series + +- base patches on top of latest master or - if there are dependencies - on next + (note: next is an integration branch that may change non-linearly) [**required**] + +- test patches sufficiently AFTER the last edit (obvious, but...) [**required**] + +- add signed-off to all patches [**required**] + - to certify the "Developer's Certificate of Origin", see below + - check with your employer when not working on your own! + +- indicate if you think a patch fixes a bug present in a stable branch as well [*recommended*] + - add a note to the cover letter of the patch series + - or add some remark after the "---" separator of the patch itself + +- post patches to mailing list [**required**] + - use `git format-patch/send-email` if possible + - send patches inline, do not append them + - no HTML emails! + - CC people who you think should look at the patches, e.g. 
+    - affected maintainers
+    - someone who wrote a change that is fixed or reverted by you now
+    - who commented on related changes in the recent past
+    - who otherwise has expertise and is interested in the topic
+  - pull requests on gitlab are only optional
+
+- post follow-up version(s) if feedback requires this [**required**]
+
+- send reminder if nothing happened after about two weeks [*recommended*]
+
+
+Developer's Certificate of Origin 1.1
+-------------------------------------
+
+When signing-off a patch for this project like this:
+
+    Signed-off-by: Random J Developer <random@developer.example.org>
+
+using your real name (no pseudonyms or anonymous contributions), you declare
+the following:
+
+    By making a contribution to this project, I certify that:
+
+    (a) The contribution was created in whole or in part by me and I
+        have the right to submit it under the open source license
+        indicated in the file; or
+
+    (b) The contribution is based upon previous work that, to the best
+        of my knowledge, is covered under an appropriate open source
+        license and I have the right under that license to submit that
+        work with modifications, whether created in whole or in part
+        by me, under the same open source license (unless I am
+        permitted to submit under a different license), as indicated
+        in the file; or
+
+    (c) The contribution was provided directly to me by some other
+        person who certified (a), (b) or (c) and I have not modified
+        it.
+
+    (d) I understand and agree that this project and the contribution
+        are public and that a record of the contribution (including all
+        personal information I submit with it, including my sign-off) is
+        maintained indefinitely and may be redistributed consistent with
+        this project or the open source license(s) involved.
+
+See also [Sign your work - the Developer’s Certificate of Origin](https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin)
+for further background on this process, which was adopted from the Linux
+kernel.
+
+
+Contribution Integration Process
+--------------------------------
+
+1. patch reviews performed on the mailing list
+   * at least by maintainers, but everyone is invited
+   * feedback has to consider design, functionality and style
+   * simpler and clearer code preferred, even if original code works fine
+
+2. accepted patches merged into next branch
+
+3. further testing done by community, including CI build tests, code analyzer
+   runs, on-target tests
+
+4. if no new problems or discussions show up, acceptance into master
+   * grace period for master: about 3 days
+   * urgent fixes may be applied sooner
+
+5. a stable-relevant patch is applied to the related stable branch after it was
+   merged into master (except for patches that are stable-specific)
+
+gitlab facilities are not used for the review process so that people can follow
+all changes and related discussions in a single place, the mailing list. This
+may change in the future if gitlab improves its email integration.
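The EXPORT_SYMBOL hunks earlier in this patch are what make the Cobalt core and RTDM driver services (rtdm_event_timedwait, rtdm_timer_init, rtdm_fd_lock and friends) callable from out-of-tree modules. As rough orientation, here is a minimal sketch of an RTDM module exercising a few of those services. It is an editor's illustration, not part of the patch: the demo_* names are invented, and the calls assume the Xenomai 3 RTDM driver API as declared in include/rtdm/driver.h, so verify the exact signatures against this tree before relying on them.

    /* Hypothetical sketch only: a minimal RTDM module using the event,
     * timer and task services exported by this patch. Not part of the
     * patch; names and signatures should be checked against the tree. */
    #include <linux/module.h>
    #include <rtdm/driver.h>

    static rtdm_event_t demo_event;
    static rtdm_timer_t demo_timer;
    static rtdm_task_t demo_task;

    /* Runs in real-time (out-of-band) context each time the timer fires. */
    static void demo_timer_handler(rtdm_timer_t *timer)
    {
    	rtdm_event_signal(&demo_event);
    }

    /* Real-time task: blocks on the event with a 5 ms timeout. */
    static void demo_task_fn(void *arg)
    {
    	int ret;

    	while (!rtdm_task_should_stop()) {
    		ret = rtdm_event_timedwait(&demo_event,
    					   5000000ULL /* 5 ms */, NULL);
    		if (ret && ret != -ETIMEDOUT)
    			break;	/* e.g. -EIDRM once the event is destroyed */
    	}
    }

    static int __init demo_init(void)
    {
    	int ret;

    	rtdm_event_init(&demo_event, 0);

    	ret = rtdm_timer_init(&demo_timer, demo_timer_handler, "demo_timer");
    	if (ret)
    		goto fail_event;

    	ret = rtdm_task_init(&demo_task, "demo_task", demo_task_fn, NULL,
    			     RTDM_TASK_LOWEST_PRIORITY, 0);
    	if (ret)
    		goto fail_timer;

    	/* Periodic tick: first expiry in 1 ms, then every 1 ms. */
    	ret = rtdm_timer_start(&demo_timer, 1000000ULL, 1000000ULL,
    			       RTDM_TIMERMODE_RELATIVE);
    	if (ret)
    		goto fail_task;

    	return 0;

    fail_task:
    	rtdm_task_destroy(&demo_task);
    fail_timer:
    	rtdm_timer_destroy(&demo_timer);
    fail_event:
    	rtdm_event_destroy(&demo_event);
    	return ret;
    }

    static void __exit demo_exit(void)
    {
    	rtdm_timer_destroy(&demo_timer);
    	rtdm_task_destroy(&demo_task);
    	rtdm_event_destroy(&demo_event);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Built against a Cobalt-enabled kernel, loading a module like this gives a quick smoke test that the exported services behave: the timer fires every millisecond in out-of-band context and the real-time task wakes on each event signal.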
diff --git a/kernel/xenomai-v3.2.4/Makefile.am b/kernel/xenomai-v3.2.4/Makefile.am
new file mode 100644
index 0000000..6046442
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/Makefile.am
@@ -0,0 +1,78 @@
+ACLOCAL_AMFLAGS=-I config
+
+SUBDIRS = \
+	doc \
+	lib \
+	config \
+	include \
+	scripts \
+	utils
+
+if XENO_ENABLE_DEMO
+SUBDIRS += \
+	demo
+endif
+
+if XENO_ENABLE_TESTSUITE
+SUBDIRS += \
+	testsuite
+endif
+
+EXTRA_DIST = kernel debian
+
+DIST_SUBDIRS = \
+	config \
+	demo \
+	doc \
+	include \
+	lib \
+	scripts \
+	testsuite \
+	utils
+
+doc/%: FORCE
+	$(MAKE) -C doc/ $*
+
+dist-hook:
+	rm -fr `find $(distdir) -name '.svn' -o -name CVS -o -name '.#*' \
+		-o -name '*~' -o -name autom4te.cache`
+
+install-udev-rules:
+if XENO_COBALT
+	if test -r $(DESTDIR)/$(sysconfdir)/udev/udev.rules ; then \
+		for f in $(srcdir)/kernel/cobalt/udev/*.rules ; do \
+			b=`basename $$f` ; \
+			grep -q Xenomai:`basename $$b .rules` $(DESTDIR)/$(sysconfdir)/udev/udev.rules || \
+			( echo ; cat $$f ) >> $(DESTDIR)/$(sysconfdir)/udev/udev.rules ; \
+		done ; \
+	else \
+		$(mkinstalldirs) $(DESTDIR)/$(sysconfdir)/udev/rules.d; \
+		for f in $(srcdir)/kernel/cobalt/udev/*.rules ; do \
+			$(INSTALL_DATA) $$f $(DESTDIR)/$(sysconfdir)/udev/rules.d/ ; \
+		done ; \
+	fi
+endif
+
+uninstall-udev-rules:
+if XENO_COBALT
+	cd $(srcdir)/kernel/cobalt/udev && for f in *.rules; do \
+		rm -f $(DESTDIR)/$(sysconfdir)/udev/rules.d/$$f ; \
+	done
+endif
+
+# legacy alias
+install-user: install
+
+install-exec-local: install-udev-rules
+
+# Rule removal is handled by uninstall-udev-rules above; a single
+# uninstall-local hook is enough.
+uninstall-local: uninstall-udev-rules
+
+.PHONY: FORCE
diff --git a/kernel/xenomai-v3.2.4/README b/kernel/xenomai-v3.2.4/README
new file mode 100644
index 0000000..d7241a1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/README
@@ -0,0 +1,74 @@
+
+Where to start from?
+====================
+
+http://xenomai.org/start-here/ is the best place to start learning
+about Xenomai 3.
+
+Also, make sure to read the per-architecture README files, i.e.:
+kernel/cobalt/arch/*/README
+
+Documentation
+=============
+
+The Xenomai 3.x documentation can be built and then installed this way:
+
+xenomai-3.x.y/configure --enable-doc-build --prefix=<install-dir>
+
+Asciidoc, Doxygen, W3M and Dot packages are required for building the
+documentation.
+
+Online documentation
+====================
+
+The online version of the documentation is available from our website
+for the current release:
+
+http://xenomai.org/installing-xenomai-3-x/
+http://xenomai.org/building-applications-with-xenomai-3-x/
+http://xenomai.org/running-applications-with-xenomai-3-x/
+http://xenomai.org/migrating-from-xenomai-2-x-to-3-x/
+http://xenomai.org/documentation/xenomai-3/html/xeno3prm/index.html
+http://xenomai.org/troubleshooting-a-dual-kernel-configuration/
+http://xenomai.org/troubleshooting-a-single-kernel-configuration/
+
+Building from sources
+=====================
+
+Detailed instructions for building from sources are available at:
+http://xenomai.org/installing-xenomai-3-x/
+
+- GIT clone:
+
+  git://git.xenomai.org/xenomai-3.git
+  http://git.xenomai.org/xenomai-3.git
+
+  Once the repository is cloned, make sure to bootstrap the autoconf
+  system in the top-level directory by running scripts/bootstrap. In
+  order to do this, you will need the GNU autotools installed on your
+  workstation.
+ + If you intend to update the Xenomai code base, you may want to pass + --enable-maintainer-mode to the configure script for building, so + that autoconf/automake output files are automatically regenerated at + the next (re)build in case the corresponding templates have changed. + +- Tarballs: + + http://xenomai.org/downloads/xenomai/ + + Source tarballs are self-contained and ready for building. + +Licensing terms +=============== + +Source files which implement the Xenomai software system generally +include a copyright notice and license header. In absence of license +header in a particular file, the terms and conditions stated by the +COPYING or LICENSE file present in the top-level directory of the +relevant package apply. + +For instance, lib/cobalt/COPYING states the licensing terms and +conditions applicable to the source files present in the hierarchy +rooted at lib/cobalt. diff --git a/kernel/xenomai-v3.2.4/config/INSTALL b/kernel/xenomai-v3.2.4/config/INSTALL new file mode 100644 index 0000000..54caf7c --- /dev/null +++ b/kernel/xenomai-v3.2.4/config/INSTALL @@ -0,0 +1,229 @@ +Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002 Free Software +Foundation, Inc. + + This file is free documentation; the Free Software Foundation gives +unlimited permission to copy, distribute and modify it. + +Basic Installation +================== + + These are generic installation instructions. + + The `configure' shell script attempts to guess correct values for +various system-dependent variables used during compilation. It uses +those values to create a `Makefile' in each directory of the package. +It may also create one or more `.h' files containing system-dependent +definitions. Finally, it creates a shell script `config.status' that +you can run in the future to recreate the current configuration, and a +file `config.log' containing compiler output (useful mainly for +debugging `configure'). + + It can also use an optional file (typically called `config.cache' +and enabled with `--cache-file=config.cache' or simply `-C') that saves +the results of its tests to speed up reconfiguring. (Caching is +disabled by default to prevent problems with accidental use of stale +cache files.) + + If you need to do unusual things to compile the package, please try +to figure out how `configure' could check whether to do them, and mail +diffs or instructions to the address given in the `README' so they can +be considered for the next release. If you are using the cache, and at +some point `config.cache' contains results you don't want to keep, you +may remove or edit it. + + The file `configure.ac' (or `configure.in') is used to create +`configure' by a program called `autoconf'. You only need +`configure.ac' if you want to change it or regenerate `configure' using +a newer version of `autoconf'. + +The simplest way to compile this package is: + + 1. `cd' to the directory containing the package's source code and type + `./configure' to configure the package for your system. If you're + using `csh' on an old version of System V, you might need to type + `sh ./configure' instead to prevent `csh' from trying to execute + `configure' itself. + + Running `configure' takes awhile. While running, it prints some + messages telling which features it is checking for. + + 2. Type `make' to compile the package. + + 3. Optionally, type `make check' to run any self-tests that come with + the package. + + 4. Type `make install' to install the programs and any data files and + documentation. + + 5. 
You can remove the program binaries and object files from the + source code directory by typing `make clean'. To also remove the + files that `configure' created (so you can compile the package for + a different kind of computer), type `make distclean'. There is + also a `make maintainer-clean' target, but that is intended mainly + for the package's developers. If you use it, you may have to get + all sorts of other programs in order to regenerate files that came + with the distribution. + +Compilers and Options +===================== + + Some systems require unusual options for compilation or linking that +the `configure' script does not know about. Run `./configure --help' +for details on some of the pertinent environment variables. + + You can give `configure' initial values for configuration parameters +by setting variables in the command line or in the environment. Here +is an example: + + ./configure CC=c89 CFLAGS=-O2 LIBS=-lposix + + *Note Defining Variables::, for more details. + +Compiling For Multiple Architectures +==================================== + + You can compile the package for more than one kind of computer at the +same time, by placing the object files for each architecture in their +own directory. To do this, you must use a version of `make' that +supports the `VPATH' variable, such as GNU `make'. `cd' to the +directory where you want the object files and executables to go and run +the `configure' script. `configure' automatically checks for the +source code in the directory that `configure' is in and in `..'. + + If you have to use a `make' that does not support the `VPATH' +variable, you have to compile the package for one architecture at a +time in the source code directory. After you have installed the +package for one architecture, use `make distclean' before reconfiguring +for another architecture. + +Installation Names +================== + + By default, `make install' will install the package's files in +`/usr/local/bin', `/usr/local/man', etc. You can specify an +installation prefix other than `/usr/local' by giving `configure' the +option `--prefix=PATH'. + + You can specify separate installation prefixes for +architecture-specific files and architecture-independent files. If you +give `configure' the option `--exec-prefix=PATH', the package will use +PATH as the prefix for installing programs and libraries. +Documentation and other data files will still use the regular prefix. + + In addition, if you use an unusual directory layout you can give +options like `--bindir=PATH' to specify different values for particular +kinds of files. Run `configure --help' for a list of the directories +you can set and what kinds of files go in them. + + If the package supports it, you can cause programs to be installed +with an extra prefix or suffix on their names by giving `configure' the +option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. + +Optional Features +================= + + Some packages pay attention to `--enable-FEATURE' options to +`configure', where FEATURE indicates an optional part of the package. +They may also pay attention to `--with-PACKAGE' options, where PACKAGE +is something like `gnu-as' or `x' (for the X Window System). The +`README' should mention any `--enable-' and `--with-' options that the +package recognizes. 
+ + For packages that use the X Window System, `configure' can usually +find the X include and library files automatically, but if it doesn't, +you can use the `configure' options `--x-includes=DIR' and +`--x-libraries=DIR' to specify their locations. + +Specifying the System Type +========================== + + There may be some features `configure' cannot figure out +automatically, but needs to determine by the type of machine the package +will run on. Usually, assuming the package is built to be run on the +_same_ architectures, `configure' can figure that out, but if it prints +a message saying it cannot guess the machine type, give it the +`--build=TYPE' option. TYPE can either be a short name for the system +type, such as `sun4', or a canonical name which has the form: + + CPU-COMPANY-SYSTEM + +where SYSTEM can have one of these forms: + + OS KERNEL-OS + + See the file `config.sub' for the possible values of each field. If +`config.sub' isn't included in this package, then this package doesn't +need to know the machine type. + + If you are _building_ compiler tools for cross-compiling, you should +use the `--target=TYPE' option to select the type of system they will +produce code for. + + If you want to _use_ a cross compiler, that generates code for a +platform different from the build platform, you should specify the +"host" platform (i.e., that on which the generated programs will +eventually be run) with `--host=TYPE'. + +Sharing Defaults +================ + + If you want to set default values for `configure' scripts to share, +you can create a site shell script called `config.site' that gives +default values for variables like `CC', `cache_file', and `prefix'. +`configure' looks for `PREFIX/share/config.site' if it exists, then +`PREFIX/etc/config.site' if it exists. Or, you can set the +`CONFIG_SITE' environment variable to the location of the site script. +A warning: not all `configure' scripts look for a site script. + +Defining Variables +================== + + Variables not defined in a site shell script can be set in the +environment passed to `configure'. However, some packages may run +configure again during the build, and the customized values of these +variables may be lost. In order to avoid this problem, you should set +them in the `configure' command line, using `VAR=value'. For example: + + ./configure CC=/usr/local2/bin/gcc + +will cause the specified gcc to be used as the C compiler (unless it is +overridden in the site shell script). + +`configure' Invocation +====================== + + `configure' recognizes the following options to control how it +operates. + +`--help' +`-h' + Print a summary of the options to `configure', and exit. + +`--version' +`-V' + Print the version of Autoconf used to generate the `configure' + script, and exit. + +`--cache-file=FILE' + Enable the cache: use and save the results of the tests in FILE, + traditionally `config.cache'. FILE defaults to `/dev/null' to + disable caching. + +`--config-cache' +`-C' + Alias for `--cache-file=config.cache'. + +`--quiet' +`--silent' +`-q' + Do not print messages saying which checks are being made. To + suppress all normal output, redirect it to `/dev/null' (any error + messages will still be shown). + +`--srcdir=DIR' + Look for the package's source code in directory DIR. Usually + `configure' can determine that directory automatically. + +`configure' also accepts some other, not widely useful, options. Run +`configure --help' for more details. 
+
diff --git a/kernel/xenomai-v3.2.4/config/Makefile.am b/kernel/xenomai-v3.2.4/config/Makefile.am
new file mode 100644
index 0000000..9ce4a9c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/Makefile.am
@@ -0,0 +1,7 @@
+EXTRA_DIST= \
+	acinclude.m4 \
+	docbook.m4 \
+	apirev \
+	version-label \
+	version-code \
+	ac_prog_cc_for_build.m4
diff --git a/kernel/xenomai-v3.2.4/config/ac_prog_cc_for_build.m4 b/kernel/xenomai-v3.2.4/config/ac_prog_cc_for_build.m4
new file mode 100644
index 0000000..8cba249
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/ac_prog_cc_for_build.m4
@@ -0,0 +1,108 @@
+dnl Available from the GNU Autoconf Macro Archive at:
+dnl http://www.gnu.org/software/ac-archive/htmldoc/ac_prog_cc_for_build.html
+dnl
+AC_DEFUN([AC_PROG_CC_FOR_BUILD], [dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_CPP])dnl
+AC_REQUIRE([AC_EXEEXT])dnl
+AC_REQUIRE([AC_CANONICAL_SYSTEM])dnl
+dnl
+pushdef([AC_TRY_COMPILER], [
+cat > conftest.$ac_ext << EOF
+#line __oline__ "configure"
+#include "confdefs.h"
+[$1]
+EOF
+# If we can't run a trivial program, we are probably using a cross compiler.
+# Fail miserably.
+if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null; then
+  [$2]=yes
+else
+  echo "configure: failed program was:" >&AC_FD_CC
+  cat conftest.$ac_ext >&AC_FD_CC
+  [$2]=no
+fi
+[$3]=no
+rm -fr conftest*])dnl
+
+dnl Use the standard macros, but make them use other variable names
+dnl
+pushdef([cross_compiling], [#])dnl
+pushdef([ac_cv_prog_CPP], ac_cv_build_prog_CPP)dnl
+pushdef([ac_cv_prog_gcc], ac_cv_build_prog_gcc)dnl
+pushdef([ac_cv_prog_cc_works], ac_cv_build_prog_cc_works)dnl
+pushdef([ac_cv_prog_cc_cross], ac_cv_build_prog_cc_cross)dnl
+pushdef([ac_cv_prog_cc_g], ac_cv_build_prog_cc_g)dnl
+pushdef([ac_cv_exeext], ac_cv_build_exeext)dnl
+pushdef([ac_cv_objext], ac_cv_build_objext)dnl
+pushdef([ac_exeext], ac_build_exeext)dnl
+pushdef([ac_objext], ac_build_objext)dnl
+pushdef([CC], CC_FOR_BUILD)dnl
+pushdef([CPP], CPP_FOR_BUILD)dnl
+pushdef([CFLAGS], CFLAGS_FOR_BUILD)dnl
+pushdef([CPPFLAGS], CPPFLAGS_FOR_BUILD)dnl
+pushdef([host], build)dnl
+pushdef([host_alias], build_alias)dnl
+pushdef([host_cpu], build_cpu)dnl
+pushdef([host_vendor], build_vendor)dnl
+pushdef([host_os], build_os)dnl
+pushdef([ac_cv_host], ac_cv_build)dnl
+pushdef([ac_cv_host_alias], ac_cv_build_alias)dnl
+pushdef([ac_cv_host_cpu], ac_cv_build_cpu)dnl
+pushdef([ac_cv_host_vendor], ac_cv_build_vendor)dnl
+pushdef([ac_cv_host_os], ac_cv_build_os)dnl
+pushdef([ac_cpp], ac_build_cpp)dnl
+pushdef([ac_compile], ac_build_compile)dnl
+pushdef([ac_link], ac_build_link)dnl
+
+dnl dnl Defeat the anti-duplication mechanism
+dnl dnl
+dnl undefine([AC_PROVIDE_AC_PROG_CPP])dnl
+dnl undefine([AC_PROVIDE_AC_PROG_C])dnl
+dnl undefine([AC_PROVIDE_AC_EXEEXT])dnl
+
+AC_PROG_CC
+AC_PROG_CPP
+AC_EXEEXT
+
+dnl Restore the old definitions
+dnl
+popdef([AC_TRY_COMPILER])dnl
+popdef([ac_link])dnl
+popdef([ac_compile])dnl
+popdef([ac_cpp])dnl
+popdef([ac_cv_host_os])dnl
+popdef([ac_cv_host_vendor])dnl
+popdef([ac_cv_host_cpu])dnl
+popdef([ac_cv_host_alias])dnl
+popdef([ac_cv_host])dnl
+popdef([host_os])dnl
+popdef([host_vendor])dnl
+popdef([host_cpu])dnl
+popdef([host_alias])dnl
+popdef([host])dnl
+popdef([CPPFLAGS])dnl
+popdef([CFLAGS])dnl
+popdef([CPP])dnl
+popdef([CC])dnl
+popdef([ac_objext])dnl
+popdef([ac_exeext])dnl
+popdef([ac_cv_objext])dnl
+popdef([ac_cv_exeext])dnl
+popdef([ac_cv_prog_cc_g])dnl
+popdef([ac_cv_prog_cc_works])dnl
+popdef([ac_cv_prog_cc_cross])dnl
+popdef([ac_cv_prog_gcc])dnl
+popdef([cross_compiling])dnl + +dnl Finally, set Makefile variables +dnl +BUILD_EXEEXT=$ac_build_exeext +BUILD_OBJEXT=$ac_build_objext +AC_SUBST(BUILD_EXEEXT)dnl +AC_SUBST(BUILD_OBJEXT)dnl +AC_SUBST([CFLAGS_FOR_BUILD])dnl +AC_SUBST([CPPFLAGS_FOR_BUILD])dnl +]) diff --git a/kernel/xenomai-v3.2.4/config/acinclude.m4 b/kernel/xenomai-v3.2.4/config/acinclude.m4 new file mode 100644 index 0000000..9521613 --- /dev/null +++ b/kernel/xenomai-v3.2.4/config/acinclude.m4 @@ -0,0 +1,579 @@ +dnl AC_PATH_XREQUIRED() requires X libs. This frag has been +dnl lifted nearly "as is" from Postgresql's configure.in script. + +AC_DEFUN([AC_PATH_XREQUIRED], +[ + save_LIBS="$LIBS" + save_CFLAGS="$CFLAGS" + save_CPPFLAGS="$CPPFLAGS" + save_LDFLAGS="$LDFLAGS" + + AC_PATH_X + AC_PATH_XTRA + + LIBS="$LIBS $X_EXTRA_LIBS" + CFLAGS="$CFLAGS $X_CFLAGS" + CPPFLAGS="$CPPFLAGS $X_CFLAGS" + LDFLAGS="$LDFLAGS $X_LIBS" + + dnl Check for X library + + X11_LIBS="" + AC_CHECK_LIB(X11, XOpenDisplay, X11_LIBS="-lX11",,${X_PRE_LIBS}) + if test "$X11_LIBS" = ""; then + dnl Not having X is bad news, period. Let the user fix this. + AC_MSG_ERROR([The X11 library '-lX11' could not be found, + so I won't go further. Please use the configure + options '--x-includes=DIR' and '--x-libraries=DIR' + to specify the X location. See the file 'config.log' + for further diagnostics.]) + fi + AC_SUBST(X_LIBS) + AC_SUBST(X11_LIBS) + AC_SUBST(X_PRE_LIBS) + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" +]) + +dnl AC_POSIX_SIGHANDLER() determines whether +dnl signal handlers are posix compliant. This frag +dnl has been adapted from readline's aclocal.m4. + +AC_DEFUN([AC_POSIX_SIGHANDLER], +[AC_MSG_CHECKING([if signal handlers are posix compliant]) +AC_CACHE_VAL(ac_cv_posix_sighandler, +[AC_TRY_COMPILE([#include <sys/types.h> +#include <signal.h> +#ifdef signal +#undef signal +#endif +#ifdef __cplusplus +extern "C" +#endif +void (*signal(void))(void);], +[int i;], ac_cv_posix_sighandler=no, ac_cv_posix_sighandler=yes)])dnl +AC_MSG_RESULT($ac_cv_posix_sighandler) +if test $ac_cv_posix_sighandler = yes; then +AC_DEFINE(HAVE_POSIX_SIGHANDLER,1,[Kconfig]) +fi +]) + +#------------------------------------------------------------------------ +# SC_PATH_TCLCONFIG -- +# +# Locate the tclConfig.sh file and perform a sanity check on +# the Tcl compile flags +# +# Arguments: +# none +# +# Results: +# +# Adds the following arguments to configure: +# --with-tcl=... +# +# Defines the following vars: +# TCL_BIN_DIR Full path to the directory containing +# the tclConfig.sh file +#------------------------------------------------------------------------ + +AC_DEFUN([SC_PATH_TCLCONFIG], [ + # + # Ok, lets find the tcl configuration + # First, look for one uninstalled. + # the alternative search directory is invoked by --with-tcl + # + + if test x"${no_tcl}" = x ; then + # we reset no_tcl in case something fails here + no_tcl=true + AC_ARG_WITH(tcl, [ --with-tcl directory containing tcl configuration (tclConfig.sh)], with_tclconfig=${withval}) + AC_MSG_CHECKING([for Tcl configuration]) + AC_CACHE_VAL(ac_cv_c_tclconfig,[ + + # First check to see if --with-tcl was specified. 
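+	    # Editor's sketch (not upstream code): a typical override,
+	    # assuming a hypothetical Tcl installation whose tclConfig.sh
+	    # lives under /usr/lib/tcl8.6, would be:
+	    #
+	    #   ./configure --with-tcl=/usr/lib/tcl8.6
+	    #
+	    # On success, TCL_BIN_DIR names that directory and
+	    # SC_LOAD_TCLCONFIG (defined later in this file) sources the
+	    # tclConfig.sh found there; SC_PATH_TKCONFIG below behaves
+	    # the same way for --with-tk and tkConfig.sh.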
+ if test x"${with_tclconfig}" != x ; then + if test -f "${with_tclconfig}/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` + else + AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh]) + fi + fi + + # then check for a private Tcl installation + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ../tcl \ + `ls -dr ../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../tcl \ + `ls -dr ../../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../../tcl \ + `ls -dr ../../../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i/unix; pwd)` + break + fi + done + fi + + # check in a few common install locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in ${prefix}/lib /usr/local/lib /usr/pkg/lib /usr/lib \ + `ls -dr /usr/lib/tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i; pwd)` + break + fi + done + fi + + # check in a few other private locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ${srcdir}/../tcl \ + `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig=`(cd $i/unix; pwd)` + break + fi + done + fi + ]) + + if test x"${ac_cv_c_tclconfig}" = x ; then + TCL_BIN_DIR="# no Tcl configs found" + AC_MSG_WARN(Can't find Tcl configuration definitions) + exit 1 + else + no_tcl= + TCL_BIN_DIR=${ac_cv_c_tclconfig} + AC_MSG_RESULT(found $TCL_BIN_DIR/tclConfig.sh) + fi + fi +]) + +#------------------------------------------------------------------------ +# SC_PATH_TKCONFIG -- +# +# Locate the tkConfig.sh file +# +# Arguments: +# none +# +# Results: +# +# Adds the following arguments to configure: +# --with-tk=... +# +# Defines the following vars: +# TK_BIN_DIR Full path to the directory containing +# the tkConfig.sh file +#------------------------------------------------------------------------ + +AC_DEFUN([SC_PATH_TKCONFIG], [ + # + # Ok, lets find the tk configuration + # First, look for one uninstalled. + # the alternative search directory is invoked by --with-tk + # + + if test x"${no_tk}" = x ; then + # we reset no_tk in case something fails here + no_tk=true + AC_ARG_WITH(tk, [ --with-tk directory containing tk configuration (tkConfig.sh)], with_tkconfig=${withval}) + AC_MSG_CHECKING([for Tk configuration]) + AC_CACHE_VAL(ac_cv_c_tkconfig,[ + + # First check to see if --with-tkconfig was specified. 
+ if test x"${with_tkconfig}" != x ; then + if test -f "${with_tkconfig}/tkConfig.sh" ; then + ac_cv_c_tkconfig=`(cd ${with_tkconfig}; pwd)` + else + AC_MSG_ERROR([${with_tkconfig} directory doesn't contain tkConfig.sh]) + fi + fi + + # then check for a private Tk library + if test x"${ac_cv_c_tkconfig}" = x ; then + for i in \ + ../tk \ + `ls -dr ../tk[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../tk \ + `ls -dr ../../tk[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../../tk \ + `ls -dr ../../../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/unix/tkConfig.sh" ; then + ac_cv_c_tkconfig=`(cd $i/unix; pwd)` + break + fi + done + fi + # check in a few common install locations + if test x"${ac_cv_c_tkconfig}" = x ; then + for i in ${prefix}/lib /usr/local/lib /usr/pkg/lib /usr/lib \ + `ls -dr /usr/lib/tk[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/tkConfig.sh" ; then + ac_cv_c_tkconfig=`(cd $i; pwd)` + break + fi + done + fi + # check in a few other private locations + if test x"${ac_cv_c_tkconfig}" = x ; then + for i in \ + ${srcdir}/../tk \ + `ls -dr ${srcdir}/../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/unix/tkConfig.sh" ; then + ac_cv_c_tkconfig=`(cd $i/unix; pwd)` + break + fi + done + fi + ]) + if test x"${ac_cv_c_tkconfig}" = x ; then + TK_BIN_DIR="# no Tk configs found" + AC_MSG_WARN(Can't find Tk configuration definitions) + exit 1 + else + no_tk= + TK_BIN_DIR=${ac_cv_c_tkconfig} + AC_MSG_RESULT(found $TK_BIN_DIR/tkConfig.sh) + fi + fi + +]) + +#------------------------------------------------------------------------ +# SC_LOAD_TCLCONFIG -- +# +# Load the tclConfig.sh file +# +# Arguments: +# +# Requires the following vars to be set: +# TCL_BIN_DIR +# +# Results: +# +# Subst the following vars: +# TCL_BIN_DIR +# TCL_SRC_DIR +# TCL_LIB_FILE +# +#------------------------------------------------------------------------ + +AC_DEFUN([SC_LOAD_TCLCONFIG], [ + AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh]) + + if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then + AC_MSG_RESULT([loading]) + . $TCL_BIN_DIR/tclConfig.sh + else + AC_MSG_ERROR([not found]) + fi + + AC_PATH_PROG(TCL_SCRIPT, tclsh${TCL_VERSION}, tclsh) + + AC_SUBST(TCL_BIN_DIR) + AC_SUBST(TCL_SRC_DIR) + AC_SUBST(TCL_LIB_FILE) + AC_SUBST(TCL_LIBS) + AC_SUBST(TCL_DEFS) + AC_SUBST(TCL_SHLIB_LD_LIBS) + AC_SUBST(TCL_EXTRA_CFLAGS) + AC_SUBST(TCL_LD_FLAGS) + AC_SUBST(TCL_LIB_FILE) + AC_SUBST(TCL_STUB_LIB_FILE) + AC_SUBST(TCL_LIB_SPEC) + AC_SUBST(TCL_BUILD_LIB_SPEC) + AC_SUBST(TCL_STUB_LIB_SPEC) + AC_SUBST(TCL_BUILD_STUB_LIB_SPEC) + AC_SUBST(TCL_DBGX) +]) + +#------------------------------------------------------------------------ +# SC_LOAD_TKCONFIG -- +# +# Load the tkConfig.sh file +# +# Arguments: +# +# Requires the following vars to be set: +# TK_BIN_DIR +# +# Results: +# +# Sets the following vars that should be in tkConfig.sh: +# TK_BIN_DIR +#------------------------------------------------------------------------ + +AC_DEFUN([SC_LOAD_TKCONFIG], [ + AC_MSG_CHECKING([for existence of $TK_BIN_DIR/tkConfig.sh]) + + if test -f "$TK_BIN_DIR/tkConfig.sh" ; then + AC_MSG_RESULT([loading]) + . $TK_BIN_DIR/tkConfig.sh + else + AC_MSG_ERROR([not found]) + fi + + AC_SUBST(TK_BIN_DIR) + AC_SUBST(TK_SRC_DIR) + AC_SUBST(TK_LIB_FILE) + AC_SUBST(TK_LIB_FLAG) + AC_SUBST(TK_LIB_SPEC) + AC_SUBST(TK_DBGX) +]) + +#------------------------------------------------------------------------ +# SC_PATH_TIX -- +# +# Locate the Tix installation. +# +# Arguments: +# None. 
+# +# Results: +# +# Substs the following vars: +# TIX_TCL_LIB +# TIX_LIB_SPEC +#------------------------------------------------------------------------ + +AC_DEFUN([SC_PATH_TIX], [ + AC_MSG_CHECKING(for Tix's Tcl library) + + AC_ARG_WITH(tixlibrary, [ --with-tixlibrary directory containing the Tix library files.], with_tixlibrary=${withval}) + + if test x"${with_tixlibrary}" != x ; then + if test -f "${with_tixlibrary}/Init.tcl" ; then + ac_cv_tix_libdir=${with_tixlibrary} + else + AC_MSG_ERROR([${with_tixlibrary} directory does not contain Tix's init file Init.tcl]) + fi + else + AC_CACHE_VAL(ac_cv_tix_libdir, [ + for d in \ + `ls -dr /usr/local/lib/tix[[0-9]]* 2>/dev/null ` \ + `ls -dr /usr/local/share/tix[[0-9]]* 2>/dev/null ` \ + `ls -dr /usr/pkg/lib/tix[[0-9]]* 2>/dev/null ` \ + `ls -dr /usr/lib/tix[[0-9]]* 2>/dev/null ` \ + `ls -dr /usr/share/tix[[0-9]]* 2>/dev/null ` ; do + if test -f "$d/Init.tcl" ; then + ac_cv_tix_libdir=$d + break + fi + done + ]) + fi + + AC_MSG_RESULT($ac_cv_tix_libdir) + TIX_TCL_LIB=$ac_cv_tix_libdir + AC_SUBST(TIX_TCL_LIB) + + SC_LIB_SPEC(tix) + TIX_LIB_SPEC=$tix_LIB_SPEC + AC_SUBST(TIX_LIB_SPEC) +]) + +#------------------------------------------------------------------------ +# SC_LIB_SPEC -- +# +# Compute the name of an existing object library located in libdir +# from the given base name and produce the appropriate linker flags. +# +# Arguments: +# basename The base name of the library without version +# numbers, extensions, or "lib" prefixes. +# +# Requires: +# +# Results: +# +# Defines the following vars: +# ${basename}_LIB_NAME The computed library name. +# ${basename}_LIB_SPEC The computed linker flags. +#------------------------------------------------------------------------ + +AC_DEFUN([SC_LIB_SPEC], [ + AC_MSG_CHECKING(for $1 library) + eval "sc_lib_name_dir=${libdir}" + for i in \ + `ls -dr ${sc_lib_name_dir}/$1[[0-9]]*.lib 2>/dev/null ` \ + `ls -dr ${sc_lib_name_dir}/lib$1.* 2>/dev/null ` \ + `ls -dr ${sc_lib_name_dir}/lib$1[[0-9]]* 2>/dev/null ` \ + `ls -dr /usr/pkg/*/lib$1.so 2>/dev/null ` \ + `ls -dr /usr/pkg/*/lib$1[[0-9]]* 2>/dev/null ` \ + `ls -dr /usr/pkg/lib/lib$1.so 2>/dev/null ` \ + `ls -dr /usr/pkg/lib/lib$1[[0-9]]* 2>/dev/null ` \ + `ls -dr /usr/lib/$1[[0-9]]*.lib 2>/dev/null ` \ + `ls -dr /usr/lib/lib$1.so 2>/dev/null ` \ + `ls -dr /usr/lib/lib$1[[0-9]]* 2>/dev/null ` \ + `ls -dr /usr/local/lib/$1[[0-9]]*.lib 2>/dev/null ` \ + `ls -dr /usr/local/lib/lib$1.so 2>/dev/null ` \ + `ls -dr /usr/local/lib/lib$1[[0-9]]* 2>/dev/null ` ; do + if test -f "$i" ; then + sc_lib_name_dir=`dirname $i` + $1_LIB_NAME=`basename $i` + break + fi + done + + case "`uname -s`" in + *win32* | *WIN32* | *CYGWIN_NT*) + $1_LIB_SPEC=${$1_LIB_NAME} + ;; + *) + # Strip off the leading "lib" and trailing ".a" or ".so" + sc_lib_name_lib=`echo ${$1_LIB_NAME}|sed -e 's/^lib//' -e 's/\.so.*$//' -e 's/\.a$//'` + $1_LIB_SPEC="-L${sc_lib_name_dir} -l${sc_lib_name_lib}" + ;; + esac + if test "x${sc_lib_name_lib}" = x ; then + AC_MSG_ERROR(not found) + else + AC_MSG_RESULT(${$1_LIB_SPEC}) + fi +]) + +#------------------------------------------------------------------------ +# SC_PUBLIC_TCL_HEADERS -- +# +# Locate the installed public Tcl header files +# +# Arguments: +# None. +# +# Requires: +# +# Results: +# +# Adds a --with-tclinclude switch to configure. +# Result is cached. 
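+#
+# Editor's sketch (not upstream code): assuming public headers under a
+# hypothetical /usr/include/tcl8.6, the switch would be used as
+#
+#   ./configure --with-tclinclude=/usr/include/tcl8.6
+#
+# after which TCL_INCLUDES expands to "-I/usr/include/tcl8.6".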
+# +# Substs the following vars: +# TCL_INCLUDES +#------------------------------------------------------------------------ + +AC_DEFUN([SC_PUBLIC_TCL_HEADERS], [ + AC_MSG_CHECKING(for Tcl public headers) + + AC_ARG_WITH(tclinclude, [ --with-tclinclude directory containing the public Tcl header files.], with_tclinclude=${withval}) + + if test x"${with_tclinclude}" != x ; then + if test -f "${with_tclinclude}/tcl.h" ; then + ac_cv_c_tclh=${with_tclinclude} + else + AC_MSG_ERROR([${with_tclinclude} directory does not contain Tcl public header file tcl.h]) + fi + else + AC_CACHE_VAL(ac_cv_c_tclh, [ + # Use the value from --with-tclinclude, if it was given + + if test x"${with_tclinclude}" != x ; then + ac_cv_c_tclh=${with_tclinclude} + else + # Check in the includedir, if --prefix was specified + + eval "temp_includedir=${includedir}" + for i in \ + ${temp_includedir} /usr/local/include /usr/include /usr/pkg/include \ + `ls -dr /usr/include/tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/tcl.h" ; then + ac_cv_c_tclh=$i + break + fi + done + fi + ]) + fi + + # Print a message based on how we determined the include path + + if test x"${ac_cv_c_tclh}" = x ; then + AC_MSG_ERROR(tcl.h not found. Please specify its location with --with-tclinclude) + else + AC_MSG_RESULT(${ac_cv_c_tclh}) + fi + + # Convert to a native path and substitute into the output files. + + INCLUDE_DIR_NATIVE=`echo ${ac_cv_c_tclh}` + + TCL_INCLUDES="-I${INCLUDE_DIR_NATIVE}" + + AC_SUBST(TCL_INCLUDES) +]) + +#------------------------------------------------------------------------ +# SC_PUBLIC_TK_HEADERS -- +# +# Locate the installed public Tk header files +# +# Arguments: +# None. +# +# Requires: +# +# Results: +# +# Adds a --with-tkinclude switch to configure. +# Result is cached. +# +# Substs the following vars: +# TK_INCLUDES +#------------------------------------------------------------------------ + +AC_DEFUN([SC_PUBLIC_TK_HEADERS], [ + AC_MSG_CHECKING(for Tk public headers) + + AC_ARG_WITH(tkinclude, [ --with-tkinclude directory containing the public Tk header files.], with_tkinclude=${withval}) + + if test x"${with_tkinclude}" != x ; then + if test -f "${with_tkinclude}/tk.h" ; then + ac_cv_c_tkh=${with_tkinclude} + else + AC_MSG_ERROR([${with_tkinclude} directory does not contain Tk public header file tk.h]) + fi + else + AC_CACHE_VAL(ac_cv_c_tkh, [ + # Use the value from --with-tkinclude, if it was given + + if test x"${with_tkinclude}" != x ; then + ac_cv_c_tkh=${with_tkinclude} + else + # Check in the includedir, if --prefix was specified + + eval "temp_includedir=${includedir}" + for i in \ + ${temp_includedir} /usr/local/include /usr/include /usr/pkg/include \ + `ls -dr /usr/include/tk[[8-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr /usr/include/tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/tk.h" ; then + ac_cv_c_tkh=$i + break + fi + done + fi + ]) + fi + + # Print a message based on how we determined the include path + + if test x"${ac_cv_c_tkh}" = x ; then + AC_MSG_ERROR(tk.h not found. Please specify its location with --with-tkinclude) + else + AC_MSG_RESULT(${ac_cv_c_tkh}) + fi + + # Convert to a native path and substitute into the output files. 
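+    # Editor's note: on plain POSIX hosts the `echo` conversion below
+    # is effectively a no-op; it is the hook where ports needing path
+    # translation (e.g. Cygwin) would rewrite the directory into a
+    # native path. A hypothetical Makefile.am consumer would then use:
+    #
+    #   AM_CPPFLAGS = $(TCL_INCLUDES) $(TK_INCLUDES)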
+ + INCLUDE_DIR_NATIVE=`echo ${ac_cv_c_tkh}` + + TK_INCLUDES="-I${INCLUDE_DIR_NATIVE}" + + AC_SUBST(TK_INCLUDES) +]) diff --git a/kernel/xenomai-v3.2.4/config/apirev b/kernel/xenomai-v3.2.4/config/apirev new file mode 100644 index 0000000..60d3b2f --- /dev/null +++ b/kernel/xenomai-v3.2.4/config/apirev @@ -0,0 +1 @@ +15 diff --git a/kernel/xenomai-v3.2.4/config/docbook.m4 b/kernel/xenomai-v3.2.4/config/docbook.m4 new file mode 100644 index 0000000..f0391c1 --- /dev/null +++ b/kernel/xenomai-v3.2.4/config/docbook.m4 @@ -0,0 +1,170 @@ +# -*- Autoconf -*- +# Building Docbook-XML documents with the autotools. +# +# Check whether needed tools for generating Docbook XML doc are installed and +# running. +# - "docbook-root" is the name of the source tree subdirectory which is the +# docbook documentation root. It is expected to contain at least : +# catalog.in, used to match DTD generic addresses to their local copy ; +# pictures, with all the pictures referenced by the XML documents ; +# css, with the CSS referenced by the html documents. +# +# - "generated-doc-root" is the name of the source tree subdirectory which +# contains the generated documentation. It is expected to contain at least : +# html/pictures with the pictures needed by the html documents ; +# html/css with the css needed by the html documents ; +# html/* one directory by html document ; +# pdf/*.pdf one pdf file by pdf document ; +# +# - "docbook-dtd-version" is the version of the Docbook-XML DTD used. +# +# DBX_DOC_INIT(docbook-root, generated-doc-root, docbook-dtd-version) +# ------------------------------------------------------------------------------ +AC_DEFUN([DBX_DOC_INIT], +[ +DBX_DOC_ROOT="$1" +AC_SUBST(DBX_DOC_ROOT) + +AC_MSG_CHECKING(whether compiling Docbook XML documentation) +AC_ARG_ENABLE(dbx, + AS_HELP_STRING([--enable-dbx],[Build Docbook XML documentation.]), + [case "$enableval" in + y | ye | yes) DBX_DOC=yes;; + *) DBX_DOC="" ;; + esac]) + +if test \! -f "$srcdir/$1/catalog.in"; then + if test x$DBX_DOC = xyes; + then + AC_MSG_ERROR([$1/catalog.in could not be found in the source tree, +DocBook documentation can not be generated.]) + fi + AC_MSG_RESULT([not present.]) +else + AC_MSG_RESULT(${DBX_DOC:-no}) +fi +AM_CONDITIONAL(DBX_DOC,[test "$DBX_DOC" = yes]) + +# +DBX_GEN_DOC_ROOT="$2" +AC_SUBST(DBX_GEN_DOC_ROOT) + +# First: search for needed tools. +AC_CHECK_PROG(DBX_LINT, xmllint, xmllint) +if test x"$DBX_LINT" = x -a -n "$DBX_DOC"; then + AC_MSG_ERROR([xmllint was not found. Check your PATH variable and try again.]) +fi +AC_SUBST(DBX_LINT) + + +AC_CHECK_PROG(DBX_XSLTPROC, xsltproc, xsltproc) +if test x"$DBX_XSLTPROC" = x -a -n "$DBX_DOC"; then + AC_MSG_ERROR([xsltproc was not found. Check your PATH variable and try +again.]) +fi +AC_SUBST(DBX_XSLTPROC) + + +AC_CHECK_PROG(DBX_FOP, fop, fop) +if test x"$DBX_FOP" = x -a -n "$DBX_DOC"; then + AC_MSG_ERROR([fop was not found. Check your PATH variable and try again.]) +fi +AC_SUBST(DBX_FOP) + +# Second: search for DTD and XSL stylesheets. +DBX_DTD_VERSION="$3" + +AC_MSG_CHECKING(whether Docbook XML documentation generation can use network.) 
+AC_ARG_ENABLE(dbx-network,
+	AS_HELP_STRING([--enable-dbx-network],[Try to access Docbook DTD and
+XSL stylesheets through network (default is to die if local installation can not
+be found by configure).]),
+	[ case "$enable_dbx_network" in
+	y | ye | yes )
+		DBX_NET=yes;;
+	n | no )
+		DBX_NET="";;
+	esac
+	])
+
+# Do not define the --nonet xsltproc flag if the option --enable-dbx-network was
+# passed
+AC_MSG_RESULT(${DBX_NET:-no})
+if test -n "$DBX_NET"; then
+	unset DBX_MAYBE_NONET
+else
+	DBX_MAYBE_NONET=--nonet
+fi
+AC_SUBST(DBX_MAYBE_NONET)
+
+
+AC_MSG_CHECKING(for docbook-xml root dir)
+AC_ARG_WITH(dbx-root,
+	AS_HELP_STRING([--with-dbx-root],[specify the Docbook XML root (that
+is, the directory where docbookx.dtd should be found). Default is to use
+well-known locations (or network if --enable-dbx-network was passed).]),
+	[DBX_ROOT="$withval"])
+if test x"$DBX_ROOT" = x; then
+	# Still not found, we will hence look for it using the "well-known"
+	# places (well... for the moment, only the Debian package directory)
+	for dir in \
+		/usr/share/sgml/docbook/dtd/xml/$DBX_DTD_VERSION
+	do
+		if test -e $dir/docbookx.dtd; then
+			DBX_ROOT="$dir";
+			break;
+		fi
+	done
+fi
+AC_MSG_RESULT(${DBX_ROOT:-network})
+if test x"$DBX_ROOT" = x; then
+	if test x"$enable_dbx_network" != x -a -n "$DBX_DOC"; then
+		AC_MSG_ERROR([The Docbook XML DTD was not found, and accessing it
+through network is forbidden.])
+	fi
+	DBX_ROOT="http://www.oasis-open.org/docbook/xml/$DBX_DTD_VERSION/"
+else
+	DBX_ROOT="file://$DBX_ROOT"
+fi
+AC_SUBST(DBX_ROOT)
+
+
+AC_MSG_CHECKING(for docbook-xsl root dir)
+AC_ARG_WITH(docbook-xsl-root,
+	AS_HELP_STRING([--with-docbook-xsl-root],[specify the Docbook XML XSL
+stylesheet root. Default is to use well-known locations (or network if
+--enable-dbx-network was passed)]),
+	[ DBX_XSL_ROOT="$withval" ])
+if test x"$DBX_XSL_ROOT" = x; then
+	# Still not found, we will hence look for it using the "well-known"
+	# places (well... for the moment, only the Debian standard directory)
+	for dir in \
+		/usr/share/sgml/docbook/stylesheet/xsl/nwalsh
+	do
+		if test -e "$dir/html/docbook.xsl"; then
+			DBX_XSL_ROOT="$dir";
+			break;
+		fi
+	done
+fi
+AC_MSG_RESULT(${DBX_XSL_ROOT:-network})
+if test x"$DBX_XSL_ROOT" = x; then
+	if test x"$enable_dbx_network" != x -a -n "$DBX_DOC"; then
+		AC_MSG_ERROR([The Docbook XSL stylesheets were not found, and accessing them
+through network is forbidden.])
+	fi
+	DBX_XSL_ROOT="http://docbook.sourceforge.net/release/xsl/current"
+else
+	DBX_XSL_ROOT="file://$DBX_XSL_ROOT"
+fi
+AC_SUBST(DBX_XSL_ROOT)
+
+
+DBX_ABS_SRCDIR=`case $srcdir in
+	[\\/]* | ?:[\\/]* ) echo : ;;
+	*) echo false ;;
+esac`
+AM_CONDITIONAL(DBX_ABS_SRCDIR, $DBX_ABS_SRCDIR)
+
+
+])
diff --git a/kernel/xenomai-v3.2.4/config/version-code b/kernel/xenomai-v3.2.4/config/version-code
new file mode 100644
index 0000000..351227f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/version-code
@@ -0,0 +1 @@
+3.2.4
diff --git a/kernel/xenomai-v3.2.4/config/version-label b/kernel/xenomai-v3.2.4/config/version-label
new file mode 100644
index 0000000..351227f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/version-label
@@ -0,0 +1 @@
+3.2.4
diff --git a/kernel/xenomai-v3.2.4/configure.ac b/kernel/xenomai-v3.2.4/configure.ac
new file mode 100644
index 0000000..e7a1701
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/configure.ac
@@ -0,0 +1,1062 @@
+dnl Process this file with autoconf to produce a configure script.
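+dnl
+dnl Editor's sketch (values illustrative only): once this script has
+dnl been regenerated, a dual-kernel build would typically be configured
+dnl along the lines of:
+dnl
+dnl   autoreconf -fi
+dnl   ./configure --with-core=cobalt --enable-smp --enable-pshared
+dnl
+dnl Every option shown is defined by an AC_ARG_* block below.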
+AC_PREREQ(2.62) + +# The config/version-code file defines the general versioning data +# as: <major>.<minor>.<subrev>, giving the full Xenomai version stamp. +# config/apirev defines the revision level of the user API we +# implement (which actually expresses the revision level of the +# Copperplate library). The kernel ABI is Cobalt-specific and is +# defined for each architecture in the asm/features.h file. +AC_INIT([Xenomai],m4_normalize(m4_include([config/version-label])),xenomai@xenomai.org) + +AC_CONFIG_HEADERS(include/xeno_config.h) +AC_CONFIG_AUX_DIR(config) +AC_CONFIG_MACRO_DIR([config]) +AC_CONFIG_SRCDIR(lib/cobalt/thread.c) +AC_PREFIX_DEFAULT(/usr/xenomai) +# We want $prefix to be set for the configure script +if test x$prefix = xNONE; then + prefix=$ac_default_prefix +fi + +version_code=`cat $srcdir/config/version-code` +CONFIG_XENO_VERSION_MAJOR=`expr $version_code : '\([[0-9]]*\)'` +CONFIG_XENO_VERSION_MINOR=`expr $version_code : '[[0-9]]*\.\([[0-9]]*\)'` +CONFIG_XENO_REVISION_LEVEL=`expr $version_code : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` +CONFIG_XENO_UAPI_LEVEL=`cat $srcdir/config/apirev` +CONFIG_XENO_VERSION_STRING="$PACKAGE_VERSION" +topdir=`cd $srcdir && pwd` + +dnl Find out whether we build for Cobalt core, i.e. dual kernel mode, +dnl or Mercury (single image kernel). Defaults to Cobalt. +rtcore_type=cobalt +AC_MSG_CHECKING([whether we build for Cobalt or Mercury core]) +AC_ARG_WITH(core, + AS_HELP_STRING([--with-core=<cobalt | mercury>],[build for dual kernel or single image]), + [ + case "$withval" in + "" | y | ye | yes | n | no) + AC_MSG_ERROR([You must supply an argument to --with-core]) + ;; + cobalt|mercury) + rtcore_type=$withval + ;; + *) + AC_MSG_ERROR([--with-core=<cobalt | mercury>]) + esac + ]) +AC_MSG_RESULT($rtcore_type) + +AM_CONDITIONAL(XENO_COBALT,[test x$rtcore_type = xcobalt]) +test x$rtcore_type = xcobalt && AC_DEFINE(CONFIG_XENO_COBALT,1,[config]) +AM_CONDITIONAL(XENO_MERCURY,[test x$rtcore_type = xmercury]) +test x$rtcore_type = xmercury && AC_DEFINE(CONFIG_XENO_MERCURY,1,[config]) +XENO_TARGET_CORE=$rtcore_type + +if test "x$CFLAGS" = "x"; then + XENO_EMPTY_CFLAGS=true +else + XENO_EMPTY_CFLAGS=false +fi + +if eval test $includedir = /usr/include; then + AC_MSG_ERROR([Using /usr/include as includedir is not supported. Please change your --prefix or specify another --includedir]) +fi + +AC_CANONICAL_BUILD +AC_CANONICAL_HOST +AC_PROG_INSTALL + +AC_ARG_WITH(cc, + AS_HELP_STRING([--with-cc=compiler],[use specific C compiler]), + [ + case "$withval" in + "" | y | ye | yes | n | no) + AC_MSG_ERROR([You must supply an argument to --with-cc]) + ;; + esac + CC="$withval" + ]) +AC_PROG_CC + +# Do not let autoconf set the default value of CFLAGS +if $XENO_EMPTY_CFLAGS; then + CFLAGS="" +fi + +AC_PROG_CC_FOR_BUILD +AC_PROG_GREP +LT_PATH_NM + +XENO_SYMBOL_PREFIX= +LT_SYS_SYMBOL_USCORE +if test \! 
x$sys_symbol_underscore = xno; then + XENO_SYMBOL_PREFIX=_ +fi +AC_SUBST(XENO_SYMBOL_PREFIX) + +AC_DEFINE_UNQUOTED(CONFIG_XENO_BUILD_STRING,"$build",[Build system alias]) +XENO_BUILD_STRING="$build" +AC_DEFINE_UNQUOTED(CONFIG_XENO_HOST_STRING,"$host",[Host system alias]) +XENO_HOST_STRING="$host" +XENO_BUILD_COMPILER="`$CC -v 2>&1 | tail -n 1`" +AC_DEFINE_UNQUOTED(CONFIG_XENO_COMPILER,"$XENO_BUILD_COMPILER",[Compiler]) + +AM_INIT_AUTOMAKE([foreign no-exeext dist-bzip2 tar-ustar subdir-objects]) +m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])]) +AM_MAINTAINER_MODE +AM_PROG_AS +AM_PROG_LEX + +XENO_BUILD_ARGS="$ac_configure_args" + +AC_MSG_CHECKING([for target architecture]) + +if test x$host_alias = x; then + build_for=$host +else + build_for=$host_alias +fi + +use_tls=no +case "$build_for" in + i*86*-*) + use_tls=yes + target_cpu_arch=x86 + CONFIG_XENO_DEFAULT_PERIOD=100000 + ;; + ppc-*|powerpc-*) + use_tls=yes + target_cpu_arch=powerpc + CONFIG_XENO_DEFAULT_PERIOD=100000 + ;; + arm*-*) + target_cpu_arch=arm + CONFIG_XENO_DEFAULT_PERIOD=1000000 + ;; + aarch64-*) + target_cpu_arch=arm64 + CONFIG_XENO_DEFAULT_PERIOD=1000000 + ;; + x86_64-*|amd64-*) + use_tls=yes + target_cpu_arch=x86 + CONFIG_XENO_DEFAULT_PERIOD=100000 + ;; + *) + if test $rtcore_type = cobalt; then + echo "" + echo "**********************************************" + echo "Cobalt not supported over $build_for." + echo "**********************************************" + echo "" + exit 1 + else + CONFIG_XENO_DEFAULT_PERIOD=100000 + target_cpu_arch=`echo $build_for|cut -d- -f1` + fi + ;; +esac + +AC_MSG_RESULT([$target_cpu_arch]) +XENO_TARGET_ARCH=$target_cpu_arch +AC_ENABLE_SHARED +AC_PROG_LIBTOOL + +dnl +dnl Parse options +dnl + +dnl Debug build (default: off, no symbols) + +debug_mode= +debug_symbols= +AC_MSG_CHECKING(whether to enable debug mode) +AC_ARG_ENABLE(debug, + AS_HELP_STRING([--enable-debug], [Enable debug mode in programs]), + [case "$enableval" in + symbols) + debug_symbols=y + ;; + y | yes | partial) + debug_mode=partial + debug_symbols=y + ;; + full) + debug_mode=full + debug_symbols=y + ;; + n | no) + debug_mode= + debug_symbols= + ;; + *) + AC_MSG_ERROR([invalid debug level $enableval]) + ;; + esac]) +AC_MSG_RESULT(${debug_mode:-no}) +AM_CONDITIONAL(XENO_DEBUG,[test \! x$debug_mode = x]) +test \! 
x$debug_mode = x && AC_DEFINE(CONFIG_XENO_DEBUG,1,[config]) +AM_CONDITIONAL(XENO_DEBUG_FULL,[test x$debug_mode = xfull]) +test x$debug_mode = xfull && AC_DEFINE(CONFIG_XENO_DEBUG_FULL,1,[config]) + +dnl Demo (default: on) + +AC_ARG_ENABLE(demo, + AS_HELP_STRING([--disable-demo], [Disable demonstration code])) +AM_CONDITIONAL(XENO_ENABLE_DEMO,[test x$enable_demo != xno]) + +dnl Testsuite (default: on) + +AC_ARG_ENABLE(testsuite, + AS_HELP_STRING([--disable-testsuite], [Disable testsuite])) +AM_CONDITIONAL(XENO_ENABLE_TESTSUITE,[test x$enable_testsuite != xno]) + +dnl Low resolution clock (default: off) + +unset lores_clock +AC_MSG_CHECKING(whether to enable the low resolution clock) +AC_ARG_ENABLE(lores-clock, + AS_HELP_STRING([--enable-lores-clock], [Enable low resolution clock]), + [case "$enableval" in + y | yes) lores_clock=y ;; + *) unset lores_clock ;; + esac]) +AC_MSG_RESULT(${lores_clock:-no}) +if test x$lores_clock = x; then + AC_DEFINE(CONFIG_XENO_LORES_CLOCK_DISABLED,1,[config]) +fi + +dnl Raw monotonic clock (default: cobalt=on, mercury=off) + +if test $rtcore_type = cobalt; then + raw_monotonic_clock=y +else + raw_monotonic_clock= +fi +AC_MSG_CHECKING(whether we may use CLOCK_MONOTONIC_RAW) +AC_ARG_ENABLE(clock-monotonic-raw, + AS_HELP_STRING([--enable-clock-monotonic-raw], [Use CLOCK_MONOTONIC_RAW for timings]), + [case "$enableval" in + y | yes) raw_monotonic_clock=y ;; + *) unset raw_monotonic_clock ;; + esac]) +AC_MSG_RESULT(${raw_monotonic_clock:-no}) +if test x$raw_monotonic_clock = xy; then + AC_DEFINE(CONFIG_XENO_RAW_CLOCK_ENABLED,1,[config]) +fi + +checkflags="-nostdinc -isystem \$(SYSROOT)/usr/include -Wbitwise -Wno-transparent-union -D_GNU_SOURCE -D_XOPEN_SOURCE=500 -D_REENTRANT \$(DEFS) \$(DEFAULT_INCLUDES) \$(INCLUDES) \$(AM_CPPFLAGS) \$(CPPFLAGS) -I\$(top_srcdir)/include -isystem \$(shell \$(CC) -print-file-name=include) -include \$(top_builddir)/include/xeno_config.h \$(shell \$(CC) -dM -E -xc /dev/null|sed -e 's/^\\#define /-D/' -e \"s/ /=\'/\" -e \"s/\$\$/\'/\")" + +dnl Used with sparse +AC_SUBST(CHECKFLAGS, $checkflags) + +dnl Enable assertions (default: depends on debug mode) + +test x$debug_mode = x || use_assert=y +AC_MSG_CHECKING(whether assertions should be enabled) +AC_ARG_ENABLE(assert, + AS_HELP_STRING([--enable-assert], [Enable runtime assertions]), + [case "$enableval" in + y | yes) use_assert=y ;; + *) unset use_assert ;; + esac]) +AC_MSG_RESULT(${use_assert:-no}) + +dnl Enable asynchronous cancellation (default: off) + +async_cancel= +AC_MSG_CHECKING(whether asynchronous cancellation of threads is enabled) +AC_ARG_ENABLE(async-cancel, + AS_HELP_STRING([--enable-async-cancel], [Enable asynchronous cancellation]), + [case "$enableval" in + y | yes) async_cancel=y ;; + n | no) unset async_cancel ;; + esac]) +AC_MSG_RESULT(${async_cancel:-no}) + +if test x$async_cancel = xy; then + AC_DEFINE(CONFIG_XENO_ASYNC_CANCEL,1,[config]) +fi + +dnl Work-around for broken PI with condvars on Mercury (default: off) + +unset workaround_condvar_pi +AC_MSG_CHECKING(whether to enable the workaround for broken PI with condvars) +AC_ARG_ENABLE(condvar-workaround, + AS_HELP_STRING([--enable-condvar-workaround], [Enable workaround for broken PI with condvars in glibc]), + [case "$enableval" in + y | yes) workaround_condvar_pi=y ;; + *) unset workaround_condvar_pi ;; + esac]) +AC_MSG_RESULT(${workaround_condvar_pi:-no}) +if test x$workaround_condvar_pi = xy; then + if test $rtcore_type = mercury; then + AC_DEFINE(CONFIG_XENO_WORKAROUND_CONDVAR_PI,1,[config]) + else + 
AC_MSG_WARN([PI workaround for condvars useless over Cobalt - ignoring])
+	fi
+fi
+
+dnl Lazy schedparam propagation for Cobalt (default: off)
+
+unset lazy_setsched_update
+AC_MSG_CHECKING(whether to enable lazy scheduling parameter update)
+AC_ARG_ENABLE(lazy-setsched,
+	AS_HELP_STRING([--enable-lazy-setsched], [Enable lazy scheduling parameter update]),
+	[case "$enableval" in
+	y | yes) lazy_setsched_update=y ;;
+	*) unset lazy_setsched_update ;;
+	esac])
+AC_MSG_RESULT(${lazy_setsched_update:-no})
+if test x$lazy_setsched_update = xy; then
+	if test x$rtcore_type = xcobalt; then
+		AC_DEFINE(CONFIG_XENO_LAZY_SETSCHED,1,[config])
+	else
+		AC_MSG_WARN([No lazy scheduling parameter updates over Mercury - ignoring])
+	fi
+fi
+
+dnl Enable shared multi-processing (default: off)
+
+use_pshared=
+AC_MSG_CHECKING(whether shared multi-processing should be supported)
+AC_ARG_ENABLE(pshared,
+	AS_HELP_STRING([--enable-pshared], [Enable shared multi-processing for capable skins]),
+	[case "$enableval" in
+	y | yes) use_pshared=y ;;
+	*) unset use_pshared ;;
+	esac])
+AC_MSG_RESULT(${use_pshared:-no})
+
+if test x$use_pshared = xy; then
+	AC_DEFINE(CONFIG_XENO_PSHARED,1,[config])
+fi
+AM_CONDITIONAL(XENO_PSHARED,[test x$use_pshared = xy])
+
+dnl Allocator selection
+
+localmem_allocator=heapmem
+AC_MSG_CHECKING([for process-local memory allocator])
+AC_ARG_WITH(localmem,
+	AS_HELP_STRING([--with-localmem=<heapmem | tlsf>],[Select process-local memory allocator]),
+	[
+	case "$withval" in
+	"" | y | ye | yes | n | no)
+		AC_MSG_ERROR([You must supply an argument to --with-localmem])
+		;;
+	heapmem|tlsf)
+		localmem_allocator=$withval
+		;;
+	*)
+		AC_MSG_ERROR([--with-localmem=<heapmem | tlsf>])
+	esac
+	])
+AC_MSG_RESULT($localmem_allocator)
+
+dnl Registry support in user-space (FUSE-based, default: off)
+
+use_registry=
+registry_root=
+AC_MSG_CHECKING(whether the registry should be enabled)
+AC_ARG_ENABLE(registry,
+	AS_HELP_STRING([--enable-registry], [Export real-time objects to a registry]),
+	[case "$enableval" in
+	y | yes) use_registry=y; registry_root=/var/run/xenomai ;;
+	/*) use_registry=y; registry_root=$enableval ;;
+	*) unset use_registry ;;
+	esac])
+AC_MSG_RESULT(${use_registry:-no}${registry_root:+[,] mounted on ${registry_root}})
+
+if test x$use_registry = xy; then
+	PKG_CHECK_MODULES(FUSE, fuse)
+	FUSE_CFLAGS="$FUSE_CFLAGS -DFUSE_USE_VERSION=25"
+	AC_DEFINE(CONFIG_XENO_REGISTRY,1,[config])
+	AC_DEFINE_UNQUOTED(CONFIG_XENO_REGISTRY_ROOT,"$registry_root",[config])
+fi
+AM_CONDITIONAL(XENO_REGISTRY,[test x$use_registry = xy])
+
+dnl SMP support (default: on for cobalt/x86, off otherwise)
+
+CONFIG_SMP=
+if test $target_cpu_arch = x86 -a $rtcore_type = cobalt; then
+	CONFIG_SMP=y
+fi
+AC_MSG_CHECKING(for SMP support)
+AC_ARG_ENABLE(smp,
+	AS_HELP_STRING([--enable-smp], [Enable SMP support]),
+	[case "$enableval" in
+	y | yes) CONFIG_SMP=y ;;
+	*) unset CONFIG_SMP ;;
+	esac])
+AC_MSG_RESULT(${CONFIG_SMP:-no})
+
+dnl Runtime sanity checks (default: on)
+
+CONFIG_XENO_SANITY=y
+AC_MSG_CHECKING(whether to enable sanity checks)
+AC_ARG_ENABLE(sanity,
+	AS_HELP_STRING([--enable-sanity], [Enable sanity checks at runtime]),
+	[case "$enableval" in
+	y | yes) CONFIG_XENO_SANITY=y ;;
+	*) unset CONFIG_XENO_SANITY ;;
+	esac])
+AC_MSG_RESULT(${CONFIG_XENO_SANITY:-no})
+
+if test x$CONFIG_XENO_SANITY = xy; then
+	AC_DEFINE(CONFIG_XENO_SANITY,1,[config])
+else
+	AC_DEFINE(CONFIG_XENO_SANITY,0,[config])
+fi
+
+dnl VSYSCALL (default: enabled) for Cobalt/x86
+
+if test $target_cpu_arch = x86 -a
$rtcore_type = cobalt; then + CONFIG_XENO_X86_VSYSCALL=y + AC_MSG_CHECKING(for x86 VSYSCALL availability) + AC_ARG_ENABLE(x86-vsyscall, + AS_HELP_STRING([--enable-x86-vsyscall], [Assume VSYSCALL enabled for issuing syscalls]), + [case "$enableval" in + y | yes) CONFIG_XENO_X86_VSYSCALL=y ;; + *) unset CONFIG_XENO_X86_VSYSCALL ;; + esac]) + AC_MSG_RESULT(${CONFIG_XENO_X86_VSYSCALL:-no}) +fi + +dnl Documentation package. + +XENO_BUILD_DOC= +XENO_DOC_GIT= +AC_MSG_CHECKING(whether to build documentation) +AC_ARG_ENABLE(doc-build, + AS_HELP_STRING([--enable-doc-build], [Build Xenomai documentation]), + [case "$enableval" in + y | yes) XENO_BUILD_DOC=y ;; + n | no) ;; + *) if test \! x$enableval = x; then + XENO_BUILD_DOC=y + XENO_DOC_GIT=$enableval + fi + ;; + esac]) +AM_CONDITIONAL(XENO_BUILD_DOC,[test "$XENO_BUILD_DOC" = y]) +AC_SUBST(XENO_DOC_GIT) + +AC_CHECK_PROG(DOXYGEN, doxygen, doxygen) + +if test x${XENO_BUILD_DOC} = xy -a x"$DOXYGEN" = x ; then + AC_MSG_ERROR([Missing the Doxygen tools to build the documentation]) +fi + +AC_CHECK_PROG(DOXYGEN_HAVE_DOT, dot, YES, NO) +if test x"$DOXYGEN_HAVE_DOT" = xYES ; then + DOXYGEN_SHOW_INCLUDE_FILES=NO +else + DOXYGEN_SHOW_INCLUDE_FILES=YES +fi + +LATEX_BATCHMODE=YES +LATEX_MODE=batch +AC_MSG_CHECKING(for LaTeX mode) +AC_ARG_ENABLE(verbose-latex, + AS_HELP_STRING([--enable-verbose-latex], [Disable LaTeX non-stop mode]), + [case "$enableval" in + y | yes) + LATEX_BATCHMODE=NO + LATEX_MODE=non-stop + ;; + *) ;; + esac]) +AC_MSG_RESULT(${LATEX_MODE}) + +AC_CHECK_PROG(ASCIIDOC, asciidoc, asciidoc) +if test x${XENO_BUILD_DOC} = xy -a x"$ASCIIDOC" = x ; then + AC_MSG_ERROR([Missing the asciidoc tool to build the documentation]) +fi +AC_CHECK_PROG(A2X, a2x, a2x) +if test x${XENO_BUILD_DOC} = xy -a x"$A2X" = x ; then + AC_MSG_ERROR([Missing the a2x tool to build the documentation]) +fi +AC_CHECK_PROG(W3M, w3m, w3m) +if test x${XENO_BUILD_DOC} = xy -a x"$W3M" = x ; then + AC_MSG_ERROR([Missing the w3m tool to build the documentation]) +fi + +dnl Set better default values for pdfdir, mandir and htmldir +dnl This won't override user settings, unless the user wants +dnl the default values, which we ban... + +if test x$pdfdir = x'${docdir}'; then + pdfdir='${docdir}/pdf' +fi +AC_SUBST(pdfdir) +if test x$mandir = x'${docdir}'; then + mandir='${docdir}/man' +fi +AC_SUBST(mandir) +if test x$htmldir = x'${docdir}'; then + htmldir='${docdir}/html' +fi +AC_SUBST(htmldir) + +dnl Check for Valgrind client API support. +dnl Some GCC releases produce broken assembly code for Valgrind +dnl client calls, so we check this too. --disable-valgrind-client +dnl may be used to forcibly turn this API off. + +AC_CHECK_HEADER(valgrind/valgrind.h,CONFIG_XENO_VALGRIND_API=y) + +AC_MSG_CHECKING(for Valgrind client API) +AC_ARG_ENABLE(valgrind-client, + AS_HELP_STRING([--enable-valgrind-client], [Enable Valgrind client API]), + [case "$enableval" in + n | no) unset CONFIG_XENO_VALGRIND_API ;; + esac]) +AC_MSG_RESULT(${CONFIG_XENO_VALGRIND_API:-no}) + +if test \! x$CONFIG_XENO_VALGRIND_API = x ; then + AC_MSG_CHECKING([whether GCC emits sane code for Valgrind calls]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <valgrind/valgrind.h>]], + [[return RUNNING_ON_VALGRIND;]])], + [ac_cv_valgrind_client=yes], + [ac_cv_valgrind_client="no (DISABLING)"]) + if [[ \! 
"$ac_cv_valgrind_client" = yes ]]; then + unset CONFIG_XENO_VALGRIND_API + fi + AC_MSG_RESULT([$ac_cv_valgrind_client]) +fi + +test x$CONFIG_XENO_VALGRIND_API = xy && AC_DEFINE(CONFIG_XENO_VALGRIND_API,1,[config]) + +dnl Check for obstack support in *libc +AC_CHECK_HEADERS(obstack.h,libc_has_obstack=y) +AM_CONDITIONAL(XENO_PRIVATE_OBSTACK,[test x$libc_has_obstack = x]) + +dnl Check for presence of some headers +AC_CHECK_HEADERS(mqueue.h) + +dnl Check for presence of some routines we need +save_LIBS="$LIBS" +LIBS="$LIBS -lrt -lpthread" +AC_CHECK_FUNCS([pthread_mutexattr_setprotocol \ + pthread_mutexattr_getprotocol \ + pthread_mutexattr_getprioceiling \ + pthread_mutexattr_setprioceiling \ + pthread_mutexattr_setrobust \ + pthread_mutexattr_setrobust_np \ + pthread_mutex_getprioceiling \ + pthread_mutex_setprioceiling \ + pthread_condattr_getclock \ + pthread_condattr_setclock \ + pthread_spin_lock fork \ + pthread_attr_setaffinity_np \ + pthread_setaffinity_np \ + pthread_getattr_np \ + pthread_atfork \ + pthread_setname_np \ + pthread_setschedprio \ + sched_getcpu \ + clock_nanosleep \ + shm_open \ + shm_unlink \ + backtrace]) +LIBS="$save_LIBS" + +save_CPPFLAGS="$CPPFLAGS" +CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" +AC_CHECK_DECLS([PTHREAD_PRIO_NONE], [], [], [#include <pthread.h>]) +AC_CHECK_DECLS([PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP], [], [], [#include <pthread.h>]) +AC_CHECK_DECLS([PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP], [], [], [#include <pthread.h>]) +CPPFLAGS=$save_CPPFLAGS + +dnl If we can't set the clock for condvar timeouts, then +dnl we have to restrict the Copperplate clock to CLOCK_REALTIME over +dnl Mercury unconditionally. Cobalt is different: although we may not +dnl have pthread_condattr_setclock() available from the threading library, +dnl Copperplate is still able to attach Cobalt condvars to specific clocks +dnl internally, therefore we don't have to use a restricted clock in +dnl Copperplate. +dnl +dnl In effect this means that updating the host system date may affect +dnl wait times of all blocking services implemented by Copperplate over +dnl Mercury, but will only affect explicit calls to pthread_cond_timedwait() +dnl over Cobalt. +dnl +dnl This is a provision for running over legacy threading libraries +dnl such as linuxthreads. +dnl +dnl CAUTION: the CLOCK_COPPERPLATE value is part of the ABI between +dnl the Xenomai core libraries and the applications. Therefore it shall +dnl remain stable even if applications depend on a different libc +dnl than Xenomai libraries were built against originally. Hence the +dnl built-in CONFIG_XENO_COPPERPLATE_CLOCK_RESTRICTED flag, which +dnl won't vary for a given Xenomai installation. + +if test $rtcore_type = mercury; then + AC_CHECK_FUNC(pthread_condattr_setclock,, + [AC_DEFINE(CONFIG_XENO_COPPERPLATE_CLOCK_RESTRICTED, 1,[config])]) +fi + +dnl Check that Copperplate can implement a shared heap if +dnl --enable-pshared was given. 
+if test x$use_pshared = xy; then + AC_CHECK_FUNC(shm_open,, + [AC_MSG_ERROR([shm_open() is missing, --disable-pshared is required])]) +fi + +dnl +dnl Produce the info needed to build xeno_config.h +dnl + +AC_DEFINE_UNQUOTED(CONFIG_XENO_VERSION_MAJOR,$CONFIG_XENO_VERSION_MAJOR,[config]) +AC_DEFINE_UNQUOTED(CONFIG_XENO_VERSION_MINOR,$CONFIG_XENO_VERSION_MINOR,[config]) +AC_DEFINE_UNQUOTED(CONFIG_XENO_REVISION_LEVEL,$CONFIG_XENO_REVISION_LEVEL,[config]) +AC_DEFINE_UNQUOTED(CONFIG_XENO_UAPI_LEVEL,$CONFIG_XENO_UAPI_LEVEL,[config]) +AC_DEFINE_UNQUOTED(CONFIG_XENO_VERSION_STRING,"$CONFIG_XENO_VERSION_STRING",[config]) +AC_DEFINE_UNQUOTED(CONFIG_XENO_PREFIX,"$prefix",[config]) +AC_DEFINE_UNQUOTED(CONFIG_XENO_BUILD_ARGS,"$XENO_BUILD_ARGS",[config]) + +dnl +dnl Features we enabled and likely want to find at kernel level. +dnl When applicable, we reuse the kernel option symbol so that we +dnl don't need to make particular cases with kernel code which may +dnl also be compiled in user-space libs. +dnl + +test x$CONFIG_XENO_X86_VSYSCALL = xy && AC_DEFINE(CONFIG_XENO_X86_VSYSCALL,1,[config]) +test x$CONFIG_SMP = xy && AC_DEFINE(CONFIG_SMP,1,[config]) + +dnl +dnl Userland may want to know about MMU availability on the target. +dnl For now, we assume that having fork() means having an MMU. +dnl +test x$ac_cv_func_fork = xyes && AC_DEFINE(CONFIG_MMU,1,[config]) + +AM_CONDITIONAL(CONFIG_XENO_SHARED,[test "$enable_shared" = 'yes']) + +# Default sampling period (ns) used in various tests +AC_DEFINE_UNQUOTED(CONFIG_XENO_DEFAULT_PERIOD,$CONFIG_XENO_DEFAULT_PERIOD,[config]) + +dnl Allocator for Copperplate. Note: in dual kernel mode, we don't +dnl want malloc, no matter what: pick either heapmem or tlsf, defaults +dnl to heapmem. Force switch to malloc over the Mercury core in debug +dnl mode, to ease debugging with valgrind, instrumented glibc etc. + +if test $rtcore_type = cobalt -o x$debug_mode = x; then + case $localmem_allocator in + heapmem) + AC_DEFINE(CONFIG_XENO_HEAPMEM,1,[config]) + use_heapmem=y + use_tlsf= + ;; + tlsf) + AC_DEFINE(CONFIG_XENO_TLSF,1,[config]) + use_tlsf=y + use_heapmem= + ;; + esac +else + use_heapmem= + use_tlsf= + AC_MSG_WARN([using malloc() for private memory in debug mode]) +fi +AM_CONDITIONAL(XENO_TLSF,[test x$use_tlsf = xy]) +AM_CONDITIONAL(XENO_HEAPMEM,[test x$use_heapmem = xy]) + +dnl Check for atomic builtins. For now we only check for the legacy +dnl interface, i.e. __sync_*. 
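+dnl
+dnl Editor's note: the probe below links a unit equivalent to this
+dnl sketch built on the legacy GCC atomics:
+dnl
+dnl   int v = 1;
+dnl   __sync_add_and_fetch(&v, 1);  /* atomic increment, returns new value */
+dnl   __sync_sub_and_fetch(&v, 1);  /* atomic decrement, returns new value */
+dnl
+dnl There is no fallback path: configure aborts when they are missing.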
+ +AC_CACHE_CHECK([whether the compiler provides atomic builtins], ac_cv_atomic_builtins, [ +LIBS= +AC_TRY_LINK([ +int atomic_sub(int i) { return __sync_sub_and_fetch(&i, 1); } +int atomic_add(int i) { return __sync_add_and_fetch(&i, 1); } +], [], ac_cv_atomic_builtins="yes") +]) +if test "$ac_cv_atomic_builtins" != "yes"; then + AC_MSG_ERROR([compiler does not support atomic builtins]) +fi + +unset want_fortify +AC_MSG_CHECKING(for fortify support) +AC_ARG_ENABLE([fortify], + AC_HELP_STRING([--enable-fortify], + [Enable _FORTIFY_SOURCE]), + [case "$enableval" in + y | yes) want_fortify=yes;; + *) want_fortify=no;; + esac]) +AC_MSG_RESULT(${want_fortify:-autodetect}) +AC_CHECK_FUNC(__vfprintf_chk, + [AC_DEFINE(CONFIG_XENO_FORTIFY, 1,[config])], + [if test x"$want_fortify" = "xyes"; then + AC_MSG_ERROR([Fortify support enabled but not available in *libc]) + fi]) + +dnl Exported CFLAGS and LDFLAGS, shared with internal flags +XENO_USER_APP_CFLAGS="-D_GNU_SOURCE -D_REENTRANT -fasynchronous-unwind-tables" +XENO_USER_APP_LDFLAGS= + +if test x$use_registry = xy; then + XENO_FUSE_CFLAGS=$FUSE_CFLAGS + XENO_USER_APP_LDFLAGS="$XENO_USER_APP_LDFLAGS $FUSE_LIBS" +fi + +dnl Internal CFLAGS and LDFLAGS, may be enhanced per-arch below +XENO_USER_CFLAGS="$XENO_USER_APP_CFLAGS -pipe -fstrict-aliasing \ +-Wall -Wstrict-prototypes -Wmissing-prototypes -Wno-long-long \ +-Wno-unused-parameter -Wno-format-truncation -Werror -Wformat-security \ +-D__XENO__ -D__IN_XENO__" +if test x$want_fortify = xyes -a x$debug_mode != xfull; then + XENO_USER_CFLAGS="$XENO_USER_CFLAGS -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2" +fi +XENO_USER_LDADD="$XENO_USER_APP_LDFLAGS" + +dnl Add any flags forced on the command line, but only +dnl for building apps. +XENO_USER_APP_CFLAGS="$CFLAGS $XENO_USER_APP_CFLAGS" +XENO_USER_APP_LDFLAGS="$LDFLAGS $XENO_USER_APP_LDFLAGS" + +if test x$debug_mode = xpartial; then + XENO_USER_CFLAGS="-g -O2 $XENO_USER_CFLAGS" +elif test x$debug_mode = xfull; then + XENO_USER_CFLAGS="-g -O0 $XENO_USER_CFLAGS" +elif test x$debug_symbols = xy; then + XENO_USER_CFLAGS="-g -O2 $XENO_USER_CFLAGS" +else + XENO_USER_CFLAGS="-O2 $XENO_USER_CFLAGS" +fi + +if test x$use_assert = x; then + XENO_USER_CFLAGS="-DNDEBUG $XENO_USER_CFLAGS" +fi + +XENO_USER_CFLAGS_STDLIB="$XENO_USER_CFLAGS" +XENO_USER_CFLAGS="$XENO_USER_CFLAGS -I$topdir/include/$rtcore_type" + +AC_MSG_CHECKING([whether ld supports @file]) +AC_CACHE_VAL(ac_cv_ld_file_option, + AC_LANG_SAVE + AC_LANG_C + save_LDFLAGS="$LDFLAGS" + [LDFLAGS="-Wl,@/dev/null"] + AC_LINK_IFELSE([AC_LANG_SOURCE([main(){}])], + [ac_cv_ld_file_option=yes], + [ac_cv_ld_file_option=no]) + LDFLAGS="$save_LDFLAGS" + AC_LANG_RESTORE) +AC_MSG_RESULT(${ac_cv_ld_file_option:-no}) +LD_FILE_OPTION=$ac_cv_ld_file_option +AC_SUBST(LD_FILE_OPTION) + +AC_MSG_CHECKING(whether to enable dlopening of Xenomai libraries) +AC_ARG_ENABLE(dlopen-libs, + AC_HELP_STRING([--enable-dlopen-libs], [Allow dynamic loading of Xenomai libraries]), + [case "$enableval" in + y | yes) CONFIG_XENO_LIBS_DLOPEN=y ;; + *) CONFIG_XENO_LIBS_DLOPEN=$enableval ;; + esac]) +AC_MSG_RESULT(${CONFIG_XENO_LIBS_DLOPEN:-no}) +if test x$CONFIG_XENO_LIBS_DLOPEN = xy; then + AC_DEFINE(CONFIG_XENO_LIBS_DLOPEN,1,[config]) + AC_DEFINE_UNQUOTED(CONFIG_XENO_TLS_MODEL,"global-dynamic",[TLS model]) + XENO_LIB_LDFLAGS="-Wl,-z -Wl,nodelete" +else + AC_DEFINE_UNQUOTED(CONFIG_XENO_TLS_MODEL,"initial-exec",[TLS model]) + XENO_LIB_LDFLAGS="-Wl,-z -Wl,nodlopen" +fi +AM_CONDITIONAL(CONFIG_XENO_LIBS_DLOPEN,[test x$CONFIG_XENO_LIBS_DLOPEN = xy]) + 
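+dnl Editor's note (sketch): the TLS model chosen above is why this
+dnl switch exists. "initial-exec" TLS is faster but only safe for
+dnl libraries loaded at program startup, so
+dnl
+dnl   ./configure --enable-dlopen-libs
+dnl
+dnl moves the libraries to the "global-dynamic" model and links them
+dnl with -z nodelete, keeping their TLS blocks valid across dlclose().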
+AC_MSG_CHECKING(whether to enable TLS support) +AC_ARG_ENABLE([tls], + AC_HELP_STRING([--enable-tls], + [Enable thread local storage]), + [use_tls=$enableval]) +AC_MSG_RESULT($use_tls) + +dnl Check whether the compiler supports the __thread keyword. +if test "x$use_tls" != xno; then + AC_CACHE_CHECK([for __thread keyword], libc_cv_gcc_tls, + [cat > conftest.c <<\EOF +__thread int a __attribute__ ((tls_model ("initial-exec"))) = 42; +__thread int b __attribute__ ((tls_model ("global-dynamic"))) = 12; +EOF + if AC_TRY_COMMAND([${CC-cc} $CFLAGS $CPPFLAGS -c -Werror conftest.c >&AS_MESSAGE_LOG_FD]); then + libc_cv_gcc_tls=yes + else + libc_cv_gcc_tls=no + fi + rm -f conftest*]) + if test "$libc_cv_gcc_tls" = yes; then + AC_DEFINE(HAVE_TLS,1,[config]) + fi +fi + +AC_MSG_CHECKING(location for test executables) +AC_ARG_WITH(testdir, + AS_HELP_STRING([--with-testdir=<test-exec-dir>],[location for test executables (defaults to $bindir)]), + [ + case "$withval" in + "" | y | ye | yes | n | no) + AC_MSG_ERROR([You must supply an argument to --with-testdir]) + ;; + esac + XENO_TEST_DIR="$withval" + ], [XENO_TEST_DIR=$bindir]) +AC_MSG_RESULT($XENO_TEST_DIR) + +demodir='${exec_prefix}/demo' +AC_MSG_CHECKING(location for demo programs) +AC_ARG_WITH(demodir, + AS_HELP_STRING([--with-demodir=<demo-program-dir>],[location for demo programs (defaults to $prefix/demo)]), + [ + case "$withval" in + "" | y | ye | yes | n | no) + AC_MSG_ERROR([You must supply an argument to --with-demodir]) + ;; + esac + XENO_DEMO_DIR="$withval" + ], [XENO_DEMO_DIR=$demodir]) +AC_MSG_RESULT($XENO_DEMO_DIR) + +AC_MSG_CHECKING([for test source generation]) +AC_RUN_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], + [AC_MSG_RESULT(ok)], [AC_MSG_RESULT(failed)], [AC_MSG_RESULT(untestable)]) + +dnl CAUTION: We need to have the CONFIG_XENO_XX symbols always +dnl defined when the configuration header is read, but we want the +dnl Autoconf-produced symbols to be defined only when compiling +dnl Xenomai. This way, we won't pollute the namespace with the latter +dnl when our configuration header is indirectly included by a client +dnl application. To achieve this, we ask autoheader to produce the +dnl following header structure: +dnl #define CONFIG_XX +dnl #define CONFIG_XX ... +dnl #ifdef __IN_XENO__ +dnl <Autoconf-defined symbols> +dnl #endif /* __IN_XENO__ */ +dnl This is quite a hack since we have to rely on the fact that +dnl all Autoconf-generated symbols are lexicographically sorted +dnl after CONFIG_XENO_XX ones, but, well... +dnl Use a key which will cause the verbatim string to be put after +dnl all CONFIG_XENO_XX symbols, but still before any Autoconf-generated +dnl symbol, hence CONFIG_XENO___. +AH_VERBATIM(CONFIG_XENO___,[#ifdef __IN_XENO__]) + +dnl Now we can close the conditional section, right after all +dnl Autoconf-generated symbols have been listed. +AH_BOTTOM([#endif /* __IN_XENO__ */]) + +if test $rtcore_type = cobalt; then + XENO_USER_CFLAGS="-I$topdir/lib/cobalt/arch/$target_cpu_arch/include -I$topdir/kernel/cobalt/arch/$target_cpu_arch/include $XENO_USER_CFLAGS" + XENO_COBALT_CFLAGS="$XENO_USER_CFLAGS" + case "$build_for" in + i*86*-*) XENO_COBALT_CFLAGS="$XENO_COBALT_CFLAGS -fno-omit-frame-pointer";; + esac + +dnl Build wrapping information. XENO_POSIX_WRAPPERS lists all wrapping +dnl directives in a format the linker understands, for building the +dnl in-tree executables which require POSIX symbol wrapping. 
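+dnl
+dnl Editor's note: each wrappers-file entry is a GNU ld "--wrap SYMBOL"
+dnl directive. Assuming an entry "--wrap open", a wrapped executable
+dnl resolves calls as follows:
+dnl
+dnl   open(path, flags);         /* routed to __wrap_open() in libcobalt */
+dnl   __real_open(path, flags);  /* still reaches the libc open() */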
+
+	modechk_wrappers="$topdir/lib/cobalt/modechk.wrappers"
+	cobalt_wrappers="$topdir/lib/cobalt/cobalt.wrappers"
+	if [[ $ac_cv_ld_file_option = yes ]]; then
+		XENO_POSIX_WRAPPERS="-Wl,@$modechk_wrappers -Wl,@$cobalt_wrappers"
+	else
+		XENO_POSIX_WRAPPERS=`cat $modechk_wrappers $cobalt_wrappers | \
+			while read wrap_option symbol ; do \
+				echo -n "-Wl,$wrap_option,$symbol " ; \
+			done`
+	fi
+
+	AC_SUBST(XENO_POSIX_WRAPPERS)
+	AC_SUBST([CONFIG_STATUS_DEPENDENCIES], ["$modechk_wrappers $cobalt_wrappers"])
+fi
+
+dnl Multi-library support.
+AC_MSG_CHECKING([whether to enable soname suffix for libraries])
+AC_ARG_ENABLE([so-suffix],
+	[AS_HELP_STRING([--enable-so-suffix],
+	[enable soname suffix (for Mercury only)])],
+	[enable_so_suffix=$enableval],
+	[enable_so_suffix="no"])
+AC_MSG_RESULT(${enable_so_suffix})
+if test "$enable_so_suffix" = "yes"; then
+	if test "$rtcore_type" != mercury; then
+		AC_MSG_ERROR([soname suffix is only allowed for Mercury core])
+	else
+		CORE="_$rtcore_type"
+	fi
+fi
+
+dnl
+dnl Build the Makefiles
+dnl
+
+XENO_AUTOINIT_LDFLAGS='$(top_builddir)/lib/boilerplate/init/bootstrap-internal.o'" -Wl,--wrap=main -Wl,--dynamic-list=$topdir/scripts/dynlist.ld"
+AC_SUBST(XENO_AUTOINIT_LDFLAGS)
+
+XENO_CORE_LDADD="\$(top_builddir)/lib/$rtcore_type/lib${rtcore_type}.la"
+if test $rtcore_type = cobalt; then
+	XENO_CORE_LDADD="$XENO_CORE_LDADD \$(top_builddir)/lib/cobalt/libmodechk.la"
+fi
+AC_SUBST(XENO_CORE_LDADD)
+
+AC_SUBST(DOXYGEN_SHOW_INCLUDE_FILES)
+AC_SUBST(DOXYGEN_HAVE_DOT)
+AC_SUBST(DOXYGEN)
+AC_SUBST(LATEX_BATCHMODE)
+AC_SUBST(LATEX_MODE)
+
+AC_SUBST(ASCIIDOC)
+AC_SUBST(A2X)
+AC_SUBST(W3M)
+
+AC_SUBST(XENO_TARGET_CORE)
+AC_SUBST(XENO_TARGET_ARCH)
+AC_SUBST(XENO_BUILD_STRING)
+AC_SUBST(XENO_HOST_STRING)
+AC_SUBST(XENO_COBALT_CFLAGS)
+AC_SUBST(XENO_LIB_LDFLAGS)
+AC_SUBST(XENO_USER_CFLAGS)
+AC_SUBST(XENO_USER_CFLAGS_STDLIB)
+AC_SUBST(XENO_USER_LDADD)
+AC_SUBST(XENO_USER_APP_CFLAGS)
+AC_SUBST(XENO_USER_APP_LDFLAGS)
+AC_SUBST(XENO_FUSE_CFLAGS)
+AC_SUBST(XENO_TEST_DIR)
+AC_SUBST(XENO_DEMO_DIR)
+AC_SUBST(XENO_BUILD_COMPILER)
+AC_SUBST(XENO_BUILD_ARGS)
+AC_SUBST(CORE)
+
+AC_CONFIG_FILES([ \
+	Makefile \
+	config/Makefile \
+	scripts/Makefile \
+	scripts/xeno-config:scripts/xeno-config-$rtcore_type.in \
+	scripts/xeno \
+	lib/Makefile \
+	lib/boilerplate/Makefile \
+	lib/boilerplate/init/Makefile \
+	lib/cobalt/Makefile \
+	lib/cobalt/arch/Makefile \
+	lib/cobalt/arch/arm/Makefile \
+	lib/cobalt/arch/arm/include/Makefile \
+	lib/cobalt/arch/arm/include/asm/Makefile \
+	lib/cobalt/arch/arm/include/asm/xenomai/Makefile \
+	lib/cobalt/arch/arm64/Makefile \
+	lib/cobalt/arch/arm64/include/Makefile \
+	lib/cobalt/arch/arm64/include/asm/Makefile \
+	lib/cobalt/arch/arm64/include/asm/xenomai/Makefile \
+	lib/cobalt/arch/powerpc/Makefile \
+	lib/cobalt/arch/powerpc/include/Makefile \
+	lib/cobalt/arch/powerpc/include/asm/Makefile \
+	lib/cobalt/arch/powerpc/include/asm/xenomai/Makefile \
+	lib/cobalt/arch/x86/Makefile \
+	lib/cobalt/arch/x86/include/Makefile \
+	lib/cobalt/arch/x86/include/asm/Makefile \
+	lib/cobalt/arch/x86/include/asm/xenomai/Makefile \
+	lib/mercury/Makefile \
+	lib/copperplate/Makefile \
+	lib/copperplate/regd/Makefile \
+	lib/alchemy/Makefile \
+	lib/vxworks/Makefile \
+	lib/psos/Makefile \
+	lib/analogy/Makefile \
+	lib/smokey/Makefile \
+	lib/trank/Makefile \
+	testsuite/Makefile \
+	testsuite/latency/Makefile \
+	testsuite/switchtest/Makefile \
+	testsuite/gpiotest/Makefile \
+	testsuite/gpiobench/Makefile \
+	testsuite/spitest/Makefile \
+
testsuite/smokey/Makefile \ + testsuite/smokey/arith/Makefile \ + testsuite/smokey/dlopen/Makefile \ + testsuite/smokey/sched-quota/Makefile \ + testsuite/smokey/sched-tp/Makefile \ + testsuite/smokey/setsched/Makefile \ + testsuite/smokey/rtdm/Makefile \ + testsuite/smokey/vdso-access/Makefile \ + testsuite/smokey/posix-cond/Makefile \ + testsuite/smokey/posix-mutex/Makefile \ + testsuite/smokey/posix-clock/Makefile \ + testsuite/smokey/posix-fork/Makefile \ + testsuite/smokey/posix-select/Makefile \ + testsuite/smokey/xddp/Makefile \ + testsuite/smokey/iddp/Makefile \ + testsuite/smokey/bufp/Makefile \ + testsuite/smokey/sigdebug/Makefile \ + testsuite/smokey/timerfd/Makefile \ + testsuite/smokey/tsc/Makefile \ + testsuite/smokey/leaks/Makefile \ + testsuite/smokey/memcheck/Makefile \ + testsuite/smokey/memory-coreheap/Makefile \ + testsuite/smokey/memory-heapmem/Makefile \ + testsuite/smokey/memory-tlsf/Makefile \ + testsuite/smokey/memory-pshared/Makefile \ + testsuite/smokey/fpu-stress/Makefile \ + testsuite/smokey/net_udp/Makefile \ + testsuite/smokey/net_packet_dgram/Makefile \ + testsuite/smokey/net_packet_raw/Makefile \ + testsuite/smokey/net_common/Makefile \ + testsuite/smokey/cpu-affinity/Makefile \ + testsuite/smokey/gdb/Makefile \ + testsuite/smokey/y2038/Makefile \ + testsuite/clocktest/Makefile \ + testsuite/xeno-test/Makefile \ + utils/Makefile \ + utils/hdb/Makefile \ + utils/can/Makefile \ + utils/analogy/Makefile \ + utils/ps/Makefile \ + utils/slackspot/Makefile \ + utils/corectl/Makefile \ + utils/autotune/Makefile \ + utils/net/rtnet \ + utils/net/rtnet.conf \ + utils/net/Makefile \ + utils/chkkconf/Makefile \ + demo/Makefile \ + demo/posix/Makefile \ + demo/posix/cyclictest/Makefile \ + demo/posix/cobalt/Makefile \ + demo/alchemy/Makefile \ + demo/alchemy/cobalt/Makefile \ + include/Makefile \ + include/cobalt/uapi/Makefile \ + include/cobalt/uapi/asm-generic/Makefile \ + include/cobalt/uapi/kernel/Makefile \ + include/cobalt/Makefile \ + include/cobalt/sys/Makefile \ + include/cobalt/kernel/Makefile \ + include/cobalt/kernel/rtdm/Makefile \ + include/cobalt/kernel/rtdm/analogy/Makefile \ + include/cobalt/boilerplate/Makefile \ + include/rtdm/Makefile \ + include/rtdm/uapi/Makefile \ + include/mercury/Makefile \ + include/mercury/boilerplate/Makefile \ + include/boilerplate/Makefile \ + include/copperplate/Makefile \ + include/alchemy/Makefile \ + include/vxworks/Makefile \ + include/psos/Makefile \ + include/smokey/Makefile \ + include/trank/Makefile \ + include/trank/posix/Makefile \ + include/trank/native/Makefile \ + include/trank/rtdm/Makefile \ + include/xenomai/Makefile \ + doc/Makefile \ + doc/doxygen/Makefile \ + doc/doxygen/xeno3prm-common.conf \ + doc/doxygen/xeno3prm-html.conf \ + doc/doxygen/xeno3prm-latex.conf \ + doc/gitdoc/Makefile \ + doc/asciidoc/Makefile \ + ]) + +AC_OUTPUT() diff --git a/kernel/xenomai-v3.2.4/demo/Makefile.am b/kernel/xenomai-v3.2.4/demo/Makefile.am new file mode 100644 index 0000000..fe5107c --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/Makefile.am @@ -0,0 +1,2 @@ + +SUBDIRS = posix alchemy diff --git a/kernel/xenomai-v3.2.4/demo/alchemy/Makefile.am b/kernel/xenomai-v3.2.4/demo/alchemy/Makefile.am new file mode 100644 index 0000000..fed5c1c --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/alchemy/Makefile.am @@ -0,0 +1,32 @@ +demodir = @XENO_DEMO_DIR@ + +demo_PROGRAMS = altency + +if XENO_COBALT +SUBDIRS = cobalt +endif + +cppflags = \ + $(XENO_USER_CFLAGS) \ + -I$(top_srcdir)/include + +ldadd = \ + 
../../lib/alchemy/libalchemy@CORE@.la \ + ../../lib/copperplate/libcopperplate@CORE@.la \ + @XENO_CORE_LDADD@ \ + @XENO_USER_LDADD@ \ + -lpthread -lrt -lm + +altency_SOURCES = altency.c +altency_CPPFLAGS = $(cppflags) +altency_LDADD = $(ldadd) -lpthread -lrt -lm +altency_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS) + +# This demo mixes the Alchemy and Xenomai-enabled POSIX APIs over +# Cobalt, so we ask for both sets of flags. --posix along with +# --ldflags will get us the linker switches causing the symbol +# wrapping for open/read/write/ioctl and friends. Over Mercury, +# --posix is ignored since it's implicitly enabled. +# +# CFLAGS = $(shell xeno-config --alchemy --posix --cflags) +# LDFLAGS = $(shell xeno-config --alchemy --posix --ldflags) diff --git a/kernel/xenomai-v3.2.4/demo/alchemy/altency.c b/kernel/xenomai-v3.2.4/demo/alchemy/altency.c new file mode 100644 index 0000000..e7c31d7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/alchemy/altency.c @@ -0,0 +1,699 @@ +/* + * The alternate latency measurement program based on the Alchemy API. + * + * Licensed under the LGPL v2.1. + */ +#include <stdlib.h> +#include <math.h> +#include <stdio.h> +#include <string.h> +#include <errno.h> +#include <sched.h> +#include <time.h> +#include <unistd.h> +#include <signal.h> +#include <alchemy/task.h> +#include <alchemy/timer.h> +#include <alchemy/sem.h> +#include <rtdm/testing.h> +#include <boilerplate/trace.h> +#include <xenomai/init.h> + +RT_TASK latency_task, display_task; + +RT_SEM display_sem; + +#define TEN_MILLIONS 10000000 + +unsigned max_relaxed; +int32_t minjitter, maxjitter, avgjitter; +int32_t gminjitter = TEN_MILLIONS, gmaxjitter = -TEN_MILLIONS, goverrun = 0; +int64_t gavgjitter = 0; + +long long period_ns = 0; +int test_duration = 0; /* sec of testing, via -T <sec>, 0 is inf */ +int data_lines = 21; /* data lines per header line, -l <lines> to change */ +int quiet = 0; /* suppress printing of RTH, RTD lines when -T given */ +int devfd = -1; +int freeze_max = 0; +int priority = T_HIPRIO; +int stop_upon_switch = 0; +sig_atomic_t sampling_relaxed = 0; + +#define USER_TASK 0 +#define KERNEL_TASK 1 +#define TIMER_HANDLER 2 + +int test_mode = USER_TASK; +const char *test_mode_names[] = { + "periodic user-mode task", + "in-kernel periodic task", + "in-kernel timer handler" +}; + +time_t test_start, test_end; /* report test duration */ +int test_loops = 0; /* outer loop count */ + +/* Warmup time: in order to avoid spurious cache effects on low-end machines. */ +#define WARMUP_TIME 1 +#define HISTOGRAM_CELLS 300 +int histogram_size = HISTOGRAM_CELLS; +int32_t *histogram_avg = NULL, *histogram_max = NULL, *histogram_min = NULL; + +char *do_gnuplot = NULL; +int do_histogram = 0, do_stats = 0, finished = 0; +int bucketsize = 1000; /* default = 1000ns, -B <size> to override */ + +#define need_histo() (do_histogram || do_stats || do_gnuplot) + +static inline void add_histogram(int32_t *histogram, int32_t addval) +{ + /* bucketsize steps */ + int inabs = (addval >= 0 ? addval : -addval) / bucketsize; + histogram[inabs < histogram_size ?
inabs : histogram_size - 1]++; +} + +static void latency(void *cookie) +{ + RTIME expected_ns, start_ns, fault_threshold; + unsigned int old_relaxed = 0, new_relaxed; + int ret, count, nsamples, warmup = 1; + int32_t minj, maxj, dt, overrun, sumj; + unsigned long ov; + + fault_threshold = CONFIG_XENO_DEFAULT_PERIOD; + nsamples = (long long)ONE_BILLION / period_ns; + start_ns = rt_timer_read() + 1000000; /* 1ms from now */ + expected_ns = start_ns; + + ret = rt_task_set_periodic(NULL, start_ns, period_ns); + if (ret) { + fprintf(stderr, "altency: failed to set periodic, code %d\n", + ret); + return; + } + + for (;;) { + minj = TEN_MILLIONS; + maxj = -TEN_MILLIONS; + overrun = 0; + test_loops++; + + for (count = sumj = 0; count < nsamples; count++) { + ret = rt_task_wait_period(&ov); + dt = (int32_t)(rt_timer_read() - expected_ns); + new_relaxed = sampling_relaxed; + if (dt > maxj) { + if (new_relaxed != old_relaxed + && dt > fault_threshold) + max_relaxed += + new_relaxed - old_relaxed; + maxj = dt; + } + old_relaxed = new_relaxed; + if (dt < minj) + minj = dt; + sumj += dt; + + if (ret) { + if (ret != -ETIMEDOUT) { + fprintf(stderr, + "altency: wait period failed, code %d\n", + ret); + exit(EXIT_FAILURE); /* Timer stopped. */ + } + overrun += ov; + expected_ns += period_ns * ov; + } + expected_ns += period_ns; + + if (freeze_max && (dt > gmaxjitter) + && !(finished || warmup)) { + xntrace_user_freeze(dt, 0); + gmaxjitter = dt; + } + + if (!(finished || warmup) && need_histo()) + add_histogram(histogram_avg, dt); + } + + if (!warmup) { + if (!finished && need_histo()) { + add_histogram(histogram_max, maxj); + add_histogram(histogram_min, minj); + } + + minjitter = minj; + if (minj < gminjitter) + gminjitter = minj; + + maxjitter = maxj; + if (maxj > gmaxjitter) + gmaxjitter = maxj; + + avgjitter = sumj / nsamples; + gavgjitter += avgjitter; + goverrun += overrun; + rt_sem_v(&display_sem); + } + + if (warmup && test_loops == WARMUP_TIME) { + test_loops = 0; + warmup = 0; + } + } +} + +static void display(void *cookie) +{ + char sem_name[16]; + int ret, n = 0; + time_t start; + + if (test_mode == USER_TASK) { + snprintf(sem_name, sizeof(sem_name), "dispsem-%d", getpid()); + ret = rt_sem_create(&display_sem, sem_name, 0, S_FIFO); + if (ret) { + fprintf(stderr, + "altency: cannot create semaphore: %s\n", + strerror(-ret)); + return; + } + + } else { + struct rttst_tmbench_config config; + + if (test_mode == KERNEL_TASK) + config.mode = RTTST_TMBENCH_TASK; + else + config.mode = RTTST_TMBENCH_HANDLER; + + config.period = period_ns; + config.priority = priority; + config.warmup_loops = WARMUP_TIME; + config.histogram_size = need_histo() ? 
histogram_size : 0; + config.histogram_bucketsize = bucketsize; + config.freeze_max = freeze_max; + + ret = ioctl(devfd, RTTST_RTIOC_TMBENCH_START, &config); + if (ret) { + fprintf(stderr, + "altency: failed to start in-kernel timer benchmark, code %d\n", + ret); + return; + } + } + + time(&start); + + if (WARMUP_TIME) + printf("warming up...\n"); + + if (quiet) + fprintf(stderr, "running quietly for %d seconds\n", + test_duration); + + for (;;) { + int32_t minj, gminj, maxj, gmaxj, avgj; + + if (test_mode == USER_TASK) { + ret = rt_sem_p(&display_sem, TM_INFINITE); + if (ret) { + if (ret != -EIDRM) + fprintf(stderr, + "altency: failed to pend on semaphore, code %d\n", + ret); + + return; + } + + minj = minjitter; + gminj = gminjitter; + avgj = avgjitter; + maxj = maxjitter; + gmaxj = gmaxjitter; + + } else { + struct rttst_interm_bench_res result; + + ret = ioctl(devfd, RTTST_RTIOC_INTERM_BENCH_RES, &result); + if (ret) { + if (ret != -EIDRM) + fprintf(stderr, + "altency: failed to call RTTST_RTIOC_INTERM_BENCH_RES, %m\n"); + + return; + } + + minj = result.last.min; + gminj = result.overall.min; + avgj = result.last.avg; + maxj = result.last.max; + gmaxj = result.overall.max; + goverrun = result.overall.overruns; + } + + if (!quiet) { + if (data_lines && (n++ % data_lines) == 0) { + time_t now, dt; + time(&now); + dt = now - start - WARMUP_TIME; + printf + ("RTT| %.2ld:%.2ld:%.2ld (%s, %Ld us period, " + "priority %d)\n", dt / 3600, + (dt / 60) % 60, dt % 60, + test_mode_names[test_mode], + period_ns / 1000, priority); + printf("RTH|%11s|%11s|%11s|%8s|%6s|%11s|%11s\n", + "----lat min", "----lat avg", + "----lat max", "-overrun", "---msw", + "---lat best", "--lat worst"); + } + printf("RTD|%11.3f|%11.3f|%11.3f|%8d|%6u|%11.3f|%11.3f\n", + (double)minj / 1000, + (double)avgj / 1000, + (double)maxj / 1000, + goverrun, + max_relaxed, + (double)gminj / 1000, (double)gmaxj / 1000); + } + } +} + +static double dump_histogram(int32_t *histogram, char *kind) +{ + int n, total_hits = 0; + double avg = 0; /* used to sum hits 1st */ + + if (do_histogram) + printf("---|--param|----range-|--samples\n"); + + for (n = 0; n < histogram_size; n++) { + int32_t hits = histogram[n]; + + if (hits) { + total_hits += hits; + avg += n * hits; + if (do_histogram) + printf("HSD| %s| %3d -%3d | %8d\n", + kind, n, n + 1, hits); + } + } + + avg /= total_hits; /* compute avg, reuse variable */ + + return avg; +} + +static void dump_histo_gnuplot(int32_t *histogram) +{ + unsigned start, stop; + FILE *f; + int n; + + f = fopen(do_gnuplot, "w"); + if (!f) + return; + + for (n = 0; n < histogram_size && histogram[n] == 0L; n++) + ; + start = n; + + for (n = histogram_size - 1; n >= 0 && histogram[n] == 0L; n--) + ; + stop = n; + + fprintf(f, "%g 1\n", start * bucketsize / 1000.0); + for (n = start; n <= stop; n++) + fprintf(f, "%g %d\n", + (n + 0.5) * bucketsize / 1000.0, histogram[n] + 1); + fprintf(f, "%g 1\n", (stop + 1) * bucketsize / 1000.0); + + fclose(f); +} + +static void dump_stats(int32_t *histogram, char *kind, double avg) +{ + int n, total_hits = 0; + double variance = 0; + + for (n = 0; n < histogram_size; n++) { + int32_t hits = histogram[n]; + + if (hits) { + total_hits += hits; + variance += hits * (n - avg) * (n - avg); + } + } + + /* compute std-deviation (unbiased form) */ + if (total_hits > 1) { + variance /= total_hits - 1; + variance = sqrt(variance); + } else + variance = 0; + + printf("HSS| %s| %9d| %10.3f| %10.3f\n", + kind, total_hits, avg, variance); +} + +static void dump_hist_stats(void) +{ + 
double minavg, maxavg, avgavg; + + /* max is last, where it's visible w/o scrolling */ + minavg = dump_histogram(histogram_min, "min"); + avgavg = dump_histogram(histogram_avg, "avg"); + maxavg = dump_histogram(histogram_max, "max"); + + printf("HSH|--param|--samples-|--average--|---stddev--\n"); + + dump_stats(histogram_min, "min", minavg); + dump_stats(histogram_avg, "avg", avgavg); + dump_stats(histogram_max, "max", maxavg); + + if (do_gnuplot) + dump_histo_gnuplot(histogram_avg); +} + +static void cleanup(void) +{ + time_t actual_duration; + int32_t gmaxj, gminj, gavgj; + + if (test_mode == USER_TASK) { + rt_sem_delete(&display_sem); + + gavgjitter /= (test_loops > 1 ? test_loops : 2) - 1; + + gminj = gminjitter; + gmaxj = gmaxjitter; + gavgj = gavgjitter; + } else { + struct rttst_overall_bench_res overall; + + overall.histogram_min = histogram_min; + overall.histogram_max = histogram_max; + overall.histogram_avg = histogram_avg; + ioctl(devfd, RTTST_RTIOC_TMBENCH_STOP, &overall); + gminj = overall.result.min; + gmaxj = overall.result.max; + gavgj = overall.result.avg; + goverrun = overall.result.overruns; + } + + if (devfd >= 0) + close(devfd); + + if (need_histo()) + dump_hist_stats(); + + time(&test_end); + actual_duration = test_end - test_start - WARMUP_TIME; + if (!test_duration) + test_duration = actual_duration; + + printf + ("---|-----------|-----------|-----------|--------|------|-------------------------\n" + "RTS|%11.3f|%11.3f|%11.3f|%8d|%6u| %.2ld:%.2ld:%.2ld/%.2d:%.2d:%.2d\n", + (double)gminj / 1000, (double)gavgj / 1000, (double)gmaxj / 1000, + goverrun, max_relaxed, actual_duration / 3600, (actual_duration / 60) % 60, + actual_duration % 60, test_duration / 3600, + (test_duration / 60) % 60, test_duration % 60); + if (max_relaxed > 0) + printf( +"Warning!
some latency peaks may have been due to involuntary mode switches.\n" +"Please contact xenomai@xenomai.org\n"); + + if (histogram_avg) + free(histogram_avg); + if (histogram_max) + free(histogram_max); + if (histogram_min) + free(histogram_min); + + exit(0); +} + +static void faulthand(int sig) +{ + xntrace_user_freeze(0, 1); + signal(sig, SIG_DFL); + __STD(kill(getpid(), sig)); +} + +#ifdef CONFIG_XENO_COBALT + +static const char *reason_str[] = { + [SIGDEBUG_UNDEFINED] = "received SIGDEBUG for unknown reason", + [SIGDEBUG_MIGRATE_SIGNAL] = "received signal", + [SIGDEBUG_MIGRATE_SYSCALL] = "invoked syscall", + [SIGDEBUG_MIGRATE_FAULT] = "triggered fault", + [SIGDEBUG_MIGRATE_PRIOINV] = "affected by priority inversion", + [SIGDEBUG_NOMLOCK] = "process memory not locked", + [SIGDEBUG_WATCHDOG] = "watchdog triggered (period too short?)", + [SIGDEBUG_LOCK_BREAK] = "scheduler lock break", +}; + +static void sigdebug(int sig, siginfo_t *si, void *context) +{ + const char fmt[] = "%s, aborting.\n" + "(enabling CONFIG_XENO_OPT_DEBUG_TRACE_RELAX may help)\n"; + unsigned int reason = sigdebug_reason(si); + int n __attribute__ ((unused)); + static char buffer[256]; + + if (!stop_upon_switch) { + ++sampling_relaxed; + return; + } + + if (reason > SIGDEBUG_WATCHDOG) + reason = SIGDEBUG_UNDEFINED; + + switch(reason) { + case SIGDEBUG_UNDEFINED: + case SIGDEBUG_NOMLOCK: + case SIGDEBUG_WATCHDOG: + n = snprintf(buffer, sizeof(buffer), "altency: %s\n", + reason_str[reason]); + n = write(STDERR_FILENO, buffer, n); + exit(EXIT_FAILURE); + } + + n = snprintf(buffer, sizeof(buffer), fmt, reason_str[reason]); + n = write(STDERR_FILENO, buffer, n); + signal(sig, SIG_DFL); + __STD(kill(getpid(), sig)); +} + +#endif /* CONFIG_XENO_COBALT */ + +void application_usage(void) +{ + fprintf(stderr, "usage: %s [options]:\n", get_program_name()); + fprintf(stderr, + "-h print histograms of min, avg, max latencies\n" + "-g <file> dump histogram to <file> in gnuplot format\n" + "-s print statistics of min, avg, max latencies\n" + "-H <histogram-size> default = 300, increase if your last bucket is full\n" + "-B <bucket-size> default = 1000ns, decrease for more resolution\n" + "-p <period_us> sampling period\n" + "-l <data-lines per header> default=21, 0 to suppress headers\n" + "-T <test_duration_seconds> default=0, so ^C to end\n" + "-q suppresses RTD, RTH lines if -T is used\n" + "-D <testing_device_no> number of testing device, default=0\n" + "-t <test_mode> 0=user task (default), 1=kernel task, 2=timer IRQ\n" + "-f freeze trace for each new max latency\n" + "-c <cpu> pin measuring task down to given CPU\n" + "-P <priority> task priority (test mode 0 and 1 only)\n" + "-b break upon mode switch\n" + ); +} + +int main(int argc, char *const *argv) +{ + struct sigaction sa __attribute__((unused)); + int c, ret, sig, cpu = 0; + char task_name[32]; + cpu_set_t cpus; + sigset_t mask; + + while ((c = getopt(argc, argv, "g:hp:l:T:qH:B:sD:t:fc:P:b")) != EOF) + switch (c) { + case 'g': + do_gnuplot = strdup(optarg); + break; + case 'h': + do_histogram = 1; + break; + case 's': + do_stats = 1; + break; + case 'H': + histogram_size = atoi(optarg); + break; + case 'B': + bucketsize = atoi(optarg); + break; + case 'p': + period_ns = atoi(optarg) * 1000LL; + if (period_ns > ONE_BILLION) { + fprintf(stderr, "altency: invalid period (> 1s).\n"); + exit(2); + } + break; + case 'l': + data_lines = atoi(optarg); + break; + case 'T': + test_duration = atoi(optarg); + alarm(test_duration + WARMUP_TIME); + break; + case 'q': + quiet = 1; + break;
+ case 't': + test_mode = atoi(optarg); + break; + case 'f': + freeze_max = 1; + break; + case 'c': + cpu = atoi(optarg); + if (cpu < 0 || cpu >= CPU_SETSIZE) { + fprintf(stderr, "altency: invalid CPU #%d\n", cpu); + return 1; + } + break; + case 'P': + priority = atoi(optarg); + break; + case 'b': + stop_upon_switch = 1; + break; + default: + xenomai_usage(); + exit(2); + } + + if (!test_duration && quiet) { + fprintf(stderr, + "altency: -q only works if -T has been given.\n"); + quiet = 0; + } + + if ((test_mode < USER_TASK) || (test_mode > TIMER_HANDLER)) { + fprintf(stderr, "altency: invalid test mode.\n"); + exit(2); + } + + time(&test_start); + + histogram_avg = calloc(histogram_size, sizeof(int32_t)); + histogram_max = calloc(histogram_size, sizeof(int32_t)); + histogram_min = calloc(histogram_size, sizeof(int32_t)); + + if (!(histogram_avg && histogram_max && histogram_min)) + cleanup(); + + if (period_ns == 0) + period_ns = CONFIG_XENO_DEFAULT_PERIOD; /* ns */ + + if (priority <= T_LOPRIO) + priority = T_LOPRIO + 1; + else if (priority > T_HIPRIO) + priority = T_HIPRIO; + + sigemptyset(&mask); + sigaddset(&mask, SIGINT); + sigaddset(&mask, SIGTERM); + sigaddset(&mask, SIGHUP); + sigaddset(&mask, SIGALRM); + pthread_sigmask(SIG_BLOCK, &mask, NULL); + +#ifdef CONFIG_XENO_COBALT + sigemptyset(&sa.sa_mask); + sa.sa_sigaction = sigdebug; + sa.sa_flags = SA_SIGINFO; + sigaction(SIGDEBUG, &sa, NULL); +#endif + + if (freeze_max) { + /* If something goes wrong, we want to freeze the current + trace path to help debugging. */ + signal(SIGSEGV, faulthand); + signal(SIGBUS, faulthand); + } + + setlinebuf(stdout); + + printf("== Sampling period: %Ld us\n" + "== Test mode: %s\n" + "== All results in microseconds\n", + period_ns / 1000, test_mode_names[test_mode]); + + if (test_mode != USER_TASK) { + devfd = open("/dev/rtdm/timerbench", O_RDWR); + if (devfd < 0) { + fprintf(stderr, + "altency: failed to open timerbench device, %m\n" + "(modprobe xeno_timerbench?)\n"); + return 0; + } + } + + snprintf(task_name, sizeof(task_name), "alt-display-%d", getpid()); + ret = rt_task_create(&display_task, task_name, 0, 0, 0); + if (ret) { + fprintf(stderr, + "altency: failed to create display task, code %d\n", + ret); + return 0; + } + + ret = rt_task_start(&display_task, &display, NULL); + if (ret) { + fprintf(stderr, + "altency: failed to start display task, code %d\n", + ret); + return 0; + } + + if (test_mode == USER_TASK) { + snprintf(task_name, sizeof(task_name), "alt-sampling-%d", getpid()); + ret = rt_task_create(&latency_task, task_name, 0, priority, + T_WARNSW); + if (ret) { + fprintf(stderr, + "altency: failed to create sampling task, code %d\n", + ret); + return 0; + } + + CPU_ZERO(&cpus); + CPU_SET(cpu, &cpus); + ret = rt_task_set_affinity(&latency_task, &cpus); + if (ret) { + fprintf(stderr, + "altency: failed to set CPU affinity, code %d\n", + ret); + return 0; + } + + ret = rt_task_start(&latency_task, latency, NULL); + if (ret) { + fprintf(stderr, + "altency: failed to start sampling task, code %d\n", + ret); + return 0; + } + } + + __STD(sigwait(&mask, &sig)); + finished = 1; + + cleanup(); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/Makefile.am b/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/Makefile.am new file mode 100644 index 0000000..ae2edd7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/Makefile.am @@ -0,0 +1,22 @@ +demodir = @XENO_DEMO_DIR@ + +CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC) + +demo_PROGRAMS = cross-link + +cppflags = \ + 
$(XENO_USER_CFLAGS) \ + -I$(top_srcdir)/include + +ldadd = \ + @XENO_AUTOINIT_LDFLAGS@ \ + $(XENO_POSIX_WRAPPERS) \ + ../../../lib/alchemy/libalchemy@CORE@.la \ + ../../../lib/copperplate/libcopperplate@CORE@.la \ + @XENO_CORE_LDADD@ \ + @XENO_USER_LDADD@ \ + -lrt -lpthread -lm + +cross_link_SOURCES = cross-link.c +cross_link_CPPFLAGS = $(cppflags) +cross_link_LDADD = $(ldadd) diff --git a/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/cross-link.c b/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/cross-link.c new file mode 100644 index 0000000..9b1fb34 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/cross-link.c @@ -0,0 +1,328 @@ +/* + * cross-link.c + * + * Userspace test program (Xenomai alchemy skin) for RTDM-based UART drivers + * Copyright 2005 by Joerg Langenberg <joergel75@gmx.net> + * + * Updates by Jan Kiszka <jan.kiszka@web.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include <stdio.h> +#include <signal.h> +#include <unistd.h> +#include <sys/mman.h> +#include <alchemy/task.h> +#include <alchemy/timer.h> +#include <rtdm/serial.h> + +#define MAIN_PREFIX "main : " +#define WTASK_PREFIX "write_task: " +#define RTASK_PREFIX "read_task: " + +#define WRITE_FILE "/dev/rtdm/rtser0" +#define READ_FILE "/dev/rtdm/rtser1" + +int read_fd = -1; +int write_fd = -1; + +#define STATE_FILE_OPENED 1 +#define STATE_TASK_CREATED 2 + +unsigned int read_state = 0; +unsigned int write_state = 0; + +/* --s-ms-us-ns */ +RTIME write_task_period_ns = 100000000llu; +RT_TASK write_task; +RT_TASK read_task; + +static const struct rtser_config read_config = { + .config_mask = 0xFFFF, + .baud_rate = 115200, + .parity = RTSER_DEF_PARITY, + .data_bits = RTSER_DEF_BITS, + .stop_bits = RTSER_DEF_STOPB, + .handshake = RTSER_DEF_HAND, + .fifo_depth = RTSER_DEF_FIFO_DEPTH, + .rx_timeout = RTSER_DEF_TIMEOUT, + .tx_timeout = RTSER_DEF_TIMEOUT, + .event_timeout = 1000000000, /* 1 s */ + .timestamp_history = RTSER_RX_TIMESTAMP_HISTORY, + .event_mask = RTSER_EVENT_RXPEND, +}; + +static const struct rtser_config write_config = { + .config_mask = RTSER_SET_BAUD | RTSER_SET_TIMESTAMP_HISTORY, + .baud_rate = 115200, + .timestamp_history = RTSER_DEF_TIMESTAMP_HISTORY, + /* the rest implicitly remains default */ +}; + +static int close_file( int fd, char *name) +{ + int err, i=0; + + do { + i++; + err = close(fd); + switch (err) { + case -EAGAIN: + printf(MAIN_PREFIX "%s -> EAGAIN (%d times)\n", + name, i); + rt_task_sleep(50000); /* wait 50us */ + break; + case 0: + printf(MAIN_PREFIX "%s -> closed\n", name); + break; + default: + printf(MAIN_PREFIX "%s -> %s\n", name, + strerror(errno)); + break; + } + } while (err == -EAGAIN && i < 10); + + return err; +} + +static void cleanup_all(void) +{ + if (read_state & STATE_FILE_OPENED) { + close_file(read_fd, READ_FILE" (read)"); + read_state &= ~STATE_FILE_OPENED; + } + + if (write_state & STATE_FILE_OPENED) { + 
close_file(write_fd, WRITE_FILE " (write)"); + write_state &= ~STATE_FILE_OPENED; + } + + if (write_state & STATE_TASK_CREATED) { + printf(MAIN_PREFIX "delete write_task\n"); + rt_task_delete(&write_task); + write_state &= ~STATE_TASK_CREATED; + } + + if (read_state & STATE_TASK_CREATED) { + printf(MAIN_PREFIX "delete read_task\n"); + rt_task_delete(&read_task); + read_state &= ~STATE_TASK_CREATED; + } +} + +static void catch_signal(int sig) +{ + cleanup_all(); + printf(MAIN_PREFIX "exit\n"); + return; +} + +static void write_task_proc(void *arg) +{ + int err; + RTIME write_time; + ssize_t sz = sizeof(RTIME); + int written = 0; + + err = rt_task_set_periodic(NULL, TM_NOW, + rt_timer_ns2ticks(write_task_period_ns)); + if (err) { + printf(WTASK_PREFIX "error on set periodic, %s\n", + strerror(-err)); + goto exit_write_task; + } + + while (1) { + err = rt_task_wait_period(NULL); + if (err) { + printf(WTASK_PREFIX + "error on rt_task_wait_period, %s\n", + strerror(-err)); + break; + } + + write_time = rt_timer_read(); + + written = write(write_fd, &write_time, sz); + if (written < 0 ) { + printf(WTASK_PREFIX "error on write, %s\n", + strerror(errno)); + break; + } else if (written != sz) { + printf(WTASK_PREFIX "only %d / %zd byte transmitted\n", + written, sz); + break; + } + } + + exit_write_task: + if ((write_state & STATE_FILE_OPENED) && + close_file(write_fd, WRITE_FILE " (write)") == 0) + write_state &= ~STATE_FILE_OPENED; + + printf(WTASK_PREFIX "exit\n"); +} + +static void read_task_proc(void *arg) +{ + int err; + int nr = 0; + RTIME read_time = 0; + RTIME write_time = 0; + RTIME irq_time = 0; + ssize_t sz = sizeof(RTIME); + int rd = 0; + struct rtser_event rx_event; + + printf(" Nr | write->irq | irq->read | write->read |\n"); + printf("-----------------------------------------------------------\n"); + + /* + * We are in secondary mode now due to printf, the next + * blocking Xenomai or driver call will switch us back + * (here: RTSER_RTIOC_WAIT_EVENT). 
+ */ + + while (1) { + /* waiting for event */ + err = ioctl(read_fd, RTSER_RTIOC_WAIT_EVENT, &rx_event); + if (err) { + printf(RTASK_PREFIX + "error on RTSER_RTIOC_WAIT_EVENT, %s\n", + strerror(errno)); + if (err == -ETIMEDOUT) + continue; + break; + } + + irq_time = rx_event.rxpend_timestamp; + rd = read(read_fd, &write_time, sz); + if (rd == sz) { + read_time = rt_timer_read(); + printf("%3d |%16llu |%16llu |%16llu\n", nr, + irq_time - write_time, + read_time - irq_time, + read_time - write_time); + nr++; + } else if (rd < 0 ) { + printf(RTASK_PREFIX "error on read, code %s\n", + strerror(errno)); + break; + } else { + printf(RTASK_PREFIX "only %d / %zd byte received \n", + rd, sz); + break; + } + } + + if ((read_state & STATE_FILE_OPENED) && + close_file(read_fd, READ_FILE " (read)") == 0) + read_state &= ~STATE_FILE_OPENED; + + printf(RTASK_PREFIX "exit\n"); +} + +int main(int argc, char* argv[]) +{ + int err = 0; + + signal(SIGTERM, catch_signal); + signal(SIGINT, catch_signal); + + /* open rtser0 */ + write_fd = open( WRITE_FILE, 0); + if (write_fd < 0) { + printf(MAIN_PREFIX "can't open %s (write), %s\n", WRITE_FILE, + strerror(errno)); + goto error; + } + write_state |= STATE_FILE_OPENED; + printf(MAIN_PREFIX "write-file opened\n"); + + /* writing write-config */ + err = ioctl(write_fd, RTSER_RTIOC_SET_CONFIG, &write_config); + if (err) { + printf(MAIN_PREFIX "error while RTSER_RTIOC_SET_CONFIG, %s\n", + strerror(errno)); + goto error; + } + printf(MAIN_PREFIX "write-config written\n"); + + /* open rtser1 */ + read_fd = open( READ_FILE, 0 ); + if (read_fd < 0) { + printf(MAIN_PREFIX "can't open %s (read), %s\n", READ_FILE, + strerror(errno)); + goto error; + } + read_state |= STATE_FILE_OPENED; + printf(MAIN_PREFIX "read-file opened\n"); + + /* writing read-config */ + err = ioctl(read_fd, RTSER_RTIOC_SET_CONFIG, &read_config); + if (err) { + printf(MAIN_PREFIX "error while ioctl, %s\n", + strerror(errno)); + goto error; + } + printf(MAIN_PREFIX "read-config written\n"); + + /* create write_task */ + err = rt_task_create(&write_task, "write_task", 0, 50, 0); + if (err) { + printf(MAIN_PREFIX "failed to create write_task, %s\n", + strerror(-err)); + goto error; + } + write_state |= STATE_TASK_CREATED; + printf(MAIN_PREFIX "write-task created\n"); + + /* create read_task */ + err = rt_task_create(&read_task, "read_task", 0, 51, 0); + if (err) { + printf(MAIN_PREFIX "failed to create read_task, %s\n", + strerror(-err)); + goto error; + } + read_state |= STATE_TASK_CREATED; + printf(MAIN_PREFIX "read-task created\n"); + + /* start write_task */ + printf(MAIN_PREFIX "starting write-task\n"); + err = rt_task_start(&write_task, &write_task_proc, NULL); + if (err) { + printf(MAIN_PREFIX "failed to start write_task, %s\n", + strerror(-err)); + goto error; + } + + /* start read_task */ + printf(MAIN_PREFIX "starting read-task\n"); + err = rt_task_start(&read_task,&read_task_proc,NULL); + if (err) { + printf(MAIN_PREFIX "failed to start read_task, %s\n", + strerror(-err)); + goto error; + } + + for (;;) + pause(); + + return 0; + + error: + cleanup_all(); + return err; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/Makefile.am b/kernel/xenomai-v3.2.4/demo/posix/Makefile.am new file mode 100644 index 0000000..0dd66a5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/Makefile.am @@ -0,0 +1,8 @@ + +SUBDIRS = cyclictest + +if XENO_COBALT +SUBDIRS += cobalt +endif + +DIST_SUBDIRS = cyclictest cobalt diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/Makefile.am 
b/kernel/xenomai-v3.2.4/demo/posix/cobalt/Makefile.am new file mode 100644 index 0000000..2a22967 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/Makefile.am @@ -0,0 +1,76 @@ +demodir = @XENO_DEMO_DIR@ + +CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC) + +demo_PROGRAMS = \ + gpiopwm \ + bufp-label \ + bufp-readwrite \ + can_rtt \ + eth_p_all \ + iddp-label \ + iddp-sendrecv \ + xddp-echo \ + xddp-label \ + xddp-stream + +cppflags = \ + $(XENO_USER_CFLAGS) \ + -I$(top_srcdir)/include + +ldflags = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS) + +ldadd = \ + @XENO_CORE_LDADD@ \ + @XENO_USER_LDADD@ \ + -lpthread -lrt + +gpiopwm_SOURCES = gpiopwm.c +gpiopwm_CPPFLAGS = $(cppflags) -I$(top_srcdir)/include/rtdm/uapi +gpiopwm_LDFLAGS = $(ldflags) +gpiopwm_LDADD = $(ldadd) + +bufp_label_SOURCES = bufp-label.c +bufp_label_CPPFLAGS = $(cppflags) +bufp_label_LDFLAGS = $(ldflags) +bufp_label_LDADD = $(ldadd) + +bufp_readwrite_SOURCES = bufp-readwrite.c +bufp_readwrite_CPPFLAGS = $(cppflags) +bufp_readwrite_LDFLAGS = $(ldflags) +bufp_readwrite_LDADD = $(ldadd) + +can_rtt_SOURCES = can-rtt.c +can_rtt_CPPFLAGS = $(cppflags) +can_rtt_LDFLAGS = $(ldflags) +can_rtt_LDADD = $(ldadd) + +eth_p_all_SOURCES = eth_p_all.c +eth_p_all_CPPFLAGS = $(cppflags) +eth_p_all_LDFLAGS = $(ldflags) +eth_p_all_LDADD = $(ldadd) + +iddp_label_SOURCES = iddp-label.c +iddp_label_CPPFLAGS = $(cppflags) +iddp_label_LDFLAGS = $(ldflags) +iddp_label_LDADD = $(ldadd) + +iddp_sendrecv_SOURCES = iddp-sendrecv.c +iddp_sendrecv_CPPFLAGS = $(cppflags) +iddp_sendrecv_LDFLAGS = $(ldflags) +iddp_sendrecv_LDADD = $(ldadd) + +xddp_echo_SOURCES = xddp-echo.c +xddp_echo_CPPFLAGS = $(cppflags) +xddp_echo_LDFLAGS = $(ldflags) +xddp_echo_LDADD = $(ldadd) + +xddp_label_SOURCES = xddp-label.c +xddp_label_CPPFLAGS = $(cppflags) +xddp_label_LDFLAGS = $(ldflags) +xddp_label_LDADD = $(ldadd) + +xddp_stream_SOURCES = xddp-stream.c +xddp_stream_CPPFLAGS = $(cppflags) +xddp_stream_LDFLAGS = $(ldflags) +xddp_stream_LDADD = $(ldadd) diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-label.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-label.c new file mode 100644 index 0000000..1141c89 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-label.c @@ -0,0 +1,221 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * + * BUFP-based client/server demo, using the read(2)/write(2) + * system calls to exchange data over a socket. + * + * In this example, two sockets are created. A server thread (reader) + * is bound to a real-time port and receives a stream of bytes sent to + * this port from a client thread (writer). 
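+ * + * Unlike bufp-readwrite.c, which uses a fixed port number on both ends, + * this variant lets the BUFP driver auto-select the server port and + * pairs the client with the server through the BUFP_PORT_LABEL string + * instead.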
+ */ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <signal.h> +#include <string.h> +#include <pthread.h> +#include <errno.h> +#include <rtdm/ipc.h> + +pthread_t svtid, cltid; + +#define BUFP_PORT_LABEL "bufp-demo" + +static const char *msg[] = { + "Surfing With The Alien", + "Lords of Karma", + "Banana Mango", + "Psycho Monkey", + "Luminous Flesh Giants", + "Moroccan Sunset", + "Satch Boogie", + "Flying In A Blue Dream", + "Ride", + "Summer Song", + "Speed Of Light", + "Crystal Planet", + "Raspberry Jam Delta-V", + "Champagne?", + "Clouds Race Across The Sky", + "Engines Of Creation" +}; + +static void fail(const char *reason) +{ + perror(reason); + exit(EXIT_FAILURE); +} + +static void *server(void *arg) +{ + struct rtipc_port_label plabel; + struct sockaddr_ipc saddr; + char buf[128]; + size_t bufsz; + int ret, s; + + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP); + if (s < 0) + fail("socket"); + + /* + * Set a 16k buffer for the server endpoint. This + * configuration must be done prior to binding the socket to a + * port. + */ + bufsz = 16384; /* bytes */ + ret = setsockopt(s, SOL_BUFP, BUFP_BUFSZ, + &bufsz, sizeof(bufsz)); + if (ret) + fail("setsockopt"); + + /* + * Set a port label. This name will be registered when + * binding, in addition to the port number (if given). + */ + strcpy(plabel.label, BUFP_PORT_LABEL); + ret = setsockopt(s, SOL_BUFP, BUFP_LABEL, + &plabel, sizeof(plabel)); + if (ret) + fail("setsockopt"); + + /* + * Bind the socket to the port. Assign that port a label, so + * that peers may use descriptive information to locate + * it. Labeled ports will appear in the + * /proc/xenomai/registry/rtipc/bufp directory once the socket + * is bound. + * + * saddr.sipc_port specifies the port number to use. If -1 is + * passed, the BUFP driver will auto-select an idle port. + */ + saddr.sipc_family = AF_RTIPC; + saddr.sipc_port = -1; + ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr)); + if (ret) + fail("bind"); + + for (;;) { + ret = read(s, buf, sizeof(buf)); + if (ret < 0) { + close(s); + fail("read"); + } + printf("%s: received %d bytes, \"%.*s\"\n", + __FUNCTION__, ret, ret, buf); + } + + return NULL; +} + +static void *client(void *arg) +{ + struct rtipc_port_label plabel; + struct sockaddr_ipc svsaddr; + int ret, s, n = 0, len; + struct timespec ts; + + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP); + if (s < 0) + fail("socket"); + + /* + * Set the port label. This name will be used to find the peer + * when connecting, instead of the port number. The label must + * be set _after_ the socket is bound to the port, so that + * BUFP does not try to register this label for the client + * port as well (like the server thread did). + */ + strcpy(plabel.label, BUFP_PORT_LABEL); + ret = setsockopt(s, SOL_BUFP, BUFP_LABEL, + &plabel, sizeof(plabel)); + if (ret) + fail("setsockopt"); + + memset(&svsaddr, 0, sizeof(svsaddr)); + svsaddr.sipc_family = AF_RTIPC; + svsaddr.sipc_port = -1; /* Tell BUFP to search by label. */ + ret = connect(s, (struct sockaddr *)&svsaddr, sizeof(svsaddr)); + if (ret) + fail("connect"); + + for (;;) { + len = strlen(msg[n]); + ret = write(s, msg[n], len); + if (ret < 0) { + close(s); + fail("write"); + } + printf("%s: sent %d bytes, \"%.*s\"\n", + __FUNCTION__, ret, ret, msg[n]); + n = (n + 1) % (sizeof(msg) / sizeof(msg[0])); + /* + * We run in full real-time mode (i.e. primary mode), + * so we have to let the system breathe between two + * iterations.
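+ * Note that clock_nanosleep() is among the calls wrapped by + * libcobalt (see cobalt.wrappers), so the delay below is served + * by the Cobalt core without demoting the thread to secondary + * mode.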
+ */ + ts.tv_sec = 0; + ts.tv_nsec = 500000000; /* 500 ms */ + clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL); + } + + return NULL; +} + +int main(int argc, char **argv) +{ + struct sched_param svparam = {.sched_priority = 71 }; + struct sched_param clparam = {.sched_priority = 70 }; + pthread_attr_t svattr, clattr; + sigset_t set; + int sig; + + sigemptyset(&set); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGHUP); + pthread_sigmask(SIG_BLOCK, &set, NULL); + + pthread_attr_init(&svattr); + pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&svattr, SCHED_FIFO); + pthread_attr_setschedparam(&svattr, &svparam); + + errno = pthread_create(&svtid, &svattr, &server, NULL); + if (errno) + fail("pthread_create"); + + pthread_attr_init(&clattr); + pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&clattr, SCHED_FIFO); + pthread_attr_setschedparam(&clattr, &clparam); + + errno = pthread_create(&cltid, &clattr, &client, NULL); + if (errno) + fail("pthread_create"); + + __STD(sigwait(&set, &sig)); + pthread_cancel(svtid); + pthread_cancel(cltid); + pthread_join(svtid, NULL); + pthread_join(cltid, NULL); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-readwrite.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-readwrite.c new file mode 100644 index 0000000..34d761a --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-readwrite.c @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * + * BUFP-based client/server demo, using the read(2)/write(2) + * system calls to exchange data over a socket. + * + * In this example, two sockets are created. A server thread (reader) + * is bound to a real-time port and receives a stream of bytes sent to + * this port from a client thread (writer). 
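+ * + * This is the fixed-port counterpart of bufp-label.c: both endpoints + * agree on BUFP_SVPORT beforehand instead of resolving a port label at + * connect time.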
+ */ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <signal.h> +#include <string.h> +#include <pthread.h> +#include <errno.h> +#include <rtdm/ipc.h> + +pthread_t svtid, cltid; + +#define BUFP_SVPORT 12 + +static const char *msg[] = { + "Surfing With The Alien", + "Lords of Karma", + "Banana Mango", + "Psycho Monkey", + "Luminous Flesh Giants", + "Moroccan Sunset", + "Satch Boogie", + "Flying In A Blue Dream", + "Ride", + "Summer Song", + "Speed Of Light", + "Crystal Planet", + "Raspberry Jam Delta-V", + "Champagne?", + "Clouds Race Across The Sky", + "Engines Of Creation" +}; + +static void fail(const char *reason) +{ + perror(reason); + exit(EXIT_FAILURE); +} + +static void *server(void *arg) +{ + struct sockaddr_ipc saddr; + char buf[128]; + size_t bufsz; + int ret, s; + + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP); + if (s < 0) + fail("socket"); + + /* + * Set a 16k buffer for the server endpoint. This + * configuration must be done prior to binding the socket to a + * port. + */ + bufsz = 16384; /* bytes */ + ret = setsockopt(s, SOL_BUFP, BUFP_BUFSZ, + &bufsz, sizeof(bufsz)); + if (ret) + fail("setsockopt"); + + saddr.sipc_family = AF_RTIPC; + saddr.sipc_port = BUFP_SVPORT; + ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr)); + if (ret) + fail("bind"); + + for (;;) { + ret = read(s, buf, sizeof(buf)); + if (ret < 0) { + close(s); + fail("read"); + } + printf("%s: received %d bytes, \"%.*s\"\n", + __FUNCTION__, ret, ret, buf); + } + + return NULL; +} + +static void *client(void *arg) +{ + struct sockaddr_ipc svsaddr; + int ret, s, n = 0, len; + struct timespec ts; + + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP); + if (s < 0) + fail("socket"); + + memset(&svsaddr, 0, sizeof(svsaddr)); + svsaddr.sipc_family = AF_RTIPC; + svsaddr.sipc_port = BUFP_SVPORT; + ret = connect(s, (struct sockaddr *)&svsaddr, sizeof(svsaddr)); + if (ret) + fail("connect"); + + for (;;) { + len = strlen(msg[n]); + ret = write(s, msg[n], len); + if (ret < 0) { + close(s); + fail("write"); + } + printf("%s: sent %d bytes, \"%.*s\"\n", + __FUNCTION__, ret, ret, msg[n]); + n = (n + 1) % (sizeof(msg) / sizeof(msg[0])); + /* + * We run in full real-time mode (i.e. primary mode), + * so we have to let the system breathe between two + * iterations. 
+ */ + ts.tv_sec = 0; + ts.tv_nsec = 500000000; /* 500 ms */ + clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL); + } + + return NULL; +} + +int main(int argc, char **argv) +{ + struct sched_param svparam = {.sched_priority = 71 }; + struct sched_param clparam = {.sched_priority = 70 }; + pthread_attr_t svattr, clattr; + sigset_t set; + int sig; + + sigemptyset(&set); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGHUP); + pthread_sigmask(SIG_BLOCK, &set, NULL); + + pthread_attr_init(&svattr); + pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&svattr, SCHED_FIFO); + pthread_attr_setschedparam(&svattr, &svparam); + + errno = pthread_create(&svtid, &svattr, &server, NULL); + if (errno) + fail("pthread_create"); + + pthread_attr_init(&clattr); + pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&clattr, SCHED_FIFO); + pthread_attr_setschedparam(&clattr, &clparam); + + errno = pthread_create(&cltid, &clattr, &client, NULL); + if (errno) + fail("pthread_create"); + + __STD(sigwait(&set, &sig)); + pthread_cancel(svtid); + pthread_cancel(cltid); + pthread_join(svtid, NULL); + pthread_join(cltid, NULL); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/can-rtt.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/can-rtt.c new file mode 100644 index 0000000..dd212d8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/can-rtt.c @@ -0,0 +1,408 @@ +/* + * Round-Trip-Time Test - sends and receives messages and measures the + * time in between. + * + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * Based on RTnet's examples/xenomai/posix/rtt-sender.c. + * + * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de> + * 2002 Marc Kleine-Budde <kleine-budde@gmx.de> + * 2006 Jan Kiszka <jan.kiszka@web.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * The program sends out CAN messages periodically and copies the current + * time-stamp to the payload. At reception, that time-stamp is compared + * with the current time to determine the round-trip time. The jitter + * values are printed out regularly. Concurrent tests can be carried out + * by starting the program with different message identifiers. It is also + * possible to use this program on a remote system as a simple repeater to + * loop back messages.
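+ * + * Typical invocation (RT-CAN interface names are system-specific; + * rtcan0/rtcan1 here are only examples): + * can_rtt rtcan0 rtcan1 + * Started with --repeater on a second node, the program mirrors the + * received frames back, so the test can run across two machines.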
+ */ + +#include <errno.h> +#include <mqueue.h> +#include <signal.h> +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <limits.h> +#include <getopt.h> +#include <memory.h> +#include <netinet/in.h> +#include <net/if.h> +#include <sys/ioctl.h> +#include <rtdm/can.h> +#include <xenomai/init.h> + +#define NSEC_PER_SEC 1000000000 + +static unsigned int cycle = 10000; /* 10 ms */ +static canid_t can_id = 0x1; + +static pthread_t txthread, rxthread; +static int txsock, rxsock; +static mqd_t mq; +static int txcount, rxcount; +static int overruns; +static int repeater; + +struct rtt_stat { + long long rtt; + long long rtt_min; + long long rtt_max; + long long rtt_sum; + long long rtt_sum_last; + int counts_per_sec; +}; + +void application_usage(void) +{ + fprintf(stderr, "usage: %s [options] <tx-can-interface> <rx-can-interface>:\n", + get_program_name()); + fprintf(stderr, + " -r, --repeater Repeater, send back received messages\n" + " -i, --id=ID CAN Identifier (default = 0x1)\n" + " -c, --cycle Cycle time in us (default = 10000us)\n"); +} + +static void *transmitter(void *arg) +{ + struct sched_param param = { .sched_priority = 80 }; + struct timespec next_period; + struct timespec time; + struct can_frame frame; + long long *rtt_time = (long long *)&frame.data, t; + + /* Pre-fill CAN frame */ + frame.can_id = can_id; + frame.can_dlc = sizeof(*rtt_time); + + pthread_setname_np(pthread_self(), "rtcan_rtt_transmitter"); + pthread_setschedparam(pthread_self(), SCHED_FIFO, &param); + + clock_gettime(CLOCK_MONOTONIC, &next_period); + + while(1) { + next_period.tv_nsec += cycle * 1000; + while (next_period.tv_nsec >= NSEC_PER_SEC) { + next_period.tv_nsec -= NSEC_PER_SEC; + next_period.tv_sec++; + } + + clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next_period, NULL); + + if (rxcount != txcount) { + overruns++; + continue; + } + + clock_gettime(CLOCK_MONOTONIC, &time); + t = (long long)time.tv_sec * NSEC_PER_SEC + time.tv_nsec; + memcpy(rtt_time, &t, sizeof(t)); + + /* Transmit the message containing the local time */ + if (send(txsock, (void *)&frame, sizeof(struct can_frame), 0) < 0) { + if (errno == EBADF) + printf("terminating transmitter thread\n"); + else + perror("send failed"); + return NULL; + } + txcount++; + } +} + + +static void *receiver(void *arg) +{ + struct sched_param param = { .sched_priority = 82 }; + struct timespec time; + struct can_frame frame; + long long *rtt_time = (long long *)frame.data, t; + struct rtt_stat rtt_stat = {0, 1000000000000000000LL, -1000000000000000000LL, + 0, 0, 0}; + + pthread_setname_np(pthread_self(), "rtcan_rtt_receiver"); + pthread_setschedparam(pthread_self(), SCHED_FIFO, &param); + + rtt_stat.counts_per_sec = 1000000 / cycle; + + while (1) { + if (recv(rxsock, (void *)&frame, sizeof(struct can_frame), 0) < 0) { + if (errno == EBADF) + printf("terminating receiver thread\n"); + else + perror("recv failed"); + return NULL; + } + if (repeater) { + /* Transmit the message back as is */ + if (send(txsock, (void *)&frame, sizeof(struct can_frame), 0) < 0) { + if (errno == EBADF) + printf("terminating transmitter thread\n"); + else + perror("send failed"); + return NULL; + } + txcount++; + } else { + memcpy(&t, rtt_time, sizeof(t)); + clock_gettime(CLOCK_MONOTONIC, &time); + if (rxcount > 0) { + rtt_stat.rtt = ((long long)time.tv_sec * 1000000000LL + + time.tv_nsec - t); + rtt_stat.rtt_sum += rtt_stat.rtt; + if (rtt_stat.rtt < rtt_stat.rtt_min) + rtt_stat.rtt_min = rtt_stat.rtt; + if (rtt_stat.rtt >
rtt_stat.rtt_max) + rtt_stat.rtt_max = rtt_stat.rtt; + } + } + rxcount++; + + if ((rxcount % rtt_stat.counts_per_sec) == 0) { + mq_send(mq, (char *)&rtt_stat, sizeof(rtt_stat), 0); + rtt_stat.rtt_sum_last = rtt_stat.rtt_sum; + } + } +} + +static void catch_signal(int sig) +{ + mq_close(mq); + close(rxsock); + close(txsock); +} + + +int main(int argc, char *argv[]) +{ + struct sched_param param = { .sched_priority = 1 }; + pthread_attr_t thattr; + struct mq_attr mqattr; + struct sockaddr_can rxaddr, txaddr; + struct can_filter rxfilter[1]; + struct rtt_stat rtt_stat; + char mqname[32]; + char *txdev, *rxdev; + struct can_ifreq ifr; + int ret, opt; + + struct option long_options[] = { + { "id", required_argument, 0, 'i'}, + { "cycle", required_argument, 0, 'c'}, + { "repeater", no_argument, 0, 'r'}, + { 0, 0, 0, 0}, + }; + + while ((opt = getopt_long(argc, argv, "ri:c:", + long_options, NULL)) != -1) { + switch (opt) { + case 'c': + cycle = atoi(optarg); + break; + + case 'i': + can_id = strtoul(optarg, NULL, 0); + break; + + case 'r': + repeater = 1; + break; + + default: + fprintf(stderr, "Unknown option %c\n", opt); + exit(-1); + } + } + + printf("%d %d\n", optind, argc); + if (optind + 2 != argc) { + xenomai_usage(); + exit(0); + } + + txdev = argv[optind]; + rxdev = argv[optind + 1]; + + /* Create and configure RX socket */ + if ((rxsock = socket(PF_CAN, SOCK_RAW, CAN_RAW)) < 0) { + perror("RX socket failed"); + return -1; + } + + namecpy(ifr.ifr_name, rxdev); + printf("RX rxsock=%d, ifr_name=%s\n", rxsock, ifr.ifr_name); + + if (ioctl(rxsock, SIOCGIFINDEX, &ifr) < 0) { + perror("RX ioctl SIOCGIFINDEX failed"); + goto failure1; + } + + /* We only want to receive our own messages */ + rxfilter[0].can_id = can_id; + rxfilter[0].can_mask = 0x3ff; + if (setsockopt(rxsock, SOL_CAN_RAW, CAN_RAW_FILTER, + &rxfilter, sizeof(struct can_filter)) < 0) { + perror("RX setsockopt CAN_RAW_FILTER failed"); + goto failure1; + } + memset(&rxaddr, 0, sizeof(rxaddr)); + rxaddr.can_ifindex = ifr.ifr_ifindex; + rxaddr.can_family = AF_CAN; + if (bind(rxsock, (struct sockaddr *)&rxaddr, sizeof(rxaddr)) < 0) { + perror("RX bind failed"); + goto failure1; + } + + /* Create and configure TX socket */ + + if (strcmp(rxdev, txdev) == 0) { + txsock = rxsock; + } else { + if ((txsock = socket(PF_CAN, SOCK_RAW, 0)) < 0) { + perror("TX socket failed"); + goto failure1; + } + + namecpy(ifr.ifr_name, txdev); + printf("TX txsock=%d, ifr_name=%s\n", txsock, ifr.ifr_name); + + if (ioctl(txsock, SIOCGIFINDEX, &ifr) < 0) { + perror("TX ioctl SIOCGIFINDEX failed"); + goto failure2; + } + + /* Suppress definition of a default receive filter list */ + if (setsockopt(txsock, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0) < 0) { + perror("TX setsockopt CAN_RAW_FILTER failed"); + goto failure2; + } + + memset(&txaddr, 0, sizeof(txaddr)); + txaddr.can_ifindex = ifr.ifr_ifindex; + txaddr.can_family = AF_CAN; + + if (bind(txsock, (struct sockaddr *)&txaddr, sizeof(txaddr)) < 0) { + perror("TX bind failed"); + goto failure2; + } + } + + signal(SIGTERM, catch_signal); + signal(SIGINT, catch_signal); + signal(SIGHUP, catch_signal); + + printf("Round-Trip-Time test %s -> %s with CAN ID 0x%x\n", + argv[optind], argv[optind + 1], can_id); + printf("Cycle time: %d us\n", cycle); + printf("All RTT timing figures are in us.\n"); + + /* Create statistics message queue */ + snprintf(mqname, sizeof(mqname), "/rtcan_rtt-%d", getpid()); + mqattr.mq_flags = 0; + mqattr.mq_maxmsg = 100; + mqattr.mq_msgsize = sizeof(struct rtt_stat); + mq = mq_open(mqname,
O_RDWR | O_CREAT | O_EXCL, 0600, &mqattr); + if (mq == (mqd_t)-1) { + perror("opening mqueue failed"); + goto failure2; + } + + /* Create receiver RT-thread */ + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + ret = pthread_create(&rxthread, &thattr, &receiver, NULL); + if (ret) { + fprintf(stderr, "%s: pthread_create(receiver) failed\n", + strerror(ret)); + goto failure3; + } + + if (!repeater) { + /* Create transmitter RT-thread */ + ret = pthread_create(&txthread, &thattr, &transmitter, NULL); + if (ret) { + fprintf(stderr, "%s: pthread_create(transmitter) failed\n", + strerror(ret)); + goto failure4; + } + } + + pthread_setschedparam(pthread_self(), SCHED_FIFO, &param); + + if (repeater) + printf("Messages\n"); + else + printf("Messages RTTlast RTT_avg RTT_min RTT_max Overruns\n"); + + while (1) { + long long rtt_avg; + + ret = mq_receive(mq, (char *)&rtt_stat, sizeof(rtt_stat), NULL); + if (ret != sizeof(rtt_stat)) { + if (ret < 0) { + if (errno == EBADF) + printf("terminating mq_receive\n"); + else + perror("mq_receive failed"); + } else + fprintf(stderr, + "mq_receive returned invalid length %d\n", ret); + break; + } + + if (repeater) { + printf("%8d\n", rxcount); + } else { + rtt_avg = ((rtt_stat.rtt_sum - rtt_stat.rtt_sum_last) / + rtt_stat.counts_per_sec); + printf("%8d %7ld %7ld %7ld %7ld %8d\n", rxcount, + (long)(rtt_stat.rtt / 1000), (long)(rtt_avg / 1000), + (long)(rtt_stat.rtt_min / 1000), + (long)(rtt_stat.rtt_max / 1000), + overruns); + } + } + + /* This call also leaves primary mode, required for socket cleanup. */ + printf("shutting down\n"); + + /* Important: First close the sockets! */ + close(rxsock); + close(txsock); + pthread_join(txthread, NULL); + pthread_cancel(rxthread); + pthread_join(rxthread, NULL); + + return 0; + + failure4: + pthread_cancel(rxthread); + pthread_join(rxthread, NULL); + failure3: + mq_close(mq); + failure2: + close(txsock); + failure1: + close(rxsock); + + return 1; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/eth_p_all.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/eth_p_all.c new file mode 100644 index 0000000..c4cf0d6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/eth_p_all.c @@ -0,0 +1,108 @@ +/*** + * + * demo/posix/cobalt/rtnet-eth_p_all.c + * + * ETH_P_ALL receiver - listens for all incoming packets and dumps them + * + * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de> + * + * RTnet - real-time networking example + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * + */ + +#include <stdio.h> +#include <stdlib.h> +#include <errno.h> +#include <string.h> + +#include <signal.h> +#include <pthread.h> +#include <unistd.h> +#include <sys/mman.h> +#include <sys/socket.h> +#include <sys/ioctl.h> +#include <netpacket/packet.h> +#include <net/ethernet.h> +#include <net/if.h> +#include <arpa/inet.h> +#include <netinet/ether.h> + +char buffer[10*1024]; +int sock; + + +static void catch_signal(int sig) +{ + close(sock); +} + + +int main(int argc, char *argv[]) +{ + struct sched_param param = { .sched_priority = 1 }; + ssize_t len; + struct sockaddr_ll addr; + struct ether_header *eth = (struct ether_header *)buffer; + + + signal(SIGTERM, catch_signal); + signal(SIGINT, catch_signal); + signal(SIGHUP, catch_signal); + mlockall(MCL_CURRENT|MCL_FUTURE); + + if ((sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL))) < 0) { + perror("socket cannot be created"); + return EXIT_FAILURE; + } + + if (argc > 1) { + struct ifreq ifr; + + snprintf(ifr.ifr_name, IFNAMSIZ, "%s", argv[1]); + if (ioctl(sock, SIOCGIFINDEX, &ifr) < 0) { + perror("cannot get interface index"); + close(sock); + return EXIT_FAILURE; + } + + addr.sll_family = AF_PACKET; + addr.sll_protocol = ETH_P_ALL; + addr.sll_ifindex = ifr.ifr_ifindex; + + if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) { + perror("cannot bind to local ip/port"); + close(sock); + return EXIT_FAILURE; + } + } + + pthread_setschedparam(pthread_self(), SCHED_FIFO, &param); + + while (1) { + len = recv(sock, buffer, sizeof(buffer), 0); + if (len < 0) + break; + + printf("from: %s type: %04x length=%zd\n", + ether_ntoa((struct ether_addr *)eth->ether_shost), + ntohs(eth->ether_type), len); + } + + printf("shutting down\n"); + + return EXIT_SUCCESS; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/gpiopwm.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/gpiopwm.c new file mode 100644 index 0000000..b195d7e --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/gpiopwm.c @@ -0,0 +1,504 @@ +#include <xenomai/init.h> +#include <semaphore.h> +#include <pthread.h> +#include <signal.h> +#include <rtdm/gpiopwm.h> +#include <stdlib.h> +#include <getopt.h> +#include <errno.h> +#include <error.h> +#include <stdio.h> +#include <time.h> + +#include <unistd.h> +#include <stdlib.h> +#include <string.h> +#include <netdb.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <arpa/inet.h> +#include <linux/netdevice.h> + +#define MIN_DUTY_CYCLE (0) +#define MAX_DUTY_CYCLE (100) + +typedef void *(*gpiopwm_control_thread)(void *cookie); +#define DEVICE_NAME "/dev/rtdm/gpiopwm" +char *device_name; +int dev; + +static sem_t synch; +static sem_t setup; +static int stop; +static int step = 1; +static int port = 66666; + +#define MAX_IP_INTERFACES (9) +static char *ip_str[MAX_IP_INTERFACES + 1]; +static int last_ip; + + +#define GPIO_PWM_SERVO_CONFIG \ +{ \ + .duty_cycle = 50, \ + .range_min = 950, \ + .range_max = 2050, \ + .period = 20000000, \ + .gpio = 1, \ +} + +static struct gpiopwm config = GPIO_PWM_SERVO_CONFIG; + +static void fail(const char *reason) +{ + perror(reason); + exit(EXIT_FAILURE); +} + +static void sem_sync(sem_t *sem) +{ + int ret; + + for (;;) { + ret = sem_wait(sem); + if (ret == 0) + return; + if (errno != EINTR) + fail("sem_wait"); + } +} + +static inline void clear_screen(void) +{ + const char* cmd = "\e[1;1H\e[2J"; + int ret; + + ret = write(2, cmd, strlen(cmd)); + if (!ret) + error(1, ret, "clear screen error"); +} + +static inline void print_config(char *str) +{ + int i; +
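+ /* Render one status screen: the banner passed by the caller, the + * ip/port lines collected by get_ip_addresses(), then the current + * pwm settings. */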
printf("Config: %s\n", str); + for (i = 0; i < last_ip ; i++) + printf("%s", ip_str[i]); + printf(" device : %s\n", device_name); + printf(" range : [%d, %d]\n", config.range_min, config.range_max); + printf(" period : %d nsec\n", config.period); + printf(" gpio pin : %d\n", config.gpio); + printf(" duty cycle : %d\n", config.duty_cycle); +} + +static inline void input_message(void) +{ + print_config(""); + printf("\n GPIO PWM Control\n"); + printf( " Enter duty_cycle [0-100] : "); +} + +static void get_ip_addresses(void) +{ + char ip[INET_ADDRSTRLEN]; + struct sockaddr_in *s_in; + struct ifconf ifconf; + struct ifreq ifr[10]; + int ret; + int ifs; + int i; + int s; + + s = socket(AF_INET, SOCK_STREAM, 0); + if (s < 0) + return; + + ifconf.ifc_buf = (char *) ifr; + ifconf.ifc_len = sizeof(ifr); + + if (ioctl(s, SIOCGIFCONF, &ifconf) == -1) + return; + + ifs = ifconf.ifc_len / sizeof(ifr[0]); + + /* we wont _display_ more than MAX_IP_INTERFACES */ + if (ifs > MAX_IP_INTERFACES) + ifs = MAX_IP_INTERFACES; + + last_ip = ifs + 1; + + for (i = 0; i < ifs; i++) { + s_in = (struct sockaddr_in *) &ifr[i].ifr_addr; + if (!inet_ntop(AF_INET, &s_in->sin_addr, ip, sizeof(ip))) + return; + ret = asprintf(&ip_str[i]," ip : %s\n", ip); + if (ret) + perror("asprintf"); + } + + ret = asprintf(&ip_str[i]," port : %d\n\n", port); + if (ret) + perror("asprintf"); + + close(s); +} + +static void setup_sched_parameters(pthread_attr_t *attr, int prio) +{ + struct sched_param p; + int ret; + + ret = pthread_attr_init(attr); + if (ret) + error(1, ret, "pthread_attr_init()"); + + ret = pthread_attr_setinheritsched(attr, PTHREAD_EXPLICIT_SCHED); + if (ret) + error(1, ret, "pthread_attr_setinheritsched()"); + + ret = pthread_attr_setschedpolicy(attr, prio ? SCHED_FIFO : SCHED_OTHER); + if (ret) + error(1, ret, "pthread_attr_setschedpolicy()"); + + p.sched_priority = prio; + ret = pthread_attr_setschedparam(attr, &p); + if (ret) + error(1, ret, "pthread_attr_setschedparam()"); +} + +static void *gpiopwm_init_thread(void *cookie) +{ + int ret; + + pthread_setname_np(pthread_self(), "gpio-pwm-handler"); + ret = ioctl(dev, GPIOPWM_RTIOC_SET_CONFIG, config); + if (ret) + error(1, ret, "failed to set config"); + + ioctl(dev, GPIOPWM_RTIOC_START); + + /* setup completed: allow handler to run */ + sem_post(&setup); + + /* wait for completion */ + sem_sync(&synch); + ioctl(dev, GPIOPWM_RTIOC_STOP); + + return NULL; +} + +/* + * Controls the motor receving the duty cycle sent over UDP + * ie: echo -n <duty_cycle> | nc -w1 -u <ipaddr> <port> + */ +static void *gpiopwm_udp_ctrl_thread(void *cookie) +{ + struct sockaddr_in saddr; + struct sockaddr_in caddr; + unsigned int duty_cycle; + const int blen = 4; + int optval = 1; + socklen_t clen; + char buf[blen]; + int sockfd; + int ret; + + pthread_setname_np(pthread_self(), "gpio-pwm.netcat"); + + sockfd = socket(AF_INET, SOCK_DGRAM, 0); + if (sockfd < 0) + perror("socket"); + + setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(int)); + + bzero((char *) &saddr, sizeof(saddr)); + saddr.sin_addr.s_addr = htonl(INADDR_ANY); + saddr.sin_port = htons(port); + saddr.sin_family = AF_INET; + + if (bind(sockfd, (struct sockaddr *)&saddr, sizeof(saddr)) < 0) + perror("bind"); + + clen = sizeof(caddr); + sem_sync(&setup); + for (;;) { + + clear_screen(); + print_config("UDP Server\n"); + + memset(buf,'\0', blen); + ret = recvfrom(sockfd, buf, blen - 1, 0, (struct sockaddr *)&caddr, &clen); + if (ret < 0) + perror("recvfrom"); + + duty_cycle = strtol(buf, NULL, 10); + if (duty_cycle 
< MIN_DUTY_CYCLE || duty_cycle > MAX_DUTY_CYCLE) + continue; + + ret = ioctl(dev, GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE, duty_cycle); + if (ret) + break; + + config.duty_cycle = duty_cycle; + } + + return NULL; +} + +/* + * Manual control of the pwm duty cycle. + */ +static void *gpiopwm_manual_ctrl_thread(void *cookie) +{ + unsigned int duty_cycle; + size_t len = 4; + char *in; + int ret; + + pthread_setname_np(pthread_self(), "gpio-pwm.manual"); + + in = malloc(len * sizeof(*in)); + if (!in) + goto err; + + sem_sync(&setup); + for (;;) { + clear_screen(); + input_message(); + + len = getline(&in, &len, stdin); + if (len == -1 || len == 1) + break; + + duty_cycle = atoi(in); + if (!duty_cycle && strncmp(in, "000", len - 1) != 0) + break; + + ret = ioctl(dev, GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE, duty_cycle); + if (ret) { + fprintf(stderr, "invalid duty cycle %d\n", duty_cycle); + break; + } + + config.duty_cycle = duty_cycle; + } + + free(in); +err: + sem_post(&synch); + + return NULL; +} + +/* + * Continuously sweep all duty cycles 0..100 and 100..0. + * No mode switches should occur. + */ +static void *gpiopwm_sweep_ctrl_thread(void *cookie) +{ + struct timespec delay; + struct duty_values { + enum { fwd, bck} direction; + int x; + } values; + int ret; + + pthread_setname_np(pthread_self(), "gpio-pwm.sweep"); + + delay = (struct timespec) {.tv_sec = 0, .tv_nsec = 10 * config.period}; + values = (struct duty_values) {.direction = fwd, .x = MIN_DUTY_CYCLE}; + + sem_sync(&setup); + for (;;) { + if (stop) + break; + + ret = ioctl(dev, GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE, values.x); + if (ret) { + fprintf(stderr, "invalid duty cycle %d\n", values.x); + break; + } + + nanosleep(&delay, NULL); + + if (values.direction == bck) { + if (values.x - (step - 1) > MIN_DUTY_CYCLE) + values.x -= step; + else { + values.direction = fwd; + values.x = MIN_DUTY_CYCLE; + continue; + } + } + + if (values.direction == fwd) { + if (values.x + (step - 1) < MAX_DUTY_CYCLE) + values.x += step; + else { + values.direction = bck; + values.x = MAX_DUTY_CYCLE; + } + } + } + sem_post(&synch); + + return NULL; +} + +static void gpiopwm_sweep_sig_handler(int sig) +{ + stop = 1; +} + +static const struct option options[] = { + { +#define help_opt 0 + .name = "help", + .has_arg = 0, + .flag = NULL, + }, + { +#define sweep_range_opt 1 + .name = "sweep", + .has_arg = 1, + .flag = NULL, + }, + { +#define manual_opt 2 + .name = "manual", + .has_arg = 0, + .flag = NULL, + }, + { +#define config_opt 3 + .name = "config", + .has_arg = 1, + .flag = NULL, + }, + { +#define udp_opt 4 + .name = "udp", + .has_arg = 1, + .flag = NULL, + }, + { + .name = NULL, + } +}; + +static void usage(void) +{ + fprintf(stderr, "Usage:\n" + "gpiopwm --config=dev:min:max:period:gpio:duty [--sweep=<step> | --udp=<port> | --manual]\n\n" + "--config=<..>\n" + " dev: /dev/rtdm/gpio-pwm id [0..7]\n" + " min: min active period in usec\n" + " max: max active period in usec\n" + " period: base signal period in nsec\n" + " gpio: gpio pin number\n" + " duty: default duty cycle [0..100]\n" + "--sweep=<step>\n" + " sweep all duty cycle ranges in a loop\n" + " in step increments [default 1]\n" + "--manual input duty cycle from the command line\n" + "--udp=<port> receive duty cycle from the network\n" + " ie: echo -n <duty_cycle> | nc -w1 -u <ipaddr> <port>\n" + ); +} + +int main(int argc, char *argv[]) +{ + gpiopwm_control_thread handler = NULL; + pthread_t pwm_task, ctrl_task; + int opt, lindex, device = 0; + pthread_attr_t tattr; + char *p; + int ret; + + for (;;) { + lindex 
= -1; + opt = getopt_long_only(argc, argv, "", options, &lindex); + if (opt == EOF) + break; + + switch (lindex) { + case sweep_range_opt: + handler = gpiopwm_sweep_ctrl_thread; + signal(SIGINT, gpiopwm_sweep_sig_handler); + step = atoi(optarg); + step = step < 1 ? 1 : step; + break; + case manual_opt: + handler = gpiopwm_manual_ctrl_thread; + signal(SIGINT, SIG_IGN); + break; + case udp_opt: + handler = gpiopwm_udp_ctrl_thread; + port = atoi(optarg); + get_ip_addresses(); + break; + case config_opt: + p = strtok(optarg,":"); + device = p ? atoi(p): -1; + p = strtok(NULL,":"); + config.range_min = p ? atoi(p): -1; + p = strtok(NULL,":"); + config.range_max = p ? atoi(p): -1; + p = strtok(NULL,":"); + config.period = p ? atoi(p): -1; + p = strtok(NULL,":"); + config.gpio = p ? atoi(p): -1; + p = strtok(NULL,""); + config.duty_cycle = p ? atoi(p): -1; + break; + case help_opt: + default: + usage(); + exit(1); + } + } + + if (handler == NULL) { + usage(); + exit(1); + } + + ret = sem_init(&synch, 0, 0); + if (ret < 0) + error(1, errno, "can't create synch semaphore"); + + ret = sem_init(&setup, 0, 0); + if (ret < 0) + error(1, errno, "can't create setup semaphore"); + + ret = asprintf(&device_name, "%s%d", DEVICE_NAME, device); + if (ret < 0) + error(1, EINVAL, "can't create device name"); + + dev = open(device_name, O_RDWR); + if (dev < 0) + error(1, EINVAL, "can't open %s", device_name); + + setup_sched_parameters(&tattr, 99); + ret = pthread_create(&ctrl_task, &tattr, handler, NULL); + if (ret) + error(1, ret, "pthread_create(ctrl_handler)"); + + setup_sched_parameters(&tattr, 98); + ret = pthread_create(&pwm_task, &tattr, gpiopwm_init_thread, NULL); + if (ret) + error(1, ret, "pthread_create(init thread)"); + + pthread_join(pwm_task, NULL); + pthread_join(ctrl_task, NULL); + + pthread_attr_destroy(&tattr); + + ret = close(dev); + if (ret < 0) + error(1, EINVAL, "can't close"); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-label.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-label.c new file mode 100644 index 0000000..06fc881 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-label.c @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * + * IDDP-based client/server demo, using the write(2)/recvfrom(2) + * system calls to exchange data over a socket. + * + * In this example, two sockets are created. A server thread (reader) + * is bound to a labeled real-time port and receives datagrams sent to + * this port from a client thread (writer). The client thread attaches + * to the port opened by the server using a labeled connection + * request. The client socket is bound to a different port, only to + * provide a valid peer name; this is optional. 
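+ * + * As a quick sanity check while this demo runs, the label bound by + * the server should show up on the non real-time side (assuming the + * registry support is enabled in the kernel configuration), e.g.: + * + * $ ls /proc/xenomai/registry/rtipc/iddp + * iddp-demo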
+ * + * ASCII labels can be attached to bound ports, in order to connect + * sockets to them in a more descriptive way than using plain numeric + * port values. + */ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <signal.h> +#include <string.h> +#include <pthread.h> +#include <errno.h> +#include <rtdm/ipc.h> + +pthread_t svtid, cltid; + +#define IDDP_CLPORT 27 + +#define IDDP_PORT_LABEL "iddp-demo" + +static const char *msg[] = { + "Surfing With The Alien", + "Lords of Karma", + "Banana Mango", + "Psycho Monkey", + "Luminous Flesh Giants", + "Moroccan Sunset", + "Satch Boogie", + "Flying In A Blue Dream", + "Ride", + "Summer Song", + "Speed Of Light", + "Crystal Planet", + "Raspberry Jam Delta-V", + "Champagne?", + "Clouds Race Across The Sky", + "Engines Of Creation" +}; + +static void fail(const char *reason) +{ + perror(reason); + exit(EXIT_FAILURE); +} + +static void *server(void *arg) +{ + struct sockaddr_ipc saddr, claddr; + struct rtipc_port_label plabel; + socklen_t addrlen; + char buf[128]; + int ret, s; + + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP); + if (s < 0) + fail("socket"); + + /* + * We will use Xenomai's system heap for datagrams, so no + * IDDP_POOLSZ is required here. + */ + + /* + * Set a port label. This name will be registered when + * binding, in addition to the port number (if given). + */ + strcpy(plabel.label, IDDP_PORT_LABEL); + ret = setsockopt(s, SOL_IDDP, IDDP_LABEL, + &plabel, sizeof(plabel)); + if (ret) + fail("setsockopt"); + + /* + * Bind the socket to the port. Assign that port a label, so + * that peers may use descriptive information to locate + * it. Labeled ports will appear in the + * /proc/xenomai/registry/rtipc/iddp directory once the socket + * is bound. + * + * saddr.sipc_port specifies the port number to use. If -1 is + * passed, the IDDP driver will auto-select an idle port. + */ + saddr.sipc_family = AF_RTIPC; + saddr.sipc_port = -1; /* Pick next free */ + ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr)); + if (ret) + fail("bind"); + + for (;;) { + addrlen = sizeof(saddr); + ret = recvfrom(s, buf, sizeof(buf), 0, + (struct sockaddr *)&claddr, &addrlen); + if (ret < 0) { + close(s); + fail("recvfrom"); + } + printf("%s: received %d bytes, \"%.*s\" from port %d\n", + __FUNCTION__, ret, ret, buf, claddr.sipc_port); + } + + return NULL; +} + +static void *client(void *arg) +{ + struct sockaddr_ipc svsaddr, clsaddr; + struct rtipc_port_label plabel; + int ret, s, n = 0, len; + struct timespec ts; + + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP); + if (s < 0) + fail("socket"); + + /* + * Set a name on the client socket. This is strictly optional, + * and only done here for the purpose of getting back a + * different port number in recvfrom(). + */ + clsaddr.sipc_family = AF_RTIPC; + clsaddr.sipc_port = IDDP_CLPORT; + ret = bind(s, (struct sockaddr *)&clsaddr, sizeof(clsaddr)); + if (ret) + fail("bind"); + + /* + * Set the port label. This name will be used to find the peer + * when connecting, instead of the port number. The label must + * be set _after_ the socket is bound to the port, so that + * IDDP does not try to register this label for the client + * port as well (like the server thread did). + */ + strcpy(plabel.label, IDDP_PORT_LABEL); + ret = setsockopt(s, SOL_IDDP, IDDP_LABEL, + &plabel, sizeof(plabel)); + if (ret) + fail("setsockopt"); + + memset(&svsaddr, 0, sizeof(svsaddr)); + svsaddr.sipc_family = AF_RTIPC; + svsaddr.sipc_port = -1; /* Tell IDDP to search by label.
*/ + ret = connect(s, (struct sockaddr *)&svsaddr, sizeof(svsaddr)); + if (ret) + fail("connect"); + + for (;;) { + len = strlen(msg[n]); + /* Send to default destination we connected to. */ + ret = write(s, msg[n], len); + if (ret < 0) { + close(s); + fail("sendto"); + } + printf("%s: sent %d bytes, \"%.*s\"\n", + __FUNCTION__, ret, ret, msg[n]); + n = (n + 1) % (sizeof(msg) / sizeof(msg[0])); + /* + * We run in full real-time mode (i.e. primary mode), + * so we have to let the system breathe between two + * iterations. + */ + ts.tv_sec = 0; + ts.tv_nsec = 500000000; /* 500 ms */ + clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL); + } + + return NULL; +} + +int main(int argc, char **argv) +{ + struct sched_param svparam = {.sched_priority = 71 }; + struct sched_param clparam = {.sched_priority = 70 }; + pthread_attr_t svattr, clattr; + sigset_t set; + int sig; + + sigemptyset(&set); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGHUP); + pthread_sigmask(SIG_BLOCK, &set, NULL); + + pthread_attr_init(&svattr); + pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&svattr, SCHED_FIFO); + pthread_attr_setschedparam(&svattr, &svparam); + + errno = pthread_create(&svtid, &svattr, &server, NULL); + if (errno) + fail("pthread_create"); + + pthread_attr_init(&clattr); + pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&clattr, SCHED_FIFO); + pthread_attr_setschedparam(&clattr, &clparam); + + errno = pthread_create(&cltid, &clattr, &client, NULL); + if (errno) + fail("pthread_create"); + + __STD(sigwait(&set, &sig)); + pthread_cancel(svtid); + pthread_cancel(cltid); + pthread_join(svtid, NULL); + pthread_join(cltid, NULL); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-sendrecv.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-sendrecv.c new file mode 100644 index 0000000..31ee10f --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-sendrecv.c @@ -0,0 +1,194 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * + * IDDP-based client/server demo, using the sendto(2)/recvfrom(2) + * system calls to exchange data over a socket. + * + * In this example, two sockets are created. A server thread (reader) + * is bound to a real-time port and receives datagrams sent to this + * port from a client thread (writer). The client socket is bound to a + * different port, only to provide a valid peer name; this is + * optional. 
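+ * + * Compared with the label-based variant (iddp-label.c), both + * endpoints here bind to fixed port numbers (IDDP_SVPORT and + * IDDP_CLPORT), and the server draws datagram memory from a local + * pool set up through the IDDP_POOLSZ socket option instead of + * Xenomai's system heap.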
+ */ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <signal.h> +#include <string.h> +#include <pthread.h> +#include <errno.h> +#include <rtdm/ipc.h> + +pthread_t svtid, cltid; + +#define IDDP_SVPORT 12 +#define IDDP_CLPORT 13 + +static const char *msg[] = { + "Surfing With The Alien", + "Lords of Karma", + "Banana Mango", + "Psycho Monkey", + "Luminous Flesh Giants", + "Moroccan Sunset", + "Satch Boogie", + "Flying In A Blue Dream", + "Ride", + "Summer Song", + "Speed Of Light", + "Crystal Planet", + "Raspberry Jam Delta-V", + "Champagne?", + "Clouds Race Across The Sky", + "Engines Of Creation" +}; + +static void fail(const char *reason) +{ + perror(reason); + exit(EXIT_FAILURE); +} + +static void *server(void *arg) +{ + struct sockaddr_ipc saddr, claddr; + socklen_t addrlen; + char buf[128]; + size_t poolsz; + int ret, s; + + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP); + if (s < 0) + fail("socket"); + + /* + * Set a local 32k pool for the server endpoint. Memory needed + * to convey datagrams will be pulled from this pool, instead + * of Xenomai's system pool. + */ + poolsz = 32768; /* bytes */ + ret = setsockopt(s, SOL_IDDP, IDDP_POOLSZ, + &poolsz, sizeof(poolsz)); + if (ret) + fail("setsockopt"); + + saddr.sipc_family = AF_RTIPC; + saddr.sipc_port = IDDP_SVPORT; + ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr)); + if (ret) + fail("bind"); + + for (;;) { + addrlen = sizeof(saddr); + ret = recvfrom(s, buf, sizeof(buf), 0, + (struct sockaddr *)&claddr, &addrlen); + if (ret < 0) { + close(s); + fail("recvfrom"); + } + printf("%s: received %d bytes, \"%.*s\" from port %d\n", + __FUNCTION__, ret, ret, buf, claddr.sipc_port); + } + + return NULL; +} + +static void *client(void *arg) +{ + struct sockaddr_ipc svsaddr, clsaddr; + int ret, s, n = 0, len; + struct timespec ts; + + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP); + if (s < 0) + fail("socket"); + + clsaddr.sipc_family = AF_RTIPC; + clsaddr.sipc_port = IDDP_CLPORT; + ret = bind(s, (struct sockaddr *)&clsaddr, sizeof(clsaddr)); + if (ret) + fail("bind"); + + svsaddr.sipc_family = AF_RTIPC; + svsaddr.sipc_port = IDDP_SVPORT; + for (;;) { + len = strlen(msg[n]); + ret = sendto(s, msg[n], len, 0, + (struct sockaddr *)&svsaddr, sizeof(svsaddr)); + if (ret < 0) { + close(s); + fail("sendto"); + } + printf("%s: sent %d bytes, \"%.*s\"\n", + __FUNCTION__, ret, ret, msg[n]); + n = (n + 1) % (sizeof(msg) / sizeof(msg[0])); + /* + * We run in full real-time mode (i.e. primary mode), + * so we have to let the system breathe between two + * iterations. 
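+ * A plain relative clock_nanosleep() is enough for that purpose: + * the 500000000 ns pause below amounts to 500 ms, i.e. two + * datagrams per second, which keeps the console output readable.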
+ */ + ts.tv_sec = 0; + ts.tv_nsec = 500000000; /* 500 ms */ + clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL); + } + + return NULL; +} + +int main(int argc, char **argv) +{ + struct sched_param svparam = {.sched_priority = 71 }; + struct sched_param clparam = {.sched_priority = 70 }; + pthread_attr_t svattr, clattr; + sigset_t set; + int sig; + + sigemptyset(&set); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGHUP); + pthread_sigmask(SIG_BLOCK, &set, NULL); + + pthread_attr_init(&svattr); + pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&svattr, SCHED_FIFO); + pthread_attr_setschedparam(&svattr, &svparam); + + errno = pthread_create(&svtid, &svattr, &server, NULL); + if (errno) + fail("pthread_create"); + + pthread_attr_init(&clattr); + pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&clattr, SCHED_FIFO); + pthread_attr_setschedparam(&clattr, &clparam); + + errno = pthread_create(&cltid, &clattr, &client, NULL); + if (errno) + fail("pthread_create"); + + __STD(sigwait(&set, &sig)); + pthread_cancel(svtid); + pthread_cancel(cltid); + pthread_join(svtid, NULL); + pthread_join(cltid, NULL); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-echo.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-echo.c new file mode 100644 index 0000000..ba85582 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-echo.c @@ -0,0 +1,248 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * + * XDDP-based RT/NRT threads communication demo. + * + * Real-time Xenomai threads and regular Linux threads may want to + * exchange data in a way that does not require the former to leave + * the real-time domain (i.e. secondary mode). Message pipes - as + * implemented by the RTDM-based XDDP protocol - are provided for this + * purpose. + * + * On the Linux domain side, pseudo-device files named /dev/rtp<minor> + * give regular POSIX threads access to non real-time communication + * endpoints, via the standard character-based I/O interface. On the + * Xenomai domain side, sockets may be bound to XDDP ports, which act + * as proxies to send and receive data to/from the associated + * pseudo-device files. Ports and pseudo-device minor numbers are + * paired, meaning that e.g. port 7 will proxy the traffic for + * /dev/rtp7. Therefore, port numbers may range from 0 to + * CONFIG_XENO_OPT_PIPE_NRDEV - 1. 
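+ * For instance, this demo binds its real-time socket to port 0, so + * the matching non real-time endpoint is /dev/rtp0.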
+ * + * All data sent through a bound/connected XDDP socket via sendto(2) or + * write(2) will be passed to the peer endpoint in the Linux domain, + * and made available for reading via the standard read(2) system + * call. Conversely, all data sent using write(2) through the non + * real-time endpoint will be conveyed to the real-time socket + * endpoint, and made available to the recvfrom(2) or read(2) system + * calls. + * + * Both threads can use the bi-directional data path to send and + * receive datagrams in a FIFO manner, as illustrated by the simple + * echoing process implemented by this program. + * + * realtime_thread------------------------------>-------+ + * => get socket | + * => bind socket to port 0 v + * => write traffic to NRT domain via sendto() | + * => read traffic from NRT domain via recvfrom() <--|--+ + * | | + * regular_thread---------------------------------------+ | + * => open /dev/rtp0 | ^ + * => read traffic from RT domain via read() | | + * => echo traffic back to RT domain via write() +--+ + */ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <signal.h> +#include <string.h> +#include <malloc.h> +#include <pthread.h> +#include <fcntl.h> +#include <errno.h> +#include <rtdm/ipc.h> + +pthread_t rt, nrt; + +#define XDDP_PORT 0 /* [0..CONFIG_XENO_OPT_PIPE_NRDEV - 1] */ + +static const char *msg[] = { + "Surfing With The Alien", + "Lords of Karma", + "Banana Mango", + "Psycho Monkey", + "Luminous Flesh Giants", + "Moroccan Sunset", + "Satch Boogie", + "Flying In A Blue Dream", + "Ride", + "Summer Song", + "Speed Of Light", + "Crystal Planet", + "Raspberry Jam Delta-V", + "Champagne?", + "Clouds Race Across The Sky", + "Engines Of Creation" +}; + +static void fail(const char *reason) +{ + perror(reason); + exit(EXIT_FAILURE); +} + +static void *realtime_thread(void *arg) +{ + struct sockaddr_ipc saddr; + int ret, s, n = 0, len; + struct timespec ts; + size_t poolsz; + char buf[128]; + + /* + * Get a datagram socket to bind to the RT endpoint. Each + * endpoint is represented by a port number within the XDDP + * protocol namespace. + */ + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP); + if (s < 0) { + perror("socket"); + exit(EXIT_FAILURE); + } + + /* + * Set a local 16k pool for the RT endpoint. Memory needed to + * convey datagrams will be pulled from this pool, instead of + * Xenomai's system pool. + */ + poolsz = 16384; /* bytes */ + ret = setsockopt(s, SOL_XDDP, XDDP_POOLSZ, + &poolsz, sizeof(poolsz)); + if (ret) + fail("setsockopt"); + + /* + * Bind the socket to the port, to setup a proxy to channel + * traffic to/from the Linux domain. + * + * saddr.sipc_port specifies the port number to use. + */ + memset(&saddr, 0, sizeof(saddr)); + saddr.sipc_family = AF_RTIPC; + saddr.sipc_port = XDDP_PORT; + ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr)); + if (ret) + fail("bind"); + + for (;;) { + len = strlen(msg[n]); + /* + * Send a datagram to the NRT endpoint via the proxy. + * We may pass a NULL destination address, since a + * bound socket is assigned a default destination + * address matching the binding address (unless + * connect(2) was issued before bind(2), in which case + * the former would prevail).
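+ * Since XDDP datagrams are expected to be sent whole or not at + * all, any return value other than len from sendto() is treated + * as an error below.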
+ */ + ret = sendto(s, msg[n], len, 0, NULL, 0); + if (ret != len) + fail("sendto"); + + printf("%s: sent %d bytes, \"%.*s\"\n", + __FUNCTION__, ret, ret, msg[n]); + + /* Read back packets echoed by the regular thread */ + ret = recvfrom(s, buf, sizeof(buf), 0, NULL, 0); + if (ret <= 0) + fail("recvfrom"); + + printf(" => \"%.*s\" echoed by peer\n", ret, buf); + + n = (n + 1) % (sizeof(msg) / sizeof(msg[0])); + /* + * We run in full real-time mode (i.e. primary mode), + * so we have to let the system breathe between two + * iterations. + */ + ts.tv_sec = 0; + ts.tv_nsec = 500000000; /* 500 ms */ + clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL); + } + + return NULL; +} + +static void *regular_thread(void *arg) +{ + char buf[128], *devname; + int fd, ret; + + if (asprintf(&devname, "/dev/rtp%d", XDDP_PORT) < 0) + fail("asprintf"); + + fd = open(devname, O_RDWR); + free(devname); + if (fd < 0) + fail("open"); + + for (;;) { + /* Get the next message from realtime_thread. */ + ret = read(fd, buf, sizeof(buf)); + if (ret <= 0) + fail("read"); + + /* Echo the message back to realtime_thread. */ + ret = write(fd, buf, ret); + if (ret <= 0) + fail("write"); + } + + return NULL; +} + +int main(int argc, char **argv) +{ + struct sched_param rtparam = { .sched_priority = 42 }; + pthread_attr_t rtattr, regattr; + sigset_t set; + int sig; + + sigemptyset(&set); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGHUP); + pthread_sigmask(SIG_BLOCK, &set, NULL); + + pthread_attr_init(&rtattr); + pthread_attr_setdetachstate(&rtattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&rtattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&rtattr, SCHED_FIFO); + pthread_attr_setschedparam(&rtattr, &rtparam); + + errno = pthread_create(&rt, &rtattr, &realtime_thread, NULL); + if (errno) + fail("pthread_create"); + + pthread_attr_init(®attr); + pthread_attr_setdetachstate(®attr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(®attr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(®attr, SCHED_OTHER); + + errno = pthread_create(&nrt, ®attr, ®ular_thread, NULL); + if (errno) + fail("pthread_create"); + + __STD(sigwait(&set, &sig)); + pthread_cancel(rt); + pthread_cancel(nrt); + pthread_join(rt, NULL); + pthread_join(nrt, NULL); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-label.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-label.c new file mode 100644 index 0000000..9de31fd --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-label.c @@ -0,0 +1,329 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * + * XDDP-based RT/NRT threads communication demo. 
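+ * This is the label-based variant of the echo demo: endpoints are + * located by ASCII label instead of a fixed port number (compare + * with xddp-echo.c).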
+ * + * Real-time Xenomai threads and regular Linux threads may want to + * exchange data in a way that does not require the former to leave + * the real-time domain (i.e. secondary mode). Message pipes - as + * implemented by the RTDM-based XDDP protocol - are provided for this + * purpose. + * + * On the Linux domain side, pseudo-device files named /dev/rtp<minor> + * give regular POSIX threads access to non real-time communication + * endpoints, via the standard character-based I/O interface. On the + * Xenomai domain side, sockets may be bound to XDDP ports, which act + * as proxies to send and receive data to/from the associated + * pseudo-device files. Ports and pseudo-device minor numbers are + * paired, meaning that e.g. port 7 will proxy the traffic for + * /dev/rtp7. Therefore, port numbers may range from 0 to + * CONFIG_XENO_OPT_PIPE_NRDEV - 1. + * + * All data sent through a bound/connected XDDP socket via sendto(2) or + * write(2) will be passed to the peer endpoint in the Linux domain, + * and made available for reading via the standard read(2) system + * call. Conversely, all data sent using write(2) through the non + * real-time endpoint will be conveyed to the real-time socket + * endpoint, and made available to the recvfrom(2) or read(2) system + * calls. + * + * ASCII labels can be attached to bound ports, in order to connect + * sockets to them in a more descriptive way than using plain numeric + * port values. + * + * The example code below illustrates the following process: + * + * realtime_thread1----------------------------->----------+ + * => get socket | + * => bind socket to port "xddp-demo | + * => read traffic from NRT domain via recvfrom() <--+--+ + * | | + * realtime_thread2----------------------------------------+ | + * => get socket | | + * => connect socket to port "xddp-demo" | | + * => write traffic to NRT domain via sendto() v | + * | ^ + * regular_thread------------------------------------------+ | + * => open /proc/xenomai/registry/rtipc/xddp/xddp-demo | | + * => read traffic from RT domain via read() | | + * => mirror traffic to RT domain via write() +--+ + */ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <signal.h> +#include <string.h> +#include <malloc.h> +#include <pthread.h> +#include <fcntl.h> +#include <errno.h> +#include <rtdm/ipc.h> + +pthread_t rt1, rt2, nrt; + +#define XDDP_PORT_LABEL "xddp-demo" + +static const char *msg[] = { + "Surfing With The Alien", + "Lords of Karma", + "Banana Mango", + "Psycho Monkey", + "Luminous Flesh Giants", + "Moroccan Sunset", + "Satch Boogie", + "Flying In A Blue Dream", + "Ride", + "Summer Song", + "Speed Of Light", + "Crystal Planet", + "Raspberry Jam Delta-V", + "Champagne?", + "Clouds Race Across The Sky", + "Engines Of Creation" +}; + +static void fail(const char *reason) +{ + perror(reason); + exit(EXIT_FAILURE); +} + +static void *realtime_thread1(void *arg) +{ + struct rtipc_port_label plabel; + struct sockaddr_ipc saddr; + char buf[128]; + int ret, s; + + /* + * Get a datagram socket to bind to the RT endpoint. Each + * endpoint is represented by a port number within the XDDP + * protocol namespace. + */ + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP); + if (s < 0) { + perror("socket"); + exit(EXIT_FAILURE); + } + + /* + * Set a port label. This name will be registered when + * binding, in addition to the port number (if given). 
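+ * Note that plabel.label is a fixed-size character array, so the + * label string must fit there, terminating NUL included; the + * strcpy() below performs no bounds checking.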
*/ + strcpy(plabel.label, XDDP_PORT_LABEL); + ret = setsockopt(s, SOL_XDDP, XDDP_LABEL, + &plabel, sizeof(plabel)); + if (ret) + fail("setsockopt"); + /* + * Bind the socket to the port, to setup a proxy to channel + * traffic to/from the Linux domain. Assign that port a label, + * so that peers may use descriptive information to locate + * it. For instance, the pseudo-device matching our RT + * endpoint will appear as + * /proc/xenomai/registry/rtipc/xddp/<XDDP_PORT_LABEL> in the + * Linux domain, once the socket is bound. + * + * saddr.sipc_port specifies the port number to use. If -1 is + * passed, the XDDP driver will auto-select an idle port. + */ + memset(&saddr, 0, sizeof(saddr)); + saddr.sipc_family = AF_RTIPC; + saddr.sipc_port = -1; + ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr)); + if (ret) + fail("bind"); + + for (;;) { + /* Get packets relayed by the regular thread */ + ret = recvfrom(s, buf, sizeof(buf), 0, NULL, 0); + if (ret <= 0) + fail("recvfrom"); + + printf("%s: \"%.*s\" relayed by peer\n", __FUNCTION__, ret, buf); + } + + return NULL; +} + +static void *realtime_thread2(void *arg) +{ + struct rtipc_port_label plabel; + struct sockaddr_ipc saddr; + int ret, s, n = 0, len; + struct timespec ts; + struct timeval tv; + socklen_t addrlen; + + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP); + if (s < 0) { + perror("socket"); + exit(EXIT_FAILURE); + } + + /* + * Set the socket timeout; it will apply when attempting to + * connect to a labeled port, and to recvfrom() calls. The + * following setup tells the XDDP driver to wait for at most + * one second until a socket is bound to a port using the same + * label, or return with a timeout error. + */ + tv.tv_sec = 1; + tv.tv_usec = 0; + ret = setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, + &tv, sizeof(tv)); + if (ret) + fail("setsockopt"); + + /* + * Set a port label. This name will be used to find the peer + * when connecting, instead of the port number. + */ + strcpy(plabel.label, XDDP_PORT_LABEL); + ret = setsockopt(s, SOL_XDDP, XDDP_LABEL, + &plabel, sizeof(plabel)); + if (ret) + fail("setsockopt"); + + memset(&saddr, 0, sizeof(saddr)); + saddr.sipc_family = AF_RTIPC; + saddr.sipc_port = -1; /* Tell XDDP to search by label. */ + ret = connect(s, (struct sockaddr *)&saddr, sizeof(saddr)); + if (ret) + fail("connect"); + + /* + * We succeeded in making the port our default destination + * address by using its label, but we don't know its actual + * port number yet. Use getpeername() to retrieve it. + */ + addrlen = sizeof(saddr); + ret = getpeername(s, (struct sockaddr *)&saddr, &addrlen); + if (ret || addrlen != sizeof(saddr)) + fail("getpeername"); + + printf("%s: NRT peer is reading from /dev/rtp%d\n", + __FUNCTION__, saddr.sipc_port); + + for (;;) { + len = strlen(msg[n]); + /* + * Send a datagram to the NRT endpoint via the proxy. + * We may pass a NULL destination address, since the + * socket was successfully assigned the proper default + * address via connect(2). + */ + ret = sendto(s, msg[n], len, 0, NULL, 0); + if (ret != len) + fail("sendto"); + + printf("%s: sent %d bytes, \"%.*s\"\n", + __FUNCTION__, ret, ret, msg[n]); + + n = (n + 1) % (sizeof(msg) / sizeof(msg[0])); + /* + * We run in full real-time mode (i.e. primary mode), + * so we have to let the system breathe between two + * iterations.
+ */ + ts.tv_sec = 0; + ts.tv_nsec = 500000000; /* 500 ms */ + clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL); + } + + return NULL; +} + +static void *regular_thread(void *arg) +{ + char buf[128], *devname; + int fd, ret; + + if (asprintf(&devname, + "/proc/xenomai/registry/rtipc/xddp/%s", + XDDP_PORT_LABEL) < 0) + fail("asprintf"); + + fd = open(devname, O_RDWR); + free(devname); + if (fd < 0) + fail("open"); + + for (;;) { + /* Get the next message from realtime_thread2. */ + ret = read(fd, buf, sizeof(buf)); + if (ret <= 0) + fail("read"); + + /* Relay the message to realtime_thread1. */ + ret = write(fd, buf, ret); + if (ret <= 0) + fail("write"); + } + + return NULL; +} + +int main(int argc, char **argv) +{ + struct sched_param rtparam = { .sched_priority = 42 }; + pthread_attr_t rtattr, regattr; + sigset_t set; + int sig; + + sigemptyset(&set); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGHUP); + pthread_sigmask(SIG_BLOCK, &set, NULL); + + pthread_attr_init(&rtattr); + pthread_attr_setdetachstate(&rtattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&rtattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&rtattr, SCHED_FIFO); + pthread_attr_setschedparam(&rtattr, &rtparam); + + /* Both real-time threads have the same attribute set. */ + + errno = pthread_create(&rt1, &rtattr, &realtime_thread1, NULL); + if (errno) + fail("pthread_create"); + + errno = pthread_create(&rt2, &rtattr, &realtime_thread2, NULL); + if (errno) + fail("pthread_create"); + + pthread_attr_init(®attr); + pthread_attr_setdetachstate(®attr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(®attr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(®attr, SCHED_OTHER); + + errno = pthread_create(&nrt, ®attr, ®ular_thread, NULL); + if (errno) + fail("pthread_create"); + + __STD(sigwait(&set, &sig)); + pthread_cancel(rt1); + pthread_cancel(rt2); + pthread_cancel(nrt); + pthread_join(rt1, NULL); + pthread_join(rt2, NULL); + pthread_join(nrt, NULL); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-stream.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-stream.c new file mode 100644 index 0000000..e537294 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-stream.c @@ -0,0 +1,254 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * + * XDDP-based RT/NRT threads communication demo. + * + * Real-time Xenomai threads and regular Linux threads may want to + * exchange data in a way that does not require the former to leave + * the real-time domain (i.e. secondary mode). Message pipes - as + * implemented by the RTDM-based XDDP protocol - are provided for this + * purpose. 
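+ * + * This variant focuses on the byte-streaming mode of XDDP, enabled + * further down via the XDDP_BUFSZ socket option, which gathers + * scattered writes into larger messages.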
+ * + * On the Linux domain side, pseudo-device files named /dev/rtp<minor> + * give regular POSIX threads access to non real-time communication + * endpoints, via the standard character-based I/O interface. On the + * Xenomai domain side, sockets may be bound to XDDP ports, which act + * as proxies to send and receive data to/from the associated + * pseudo-device files. Ports and pseudo-device minor numbers are + * paired, meaning that e.g. port 7 will proxy the traffic for + * /dev/rtp7. Therefore, port numbers may range from 0 to + * CONFIG_XENO_OPT_PIPE_NRDEV - 1. + * + * All data sent through a bound/connected XDDP socket via sendto(2) or + * write(2) will be passed to the peer endpoint in the Linux domain, + * and made available for reading via the standard read(2) system + * call. Conversely, all data sent using write(2) through the non + * real-time endpoint will be conveyed to the real-time socket + * endpoint, and made available to the recvfrom(2) or read(2) system + * calls. + * + * In addition to sending datagrams, real-time threads may stream data + * in a byte-oriented mode through the proxy as well. This increases + * the bandwidth and reduces the overhead, when a lot of data has to + * flow down to the Linux domain, if keeping the message boundaries is + * not required. The example code below illustrates such use. + * + * realtime_thread-------------------------------------->----------+ + * => get socket | + * => bind socket to port 0 v + * => write scattered traffic to NRT domain via sendto() | + * => read traffic from NRT domain via recvfrom() <--|--+ + * | | + * regular_thread--------------------------------------------------+ | + * => open /dev/rtp0 | ^ + * => read traffic from RT domain via read() | | + * => echo traffic back to RT domain via write() +--+ + */ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <signal.h> +#include <string.h> +#include <malloc.h> +#include <pthread.h> +#include <fcntl.h> +#include <errno.h> +#include <rtdm/ipc.h> + +pthread_t rt, nrt; + +#define XDDP_PORT 0 /* [0..CONFIG_XENO_OPT_PIPE_NRDEV - 1] */ + +static const char *msg[] = { + "Surfing With The Alien", + "Lords of Karma", + "Banana Mango", + "Psycho Monkey", + "Luminous Flesh Giants", + "Moroccan Sunset", + "Satch Boogie", + "Flying In A Blue Dream", + "Ride", + "Summer Song", + "Speed Of Light", + "Crystal Planet", + "Raspberry Jam Delta-V", + "Champagne?", + "Clouds Race Across The Sky", + "Engines Of Creation" +}; + +static void fail(const char *reason) +{ + perror(reason); + exit(EXIT_FAILURE); +} + +static void *realtime_thread(void *arg) +{ + struct sockaddr_ipc saddr; + int ret, s, n = 0, len, b; + struct timespec ts; + size_t streamsz; + char buf[128]; + + /* + * Get a datagram socket to bind to the RT endpoint. Each + * endpoint is represented by a port number within the XDDP + * protocol namespace. + */ + s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP); + if (s < 0) { + perror("socket"); + exit(EXIT_FAILURE); + } + + /* + * Tell the XDDP driver that we will use the streaming + * capabilities on this socket. To this end, we have to + * specify the size of the streaming buffer, as a count of + * bytes. The real-time output will be buffered up to that + * amount, and sent as a single datagram to the NRT endpoint + * when fully gathered, or when another source port attempts + * to send data to the same endpoint. Passing a null size + * would disable streaming.
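+ * With the 1024-byte buffer configured below, the one-byte sendto() + * calls issued with MSG_MORE in the main loop accumulate in this + * buffer instead of producing one datagram per byte.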
+ */ + streamsz = 1024; /* bytes */ + ret = setsockopt(s, SOL_XDDP, XDDP_BUFSZ, + &streamsz, sizeof(streamsz)); + if (ret) + fail("setsockopt"); + /* + * Bind the socket to the port, to setup a proxy to channel + * traffic to/from the Linux domain. + * + * saddr.sipc_port specifies the port number to use. + */ + memset(&saddr, 0, sizeof(saddr)); + saddr.sipc_family = AF_RTIPC; + saddr.sipc_port = XDDP_PORT; + ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr)); + if (ret) + fail("bind"); + + for (;;) { + len = strlen(msg[n]); + /* + * Send a datagram to the NRT endpoint via the proxy. + * The output is artificially scattered in separate + * one-byte sendings, to illustrate the use of + * MSG_MORE. + */ + for (b = 0; b < len; b++) { + ret = sendto(s, msg[n] + b, 1, MSG_MORE, NULL, 0); + if (ret != 1) + fail("sendto"); + } + + printf("%s: sent (scattered) %d-bytes message, \"%.*s\"\n", + __FUNCTION__, len, len, msg[n]); + + /* Read back packets echoed by the regular thread */ + ret = recvfrom(s, buf, sizeof(buf), 0, NULL, 0); + if (ret <= 0) + fail("recvfrom"); + + printf(" => \"%.*s\" echoed by peer\n", ret, buf); + + n = (n + 1) % (sizeof(msg) / sizeof(msg[0])); + /* + * We run in full real-time mode (i.e. primary mode), + * so we have to let the system breathe between two + * iterations. + */ + ts.tv_sec = 0; + ts.tv_nsec = 500000000; /* 500 ms */ + clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL); + } + + return NULL; +} + +static void *regular_thread(void *arg) +{ + char buf[128], *devname; + int fd, ret; + + if (asprintf(&devname, "/dev/rtp%d", XDDP_PORT) < 0) + fail("asprintf"); + + fd = open(devname, O_RDWR); + free(devname); + if (fd < 0) + fail("open"); + + for (;;) { + /* Get the next message from realtime_thread. */ + ret = read(fd, buf, sizeof(buf)); + if (ret <= 0) + fail("read"); + + /* Echo the message back to realtime_thread. 
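+ Only a non-positive count is treated as an error here; a short + write would go unnoticed, which is acceptable for this simple demo.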
*/ + ret = write(fd, buf, ret); + if (ret <= 0) + fail("write"); + } + + return NULL; +} + +int main(int argc, char **argv) +{ + struct sched_param rtparam = { .sched_priority = 42 }; + pthread_attr_t rtattr, regattr; + sigset_t set; + int sig; + + sigemptyset(&set); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGHUP); + pthread_sigmask(SIG_BLOCK, &set, NULL); + + pthread_attr_init(&rtattr); + pthread_attr_setdetachstate(&rtattr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(&rtattr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&rtattr, SCHED_FIFO); + pthread_attr_setschedparam(&rtattr, &rtparam); + + errno = pthread_create(&rt, &rtattr, &realtime_thread, NULL); + if (errno) + fail("pthread_create"); + + pthread_attr_init(®attr); + pthread_attr_setdetachstate(®attr, PTHREAD_CREATE_JOINABLE); + pthread_attr_setinheritsched(®attr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(®attr, SCHED_OTHER); + + errno = pthread_create(&nrt, ®attr, ®ular_thread, NULL); + if (errno) + fail("pthread_create"); + + __STD(sigwait(&set, &sig)); + pthread_cancel(rt); + pthread_cancel(nrt); + pthread_join(rt, NULL); + pthread_join(nrt, NULL); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/Makefile.am b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/Makefile.am new file mode 100644 index 0000000..248e406 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/Makefile.am @@ -0,0 +1,33 @@ +demodir = @XENO_DEMO_DIR@ + +CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC) + +VERSION_STRING = 0.92 + +demo_PROGRAMS = cyclictest + +cyclictest_CPPFLAGS = \ + $(XENO_USER_CFLAGS) \ + -I$(top_srcdir)/include \ + -DVERSION_STRING=$(VERSION_STRING) \ + -Wno-strict-prototypes \ + -Wno-implicit-function-declaration \ + -Wno-missing-prototypes \ + -Wno-nonnull \ + -Wno-unused-function + +cyclictest_SOURCES = \ + cyclictest.c \ + error.c \ + error.h \ + rt_numa.h \ + rt-sched.h \ + rt-utils.c \ + rt-utils.h + +cyclictest_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS) + +cyclictest_LDADD = \ + @XENO_CORE_LDADD@ \ + @XENO_USER_LDADD@ \ + -lpthread -lrt -lm diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/README b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/README new file mode 100644 index 0000000..8186b04 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/README @@ -0,0 +1,5 @@ + +This is the original cyclictest program from the PREEMPT-RT test +suite as of version 0.92. + +See git://git.kernel.org/pub/scm/linux/kernel/git/clrkwllms/rt-tests.git diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/cyclictest.c b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/cyclictest.c new file mode 100644 index 0000000..b92596c --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/cyclictest.c @@ -0,0 +1,2269 @@ +/* + * High resolution timer test software + * + * (C) 2013 Clark Williams <williams@redhat.com> + * (C) 2013 John Kacur <jkacur@redhat.com> + * (C) 2008-2012 Clark Williams <williams@redhat.com> + * (C) 2005-2007 Thomas Gleixner <tglx@linutronix.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License Version + * 2 as published by the Free Software Foundation. 
+ * + */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <stdarg.h> +#include <unistd.h> +#include <fcntl.h> +#include <getopt.h> +#include <pthread.h> +#include <signal.h> +#include <sched.h> +#include <string.h> +#include <time.h> +#include <errno.h> +#include <limits.h> +#include <linux/unistd.h> + +#include <sys/prctl.h> +#include <sys/stat.h> +#include <sys/sysinfo.h> +#include <sys/types.h> +#include <sys/time.h> +#include <sys/resource.h> +#include <sys/utsname.h> +#include <sys/mman.h> +#include "rt_numa.h" + +#include "rt-utils.h" + +#define DEFAULT_INTERVAL 1000 +#define DEFAULT_DISTANCE 500 + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +/* Ugly, but .... */ +#define gettid() syscall(__NR_gettid) +#define sigev_notify_thread_id _sigev_un._tid + +#define USEC_PER_SEC 1000000 +#define NSEC_PER_SEC 1000000000 + +#define HIST_MAX 1000000 + +#define MODE_CYCLIC 0 +#define MODE_CLOCK_NANOSLEEP 1 +#define MODE_SYS_ITIMER 2 +#define MODE_SYS_NANOSLEEP 3 +#define MODE_SYS_OFFSET 2 + +#define TIMER_RELTIME 0 + +/* Must be power of 2 ! */ +#define VALBUF_SIZE 16384 + +#define KVARS 32 +#define KVARNAMELEN 32 +#define KVALUELEN 32 + +int enable_events; + +static char *policyname(int policy); + +#define write_check(__fd, __buf, __len) \ + do { \ + int __ret = write(__fd, __buf, __len); \ + (void)__ret; \ + } while (0); + +enum { + NOTRACE, + CTXTSWITCH, + IRQSOFF, + PREEMPTOFF, + PREEMPTIRQSOFF, + WAKEUP, + WAKEUPRT, + LATENCY, + FUNCTION, + CUSTOM, +}; + +/* Struct to transfer parameters to the thread */ +struct thread_param { + int prio; + int policy; + int mode; + int timermode; + int signal; + int clock; + unsigned long max_cycles; + struct thread_stat *stats; + int bufmsk; + unsigned long interval; + int cpu; + int node; + int tnum; +}; + +/* Struct for statistics */ +struct thread_stat { + unsigned long cycles; + unsigned long cyclesread; + long min; + long max; + long act; + double avg; + long *values; + long *hist_array; + long *outliers; + pthread_t thread; + int threadstarted; + int tid; + long reduce; + long redmax; + long cycleofmax; + long hist_overflow; + long num_outliers; +}; + +static int shutdown; +static int tracelimit = 0; +static int notrace = 0; +static int ftrace = 0; +static int kernelversion; +static int verbose = 0; +static int oscope_reduction = 1; +static int lockall = 0; +static int tracetype = NOTRACE; +static int histogram = 0; +static int histofall = 0; +static int duration = 0; +static int use_nsecs = 0; +static int refresh_on_max; +static int force_sched_other; +static int priospread = 0; +static int check_clock_resolution; +static int ct_debug; +static int use_fifo = 0; +static pthread_t fifo_threadid; +static int aligned = 0; +static int secaligned = 0; +static int offset = 0; +static int laptop = 0; + +static pthread_cond_t refresh_on_max_cond = PTHREAD_COND_INITIALIZER; +static pthread_mutex_t refresh_on_max_lock = PTHREAD_MUTEX_INITIALIZER; + +static pthread_mutex_t break_thread_id_lock = PTHREAD_MUTEX_INITIALIZER; +static pid_t break_thread_id = 0; +static uint64_t break_thread_value = 0; + +static struct timespec globalt; + +/* Backup of kernel variables that we modify */ +static struct kvars { + char name[KVARNAMELEN]; + char value[KVALUELEN]; +} kv[KVARS]; + +static char *procfileprefix = "/proc/sys/kernel/"; +static char *fileprefix; +static char tracer[MAX_PATH]; +static char fifopath[MAX_PATH]; +static char **traceptr; +static int traceopt_count; +static int traceopt_size; + +static struct thread_param 
**parameters; +static struct thread_stat **statistics; + +static void print_stat(FILE *fp, struct thread_param *par, int index, int verbose, int quiet); + +static int latency_target_fd = -1; +static int32_t latency_target_value = 0; + +/* Latency trick + * if the file /dev/cpu_dma_latency exists, + * open it and write a zero into it. This will tell + * the power management system not to transition to + * a high cstate (in fact, the system acts like idle=poll) + * When the fd to /dev/cpu_dma_latency is closed, the behavior + * goes back to the system default. + * + * Documentation/power/pm_qos_interface.txt + */ +static void set_latency_target(void) +{ + struct stat s; + int err; + + if (laptop) { + warn("not setting cpu_dma_latency to save battery power\n"); + return; + } + + errno = 0; + err = stat("/dev/cpu_dma_latency", &s); + if (err == -1) { + err_msg_n(errno, "WARN: stat /dev/cpu_dma_latency failed"); + return; + } + + errno = 0; + latency_target_fd = open("/dev/cpu_dma_latency", O_RDWR); + if (latency_target_fd == -1) { + err_msg_n(errno, "WARN: open /dev/cpu_dma_latency"); + return; + } + + errno = 0; + err = write(latency_target_fd, &latency_target_value, 4); + if (err < 1) { + err_msg_n(errno, "# error setting cpu_dma_latency to %d!", latency_target_value); + close(latency_target_fd); + return; + } + printf("# /dev/cpu_dma_latency set to %dus\n", latency_target_value); +} + + +enum kernelversion { + KV_NOT_SUPPORTED, + KV_26_LT18, + KV_26_LT24, + KV_26_33, + KV_30 +}; + +enum { + ERROR_GENERAL = -1, + ERROR_NOTFOUND = -2, +}; + +static char functiontracer[MAX_PATH]; +static char traceroptions[MAX_PATH]; + +static int trace_fd = -1; +static int tracemark_fd = -1; + +static int kernvar(int mode, const char *name, char *value, size_t sizeofvalue) +{ + char filename[128]; + int retval = 1; + int path; + size_t len_prefix = strlen(fileprefix), len_name = strlen(name); + + if (len_prefix + len_name + 1 > sizeof(filename)) { + errno = ENOMEM; + return 1; + } + + memcpy(filename, fileprefix, len_prefix); + memcpy(filename + len_prefix, name, len_name + 1); + + path = open(filename, mode); + if (path >= 0) { + if (mode == O_RDONLY) { + int got; + if ((got = read(path, value, sizeofvalue)) > 0) { + retval = 0; + value[got-1] = '\0'; + } + } else if (mode == O_WRONLY) { + if (write(path, value, sizeofvalue) == sizeofvalue) + retval = 0; + } + close(path); + } + return retval; +} + +static void setkernvar(const char *name, char *value) +{ + int i; + char oldvalue[KVALUELEN]; + + if (kernelversion < KV_26_33) { + if (kernvar(O_RDONLY, name, oldvalue, sizeof(oldvalue))) + fprintf(stderr, "could not retrieve %s\n", name); + else { + for (i = 0; i < KVARS; i++) { + if (!strcmp(kv[i].name, name)) + break; + if (kv[i].name[0] == '\0') { + strncpy(kv[i].name, name, + sizeof(kv[i].name)); + strncpy(kv[i].value, oldvalue, + sizeof(kv[i].value)); + break; + } + } + if (i == KVARS) + fprintf(stderr, "could not backup %s (%s)\n", + name, oldvalue); + } + } + if (kernvar(O_WRONLY, name, value, strlen(value))) + fprintf(stderr, "could not set %s to %s\n", name, value); + +} + +static void restorekernvars(void) +{ + int i; + + for (i = 0; i < KVARS; i++) { + if (kv[i].name[0] != '\0') { + if (kernvar(O_WRONLY, kv[i].name, kv[i].value, + strlen(kv[i].value))) + fprintf(stderr, "could not restore %s to %s\n", + kv[i].name, kv[i].value); + } + } +} + +static inline void tsnorm(struct timespec *ts) +{ + while (ts->tv_nsec >= NSEC_PER_SEC) { + ts->tv_nsec -= NSEC_PER_SEC; + ts->tv_sec++; + } +} + +static inline 
int tsgreater(struct timespec *a, struct timespec *b) +{ + return ((a->tv_sec > b->tv_sec) || + (a->tv_sec == b->tv_sec && a->tv_nsec > b->tv_nsec)); +} + +static inline int64_t calcdiff_ns(struct timespec t1, struct timespec t2) +{ + struct timespec r; + + r.tv_sec = t1.tv_sec - t2.tv_sec; + r.tv_nsec = t1.tv_nsec - t2.tv_nsec; + if (r.tv_nsec < 0) { + r.tv_sec--; + r.tv_nsec += NSEC_PER_SEC; + } + + return r.tv_sec * NSEC_PER_SEC + r.tv_nsec; +} + +static inline int64_t calcdiff(struct timespec t1, struct timespec t2) +{ + return calcdiff_ns(t1, t2) / 1000; +} + +void traceopt(char *option) +{ + char *ptr; + if (traceopt_count + 1 > traceopt_size) { + traceopt_size += 16; + printf("expanding traceopt buffer to %d entries\n", traceopt_size); + traceptr = realloc(traceptr, sizeof(char*) * traceopt_size); + if (traceptr == NULL) + fatal ("Error allocating space for %d trace options\n", + traceopt_count+1); + } + ptr = malloc(strlen(option)+1); + if (ptr == NULL) + fatal("error allocating space for trace option %s\n", option); + printf("adding traceopt %s\n", option); + strcpy(ptr, option); + traceptr[traceopt_count++] = ptr; +} + +static int trace_file_exists(char *name) +{ + struct stat sbuf; + char *tracing_prefix = get_debugfileprefix(); + char path[MAX_PATH]; + strcat(strcpy(path, tracing_prefix), name); + return stat(path, &sbuf) ? 0 : 1; +} + +#define TRACEBUFSIZ 1024 +static __thread char tracebuf[TRACEBUFSIZ]; + +static void tracemark(char *fmt, ...) __attribute__((format(printf, 1, 2))); +static void tracemark(char *fmt, ...) +{ + va_list ap; + int len; + + /* bail out if we're not tracing */ + /* or if the kernel doesn't support trace_mark */ + if (tracemark_fd < 0) + return; + + va_start(ap, fmt); + len = vsnprintf(tracebuf, TRACEBUFSIZ, fmt, ap); + va_end(ap); + write_check(tracemark_fd, tracebuf, len); +} + + + +void tracing(int on) +{ + if (on) { + switch (kernelversion) { + case KV_26_LT18: gettimeofday(0,(struct timezone *)1); break; + case KV_26_LT24: prctl(0, 1); break; + case KV_26_33: + case KV_30: + write_check(trace_fd, "1", 1); + break; + default: break; + } + } else { + switch (kernelversion) { + case KV_26_LT18: gettimeofday(0,0); break; + case KV_26_LT24: prctl(0, 0); break; + case KV_26_33: + case KV_30: + write_check(trace_fd, "0", 1); + break; + default: break; + } + } +} + +static int settracer(char *tracer) +{ + if (valid_tracer(tracer)) { + setkernvar("current_tracer", tracer); + return 0; + } + return -1; +} + +static void setup_tracer(void) +{ + if (!tracelimit || notrace) + return; + + if (mount_debugfs(NULL)) + fatal("could not mount debugfs"); + + if (kernelversion >= KV_26_33) { + char testname[MAX_PATH]; + + fileprefix = get_debugfileprefix(); + if (!trace_file_exists("tracing_enabled") && + !trace_file_exists("tracing_on")) + warn("tracing_enabled or tracing_on not found\n" + "debug fs not mounted, " + "TRACERs not configured?\n", testname); + } else + fileprefix = procfileprefix; + + if (kernelversion >= KV_26_33) { + int ret; + + if (trace_file_exists("tracing_enabled") && + !trace_file_exists("tracing_on")) + setkernvar("tracing_enabled", "1"); + + /* ftrace_enabled is a sysctl variable */ + /* turn it on if you're doing anything but nop or event tracing */ + + fileprefix = procfileprefix; + if (tracetype) + setkernvar("ftrace_enabled", "1"); + else + setkernvar("ftrace_enabled", "0"); + fileprefix = get_debugfileprefix(); + + /* + * Set default tracer to nop. + * this also has the nice side effect of clearing out + * old traces. 
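+ * Under the hood, settracer() simply writes the tracer name into + * the current_tracer control file, roughly equivalent to: + * + * echo nop > /sys/kernel/debug/tracing/current_tracer + * + * with the actual path depending on where debugfs is mounted.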
+ */ + ret = settracer("nop"); + + switch (tracetype) { + case NOTRACE: + /* no tracer specified, use events */ + enable_events = 1; + break; + case FUNCTION: + ret = settracer("function"); + break; + case IRQSOFF: + ret = settracer("irqsoff"); + break; + case PREEMPTOFF: + ret = settracer("preemptoff"); + break; + case PREEMPTIRQSOFF: + ret = settracer("preemptirqsoff"); + break; + case CTXTSWITCH: + if (valid_tracer("sched_switch")) + ret = settracer("sched_switch"); + else { + if ((ret = event_enable("sched/sched_wakeup"))) + break; + ret = event_enable("sched/sched_switch"); + } + break; + case WAKEUP: + ret = settracer("wakeup"); + break; + case WAKEUPRT: + ret = settracer("wakeup_rt"); + break; + default: + if (strlen(tracer)) { + ret = settracer(tracer); + if (strcmp(tracer, "events") == 0 && ftrace) + ret = settracer(functiontracer); + } + else { + printf("cyclictest: unknown tracer!\n"); + ret = 0; + } + break; + } + + if (enable_events) + /* turn on all events */ + event_enable_all(); + + if (ret) + fprintf(stderr, "Requested tracer '%s' not available\n", tracer); + + setkernvar(traceroptions, "print-parent"); + setkernvar(traceroptions, "latency-format"); + if (verbose) { + setkernvar(traceroptions, "sym-offset"); + setkernvar(traceroptions, "sym-addr"); + setkernvar(traceroptions, "verbose"); + } else { + setkernvar(traceroptions, "nosym-offset"); + setkernvar(traceroptions, "nosym-addr"); + setkernvar(traceroptions, "noverbose"); + } + if (traceopt_count) { + int i; + for (i = 0; i < traceopt_count; i++) + setkernvar(traceroptions, traceptr[i]); + } + setkernvar("tracing_max_latency", "0"); + if (trace_file_exists("latency_hist")) + setkernvar("latency_hist/wakeup/reset", "1"); + + /* open the tracing on file descriptor */ + if (trace_fd == -1) { + char path[MAX_PATH]; + strcpy(path, fileprefix); + if (trace_file_exists("tracing_on")) + strcat(path, "tracing_on"); + else + strcat(path, "tracing_enabled"); + if ((trace_fd = open(path, O_WRONLY)) == -1) + fatal("unable to open %s for tracing", path); + } + + /* open the tracemark file descriptor */ + if (tracemark_fd == -1) { + char path[MAX_PATH]; + strcat(strcpy(path, fileprefix), "trace_marker"); + if ((tracemark_fd = open(path, O_WRONLY)) == -1) + warn("unable to open trace_marker file: %s\n", path); + } + + } else { + setkernvar("trace_all_cpus", "1"); + setkernvar("trace_freerunning", "1"); + setkernvar("trace_print_on_crash", "0"); + setkernvar("trace_user_triggered", "1"); + setkernvar("trace_user_trigger_irq", "-1"); + setkernvar("trace_verbose", "0"); + setkernvar("preempt_thresh", "0"); + setkernvar("wakeup_timing", "0"); + setkernvar("preempt_max_latency", "0"); + if (ftrace) + setkernvar("mcount_enabled", "1"); + setkernvar("trace_enabled", "1"); + setkernvar("latency_hist/wakeup_latency/reset", "1"); + } + + tracing(1); +} + +/* + * parse an input value as a base10 value followed by an optional + * suffix. 
The input value is presumed to be in seconds, unless + * followed by a modifier suffix: m=minutes, h=hours, d=days + * + * The return value is in seconds + */ +int parse_time_string(char *val) +{ + char *end; + int t = strtol(val, &end, 10); + if (end) { + switch (*end) { + case 'm': + case 'M': + t *= 60; + break; + + case 'h': + case 'H': + t *= 60*60; + break; + + case 'd': + case 'D': + t *= 24*60*60; + break; + + } + } + return t; +} + +/* + * Raise the soft priority limit up to prio, if that is less than or equal + * to the hard limit. + * Return 0 on success, -1 if prio exceeds the hard limit, or the error + * number if a syscall fails. +*/ +static int raise_soft_prio(int policy, const struct sched_param *param) +{ + int err; + int policy_max; /* max for scheduling policy such as SCHED_FIFO */ + int soft_max; + int hard_max; + int prio; + struct rlimit rlim; + + prio = param->sched_priority; + + policy_max = sched_get_priority_max(policy); + if (policy_max == -1) { + err = errno; + err_msg("WARN: no such policy\n"); + return err; + } + + err = getrlimit(RLIMIT_RTPRIO, &rlim); + if (err) { + err = errno; + err_msg_n(err, "WARN: getrlimit failed"); + return err; + } + + soft_max = (rlim.rlim_cur == RLIM_INFINITY) ? policy_max : rlim.rlim_cur; + hard_max = (rlim.rlim_max == RLIM_INFINITY) ? policy_max : rlim.rlim_max; + + if (prio > soft_max && prio <= hard_max) { + rlim.rlim_cur = prio; + err = setrlimit(RLIMIT_RTPRIO, &rlim); + if (err) { + err = errno; + err_msg_n(err, "WARN: setrlimit failed"); + /* return err; */ + } + } else { + err = -1; + } + + return err; +} + +/* + * Check the error status of sched_setscheduler + * If an error can be corrected by raising the soft limit priority to + * a priority less than or equal to the hard limit, then do so. + */ +static int setscheduler(pid_t pid, int policy, const struct sched_param *param) +{ + int err = 0; + +try_again: + err = sched_setscheduler(pid, policy, param); + if (err) { + err = errno; + if (err == EPERM) { + int err1; + err1 = raise_soft_prio(policy, param); + if (!err1) goto try_again; + } + } + + return err; +} + +/* Work around lack of barriers in oldish uClibc-based toolchains.
*/ + +static struct thread_barrier { + pthread_mutex_t lock; + pthread_cond_t wait; + unsigned int count; +} align_barr, globalt_barr; + +static inline +void barrier_init(struct thread_barrier *__restrict barrier, + unsigned int count) +{ + pthread_mutex_init(&barrier->lock, NULL); + pthread_cond_init(&barrier->wait, NULL); + barrier->count = count; +} + +static inline void barrier_destroy(struct thread_barrier *barrier) +{ + pthread_mutex_destroy(&barrier->lock); + pthread_cond_destroy(&barrier->wait); +} + +static inline void barrier_wait(struct thread_barrier *barrier) +{ + pthread_mutex_lock(&barrier->lock); + + if (barrier->count > 0) { + barrier->count--; + pthread_cond_broadcast(&barrier->wait); + while (barrier->count > 0) + pthread_cond_wait(&barrier->wait, &barrier->lock); + } + + pthread_mutex_unlock(&barrier->lock); +} + +/* + * timer thread + * + * Modes: + * - clock_nanosleep based + * - cyclic timer based + * + * Clock: + * - CLOCK_MONOTONIC + * - CLOCK_REALTIME + * + */ +void *timerthread(void *param) +{ + struct thread_param *par = param; + struct sched_param schedp; + struct sigevent sigev; + sigset_t sigset; + timer_t timer; + struct timespec now, next, interval, stop; + struct itimerval itimer; + struct itimerspec tspec; + struct thread_stat *stat = par->stats; + int stopped = 0; + cpu_set_t mask; + pthread_t thread; + + /* if we're running in numa mode, set our memory node */ + if (par->node != -1) + rt_numa_set_numa_run_on_node(par->node, par->cpu); + + if (par->cpu != -1) { + CPU_ZERO(&mask); + CPU_SET(par->cpu, &mask); + thread = pthread_self(); + if(pthread_setaffinity_np(thread, sizeof(mask), &mask) == -1) + warn("Could not set CPU affinity to CPU #%d\n", par->cpu); + } + + interval.tv_sec = par->interval / USEC_PER_SEC; + interval.tv_nsec = (par->interval % USEC_PER_SEC) * 1000; + + stat->tid = gettid(); + + sigemptyset(&sigset); + sigaddset(&sigset, par->signal); + sigprocmask(SIG_BLOCK, &sigset, NULL); + + if (par->mode == MODE_CYCLIC) { + sigev.sigev_notify = SIGEV_THREAD_ID | SIGEV_SIGNAL; + sigev.sigev_signo = par->signal; + sigev.sigev_notify_thread_id = stat->tid; + timer_create(par->clock, &sigev, &timer); + tspec.it_interval = interval; + } + + memset(&schedp, 0, sizeof(schedp)); + schedp.sched_priority = par->prio; + if (pthread_setschedparam(pthread_self(), par->policy, &schedp)) + fatal("timerthread%d: failed to set priority to %d\n", par->cpu, par->prio); + + /* Get current time */ + if (aligned || secaligned) { + barrier_wait(&globalt_barr); + if (par->tnum == 0) { + clock_gettime(par->clock, &globalt); + if (secaligned) { + /* Ensure that the thread start timestamp is not + in the past */ + if (globalt.tv_nsec > 900000000) + globalt.tv_sec += 2; + else + globalt.tv_sec++; + globalt.tv_nsec = 0; + } + } + barrier_wait(&align_barr); + now = globalt; + if(offset) { + if (aligned) + now.tv_nsec += offset * par->tnum; + else + now.tv_nsec += offset; + tsnorm(&now); + } + } + else + clock_gettime(par->clock, &now); + + next = now; + next.tv_sec += interval.tv_sec; + next.tv_nsec += interval.tv_nsec; + tsnorm(&next); + + memset(&stop, 0, sizeof(stop)); /* grrr */ + + if (duration) { + stop = now; + stop.tv_sec += duration; + } + if (par->mode == MODE_CYCLIC) { + if (par->timermode == TIMER_ABSTIME) + tspec.it_value = next; + else { + tspec.it_value = interval; + } + timer_settime(timer, par->timermode, &tspec, NULL); + } + + if (par->mode == MODE_SYS_ITIMER) { + itimer.it_interval.tv_sec = interval.tv_sec; + itimer.it_interval.tv_usec = interval.tv_nsec / 
1000; + itimer.it_value = itimer.it_interval; + setitimer(ITIMER_REAL, &itimer, NULL); + } + + stat->threadstarted++; + +#ifdef CONFIG_XENO_COBALT + if (pthread_setmode_np(0, PTHREAD_WARNSW, NULL)) + fatal("pthread_setmode_np()"); +#endif + while (!shutdown) { + + uint64_t diff; + int sigs, ret; + + /* Wait for next period */ + switch (par->mode) { + case MODE_CYCLIC: + case MODE_SYS_ITIMER: + if (sigwait(&sigset, &sigs) < 0) + goto out; + break; + + case MODE_CLOCK_NANOSLEEP: + if (par->timermode == TIMER_ABSTIME) { + if ((ret = clock_nanosleep(par->clock, TIMER_ABSTIME, &next, NULL))) { + if (ret != EINTR) + warn("clock_nanosleep failed. error: %d\n", ret); + goto out; + } + } else { + if ((ret = clock_gettime(par->clock, &now))) { + if (ret != EINTR) + warn("clock_gettime() failed: %s", strerror(errno)); + goto out; + } + if ((ret = clock_nanosleep(par->clock, TIMER_RELTIME, &interval, NULL))) { + if (ret != EINTR) + warn("clock_nanosleep() failed. error: %d\n", ret); + goto out; + } + next.tv_sec = now.tv_sec + interval.tv_sec; + next.tv_nsec = now.tv_nsec + interval.tv_nsec; + tsnorm(&next); + } + break; + + case MODE_SYS_NANOSLEEP: + if ((ret = clock_gettime(par->clock, &now))) { + if (ret != EINTR) + warn("clock_gettime() failed: errno %d\n", errno); + goto out; + } + if (nanosleep(&interval, NULL)) { + if (errno != EINTR) + warn("nanosleep failed. errno: %d\n", errno); + goto out; + } + next.tv_sec = now.tv_sec + interval.tv_sec; + next.tv_nsec = now.tv_nsec + interval.tv_nsec; + tsnorm(&next); + break; + } + + if ((ret = clock_gettime(par->clock, &now))) { + if (ret != EINTR) + warn("clock_gettime() failed. errno: %d\n", errno); + goto out; + } + + if (use_nsecs) + diff = calcdiff_ns(now, next); + else + diff = calcdiff(now, next); + if (diff < stat->min) + stat->min = diff; + if (diff > stat->max) { + stat->max = diff; + if (refresh_on_max) + pthread_cond_signal(&refresh_on_max_cond); + } + stat->avg += (double) diff; + + if (duration && (calcdiff(now, stop) >= 0)) + shutdown++; + + if (!stopped && tracelimit && (diff > tracelimit)) { + stopped++; + tracemark("hit latency threshold (%llu > %d)", + (unsigned long long) diff, tracelimit); + tracing(0); + shutdown++; + pthread_mutex_lock(&break_thread_id_lock); + if (break_thread_id == 0) + break_thread_id = stat->tid; + break_thread_value = diff; + pthread_mutex_unlock(&break_thread_id_lock); + } + stat->act = diff; + + if (par->bufmsk) + stat->values[stat->cycles & par->bufmsk] = diff; + + /* Update the histogram */ + if (histogram) { + if (diff >= histogram) { + stat->hist_overflow++; + if (stat->num_outliers < histogram) + stat->outliers[stat->num_outliers++] = stat->cycles; + } + else + stat->hist_array[diff]++; + } + + stat->cycles++; + + next.tv_sec += interval.tv_sec; + next.tv_nsec += interval.tv_nsec; + if (par->mode == MODE_CYCLIC) { + int overrun_count = timer_getoverrun(timer); + next.tv_sec += overrun_count * interval.tv_sec; + next.tv_nsec += overrun_count * interval.tv_nsec; + } + tsnorm(&next); + + while (tsgreater(&now, &next)) { + next.tv_sec += interval.tv_sec; + next.tv_nsec += interval.tv_nsec; + tsnorm(&next); + } + + if (par->max_cycles && par->max_cycles == stat->cycles) + break; + } + +out: +#ifdef CONFIG_XENO_COBALT + if (pthread_setmode_np(PTHREAD_WARNSW, 0, NULL)) + fatal("pthread_setmode_np()"); +#endif + if (par->mode == MODE_CYCLIC) + timer_delete(timer); + + if (par->mode == MODE_SYS_ITIMER) { + itimer.it_value.tv_sec = 0; + itimer.it_value.tv_usec = 0; + itimer.it_interval.tv_sec = 0; +
itimer.it_interval.tv_usec = 0; + setitimer(ITIMER_REAL, &itimer, NULL); + } + + /* switch to normal */ + schedp.sched_priority = 0; + sched_setscheduler(0, SCHED_OTHER, &schedp); + + stat->threadstarted = -1; + + return NULL; +} + + +/* Print usage information */ +static void display_help(int error) +{ + char tracers[MAX_PATH]; + char *prefix; + + prefix = get_debugfileprefix(); + if (prefix[0] == '\0') + strcpy(tracers, "unavailable (debugfs not mounted)"); + else { + fileprefix = prefix; + if (kernvar(O_RDONLY, "available_tracers", tracers, sizeof(tracers))) + strcpy(tracers, "none"); + } + + printf("cyclictest V %1.2f\n", VERSION_STRING); + printf("Usage:\n" + "cyclictest <options>\n\n" +#if LIBNUMA_API_VERSION >= 2 + "-a [CPUSET] --affinity Run thread #N on processor #N, if possible, or if CPUSET\n" + " given, pin threads to that set of processors in round-\n" + " robin order. E.g. -a 2 pins all threads to CPU 2,\n" + " but -a 3-5,0 -t 5 will run the first and fifth\n" + " threads on CPU (0), thread #2 on CPU 3, thread #3\n" + " on CPU 4, and thread #5 on CPU 5.\n" +#else + "-a [NUM] --affinity run thread #N on processor #N, if possible\n" + " with NUM pin all threads to the processor NUM\n" +#endif + "-A USEC --aligned=USEC align thread wakeups to a specific offset\n" + "-b USEC --breaktrace=USEC send break trace command when latency > USEC\n" + "-B --preemptirqs both preempt and irqsoff tracing (used with -b)\n" + "-c CLOCK --clock=CLOCK select clock\n" + " 0 = CLOCK_MONOTONIC (default)\n" + " 1 = CLOCK_REALTIME\n" + "-C --context context switch tracing (used with -b)\n" + "-d DIST --distance=DIST distance of thread intervals in us default=500\n" + "-D --duration=t specify a length for the test run\n" + " default is in seconds, but 'm', 'h', or 'd' may be added\n" + " to modify value to minutes, hours or days\n" + " --latency=PM_QOS write PM_QOS to /dev/cpu_dma_latency\n" + "-E --event event tracing (used with -b)\n" + "-f --ftrace function trace (when -b is active)\n" + "-F --fifo=<path> create a named pipe at path and write stats to it\n" + "-h --histogram=US dump a latency histogram to stdout after the run\n" + " (with the same priority across all threads)\n" + " US is the max time to be tracked in microseconds\n" + "-H --histofall=US same as -h except with an additional summary column\n" + "-i INTV --interval=INTV base interval of thread in us default=1000\n" + "-I --irqsoff Irqsoff tracing (used with -b)\n" + "-l LOOPS --loops=LOOPS number of loops: default=0(endless)\n" + " --laptop Save battery when running cyclictest\n" + " This will give you poorer realtime results\n" + " but will not drain your battery so quickly\n" + "-m --mlockall lock current and future memory allocations\n" + "-M --refresh_on_max delay updating the screen until a new max latency is hit\n" + "-n --nanosleep use clock_nanosleep\n" + " --notrace suppress tracing\n" + "-N --nsecs print results in ns instead of us (default us)\n" + "-o RED --oscope=RED oscilloscope mode, reduce verbose output by RED\n" + "-O TOPT --traceopt=TOPT trace option\n" + "-p PRIO --prio=PRIO priority of highest prio thread (defaults to 99)\n" + "-P --preemptoff Preempt off tracing (used with -b)\n" + "-q --quiet print only a summary on exit\n" + " --priospread spread priority levels starting at specified value\n" + "-r --relative use relative timer instead of absolute\n" + "-R --resolution check clock resolution, calling clock_gettime() many\n" + " times.
list of clock_gettime() values will be\n" + " reported with -X\n" + " --secaligned [USEC] align thread wakeups to the next full second,\n" + " and apply the optional offset\n" + "-s --system use sys_nanosleep and sys_setitimer\n" + "-S --smp Standard SMP testing: options -a -t -n and\n" + " same priority of all threads\n" + "-t --threads one thread per available processor\n" + "-t [NUM] --threads=NUM number of threads:\n" + " without NUM, threads = max_cpus\n" + " without -t default = 1\n" + "-T TRACE --tracer=TRACER set tracing function\n" + " configured tracers: %s\n" + "-u --unbuffered force unbuffered output for live processing\n" +#ifdef NUMA + "-U --numa Standard NUMA testing (similar to SMP option)\n" + " thread data structures allocated from local node\n" +#endif + "-v --verbose output values on stdout for statistics\n" + " format: n:c:v n=tasknum c=count v=value in us\n" + "-w --wakeup task wakeup tracing (used with -b)\n" + "-W --wakeuprt rt task wakeup tracing (used with -b)\n" + " --dbg_cyclictest print info useful for debugging cyclictest\n" + " --policy=POLI policy of realtime thread, POLI may be fifo(default) or rr\n" + " format: --policy=fifo(default) or --policy=rr\n", + tracers + ); + if (error) + exit(EXIT_FAILURE); + exit(EXIT_SUCCESS); +} + +void application_usage(void) +{ + display_help(0); +} + +static int use_nanosleep; +static int timermode = TIMER_ABSTIME; +static int use_system; +static int priority = 99; +static int policy = SCHED_FIFO; /* default policy if not specified */ +static int num_threads = 1; +static int max_cycles; +static int clocksel = 0; +static int quiet; +static int interval = DEFAULT_INTERVAL; +static int distance = -1; +static struct bitmask *affinity_mask = NULL; +static int smp = 0; + +enum { + AFFINITY_UNSPECIFIED, + AFFINITY_SPECIFIED, + AFFINITY_USEALL +}; +static int setaffinity = AFFINITY_UNSPECIFIED; + +static int clocksources[] = { + CLOCK_MONOTONIC, + CLOCK_REALTIME, +}; + +static unsigned int is_cpumask_zero(const struct bitmask *mask) +{ + return (rt_numa_bitmask_count(mask) == 0); +} + +static int cpu_for_thread(int thread_num, int max_cpus) +{ + unsigned int m, cpu, i, num_cpus; + num_cpus = rt_numa_bitmask_count(affinity_mask); + + m = thread_num % num_cpus; + + /* there are num_cpus bits set, we want position of m'th one */ + for (i = 0, cpu = 0; i < max_cpus; i++) { + if (rt_numa_bitmask_isbitset(affinity_mask, i)) { + if (cpu == m) + return i; + cpu++; + } + } + fprintf(stderr, "Bug in cpu mask handling code.\n"); + return 0; +} + + +static void parse_cpumask(const char *option, const int max_cpus) +{ + affinity_mask = rt_numa_parse_cpustring(option, max_cpus); + if (affinity_mask) { + if (is_cpumask_zero(affinity_mask)) { + rt_bitmask_free(affinity_mask); + affinity_mask = NULL; + } + } + if (!affinity_mask) + display_help(1); + + if (verbose) { + printf("%s: Using %u cpus.\n", __func__, + rt_numa_bitmask_count(affinity_mask)); + } +} + + +static void handlepolicy(char *polname) +{ + if (strncasecmp(polname, "other", 5) == 0) + policy = SCHED_OTHER; + else if (strncasecmp(polname, "batch", 5) == 0) + policy = SCHED_BATCH; + else if (strncasecmp(polname, "idle", 4) == 0) + policy = SCHED_IDLE; + else if (strncasecmp(polname, "fifo", 4) == 0) + policy = SCHED_FIFO; + else if (strncasecmp(polname, "rr", 2) == 0) + policy = SCHED_RR; + else /* default policy if we don't recognize the request */ + policy = SCHED_OTHER; +} + +static char *policyname(int policy) +{ + char *policystr = ""; + + switch(policy) { + case SCHED_OTHER: + 
policystr = "other"; + break; + case SCHED_FIFO: + policystr = "fifo"; + break; + case SCHED_RR: + policystr = "rr"; + break; + case SCHED_BATCH: + policystr = "batch"; + break; + case SCHED_IDLE: + policystr = "idle"; + break; + } + return policystr; +} + + +enum option_values { + OPT_AFFINITY=1, OPT_NOTRACE, OPT_BREAKTRACE, OPT_PREEMPTIRQ, OPT_CLOCK, + OPT_CONTEXT, OPT_DISTANCE, OPT_DURATION, OPT_LATENCY, OPT_EVENT, + OPT_FTRACE, OPT_FIFO, OPT_HISTOGRAM, OPT_HISTOFALL, OPT_INTERVAL, + OPT_IRQSOFF, OPT_LOOPS, OPT_MLOCKALL, OPT_REFRESH, OPT_NANOSLEEP, + OPT_NSECS, OPT_OSCOPE, OPT_TRACEOPT, OPT_PRIORITY, OPT_PREEMPTOFF, + OPT_QUIET, OPT_PRIOSPREAD, OPT_RELATIVE, OPT_RESOLUTION, OPT_SYSTEM, + OPT_SMP, OPT_THREADS, OPT_TRACER, OPT_UNBUFFERED, OPT_NUMA, OPT_VERBOSE, + OPT_WAKEUP, OPT_WAKEUPRT, OPT_DBGCYCLIC, OPT_POLICY, OPT_HELP, OPT_NUMOPTS, + OPT_ALIGNED, OPT_LAPTOP, OPT_SECALIGNED, +}; + +/* Process commandline options */ +static void process_options (int argc, char *argv[], int max_cpus) +{ + int error = 0; + int option_affinity = 0; + + for (;;) { + int option_index = 0; + /* + * Options for getopt + * Ordered alphabetically by single letter name + */ + static struct option long_options[] = { + {"affinity", optional_argument, NULL, OPT_AFFINITY}, + {"notrace", no_argument, NULL, OPT_NOTRACE }, + {"aligned", optional_argument, NULL, OPT_ALIGNED }, + {"breaktrace", required_argument, NULL, OPT_BREAKTRACE }, + {"preemptirqs", no_argument, NULL, OPT_PREEMPTIRQ }, + {"clock", required_argument, NULL, OPT_CLOCK }, + {"context", no_argument, NULL, OPT_CONTEXT }, + {"distance", required_argument, NULL, OPT_DISTANCE }, + {"duration", required_argument, NULL, OPT_DURATION }, + {"latency", required_argument, NULL, OPT_LATENCY }, + {"event", no_argument, NULL, OPT_EVENT }, + {"ftrace", no_argument, NULL, OPT_FTRACE }, + {"fifo", required_argument, NULL, OPT_FIFO }, + {"histogram", required_argument, NULL, OPT_HISTOGRAM }, + {"histofall", required_argument, NULL, OPT_HISTOFALL }, + {"interval", required_argument, NULL, OPT_INTERVAL }, + {"irqsoff", no_argument, NULL, OPT_IRQSOFF }, + {"laptop", no_argument, NULL, OPT_LAPTOP }, + {"loops", required_argument, NULL, OPT_LOOPS }, + {"mlockall", no_argument, NULL, OPT_MLOCKALL }, + {"refresh_on_max", no_argument, NULL, OPT_REFRESH }, + {"nanosleep", no_argument, NULL, OPT_NANOSLEEP }, + {"nsecs", no_argument, NULL, OPT_NSECS }, + {"oscope", required_argument, NULL, OPT_OSCOPE }, + {"traceopt", required_argument, NULL, OPT_TRACEOPT }, + {"priority", required_argument, NULL, OPT_PRIORITY }, + {"preemptoff", no_argument, NULL, OPT_PREEMPTOFF }, + {"quiet", no_argument, NULL, OPT_QUIET }, + {"priospread", no_argument, NULL, OPT_PRIOSPREAD }, + {"relative", no_argument, NULL, OPT_RELATIVE }, + {"resolution", no_argument, NULL, OPT_RESOLUTION }, + {"secaligned", optional_argument, NULL, OPT_SECALIGNED }, + {"system", no_argument, NULL, OPT_SYSTEM }, + {"smp", no_argument, NULL, OPT_SMP }, + {"threads", optional_argument, NULL, OPT_THREADS }, + {"tracer", required_argument, NULL, OPT_TRACER }, + {"unbuffered", no_argument, NULL, OPT_UNBUFFERED }, + {"numa", no_argument, NULL, OPT_NUMA }, + {"verbose", no_argument, NULL, OPT_VERBOSE }, + {"wakeup", no_argument, NULL, OPT_WAKEUP }, + {"wakeuprt", no_argument, NULL, OPT_WAKEUPRT }, + {"dbg_cyclictest", no_argument, NULL, OPT_DBGCYCLIC }, + {"policy", required_argument, NULL, OPT_POLICY }, + {"help", no_argument, NULL, OPT_HELP }, + {NULL, 0, NULL, 0} + }; + int c = getopt_long(argc, argv, 
"a::A::b:Bc:Cd:D:Efh:H:i:Il:MnNo:O:p:PmqrRsSt::uUvD:wWT:", + long_options, &option_index); + if (c == -1) + break; + switch (c) { + case 'a': + case OPT_AFFINITY: + option_affinity = 1; + if (smp || numa) + break; + if (optarg != NULL) { + parse_cpumask(optarg, max_cpus); + setaffinity = AFFINITY_SPECIFIED; + } else if (optind<argc && atoi(argv[optind])) { + parse_cpumask(argv[optind], max_cpus); + setaffinity = AFFINITY_SPECIFIED; + } else { + setaffinity = AFFINITY_USEALL; + } + break; + case 'A': + case OPT_ALIGNED: + aligned=1; + if (optarg != NULL) + offset = atoi(optarg) * 1000; + else if (optind<argc && atoi(argv[optind])) + offset = atoi(argv[optind]) * 1000; + else + offset = 0; + break; + case 'b': + case OPT_BREAKTRACE: + tracelimit = atoi(optarg); break; + case 'B': + case OPT_PREEMPTIRQ: + tracetype = PREEMPTIRQSOFF; break; + case 'c': + case OPT_CLOCK: + clocksel = atoi(optarg); break; + case 'C': + case OPT_CONTEXT: + tracetype = CTXTSWITCH; break; + case 'd': + case OPT_DISTANCE: + distance = atoi(optarg); break; + case 'D': + case OPT_DURATION: + duration = parse_time_string(optarg); break; + case 'E': + case OPT_EVENT: + enable_events = 1; break; + case 'f': + case OPT_FTRACE: + tracetype = FUNCTION; ftrace = 1; break; + case 'F': + case OPT_FIFO: + use_fifo = 1; + strncpy(fifopath, optarg, sizeof(fifopath) - 1); + break; + + case 'H': + case OPT_HISTOFALL: + histofall = 1; /* fall through */ + case 'h': + case OPT_HISTOGRAM: + histogram = atoi(optarg); break; + case 'i': + case OPT_INTERVAL: + interval = atoi(optarg); break; + case 'I': + case OPT_IRQSOFF: + if (tracetype == PREEMPTOFF) { + tracetype = PREEMPTIRQSOFF; + strncpy(tracer, "preemptirqsoff", sizeof(tracer)); + } else { + tracetype = IRQSOFF; + strncpy(tracer, "irqsoff", sizeof(tracer)); + } + break; + case 'l': + case OPT_LOOPS: + max_cycles = atoi(optarg); break; + case 'm': + case OPT_MLOCKALL: + lockall = 1; break; + case 'M': + case OPT_REFRESH: + refresh_on_max = 1; break; + case 'n': + case OPT_NANOSLEEP: + use_nanosleep = MODE_CLOCK_NANOSLEEP; break; + case 'N': + case OPT_NSECS: + use_nsecs = 1; break; + case 'o': + case OPT_OSCOPE: + oscope_reduction = atoi(optarg); break; + case 'O': + case OPT_TRACEOPT: + traceopt(optarg); break; + case 'p': + case OPT_PRIORITY: + priority = atoi(optarg); + if (policy != SCHED_FIFO && policy != SCHED_RR) + policy = SCHED_FIFO; + break; + case 'P': + case OPT_PREEMPTOFF: + if (tracetype == IRQSOFF) { + tracetype = PREEMPTIRQSOFF; + strncpy(tracer, "preemptirqsoff", sizeof(tracer)); + } else { + tracetype = PREEMPTOFF; + strncpy(tracer, "preemptoff", sizeof(tracer)); + } + break; + case 'q': + case OPT_QUIET: + quiet = 1; break; + case 'r': + case OPT_RELATIVE: + timermode = TIMER_RELTIME; break; + case 'R': + case OPT_RESOLUTION: + check_clock_resolution = 1; break; + case 's': + case OPT_SECALIGNED: + secaligned = 1; + if (optarg != NULL) + offset = atoi(optarg) * 1000; + else if (optind < argc && atoi(argv[optind])) + offset = atoi(argv[optind]) * 1000; + else + offset = 0; + break; + case OPT_SYSTEM: + use_system = MODE_SYS_OFFSET; break; + case 'S': + case OPT_SMP: /* SMP testing */ + if (numa) + fatal("numa and smp options are mutually exclusive\n"); + smp = 1; + num_threads = max_cpus; + setaffinity = AFFINITY_USEALL; + use_nanosleep = MODE_CLOCK_NANOSLEEP; + break; + case 't': + case OPT_THREADS: + if (smp) { + warn("-t ignored due to --smp\n"); + break; + } + if (optarg != NULL) + num_threads = atoi(optarg); + else if (optind<argc && atoi(argv[optind])) + 
num_threads = atoi(argv[optind]); + else + num_threads = max_cpus; + break; + case 'T': + case OPT_TRACER: + tracetype = CUSTOM; + strncpy(tracer, optarg, sizeof(tracer) - 1); + break; + case 'u': + case OPT_UNBUFFERED: + setvbuf(stdout, NULL, _IONBF, 0); break; + case 'U': + case OPT_NUMA: /* NUMA testing */ + if (smp) + fatal("numa and smp options are mutually exclusive\n"); +#ifdef NUMA + if (numa_available() == -1) + fatal("NUMA functionality not available!"); + numa = 1; + num_threads = max_cpus; + setaffinity = AFFINITY_USEALL; + use_nanosleep = MODE_CLOCK_NANOSLEEP; +#else + warn("cyclictest was not built with the numa option\n"); + warn("ignoring --numa or -U\n"); +#endif + break; + case 'v': + case OPT_VERBOSE: verbose = 1; break; + case 'w': + case OPT_WAKEUP: + tracetype = WAKEUP; break; + case 'W': + case OPT_WAKEUPRT: + tracetype = WAKEUPRT; break; + case '?': + case OPT_HELP: + display_help(0); break; + + /* long only options */ + case OPT_PRIOSPREAD: + priospread = 1; break; + case OPT_LATENCY: + /* power management latency target value */ + /* note: default is 0 (zero) */ + latency_target_value = atoi(optarg); + if (latency_target_value < 0) + latency_target_value = 0; + break; + case OPT_NOTRACE: + notrace = 1; break; + case OPT_POLICY: + handlepolicy(optarg); break; + case OPT_DBGCYCLIC: + ct_debug = 1; break; + case OPT_LAPTOP: + laptop = 1; break; + } + } + + if (option_affinity) { + if (smp) { + warn("-a ignored due to --smp\n"); + } else if (numa) { + warn("-a ignored due to --numa\n"); + } + } + + if (tracelimit) + fileprefix = procfileprefix; + + if (clocksel < 0 || clocksel >= ARRAY_SIZE(clocksources)) + error = 1; + + if (oscope_reduction < 1) + error = 1; + + if (oscope_reduction > 1 && !verbose) { + warn("-o option only meaningful if verbose\n"); + error = 1; + } + + if (histogram < 0) + error = 1; + + if (histogram > HIST_MAX) + histogram = HIST_MAX; + + if (histogram && distance != -1) + warn("distance is ignored and set to 0 if histogram is enabled\n"); + if (distance == -1) + distance = DEFAULT_DISTANCE; + + if (priority < 0 || priority > 99) + error = 1; + + if (priospread && priority == 0) { + fprintf(stderr, "defaulting realtime priority to %d\n", + num_threads+1); + priority = num_threads+1; + } + + if (priority && (policy != SCHED_FIFO && policy != SCHED_RR)) { + fprintf(stderr, "policy and priority don't match: setting policy to SCHED_FIFO\n"); + policy = SCHED_FIFO; + } + + if ((policy == SCHED_FIFO || policy == SCHED_RR) && priority == 0) { + fprintf(stderr, "defaulting realtime priority to %d\n", + num_threads+1); + priority = num_threads+1; + } + + if (num_threads < 1) + error = 1; + + if (aligned && secaligned) + error = 1; + + if (aligned || secaligned) { + barrier_init(&globalt_barr, num_threads); + barrier_init(&align_barr, num_threads); + } + + if (error) { + if (affinity_mask) + rt_bitmask_free(affinity_mask); + display_help(1); + } +} + +static int check_kernel(void) +{ + struct utsname kname; + int maj, min, sub, kv, ret; + + ret = uname(&kname); + if (ret) { + fprintf(stderr, "uname failed: %s.
Assuming not 2.6\n", + strerror(errno)); + return KV_NOT_SUPPORTED; + } + sscanf(kname.release, "%d.%d.%d", &maj, &min, &sub); + if (maj == 2 && min == 6) { + if (sub < 18) + kv = KV_26_LT18; + else if (sub < 24) + kv = KV_26_LT24; + else if (sub < 28) { + kv = KV_26_33; + strcpy(functiontracer, "ftrace"); + strcpy(traceroptions, "iter_ctrl"); + } else { + kv = KV_26_33; + strcpy(functiontracer, "function"); + strcpy(traceroptions, "trace_options"); + } + } else if (maj >= 3) { + kv = KV_30; + strcpy(functiontracer, "function"); + strcpy(traceroptions, "trace_options"); + + } else + kv = KV_NOT_SUPPORTED; + + return kv; +} + +static int check_timer(void) +{ + struct timespec ts; + + if (clock_getres(CLOCK_MONOTONIC, &ts)) + return 1; + + return (ts.tv_sec != 0 || ts.tv_nsec != 1); +} + +static void sighand(int sig) +{ + if (sig == SIGUSR1) { + int i; + int oldquiet = quiet; + + quiet = 0; + fprintf(stderr, "#---------------------------\n"); + fprintf(stderr, "# cyclictest current status:\n"); + for (i = 0; i < num_threads; i++) + print_stat(stderr, parameters[i], i, 0, 0); + fprintf(stderr, "#---------------------------\n"); + quiet = oldquiet; + return; + } + shutdown = 1; + if (refresh_on_max) + pthread_cond_signal(&refresh_on_max_cond); + if (tracelimit && !notrace) + tracing(0); +} + +static void print_tids(struct thread_param *par[], int nthreads) +{ + int i; + + printf("# Thread Ids:"); + for (i = 0; i < nthreads; i++) + printf(" %05d", par[i]->stats->tid); + printf("\n"); +} + +static void print_hist(struct thread_param *par[], int nthreads) +{ + int i, j; + unsigned long long int log_entries[nthreads+1]; + unsigned long maxmax, alloverflows; + + bzero(log_entries, sizeof(log_entries)); + + printf("# Histogram\n"); + for (i = 0; i < histogram; i++) { + unsigned long long int allthreads = 0; + + printf("%06d ", i); + + for (j = 0; j < nthreads; j++) { + unsigned long curr_latency=par[j]->stats->hist_array[i]; + printf("%06lu", curr_latency); + if (j < nthreads - 1) + printf("\t"); + log_entries[j] += curr_latency; + allthreads += curr_latency; + } + if (histofall && nthreads > 1) { + printf("\t%06llu", allthreads); + log_entries[nthreads] += allthreads; + } + printf("\n"); + } + printf("# Total:"); + for (j = 0; j < nthreads; j++) + printf(" %09llu", log_entries[j]); + if (histofall && nthreads > 1) + printf(" %09llu", log_entries[nthreads]); + printf("\n"); + printf("# Min Latencies:"); + for (j = 0; j < nthreads; j++) + printf(" %05lu", par[j]->stats->min); + printf("\n"); + printf("# Avg Latencies:"); + for (j = 0; j < nthreads; j++) + printf(" %05lu", par[j]->stats->cycles ? 
+ (long)(par[j]->stats->avg/par[j]->stats->cycles) : 0); + printf("\n"); + printf("# Max Latencies:"); + maxmax = 0; + for (j = 0; j < nthreads; j++) { + printf(" %05lu", par[j]->stats->max); + if (par[j]->stats->max > maxmax) + maxmax = par[j]->stats->max; + } + if (histofall && nthreads > 1) + printf(" %05lu", maxmax); + printf("\n"); + printf("# Histogram Overflows:"); + alloverflows = 0; + for (j = 0; j < nthreads; j++) { + printf(" %05lu", par[j]->stats->hist_overflow); + alloverflows += par[j]->stats->hist_overflow; + } + if (histofall && nthreads > 1) + printf(" %05lu", alloverflows); + printf("\n"); + + printf("# Histogram Overflow at cycle number:\n"); + for (i = 0; i < nthreads; i++) { + printf("# Thread %d:", i); + for (j = 0; j < par[i]->stats->num_outliers; j++) + printf(" %05lu", par[i]->stats->outliers[j]); + if (par[i]->stats->num_outliers < par[i]->stats->hist_overflow) + printf(" # %05lu others", par[i]->stats->hist_overflow - par[i]->stats->num_outliers); + printf("\n"); + } + printf("\n"); +} + +static void print_stat(FILE *fp, struct thread_param *par, int index, int verbose, int quiet) +{ + struct thread_stat *stat = par->stats; + + if (!verbose) { + if (quiet != 1) { + char *fmt; + if (use_nsecs) + fmt = "T:%2d (%5d) P:%2d I:%ld C:%7lu " + "Min:%7ld Act:%8ld Avg:%8ld Max:%8ld\n"; + else + fmt = "T:%2d (%5d) P:%2d I:%ld C:%7lu " + "Min:%7ld Act:%5ld Avg:%5ld Max:%8ld\n"; + fprintf(fp, fmt, index, stat->tid, par->prio, + par->interval, stat->cycles, stat->min, stat->act, + stat->cycles ? + (long)(stat->avg/stat->cycles) : 0, stat->max); + } + } else { + while (stat->cycles != stat->cyclesread) { + long diff = stat->values + [stat->cyclesread & par->bufmsk]; + + if (diff > stat->redmax) { + stat->redmax = diff; + stat->cycleofmax = stat->cyclesread; + } + if (++stat->reduce == oscope_reduction) { + fprintf(fp, "%8d:%8lu:%8ld\n", index, + stat->cycleofmax, stat->redmax); + stat->reduce = 0; + stat->redmax = 0; + } + stat->cyclesread++; + } + } +} + + +/* + * thread that creates a named fifo and hands out run stats when someone + * reads from the fifo. 
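 + * + * Illustrative use (the pipe name is whatever was passed to -F; the + * path below is hypothetical): + * cyclictest -q -F /tmp/cyclictest_fifo & + * cat /tmp/cyclictest_fifo # prints one stats line per thread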
+ */ +void *fifothread(void *param) +{ + int ret; + int fd; + FILE *fp; + int i; + + if (use_fifo == 0) + return NULL; + + unlink(fifopath); + ret = mkfifo(fifopath, 0666); + if (ret) { + fprintf(stderr, "Error creating fifo %s: %s\n", fifopath, strerror(errno)); + return NULL; + } + while (!shutdown) { + fd = open(fifopath, O_WRONLY|O_NONBLOCK); + if (fd < 0) { + usleep(500000); + continue; + } + fp = fdopen(fd, "w"); + for (i=0; i < num_threads; i++) + print_stat(fp, parameters[i], i, 0, 0); + fclose(fp); + usleep(250); + } + unlink(fifopath); + return NULL; +} + +#ifdef CONFIG_XENO_COBALT + +static const char *reason_str[] = { + [SIGDEBUG_UNDEFINED] = "received SIGDEBUG for unknown reason", + [SIGDEBUG_MIGRATE_SIGNAL] = "received signal", + [SIGDEBUG_MIGRATE_SYSCALL] = "invoked syscall", + [SIGDEBUG_MIGRATE_FAULT] = "triggered fault", + [SIGDEBUG_MIGRATE_PRIOINV] = "affected by priority inversion", + [SIGDEBUG_NOMLOCK] = "process memory not locked", + [SIGDEBUG_WATCHDOG] = "watchdog triggered (period too short?)", + [SIGDEBUG_LOCK_BREAK] = "scheduler lock break", +}; + +static void sigdebug(int sig, siginfo_t *si, void *context) +{ + const char fmt[] = "%s, aborting.\n" + "(enabling CONFIG_XENO_OPT_DEBUG_TRACE_RELAX may help)\n"; + unsigned int reason = sigdebug_reason(si); + int n __attribute__ ((unused)); + static char buffer[256]; + + if (reason > SIGDEBUG_WATCHDOG) + reason = SIGDEBUG_UNDEFINED; + + switch(reason) { + case SIGDEBUG_UNDEFINED: + case SIGDEBUG_NOMLOCK: + case SIGDEBUG_WATCHDOG: + n = snprintf(buffer, sizeof(buffer), "latency: %s\n", + reason_str[reason]); + write_check(STDERR_FILENO, buffer, n); + exit(EXIT_FAILURE); + } + + n = snprintf(buffer, sizeof(buffer), fmt, reason_str[reason]); + write_check(STDERR_FILENO, buffer, n); + signal(sig, SIG_DFL); + kill(getpid(), sig); +} + +#endif + +int main(int argc, char **argv) +{ + struct sigaction sa __attribute__((unused)); + sigset_t sigset; + int signum = SIGALRM; + int mode; + int max_cpus = sysconf(_SC_NPROCESSORS_ONLN); + int i, ret = -1; + int status; + + process_options(argc, argv, max_cpus); + + if (check_privs()) + exit(EXIT_FAILURE); + + if (verbose) + printf("Max CPUs = %d\n", max_cpus); + + /* Checks if numa is on, program exits if numa on but not available */ + numa_on_and_available(); + + /* lock all memory (prevent swapping) */ + if (lockall) + if (mlockall(MCL_CURRENT|MCL_FUTURE) == -1) { + perror("mlockall"); + goto out; + } + + /* use the /dev/cpu_dma_latency trick if it's there */ + set_latency_target(); + + kernelversion = check_kernel(); + + if (kernelversion == KV_NOT_SUPPORTED) + warn("Running on unknown kernel version...YMMV\n"); + + setup_tracer(); + + if (check_timer()) + warn("High resolution timers not available\n"); + + if (check_clock_resolution) { + int clock; + uint64_t diff; + int k; + uint64_t min_non_zero_diff = UINT64_MAX; + struct timespec now; + struct timespec prev; + uint64_t reported_resolution = UINT64_MAX; + struct timespec res; + struct timespec *time; + int times; + + clock = clocksources[clocksel]; + + if (clock_getres(clock, &res)) { + warn("clock_getres failed"); + } else { + reported_resolution = (NSEC_PER_SEC * res.tv_sec) + res.tv_nsec; + } + + + /* + * Calculate how many calls to clock_gettime are needed. + * Then call it that many times. + * Goal is to collect timestamps for ~ 0.001 sec. + * This will reliably capture resolution <= 500 usec. 
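 + * + * Sketch of the reasoning: if the clock ticks every R nsec, a sampling + * window of ~1 msec contains at least two tick boundaries whenever + * R <= 500 usec, so the smallest non-zero delta between consecutive + * readings approximates R.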
+ */ + times = 1000; + clock_gettime(clock, &prev); + for (k=0; k < times; k++) { + clock_gettime(clock, &now); + } + + diff = calcdiff_ns(now, prev); + if (diff == 0) { + /* + * No clock rollover occurred. + * Use the default value for times. + */ + times = -1; + } else { + int call_time; + call_time = diff / times; /* duration 1 call */ + times = NSEC_PER_SEC / call_time; /* calls per second */ + times /= 1000; /* calls per msec */ + if (times < 1000) + times = 1000; + } + /* sanity check */ + if ((times <= 0) || (times > 100000)) + times = 100000; + + time = calloc(times, sizeof(*time)); + + for (k=0; k < times; k++) { + clock_gettime(clock, &time[k]); + } + + if (ct_debug) { + info("For %d consecutive calls to clock_gettime():\n", times); + info("time, delta time (nsec)\n"); + } + + prev = time[0]; + for (k=1; k < times; k++) { + + diff = calcdiff_ns(time[k], prev); + prev = time[k]; + + if (diff && (diff < min_non_zero_diff)) { + min_non_zero_diff = diff; + } + + if (ct_debug) + info("%ld.%06ld %5llu\n", + time[k].tv_sec, time[k].tv_nsec, + (unsigned long long)diff); + } + + free(time); + + + if (verbose || + (min_non_zero_diff && (min_non_zero_diff > reported_resolution))) { + /* + * Measured clock resolution includes the time to call + * clock_gettime(), so it will be slightly larger than + * actual resolution. + */ + warn("reported clock resolution: %llu nsec\n", + (unsigned long long)reported_resolution); + warn("measured clock resolution approximately: %llu nsec\n", + (unsigned long long)min_non_zero_diff); + } + + } + + mode = use_nanosleep + use_system; + + sigemptyset(&sigset); + sigaddset(&sigset, signum); + sigprocmask (SIG_BLOCK, &sigset, NULL); + + signal(SIGINT, sighand); + signal(SIGTERM, sighand); + signal(SIGUSR1, sighand); +#ifdef CONFIG_XENO_COBALT + sigemptyset(&sa.sa_mask); + sa.sa_sigaction = sigdebug; + sa.sa_flags = SA_SIGINFO; + sigaction(SIGDEBUG, &sa, NULL); +#endif + + parameters = calloc(num_threads, sizeof(struct thread_param *)); + if (!parameters) + goto out; + statistics = calloc(num_threads, sizeof(struct thread_stat *)); + if (!statistics) + goto outpar; + + for (i = 0; i < num_threads; i++) { + pthread_attr_t attr; + int node; + struct thread_param *par; + struct thread_stat *stat; + + status = pthread_attr_init(&attr); + if (status != 0) + fatal("error from pthread_attr_init for thread %d: %s\n", i, strerror(status)); + + node = -1; + if (numa) { + void *stack; + void *currstk; + size_t stksize; + + /* find the memory node associated with the cpu i */ + node = rt_numa_numa_node_of_cpu(i); + + /* get the stack size set for for this thread */ + if (pthread_attr_getstack(&attr, &currstk, &stksize)) + fatal("failed to get stack size for thread %d\n", i); + + /* if the stack size is zero, set a default */ + if (stksize == 0) + stksize = PTHREAD_STACK_MIN * 2; + + /* allocate memory for a stack on appropriate node */ + stack = rt_numa_numa_alloc_onnode(stksize, node, i); + + /* set the thread's stack */ + if (pthread_attr_setstack(&attr, stack, stksize)) + fatal("failed to set stack addr for thread %d to 0x%x\n", + i, stack+stksize); + } + + /* allocate the thread's parameter block */ + parameters[i] = par = threadalloc(sizeof(struct thread_param), node); + if (par == NULL) + fatal("error allocating thread_param struct for thread %d\n", i); + memset(par, 0, sizeof(struct thread_param)); + + /* allocate the thread's statistics block */ + statistics[i] = stat = threadalloc(sizeof(struct thread_stat), node); + if (stat == NULL) + fatal("error allocating 
thread status struct for thread %d\n", i); + memset(stat, 0, sizeof(struct thread_stat)); + + /* allocate the histogram if requested */ + if (histogram) { + int bufsize = histogram * sizeof(long); + + stat->hist_array = threadalloc(bufsize, node); + stat->outliers = threadalloc(bufsize, node); + if (stat->hist_array == NULL || stat->outliers == NULL) + fatal("failed to allocate histogram of size %d on node %d\n", + histogram, i); + memset(stat->hist_array, 0, bufsize); + memset(stat->outliers, 0, bufsize); + } + + if (verbose) { + int bufsize = VALBUF_SIZE * sizeof(long); + stat->values = threadalloc(bufsize, node); + if (!stat->values) + goto outall; + memset(stat->values, 0, bufsize); + par->bufmsk = VALBUF_SIZE - 1; + } + + par->prio = priority; + if (priority && (policy == SCHED_FIFO || policy == SCHED_RR)) + par->policy = policy; + else { + par->policy = SCHED_OTHER; + force_sched_other = 1; + } + if (priospread) + priority--; + par->clock = clocksources[clocksel]; + par->mode = mode; + par->timermode = timermode; + par->signal = signum; + par->interval = interval; + if (!histogram) /* same interval on CPUs */ + interval += distance; + if (verbose) + printf("Thread %d Interval: %d\n", i, interval); + par->max_cycles = max_cycles; + par->stats = stat; + par->node = node; + par->tnum = i; + switch (setaffinity) { + case AFFINITY_UNSPECIFIED: par->cpu = -1; break; + case AFFINITY_SPECIFIED: + par->cpu = cpu_for_thread(i, max_cpus); + if (verbose) + printf("Thread %d using cpu %d.\n", i, + par->cpu); + break; + case AFFINITY_USEALL: par->cpu = i % max_cpus; break; + } + stat->min = 1000000; + stat->max = 0; + stat->avg = 0.0; + stat->threadstarted = 1; + status = pthread_create(&stat->thread, &attr, timerthread, par); + if (status) + fatal("failed to create thread %d: %s\n", i, strerror(status)); + + } + if (use_fifo) + status = pthread_create(&fifo_threadid, NULL, fifothread, NULL); + + while (!shutdown) { + char lavg[256]; + int fd, len, allstopped = 0; + static char *policystr = NULL; + static char *slash = NULL; + static char *policystr2; + + if (!policystr) + policystr = policyname(policy); + + if (!slash) { + if (force_sched_other) { + slash = "/"; + policystr2 = policyname(SCHED_OTHER); + } else + slash = policystr2 = ""; + } + if (!verbose && !quiet) { + fd = open("/proc/loadavg", O_RDONLY, 0666); + len = read(fd, &lavg, 255); + close(fd); + lavg[len-1] = 0x0; + printf("policy: %s%s%s: loadavg: %s \n\n", + policystr, slash, policystr2, lavg); + } + + for (i = 0; i < num_threads; i++) { + + print_stat(stdout, parameters[i], i, verbose, quiet); + if(max_cycles && statistics[i]->cycles >= max_cycles) + allstopped++; + } + + usleep(10000); + if (shutdown || allstopped) + break; + if (!verbose && !quiet) + printf("\033[%dA", num_threads + 2); + + if (refresh_on_max) { + pthread_mutex_lock(&refresh_on_max_lock); + pthread_cond_wait(&refresh_on_max_cond, + &refresh_on_max_lock); + pthread_mutex_unlock(&refresh_on_max_lock); + } + } + ret = EXIT_SUCCESS; + + outall: + shutdown = 1; + usleep(50000); + + if (quiet) + quiet = 2; + for (i = 0; i < num_threads; i++) { + if (statistics[i]->threadstarted > 0) + pthread_kill(statistics[i]->thread, SIGTERM); + if (statistics[i]->threadstarted) { + pthread_join(statistics[i]->thread, NULL); + if (quiet && !histogram) + print_stat(stdout, parameters[i], i, 0, 0); + } + if (statistics[i]->values) + threadfree(statistics[i]->values, VALBUF_SIZE*sizeof(long), parameters[i]->node); + } + + if (histogram) { + print_hist(parameters, num_threads); + for (i 
= 0; i < num_threads; i++) { + threadfree(statistics[i]->hist_array, histogram*sizeof(long), parameters[i]->node); + threadfree(statistics[i]->outliers, histogram*sizeof(long), parameters[i]->node); + } + } + + if (tracelimit) { + print_tids(parameters, num_threads); + if (break_thread_id) { + printf("# Break thread: %d\n", break_thread_id); + printf("# Break value: %llu\n", (unsigned long long)break_thread_value); + } + } + + + for (i=0; i < num_threads; i++) { + if (!statistics[i]) + continue; + threadfree(statistics[i], sizeof(struct thread_stat), parameters[i]->node); + } + + outpar: + for (i = 0; i < num_threads; i++) { + if (!parameters[i]) + continue; + threadfree(parameters[i], sizeof(struct thread_param), parameters[i]->node); + } + out: + /* ensure that the tracer is stopped */ + if (tracelimit && !notrace) + tracing(0); + + + /* close any tracer file descriptors */ + if (tracemark_fd >= 0) + close(tracemark_fd); + if (trace_fd >= 0) + close(trace_fd); + + if (enable_events) + /* turn off all events */ + event_disable_all(); + + /* turn off the function tracer */ + fileprefix = procfileprefix; + if (tracetype && !notrace) + setkernvar("ftrace_enabled", "0"); + fileprefix = get_debugfileprefix(); + + /* unlock everything */ + if (lockall) + munlockall(); + + /* Be a nice program, cleanup */ + if (kernelversion < KV_26_33) + restorekernvars(); + + /* close the latency_target_fd if it's open */ + if (latency_target_fd >= 0) + close(latency_target_fd); + + if (affinity_mask) + rt_bitmask_free(affinity_mask); + + exit(ret); +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.c b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.c new file mode 100644 index 0000000..b32aa02 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.c @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2009 John Kacur <jkacur@redhat.com> + * + * error routines, similar to those found in + * Advanced Programming in the UNIX Environment 2nd ed. + */ +#include "error.h" + +/* Print an error message, plus a message for err and exit with error err */ +void err_exit(int err, char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + err_doit(err, fmt, ap); + va_end(ap); + exit(err); +} + +/* print an error message and return */ +void err_msg(char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + err_doit(0, fmt, ap); + va_end(ap); + return; +} + +/* Print an error message, plus a message for err, and return */ +void err_msg_n(int err, char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + err_doit(err, fmt, ap); + va_end(ap); + return; +} + +/* print an error message and quit */ +void err_quit(char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + err_doit(0, fmt, ap); + va_end(ap); + exit(1); +} + +void debug(char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + fputs("DEBUG: ", stderr); + err_doit(0, fmt, ap); + va_end(ap); +} + +void info(char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + fputs("INFO: ", stderr); + err_doit(0, fmt, ap); + va_end(ap); +} + +void warn(char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + fputs("WARN: ", stderr); + err_doit(0, fmt, ap); + va_end(ap); +} + +void fatal(char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + fputs("FATAL: ", stderr); + err_doit(0, fmt, ap); + va_end(ap); + exit(EXIT_FAILURE); +} + +void err_doit(int err, const char *fmt, va_list ap) +{ + vfprintf(stderr, fmt, ap); + if (err) + fprintf(stderr, ": %s\n", strerror(err)); + return; +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.h b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.h new file mode 100644 index 0000000..ae05a2e --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.h @@ -0,0 +1,19 @@ +#ifndef __ERROR_H +#define __ERROR_H + +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> + +void err_exit(int err, char *fmt, ...); +void err_msg(char *fmt, ...); +void err_msg_n(int err, char *fmt, ...); +void err_quit(char *fmt, ...); +void debug(char *fmt, ...); +void info(char *fmt, ...); +void warn(char *fmt, ...); +void fatal(char *fmt, ...); +void err_doit(int err, const char *fmt, va_list ap); + +#endif /* __ERROR_H */ diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-sched.h b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-sched.h new file mode 100644 index 0000000..064e51c --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-sched.h @@ -0,0 +1,61 @@ +/* + rt-sched.h - sched_setattr() and sched_getattr() API + + (C) Dario Faggioli <raistlin@linux.it>, 2009, 2010 + Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner <daniel.wagner@bmw-carit.de> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + USA */ + +/* This file is based on Dario Faggioli's libdl. Eventually it will be + replaced by a proper implementation of this API.
*/ + +#ifndef __RT_SCHED_H__ +#define __RT_SCHED_H__ + +#include <stdint.h> +#include <sys/types.h> + +#ifndef SCHED_DEADLINE +#define SCHED_DEADLINE 6 +#endif + +struct sched_attr { + uint32_t size; + uint32_t sched_policy; + uint64_t sched_flags; + + /* SCHED_NORMAL, SCHED_BATCH */ + int32_t sched_nice; + + /* SCHED_FIFO, SCHED_RR */ + uint32_t sched_priority; + + /* SCHED_DEADLINE */ + uint64_t sched_runtime; + uint64_t sched_deadline; + uint64_t sched_period; +}; + +int sched_setattr(pid_t pid, + const struct sched_attr *attr, + unsigned int flags); + +int sched_getattr(pid_t pid, + struct sched_attr *attr, + unsigned int size, + unsigned int flags); + +#endif /* __RT_SCHED_H__ */ diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.c b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.c new file mode 100644 index 0000000..3882d23 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.c @@ -0,0 +1,319 @@ +/* + * Copyright (C) 2009 Carsten Emde <carsten.emde@osadl.org> + * Copyright (C) 2010 Clark Williams <williams@redhat.com> + * + * based on functions from cyclictest that has + * (C) 2008-2009 Clark Williams <williams@redhat.com> + * (C) 2005-2007 Thomas Gleixner <tglx@linutronix.de> + */ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sched.h> +#include <stdarg.h> +#include <errno.h> +#include <fcntl.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> +#include <sys/syscall.h> /* For SYS_gettid definitions */ +#include "rt-utils.h" +#include "rt-sched.h" +#include "error.h" + +static char debugfileprefix[MAX_PATH]; + +/* + * Finds the tracing directory in a mounted debugfs + */ +char *get_debugfileprefix(void) +{ + char type[100]; + FILE *fp; + int size; + int found = 0; + struct stat s; + + if (debugfileprefix[0] != '\0') + goto out; + + /* look in the "standard" mount point first */ + if ((stat("/sys/kernel/debug/tracing", &s) == 0) && S_ISDIR(s.st_mode)) { + strcpy(debugfileprefix, "/sys/kernel/debug/tracing/"); + goto out; + } + + /* now look in the "other standard" place */ + if ((stat("/debug/tracing", &s) == 0) && S_ISDIR(s.st_mode)) { + strcpy(debugfileprefix, "/debug/tracing/"); + goto out; + } + + /* oh well, parse /proc/mounts and see if it's there */ + if ((fp = fopen("/proc/mounts","r")) == NULL) + goto out; + + while (fscanf(fp, "%*s %" + STR(MAX_PATH) + "s %99s %*s %*d %*d\n", + debugfileprefix, type) == 2) { + if (strcmp(type, "debugfs") == 0) { + found = 1; + break; + } + /* stupid check for systemd-style autofs mount */ + if ((strcmp(debugfileprefix, "/sys/kernel/debug") == 0) && + (strcmp(type, "systemd") == 0)) { + found = 1; + break; + } + } + fclose(fp); + + if (!found) { + debugfileprefix[0] = '\0'; + goto out; + } + + size = sizeof(debugfileprefix) - strlen(debugfileprefix); + strncat(debugfileprefix, "/tracing/", size); + +out: + return debugfileprefix; +} + +int mount_debugfs(char *path) +{ + char *mountpoint = path; + char cmd[MAX_PATH]; + char *prefix; + int ret; + + /* if it's already mounted just return */ + prefix = get_debugfileprefix(); + if (strlen(prefix) != 0) { + info("debugfs mountpoint: %s\n", prefix); + return 0; + } + if (!mountpoint) + mountpoint = "/sys/kernel/debug"; + + sprintf(cmd, "mount -t debugfs debugfs %s", mountpoint); + ret = system(cmd); + if (ret != 0) { + fprintf(stderr, "Error mounting debugfs at %s: %s\n", mountpoint, strerror(errno)); + return -1; + } + return 0; + +} + +static char **tracer_list; +static char *tracer_buffer; +static int 
num_tracers; +#define CHUNKSZ 1024 + +/* + * return a list of the tracers configured into the running kernel + */ + +int get_tracers(char ***list) +{ + int ret; + FILE *fp; + char buffer[CHUNKSZ]; + char *prefix = get_debugfileprefix(); + char *tmpbuf = NULL; + char *ptr; + int tmpsz = 0; + + /* if we've already parsed it, return what we have */ + if (tracer_list) { + *list = tracer_list; + return num_tracers; + } + + /* open the tracing file available_tracers */ + sprintf(buffer, "%savailable_tracers", prefix); + if ((fp = fopen(buffer, "r")) == NULL) + fatal("Can't open %s for reading\n", buffer); + + /* allocate initial buffer */ + ptr = tmpbuf = malloc(CHUNKSZ); + if (ptr == NULL) + fatal("error allocating initial space for tracer list\n"); + + /* read in the list of available tracers */ + while((ret = fread(buffer, sizeof(char), CHUNKSZ, fp))) { + if ((ptr+ret+1) > (tmpbuf+tmpsz)) { + tmpbuf = realloc(tmpbuf, tmpsz + CHUNKSZ); + if (tmpbuf == NULL) + fatal("error allocating space for list of valid tracers\n"); + tmpsz += CHUNKSZ; + } + strncpy(ptr, buffer, ret); + ptr += ret; + } + fclose(fp); + if (tmpsz == 0) + fatal("error reading available tracers\n"); + + tracer_buffer = tmpbuf; + + /* get a buffer for the pointers to tracers */ + if (!(tracer_list = malloc(sizeof(char *)))) + fatal("error allocating tracer list buffer\n"); + + /* parse the buffer */ + ptr = strtok(tmpbuf, " \t\n\r"); + do { + tracer_list[num_tracers++] = ptr; + tracer_list = realloc(tracer_list, sizeof(char*)*(num_tracers+1)); + tracer_list[num_tracers] = NULL; + } while ((ptr = strtok(NULL, " \t\n\r")) != NULL); + + /* return the list and number of tracers */ + *list = tracer_list; + return num_tracers; +} + + +/* + * return zero if tracername is not a valid tracer, non-zero if it is + */ + +int valid_tracer(char *tracername) +{ + char **list; + int ntracers; + int i; + + ntracers = get_tracers(&list); + if (ntracers == 0 || tracername == NULL) + return 0; + for (i = 0; i < ntracers; i++) + if (strncmp(list[i], tracername, strlen(list[i])) == 0) + return 1; + return 0; +} + +/* + * enable event tracepoint + */ +int setevent(char *event, char *val) +{ + char *prefix = get_debugfileprefix(); + char buffer[MAX_PATH]; + int fd; + int ret; + + sprintf(buffer, "%s%s", prefix, event); + if ((fd = open(buffer, O_WRONLY)) < 0) { + warn("unable to open %s\n", buffer); + return -1; + } + if ((ret = write(fd, val, strlen(val))) < 0) { + warn("unable to write %s to %s\n", val, buffer); + close(fd); + return -1; + } + close(fd); + return 0; +} + +int event_enable_all(void) +{ + return setevent("events/enable", "1"); +} + +int event_disable_all(void) +{ + return setevent("events/enable", "0"); +} + +int event_enable(char *event) +{ + char path[MAX_PATH]; + sprintf(path, "events/%s/enable", event); + return setevent(path, "1"); +} + +int event_disable(char *event) +{ + char path[MAX_PATH]; + sprintf(path, "events/%s/enable", event); + return setevent(path, "0"); +} + +int check_privs(void) +{ + int policy = sched_getscheduler(0); + struct sched_param param, old_param; + + /* if we're already running a realtime scheduler + * then we *should* be able to change things later + */ + if (policy == SCHED_FIFO || policy == SCHED_RR) + return 0; + + /* first get the current parameters */ + if (sched_getparam(0, &old_param)) { + fprintf(stderr, "unable to get scheduler parameters\n"); + return 1; + } + param = old_param; + + /* try to change to SCHED_FIFO */ + param.sched_priority = 1; + if (sched_setscheduler(0, SCHED_FIFO, &param)) {
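 + /* Note (illustrative): a non-root user can pass this check by + * raising RLIMIT_RTPRIO beforehand, e.g. with a pam_limits entry + * such as "<user> - rtprio 99" in /etc/security/limits.conf. */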
+ fprintf(stderr, "Unable to change scheduling policy!\n"); + fprintf(stderr, "either run as root or join realtime group\n"); + return 1; + } + + /* we're good; change back and return success */ + return sched_setscheduler(0, policy, &old_param); +} + +const char *policy_to_string(int policy) +{ + switch (policy) { + case SCHED_OTHER: + return "SCHED_OTHER"; + case SCHED_FIFO: + return "SCHED_FIFO"; + case SCHED_RR: + return "SCHED_RR"; + case SCHED_BATCH: + return "SCHED_BATCH"; + case SCHED_IDLE: + return "SCHED_IDLE"; + case SCHED_DEADLINE: + return "SCHED_DEADLINE"; + } + + return "unknown"; +} + +uint32_t string_to_policy(const char *str) +{ + if (!strcmp(str, "other")) + return SCHED_OTHER; + else if (!strcmp(str, "fifo")) + return SCHED_FIFO; + else if (!strcmp(str, "rr")) + return SCHED_RR; + else if (!strcmp(str, "batch")) + return SCHED_BATCH; + else if (!strcmp(str, "idle")) + return SCHED_IDLE; + else if (!strcmp(str, "deadline")) + return SCHED_DEADLINE; + + return 0; +} + +pid_t gettid(void) +{ + return syscall(SYS_gettid); +} diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.h b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.h new file mode 100644 index 0000000..a7c7640 --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.h @@ -0,0 +1,32 @@ +#ifndef __RT_UTILS_H +#define __RT_UTILS_H + +#include <stdint.h> +#include <linux/sched.h> + +#ifndef SCHED_NORMAL +#define SCHED_NORMAL SCHED_OTHER +#endif + +#define _STR(x) #x +#define STR(x) _STR(x) +#define MAX_PATH 256 + +int check_privs(void); +char *get_debugfileprefix(void); +int mount_debugfs(char *); +int get_tracers(char ***); +int valid_tracer(char *); + +int setevent(char *event, char *val); +int event_enable(char *event); +int event_disable(char *event); +int event_enable_all(void); +int event_disable_all(void); + +const char *policy_to_string(int policy); +uint32_t string_to_policy(const char *str); + +pid_t gettid(void); + +#endif /* __RT_UTILS.H */ diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt_numa.h b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt_numa.h new file mode 100644 index 0000000..98e7d0f --- /dev/null +++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt_numa.h @@ -0,0 +1,277 @@ +/* + * A numa library for cyclictest. + * The functions here are designed to work whether cyclictest has been + * compiled with numa support or not, and whether the user uses the --numa + * option or not. + * They should also work correctly with older versions of the numactl lib + * such as the one found on RHEL5, or with the newer version 2 and above. + * + * The difference in behavior hinges on whether LIBNUMA_API_VERSION >= 2, + * in which case we will employ the bitmask affinity behavior -or- + * either LIBNUMA_API_VERSION < 2 or NUMA support is missing altogether, + * in which case we retain the older affinity behavior which can either + * specify a single CPU core or else use all cores. 
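 + * + * For example (illustrative, mirroring the -a handling above): with + * LIBNUMA_API_VERSION >= 2, "-a 2-3" spreads threads round-robin over + * CPUs 2 and 3, while the legacy version 1 path only accepts a single + * CPU number such as "-a 2".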
+ *
+ * (C) 2010 John Kacur <jkacur@redhat.com>
+ * (C) 2010 Clark Williams <williams@redhat.com>
+ *
+ */
+
+#ifndef _RT_NUMA_H
+#define _RT_NUMA_H
+
+#include "rt-utils.h"
+#include "error.h"
+
+static int numa = 0;
+
+#ifdef NUMA
+#include <numa.h>
+
+#ifndef LIBNUMA_API_VERSION
+#define LIBNUMA_API_VERSION 1
+#endif
+
+static void *
+threadalloc(size_t size, int node)
+{
+	if (node == -1)
+		return malloc(size);
+	return numa_alloc_onnode(size, node);
+}
+
+static void
+threadfree(void *ptr, size_t size, int node)
+{
+	if (node == -1)
+		free(ptr);
+	else
+		numa_free(ptr, size);
+}
+
+static void rt_numa_set_numa_run_on_node(int node, int cpu)
+{
+	int res;
+	res = numa_run_on_node(node);
+	if (res)
+		warn("Could not set NUMA node %d for thread %d: %s\n",
+			node, cpu, strerror(errno));
+	return;
+}
+
+static void *rt_numa_numa_alloc_onnode(size_t size, int node, int cpu)
+{
+	void *stack;
+	stack = numa_alloc_onnode(size, node);
+	if (stack == NULL)
+		fatal("failed to allocate %zu bytes on node %d for cpu %d\n",
+			size, node, cpu);
+	return stack;
+}
+
+#if LIBNUMA_API_VERSION >= 2
+
+/*
+ * Use new bit mask CPU affinity behavior
+ */
+static int rt_numa_numa_node_of_cpu(int cpu)
+{
+	int node;
+	node = numa_node_of_cpu(cpu);
+	if (node == -1)
+		fatal("invalid cpu passed to numa_node_of_cpu(%d)\n", cpu);
+	return node;
+}
+
+static inline unsigned int rt_numa_bitmask_isbitset(const struct bitmask *mask,
+	unsigned long i)
+{
+	return numa_bitmask_isbitset(mask, i);
+}
+
+static inline struct bitmask* rt_numa_parse_cpustring(const char* s,
+	int max_cpus)
+{
+#ifdef HAVE_PARSE_CPUSTRING_ALL /* Currently not defined anywhere. No
+				   autotools build. */
+	return numa_parse_cpustring_all(s);
+#else
+	/* We really need numa_parse_cpustring_all(), so we can assign threads
+	 * to cores which are part of an isolcpus set, but early 2.x versions
+	 * of libnuma do not have this function. A workaround would be to run
+	 * your command with e.g. taskset -c 9-15 <command>
+	 */
+	return numa_parse_cpustring((char *)s);
+#endif
+}
+
+static inline void rt_bitmask_free(struct bitmask *mask)
+{
+	numa_bitmask_free(mask);
+}
+
+#else	/* LIBNUMA_API_VERSION == 1 */
+
+struct bitmask {
+	unsigned long size; /* number of bits in the map */
+	unsigned long *maskp;
+};
+#define BITS_PER_LONG (8*sizeof(long))
+
+/*
+ * Map legacy CPU affinity behavior onto bit mask infrastructure
+ */
+static int rt_numa_numa_node_of_cpu(int cpu)
+{
+	unsigned char cpumask[256];
+	int node, idx, bit;
+	int max_node, max_cpus;
+
+	max_node = numa_max_node();
+	max_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+	if (cpu > max_cpus) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	/* calculate bitmask index and relative bit position of cpu */
+	idx = cpu / 8;
+	bit = cpu % 8;
+
+	for (node = 0; node <= max_node; node++) {
+		if (numa_node_to_cpus(node, (void *) cpumask, sizeof(cpumask)))
+			return -1;
+
+		if (cpumask[idx] & (1<<bit))
+			return node;
+	}
+	errno = EINVAL;
+	return -1;
+}
+
+static inline unsigned int rt_numa_bitmask_isbitset(const struct bitmask *mask,
+	unsigned long i)
+{
+	long bit = mask->maskp[i/BITS_PER_LONG] & (1UL<<(i % BITS_PER_LONG));
+	return (bit != 0);
+}
+
+static inline struct bitmask* rt_numa_parse_cpustring(const char* s,
+	int max_cpus)
+{
+	int cpu;
+	struct bitmask *mask = NULL;
+	cpu = atoi(s);
+	if (0 <= cpu && cpu < max_cpus) {
+		mask = malloc(sizeof(*mask));
+		if (mask) {
+			/* Round up to integral number of longs to contain
+			 * max_cpus bits */
+			int nlongs = (max_cpus+BITS_PER_LONG-1)/BITS_PER_LONG;
+
+			mask->maskp = calloc(nlongs, sizeof(long));
+			if (mask->maskp) {
+				mask->maskp[cpu/BITS_PER_LONG] |=
+					(1UL << (cpu % BITS_PER_LONG));
+				mask->size = max_cpus;
+			} else {
+				free(mask);
+				mask = NULL;
+			}
+		}
+	}
+	return mask;
+}
+
+static inline void rt_bitmask_free(struct bitmask *mask)
+{
+	free(mask->maskp);
+	free(mask);
+}
+
+#endif	/* LIBNUMA_API_VERSION */
+
+static void numa_on_and_available(void)
+{
+	if (numa && (numa_available() == -1))
+		fatal("--numa specified and numa functions not available.\n");
+}
+
+#else	/* !NUMA */
+
+struct bitmask {
+	unsigned long size; /* number of bits in the map */
+	unsigned long *maskp;
+};
+#define BITS_PER_LONG (8*sizeof(long))
+
+static inline void *threadalloc(size_t size, int n) { return malloc(size); }
+static inline void threadfree(void *ptr, size_t s, int n) { free(ptr); }
+static inline void rt_numa_set_numa_run_on_node(int n, int c) { }
+static inline int rt_numa_numa_node_of_cpu(int cpu) { return -1; }
+static void *rt_numa_numa_alloc_onnode(size_t s, int n, int c) { return NULL; }
+
+/*
+ * Map legacy CPU affinity behavior onto bit mask infrastructure
+ */
+static inline unsigned int rt_numa_bitmask_isbitset(const struct bitmask *mask,
+	unsigned long i)
+{
+	long bit = mask->maskp[i/BITS_PER_LONG] & (1UL<<(i % BITS_PER_LONG));
+	return (bit != 0);
+}
+
+static inline struct bitmask* rt_numa_parse_cpustring(const char* s,
+	int max_cpus)
+{
+	int cpu;
+	struct bitmask *mask = NULL;
+	cpu = atoi(s);
+	if (0 <= cpu && cpu < max_cpus) {
+		mask = malloc(sizeof(*mask));
+		if (mask) {
+			/* Round up to integral number of longs to contain
+			 * max_cpus bits */
+			int nlongs = (max_cpus+BITS_PER_LONG-1)/BITS_PER_LONG;
+
+			mask->maskp = calloc(nlongs, sizeof(long));
+			if (mask->maskp) {
+				mask->maskp[cpu/BITS_PER_LONG] |=
+					(1UL << (cpu % BITS_PER_LONG));
+				mask->size = max_cpus;
+			} else {
+				free(mask);
+				mask = NULL;
+			}
+		}
+	}
+	return mask;
+}
+
+static inline void rt_bitmask_free(struct bitmask *mask)
+{
+	free(mask->maskp);
+	free(mask);
+}
+
+static inline void numa_on_and_available(void) { }
+
+#endif	/* NUMA */
+
+/*
+ * Any behavioral differences above are transparent to these functions
+ */
+/** Returns number of bits set in mask. */
+static inline unsigned int rt_numa_bitmask_count(const struct bitmask *mask)
+{
+	unsigned int num_bits = 0, i;
+	for (i = 0; i < mask->size; i++) {
+		if (rt_numa_bitmask_isbitset(mask, i))
+			num_bits++;
+	}
+	/* Could stash this instead of recomputing every time. */
+	return num_bits;
+}
+
+#endif	/* _RT_NUMA_H */
diff --git a/kernel/xenomai-v3.2.4/doc/Makefile.am b/kernel/xenomai-v3.2.4/doc/Makefile.am
new file mode 100644
index 0000000..3d3f5ec
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/Makefile.am
@@ -0,0 +1,6 @@
+SUBDIRS=gitdoc doxygen asciidoc
+
+gitdoc doxygen asciidoc: FORCE
+	$(MAKE) -C $@
+
+.PHONY: FORCE
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/MIGRATION.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/MIGRATION.adoc
new file mode 100644
index 0000000..dce7f40
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/MIGRATION.adoc
@@ -0,0 +1,1935 @@
+Migrating from Xenomai 2.x to 3.x
+=================================
+
+== Configuration ==
+
+=== User programs and libraries ===
+
+Changes in +xeno-config+::
+
+As with Xenomai 2.x, +xeno-config+ is available for retrieving the
+compilation and link flags for building Xenomai 3.x applications. This
+script works for the Cobalt and Mercury environments alike.
+
+ * Each +--skin=<api>+ option specifier can be abbreviated as
+ +--<api>+. For instance, +--psos+ is a shorthand for +--skin=psos+ on
+ the command line.
+
+ * Over Cobalt, only *xeno-config --posix --ldflags* (or *--rtdm* as
+ an alias) returns the proper linker flags to cause POSIX routines
+ invoked by the application to be wrapped to their respective Xenomai
+ implementation. No other API will imply such wrapping. For this
+ reason, *--cobalt --ldflags* should be used for linking exclusively
+ against the Cobalt library (i.e. +libcobalt.so+) *without* symbol
+ wrapping.
+ Conversely, mentioning *--posix* along with other API
+ switches with *--ldflags* will cause POSIX symbol wrapping to take
+ place, e.g. use *--posix --alchemy --ldflags* for mixed API support
+ with POSIX symbol wrapping.
+
+ * Over _Mercury_, +--posix+ and +--rtdm+ are ignored placeholders,
+ since the full POSIX API is available with the glibc and the
+ threading library.
+
+ * +--[skin=]alchemy+ replaces the former +--skin=native+ switch.
+
+ * +--core+ can be used to retrieve the name of the Xenomai core
+ system for which +xeno-config+ was generated. Possible output values
+ are +cobalt+ and +mercury+.
+
+ * +--ccld+ retrieves a C compiler command suitable for linking a
+ Xenomai 3.x application.
+
+ * +--info+ retrieves the current system information, including the
+ Xenomai release detected on the platform.
+
+[[auto-init]]
+Auto-initialization::
+
++--no-auto-init+ can be passed to disable automatic initialization of
+the Copperplate library when the application process enters the
++main()+ routine.
+
+In such a case, the application code using any API based on the
+Copperplate layer shall call the +copperplate_init(int *argcp, char
+*const **argvp)+ routine manually, as part of its initialization
+process, _before_ any real-time service is invoked.
+
+This routine takes the addresses of the argument count and vector
+passed to the +main()+ routine respectively. copperplate_init()
+handles the Xenomai options present in the argument vector, stripping
+them out and leaving only unprocessed options in the vector on return,
+with the count updated accordingly.
+
++xeno-config+ enables the Copperplate auto-init feature by default.
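+
+For instance, with +--no-auto-init+ in effect, an application could
+bootstrap Copperplate by hand as follows (a minimal sketch; the header
+name is an assumption, check your installation):
+
+------------------------------------------------------------
+#include <copperplate/init.h>	/* assumed header location */
+
+int main(int argc, char *const argv[])
+{
+	/*
+	 * Parse and strip the Xenomai options, initializing the
+	 * Copperplate layer before any real-time service is used.
+	 */
+	copperplate_init(&argc, &argv);
+
+	/* ... real-time services may be invoked from here on ... */
+
+	return 0;
+}
+------------------------------------------------------------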
+
+x86 vsyscall support::
+
+The +--enable-x86-sep+ configuration switch was renamed to
++--enable-x86-vsyscall+ to fix a misnomer. This option should be left
+enabled (default), unless *linuxthreads* are used instead of *NPTL*.
+
+=== Kernel parameters (Cobalt) ===
+
+System parameters renamed::
+
+* xeno_hal.supported_cpus -> xenomai.supported_cpus
+* xeno_hal.disable -> xenomai.state=disabled
+* xeno_hal.cpufreq -> xenomai.cpufreq
+* xeno_nucleus.watchdog_timeout -> xenomai.watchdog_timeout
+* xeno_nucleus.xenomai_gid -> xenomai.allowed_group
+* xeno_nucleus.sysheap_size -> xenomai.sysheap_size
+* xeno_hal.smi (x86 only) -> xenomai.smi
+* xeno_hal.smi_mask (x86 only) -> xenomai.smi_mask
+
+Obsolete parameters dropped::
+
+* xeno_rtdm.tick_arg
+* rtdm.devname_hashtab_size
+* rtdm.protocol_hashtab_size
+
+.Rationale
+**********************************************************************
+Periodic timing is directly handled from the API layer in
+user-space. Cobalt kernel timing is tickless.
+**********************************************************************
+
+== Getting the system state ==
+
+When running Copperplate-based APIs (i.e. all but pure POSIX),
+querying the state of the real-time system should be done via the new
+Xenomai registry interface available with Xenomai 3.x, which is
+turned on when +--enable-registry+ is passed to the configuration
+script for building the Xenomai libraries and programs.
+
+The new registry support is common to the Cobalt and Mercury cores,
+with only marginal differences due to the presence (or lack) of a
+co-kernel in the system.
+
+=== New FUSE-based registry interface ===
+
+The Xenomai system state is now fully exported via a FUSE-based
+filesystem. The hierarchy of the Xenomai registry is organized as
+follows:
+
+----------------------------------------------------------------------------
+/mount-point	/* registry fs root, defaults to /var/run/xenomai */
+    /user	/* user name */
+	/session	/* shared session name or anon@<pid> */
+	    /pid	/* application (main) pid */
+		/skin	/* API name: alchemy/vxworks/psos/... */
+		    /family	/* object class (task, semaphore, ...) */
+			{ exported objects... }
+	    /system	/* session-wide information */
+----------------------------------------------------------------------------
+
+Each leaf entry under a session hierarchy is normally viewable, for
+retrieving the information attached to the corresponding object, such
+as its state, and/or value. There can be multiple sessions hosted
+under a single registry mount point.
+
+The /system hierarchy provides information about the current state of
+the Xenomai core, aggregating data from all processes which belong to
+the parent session. Typically, the status of all threads and heaps
+created by the session can be retrieved.
+
+The registry daemon is a companion tool managing exactly one registry
+mount point, which is specified by the --root option on the command
+line. This daemon is automatically spawned by the registry support
+code as required. There is normally no action required from users for
+managing it.
+
+=== /proc/xenomai interface ===
+
+The /proc/xenomai interface is still available when running over the
+Cobalt core, mainly for pure POSIX-based applications. The following
+changes took place:
+
+Thread status::
+
+All pseudo-files reporting the various thread states moved under the
+new +sched/+ hierarchy, i.e.
+
++{sched, stat, acct}+ -> +sched/{threads, stat, acct}+
+
+Clocks::
+
+With the introduction of dynamic clock registration in the Cobalt
+core, the +clock/+ hierarchy was added, to reflect the current state
+of all timers from the registered Xenomai clocks.
+
+There is no kernel-based time base management anymore with Xenomai
+{xenover}. Functionally speaking, only the former _master_ time base
+remains; periodic timing is now controlled locally from the Xenomai
+libraries in user-space.
+
+Xenomai {xenover} defines a built-in clock named _coreclk_, which has
+the same properties as the former _master_ time base available with
+Xenomai 2.x (i.e. tickless with nanosecond resolution).
+
+The settings of existing clocks can be read from entries under the
+new +clock/+ hierarchy. Active timers for each clock can be read from
+entries under the new +timer/+ hierarchy.
+
+As a consequence of these changes:
+
+ * the information previously available from the +timer+ entry is now
+obtained by reading +clock/coreclk+.
+
+ * the information previously available from +timerstat/master+ is
+now obtained by reading +timer/coreclk+.
+
+// break list
+Core clock gravity::
+
+The gravity value for a Xenomai clock gives the amount of time by
+which the next timer shot should be anticipated. This is a static
+adjustment value, to account for the basic latency of the target
+system for responding to external events. Such latency may be
+introduced by hardware effects (e.g. bus or cache latency), or
+software issues (e.g. code running with interrupts disabled).
+
+The clock gravity management departs from Xenomai 2.x as follows:
+
+ * different gravity values are applied, depending on which context a
+ timer activates.
+ This may be a real-time IRQ handler (_irq_), a RTDM
+ driver task (_kernel_), or a Xenomai application thread running in
+ user-space (_user_). Xenomai 2.x does not differentiate, only
+ applying a global gravity value regardless of the activated context.
+
+ * in addition to the legacy +latency+ file which now reports
+ the _user_ timer gravity (in nanoseconds), i.e. used for timers
+ activating user-space threads, the full gravity triplet applied to
+ timers running on the core clock can be accessed by reading
+ +clock/coreclk+ (also in nanoseconds).
+
+ * at reset, the _user_ gravity for the core clock now represents the
+sum of the scheduling *and* hardware timer reprogramming time as a
+count of nanoseconds. This departs from Xenomai 2.x for which only the
+former was accounted for as a global gravity value, regardless of the
+target context for the timer.
+
+The following command reports the current gravity triplet for the
+target system, along with the setup information for the core timer:
+
+--------------------------------------------
+# cat xenomai/clock/coreclk
+gravity: irq=848 kernel=8272 user=35303
+devices: timer=decrementer, clock=timebase
+ status: on+watchdog
+  setup: 151
+  ticks: 220862243033
+--------------------------------------------
+
+Conversely, writing to this file manually changes the gravity values
+of the Xenomai core clock:
+
+------------------------------------------------------
+	/* change the user gravity (default) */
+# echo 3000 > /proc/xenomai/clock/coreclk
+	/* change the IRQ gravity */
+# echo 1000i > /proc/xenomai/clock/coreclk
+	/* change the user and kernel gravities */
+# echo "2000u 1000k" > /proc/xenomai/clock/coreclk
+------------------------------------------------------
+
++interfaces+ removed::
+
+Only the POSIX and RTDM APIs remain implemented directly in kernel
+space, and are always present when the Cobalt core is enabled in the
+configuration. All other APIs are implemented in user-space over the
+Copperplate layer. This makes the former +interfaces+ contents
+basically useless, since the corresponding information for the
+POSIX/RTDM interfaces can be obtained via +sched/threads+
+unconditionally.
+
++registry/usage+ changed format::
+
+The new printout is <used slot count>/<total slot count>.
+
+== Binary object features ==
+
+=== Loading Xenomai libraries dynamically ===
+
+The new +--enable-dlopen-libs+ configuration switch must be turned on
+to allow Xenomai libraries to be dynamically loaded via dlopen(3).
+
+This replaces the former +--enable-dlopen-skins+ switch. Unlike the
+latter, +--enable-dlopen-libs+ does not implicitly disable support for
+thread local storage, but rather selects a suitable TLS model
+(i.e. _global-dynamic_).
+
+=== Thread local storage ===
+
+The former +--with-__thread+ configuration switch was renamed
++--enable-tls+.
+
+As mentioned earlier, TLS is now available to dynamically loaded
+Xenomai libraries, e.g. +--enable-tls --enable-dlopen-libs+ on a
+configuration line is valid. This would select the _global-dynamic_
+TLS model instead of _initial-exec_, to make sure all thread-local
+variables may be accessed from any code module.
+
+== Process-level management ==
+
+=== Main thread shadowing ===
+
+Any application linked against +libcobalt+ has its main thread
+attached to the real-time system automatically; this operation is
+called _auto-shadowing_. As a side-effect, the entire process's memory
+is locked, for current and future mappings
+(i.e. +mlockall(MCL_CURRENT|MCL_FUTURE)+).
+
+=== Shadow signal handler ===
+
+Xenomai's +libcobalt+ installs a handler for the SIGWINCH (aka
+_SIGSHADOW_) signal. This signal may be sent by the Cobalt core to any
+real-time application, for handling internal duties.
+
+Applications are allowed to interpose on the SIGSHADOW handler,
+provided they first forward all signal notifications to this routine,
+then eventually handle all events the Xenomai handler won't process.
+
+This handler was renamed from `xeno_sigwinch_handler()` (Xenomai 2.x)
+to `cobalt_sigshadow_handler()` in Xenomai 3.x. The function prototype
+did not change though, i.e.:
+
+----------------------------------------------------------------
+int cobalt_sigshadow_handler(int sig, siginfo_t *si, void *ctxt)
+----------------------------------------------------------------
+
+A non-zero value is returned whenever the event was handled internally
+by the Xenomai system.
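+
+A minimal interposition sketch (assuming the handler is installed
+with +sigaction(2)+; the application-specific part is left out):
+
+----------------------------------------------------------------
+#include <signal.h>
+
+int cobalt_sigshadow_handler(int sig, siginfo_t *si, void *ctxt);
+
+static void app_sigshadow(int sig, siginfo_t *si, void *ctxt)
+{
+	/* Forward to the Xenomai handler first, as required. */
+	if (cobalt_sigshadow_handler(sig, si, ctxt))
+		return;	/* event handled internally by the core */
+
+	/* ... application-specific SIGWINCH processing ... */
+}
+
+static void install_sigshadow(void)
+{
+	struct sigaction sa;
+
+	sigemptyset(&sa.sa_mask);
+	sa.sa_sigaction = app_sigshadow;
+	sa.sa_flags = SA_SIGINFO;
+	sigaction(SIGWINCH, &sa, NULL);
+}
+----------------------------------------------------------------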
+
+=== Debug signal handler ===
+
+Xenomai's +libcobalt+ installs a handler for the SIGXCPU (aka
+_SIGDEBUG_) signal. This signal may be sent by the Cobalt core to any
+real-time application, for notifying various debug events.
+
+Applications are allowed to interpose on the SIGDEBUG handler,
+provided they eventually forward all signal notifications they won't
+process to the Xenomai handler.
+
+This handler was renamed from `xeno_handle_mlock_alert()` (Xenomai
+2.x) to `cobalt_sigdebug_handler()` in Xenomai 3.x. The function
+prototype did not change though, i.e.:
+
++void cobalt_sigdebug_handler(int sig, siginfo_t *si, void *ctxt)+
+
+=== Copperplate auto-initialization ===
+
+Copperplate is a library layer which mediates between the real-time
+core services available on the platform, and the API exposed to the
+application. It provides typical programming abstractions for
+emulating real-time APIs. All non-POSIX APIs are based on Copperplate
+services (e.g. _alchemy_, _psos_, _vxworks_).
+
+When Copperplate is built for running over the Cobalt core, it sits on
+top of the +libcobalt+ library. Conversely, it is directly stacked on
+top of the *glibc* or *uClibc* when built for running over the Mercury
+core.
+
+Normally, Copperplate should initialize from a call issued by the
++main()+ application routine. To make this process transparent for the
+user, the +xeno-config+ script emits link flags which temporarily
+override the +main()+ routine with a Copperplate-based replacement,
+running the proper initialization code as required, before branching
+back to the user-defined application entry point.
+
+This behavior may be disabled by passing the
+<<auto-init,+--no-auto-init+>> option.
+
+== RTDM interface changes ==
+
+=== Files renamed ===
+
+- Redundant prefixes were removed from the following files:
+
+[normal]
+rtdm/rtdm_driver.h -> rtdm/driver.h
+[normal]
+rtdm/rtcan.h -> rtdm/can.h
+[normal]
+rtdm/rtserial.h -> rtdm/serial.h
+[normal]
+rtdm/rttesting.h -> rtdm/testing.h
+[normal]
+rtdm/rtipc.h -> rtdm/ipc.h
+
+=== Driver API ===
+
+==== New device description model ====
+
+Several changes have taken place in the device description passed to
++rtdm_dev_register()+ (i.e. +struct rtdm_device+). Aside from fixing
+consistency issues, the bulk of changes is aimed at narrowing the gap
+between the regular Linux device driver model and RTDM.
+
+To this end, RTDM in Xenomai 3 shares the Linux namespace for named
+devices, which are now backed by common character device objects from
+the regular Linux device model.
+As a consequence of this, file descriptors obtained on named RTDM
+devices are regular file descriptors, visible from the
++/proc/<pid>/fd+ interface.
+
+===== Named device description =====
+
+The major change required for supporting this closer integration of
+RTDM into the regular Linux driver model involved splitting the device
+driver properties from the device instance definitions, which used to
+be combined in Xenomai 2.x into the +rtdm_device+ descriptor.
+
+.Xenomai 2.x named device description
+---------------------------------------------
+static struct rtdm_device foo_device0 = {
+	.struct_version = RTDM_DEVICE_STRUCT_VER,
+	.device_flags = RTDM_NAMED_DEVICE|RTDM_EXCLUSIVE,
+	.device_id = 0,
+	.context_size = sizeof(struct foo_context),
+	.ops = {
+		.open = foo_open,
+		.ioctl_rt = foo_ioctl_rt,
+		.ioctl_nrt = foo_ioctl_nrt,
+		.close = foo_close,
+	},
+	.device_class = RTDM_CLASS_EXPERIMENTAL,
+	.device_sub_class = RTDM_SUBCLASS_FOO,
+	.profile_version = 42,
+	.device_name = "foo0",
+	.driver_name = "foo driver",
+	.driver_version = RTDM_DRIVER_VER(1, 0, 0),
+	.peripheral_name = "Ultra-void IV board driver",
+	.proc_name = device.device_name,
+	.provider_name = "Whoever",
+};
+
+static struct rtdm_device foo_device1 = {
+	.struct_version = RTDM_DEVICE_STRUCT_VER,
+	.device_flags = RTDM_NAMED_DEVICE|RTDM_EXCLUSIVE,
+	.device_id = 1,
+	.context_size = sizeof(struct foo_context),
+	.ops = {
+		.open = foo_open,
+		.ioctl_rt = foo_ioctl_rt,
+		.ioctl_nrt = foo_ioctl_nrt,
+		.close = foo_close,
+	},
+	.device_class = RTDM_CLASS_EXPERIMENTAL,
+	.device_sub_class = RTDM_SUBCLASS_FOO,
+	.profile_version = 42,
+	.device_name = "foo1",
+	.device_data = NULL,
+	.driver_name = "foo driver",
+	.driver_version = RTDM_DRIVER_VER(1, 0, 0),
+	.peripheral_name = "Ultra-void IV board driver",
+	.proc_name = device.device_name,
+	.provider_name = "Whoever",
+};
+
+foo_device0.device_data = &some_driver_data0;
+ret = rtdm_dev_register(&foo_device0);
+...
+foo_device1.device_data = &some_driver_data1;
+ret = rtdm_dev_register(&foo_device1);
+
+---------------------------------------------
+
+The legacy description above would only create "virtual" device
+entries, private to the RTDM device namespace, with no visible
+counterparts into the Linux device namespace.
+
+.Xenomai 3.x named device description
+---------------------------------------------
+
+static struct rtdm_driver foo_driver = {
+	.profile_info = RTDM_PROFILE_INFO(foo,
+					  RTDM_CLASS_EXPERIMENTAL,
+					  RTDM_SUBCLASS_FOO,
+					  42),
+	.device_flags = RTDM_NAMED_DEVICE|RTDM_EXCLUSIVE,
+	.device_count = 2,
+	.context_size = sizeof(struct foo_context),
+	.ops = {
+		.open = foo_open,
+		.ioctl_rt = foo_ioctl_rt,
+		.ioctl_nrt = foo_ioctl_nrt,
+		.close = foo_close,
+	},
+};
+
+static struct rtdm_device foo_devices[2] = {
+	[ 0 ... 1 ] = {
+		.driver = &foo_driver,
+		.label = "foo%d",
+	},
+};
+
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("Ultra-void IV board driver");
+MODULE_AUTHOR("Whoever");
+
+foo_devices[0].device_data = &some_driver_data0;
+ret = rtdm_dev_register(&foo_devices[0]);
+...
+foo_devices[1].device_data = &some_driver_data1;
+ret = rtdm_dev_register(&foo_devices[1]);
+
+---------------------------------------------
+
+The current description above will cause the device nodes
+/dev/rtdm/foo0 and /dev/rtdm/foo1 to be created in the Linux device
+namespace. Applications may open these device nodes for interacting
+with the RTDM driver, as they would do with any regular _chrdev_
+driver.
+
+===== Protocol device description =====
+
+Similarly, the registration data for protocol devices have been
+changed to follow the new generic layout:
+
+.Xenomai 2.x protocol device description
+---------------------------------------------
+static struct rtdm_device foo_device = {
+	.struct_version = RTDM_DEVICE_STRUCT_VER,
+	.device_flags = RTDM_PROTOCOL_DEVICE,
+	.context_size = sizeof(struct foo_context),
+	.device_name = "foo",
+	.protocol_family = PF_FOO,
+	.socket_type = SOCK_DGRAM,
+	.socket_nrt = foo_socket,
+	.ops = {
+		.close_nrt = foo_close,
+		.recvmsg_rt = foo_recvmsg,
+		.sendmsg_rt = foo_sendmsg,
+		.ioctl_rt = foo_ioctl,
+		.ioctl_nrt = foo_ioctl,
+		.read_rt = foo_read,
+		.write_rt = foo_write,
+		.select_bind = foo_select,
+	},
+	.device_class = RTDM_CLASS_EXPERIMENTAL,
+	.device_sub_class = RTDM_SUBCLASS_FOO,
+	.profile_version = 1,
+	.driver_name = "foo",
+	.driver_version = RTDM_DRIVER_VER(1, 0, 0),
+	.peripheral_name = "Unexpected protocol driver",
+	.proc_name = device.device_name,
+	.provider_name = "Whoever",
+	.device_data = &some_driver_data,
+};
+
+ret = rtdm_dev_register(&foo_device);
+...
+
+---------------------------------------------
+
+.Xenomai 3.x protocol device description
+---------------------------------------------
+static struct rtdm_driver foo_driver = {
+	.profile_info = RTDM_PROFILE_INFO(foo,
+					  RTDM_CLASS_EXPERIMENTAL,
+					  RTDM_SUBCLASS_FOO,
+					  1),
+	.device_flags = RTDM_PROTOCOL_DEVICE,
+	.device_count = 1,
+	.context_size = sizeof(struct foo_context),
+	.protocol_family = PF_FOO,
+	.socket_type = SOCK_DGRAM,
+	.ops = {
+		.socket = foo_socket,
+		.close = foo_close,
+		.recvmsg_rt = foo_recvmsg,
+		.sendmsg_rt = foo_sendmsg,
+		.ioctl_rt = foo_ioctl,
+		.ioctl_nrt = foo_ioctl,
+		.read_rt = foo_read,
+		.write_rt = foo_write,
+		.select = foo_select,
+	},
+};
+
+static struct rtdm_device foo_device = {
+	.driver = &foo_driver,
+	.label = "foo",
+	.device_data = &some_driver_data,
+};
+
+ret = rtdm_dev_register(&foo_device);
+...
+
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("Unexpected protocol driver");
+MODULE_AUTHOR("Whoever");
+
+---------------------------------------------
+
+* +.device_count+ has been added to reflect the (maximum) number of
+  device instances which may be managed by the driver. This
+  information is used to dynamically reserve a range of major/minor
+  numbers for named RTDM devices in the Linux device namespace, by a
+  particular driver. Device minors are assigned to RTDM device
+  instances in order of registration starting from minor #0, unless
+  RTDM_FIXED_MINOR is present in the device flags. In the latter case,
+  rtdm_device.minor is used verbatim by the RTDM core when registering
+  the device.
+
+* +.device_id+ was removed from the device description, as the minor
+  number it was most commonly holding is now available from a call to
+  rtdm_fd_minor(). Drivers should use +.device_data+ for storing
+  private information attached to device instances.
+
+* +.struct_version+ was dropped, as it provided no additional feature
+  to the standard module versioning scheme.
+
+* +.proc_name+ was dropped, as it is redundant with the device
+  name. Above all, using a /proc information label different from the
+  actual device name is unlikely to be a good idea.
+
+* +.device_class+, +.device_sub_class+ and +.profile_version+ numbers
+  have been grouped in a dedicated profile information descriptor
+  (+struct rtdm_profile_info+), which one *must* initialize using the
+  +RTDM_PROFILE_INFO()+ macro.
+
+* +.driver_name+ was dropped, as it adds no value to the plain module
+  name (unless the module name is deliberately obfuscated, that is).
+
+* +.peripheral_name+ was dropped, as this information should be
+  conveyed by MODULE_DESCRIPTION().
+
+* +.provider_name+ was dropped, as this information should be conveyed
+  by MODULE_AUTHOR().
+
+* +.driver_version+ was dropped, as this information should be
+  conveyed by MODULE_VERSION().
+
+==== Introduction of file descriptors ====
+
+Xenomai 3 introduces a file descriptor abstraction for RTDM
+drivers. For this reason, all RTDM driver handlers and services which
+used to receive a `user_info` opaque argument describing the calling
+context, now receive a `rtdm_fd` pointer standing for the target file
+descriptor for the operation.
+
+As a consequence of this:
+
+- The +rtdm_context_get/put()+ call pair has been replaced by
+  +rtdm_fd_get/put()+.
+
+- Likewise, the +rtdm_context_lock/unlock()+ call pair has been
+  replaced by +rtdm_fd_lock/unlock()+.
+
+- +rtdm_fd_to_private()+ is available to fetch the context-private
+  memory allocated by the driver for a particular RTDM file
+  descriptor. Conversely, +rtdm_private_to_fd()+ returns the file
+  descriptor owning a particular context-private memory area.
+
+- +rtdm_fd_minor()+ retrieves the minor number assigned to the current
+  named device instance using its file descriptor.
+
+- +xenomai/rtdm/open_files+ and +xenomai/rtdm/fildes+ now solely
+  report file descriptors obtained using the driver-to-driver API.
+  RTDM file descriptors obtained from applications appear under the
+  regular /proc/<pid>/fd hierarchy. All RTDM file descriptors obtained
+  by an application are automatically released when the latter exits.
+
+[CAUTION]
+Because RTDM file descriptors may be released and destroyed
+asynchronously, rtdm_fd_get() and rtdm_fd_lock() may return -EIDRM if
+a file descriptor fetched from some driver-private registry becomes
+stale prior to calling these services. Typically, this may happen if
+the descriptor is released from the ->close() handler implemented by
+the driver. Therefore, make sure to always carefully check the return
+value of these services.
+
+[NOTE]
+Unlike Xenomai 2.x, RTDM file descriptors returned to Xenomai 3
+applications fall within the regular Linux range. Each open RTDM
+connection is actually mapped over a regular file descriptor, which
+RTDM services from _libcobalt_ recognize and handle.
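+
+For instance, a driver handler typically retrieves its per-connection
+state from the file descriptor as follows (a sketch only; +struct
+foo_context+ is borrowed from the earlier examples):
+
+--------------------------------------------------------------
+static int foo_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	struct foo_context *ctx = rtdm_fd_to_private(fd);
+
+	/* ... operate on ctx on behalf of this connection ... */
+
+	return 0;
+}
+--------------------------------------------------------------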
+
+==== Updated device operation descriptor ====
+
+As visible from the previous illustrations, a few handlers have been
+moved to the device operation descriptor, some dropped, others
+renamed, mainly for the sake of consistency:
+
+* +.select_bind+ was renamed as +.select+ in the device operation
+  descriptor.
+
+* +.open_rt+ was dropped, and +.open_nrt+ renamed as +.open+. Opening
+  a named device instance always happens from secondary mode. In
+  addition, the new handler is now part of the device operation
+  descriptor +.ops+.
+
+.Rationale
+**********************************************************************
+Opening a device instance most often requires allocating resources
+managed by the Linux kernel (memory mappings, DMA etc), which is only
+available to regular calling contexts.
+**********************************************************************
+
+* Likewise, +.socket_rt+ was dropped, and +.socket_nrt+ renamed as
+  +.socket+. Opening a protocol device instance always happens from
+  secondary mode. In addition, the new handler is now part of the
+  device operation descriptor +.ops+.
+
+* As a consequence of the previous changes, +.close_rt+ was dropped,
+  and +.close_nrt+ renamed as +.close+. Closing a device instance
+  always happens from secondary mode.
+
+* +.open+, +.socket+ and +.close+ handlers have become optional in
+  Xenomai 3.x.
+
+[[rtdm-mmap]]
+* The device operation descriptor +.ops+ shows two new members, namely
+  +.mmap+ for handling memory mapping requests to the RTDM driver, and
+  +.get_unmapped_area+, mainly for supporting such memory mapping
+  operations in MMU-less configurations. These handlers - named after
+  the similar handlers defined in the regular file_operation
+  descriptor - always operate from secondary mode on behalf of the
+  calling task context, so that they may invoke regular kernel
+  services safely.
+
+[NOTE]
+See the documentation in the
+http://xenomai.org/documentation/xenomai-3/html/xeno3prm/[Programming
+Reference Manual] covering the device registration and operation
+handlers for a complete description.
+
+==== Changes to RTDM services ====
+
+- rtdm_dev_unregister() loses the poll_delay argument, and its return
+  value. Instead, this service waits indefinitely for all ongoing
+  connections to be dropped prior to unregistering the device. The new
+  prototype is therefore:
+
+------------------
+void rtdm_dev_unregister(struct rtdm_device *device);
+------------------
+
+.Rationale
+**********************************************************************
+Drivers are most often not willing to deal with receiving a device
+busy condition from a module exit routine (which is the place devices
+should be unregistered from). Drivers which really want to deal with
+such condition should simply use module refcounting in their own code.
+**********************************************************************
+
+- rtdm_task_init() shall be called from secondary mode.
+
+.Rationale
+**********************************************************************
+Since Xenomai 3, rtdm_task_init() involves creating a regular kernel
+thread, which will be given real-time capabilities, such as running
+under the control of the Cobalt kernel. In order to invoke standard
+kernel services, rtdm_task_init() must be called from a regular Linux
+kernel context.
+**********************************************************************
+
+- rtdm_task_join() has been introduced to wait for termination of a
+  RTDM task regardless of the caller's execution mode, which may be
+  primary or secondary. In addition, rtdm_task_join() does not need to
+  poll for such event unlike rtdm_task_join_nrt().
+
+.Rationale
+**********************************************************************
+rtdm_task_join() supersedes rtdm_task_join_nrt() feature-wise with
+fewer usage restrictions, so the latter has become pointless. It is
+therefore deprecated and will be phased out in the next release.
+**********************************************************************
+
+- A RTDM task cannot be forcibly removed from the scheduler by another
+  thread for immediate deletion. Instead, the RTDM task is notified
+  about a pending cancellation request, which it should act upon when
+  detected. To this end, RTDM driver tasks should call the new
+  +rtdm_task_should_stop()+ service to detect such notification from
+  their work loop, and exit accordingly.
+
+.Rationale
+**********************************************************************
+Since Xenomai 3, a RTDM task is based on a regular kernel thread with
+real-time capabilities when controlled by the Cobalt kernel. The Linux
+kernel requires kernel threads to exit at their earliest convenience
+upon notification, which therefore applies to RTDM tasks as well.
+**********************************************************************
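+
+[normal]
+  A typical work loop therefore looks like this (sketch):
+
+-----------------------------------------------------------
+static void foo_task_fn(void *arg)
+{
+	while (!rtdm_task_should_stop()) {
+		/* ... wait for and process the next request ... */
+	}
+	/* Returning from the task routine ends the RTDM task. */
+}
+-----------------------------------------------------------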
+
+- +rtdm_task_set_period()+ now accepts a start date for the periodic
+timeline. Zero can be passed to emulate the previous call form,
+setting the first release point when the first period after the
+current date elapses.
+
+- +rtdm_task_wait_period()+ now copies back the count of overruns into
+a user-provided variable if -ETIMEDOUT is returned. NULL can be passed
+to emulate the previous call form, discarding this information.
+
+- Both +rtdm_task_set_period()+ and +rtdm_task_wait_period()+ may be
+  invoked over a Cobalt thread context.
+
+- RTDM_EXECUTE_ATOMICALLY() is deprecated and will be phased out in
+  the next release. Drivers should prefer the newly introduced RTDM
+  wait queues, or switch to the Cobalt-specific
+  cobalt_atomic_enter/leave() call pair, depending on the use case.
+
+.Rationale
+*******************************************************************
+This construct is not portable to a native implementation of RTDM, and
+may be replaced by other means. The usage patterns of
+RTDM_EXECUTE_ATOMICALLY() used to be:
+
+- somewhat abusing the big nucleus lock (i.e. nklock) grabbed by
+  RTDM_EXECUTE_ATOMICALLY(), for serializing access to a section that
+  should be given its own lock instead, improving concurrency in the
+  same move. Such section does not call services from the Xenomai
+  core, and does NOT specifically require the nucleus lock to be
+  held. In this case, a RTDM lock (rtdm_lock_t) should be used to
+  protect the section instead of RTDM_EXECUTE_ATOMICALLY().
+
+- protecting a section which calls into the Xenomai core, which
+  exhibits one or more of the following characteristics:
+
+  * Some callee within the section may require the nucleus lock to
+    be held on entry (e.g. Cobalt registry lookup). In what has to
+    be a Cobalt-specific case, the new cobalt_atomic_enter/leave()
+    call pair can replace RTDM_EXECUTE_ATOMICALLY(). However, this
+    construct remains by definition non-portable to Mercury.
+
+  * A set-condition-and-wakeup pattern has to be carried out
+    atomically. In this case, RTDM_EXECUTE_ATOMICALLY() can be
+    replaced by the wakeup side of a RTDM wait queue introduced in
+    Xenomai 3 (e.g. rtdm_waitqueue_signal/broadcast()).
+
+  * A test-condition-and-wait pattern has to be carried out
+    atomically. In this case, RTDM_EXECUTE_ATOMICALLY() can be
+    replaced by the wait side of a RTDM wait queue introduced in
+    Xenomai 3 (e.g. rtdm_wait_condition()).
+
+Please refer to kernel/drivers/ipc/iddp.c for an illustration of the
+RTDM wait queue usage.
+*******************************************************************
+
+- rtdm_irq_request/free() and rtdm_irq_enable/disable() call pairs
+  must be called from a Linux task context, which is a restriction
+  that did not exist previously with Xenomai 2.x.
+
+.Rationale
+*******************************************************************
+Recent evolutions of the Linux kernel with respect to IRQ management
+involve complex processing for basic operations
+(e.g. enabling/disabling the interrupt line) with some interrupt types
+like MSI.
+Such processing cannot be made dual-kernel safe at a
+reasonable cost, without incurring measurable latency or significant
+code updates in the kernel.
+
+Since allocating, releasing, enabling or disabling real-time
+interrupts is most commonly done from driver initialization/cleanup
+context already, the Cobalt core has simply inherited those
+requirements from the Linux kernel.
+*******************************************************************
+
+- The leading _user_info_ argument to rtdm_munmap() has been
+  removed.
+
+.Rationale
+*********************************************************************
+With the introduction of RTDM file descriptors (see below) replacing
+all _user_info_ context pointers, this argument has become irrelevant,
+since this operation is not related to any file descriptor, but rather
+to the current address space.
+*********************************************************************
+
+The new prototype for this routine is therefore
+
+---------------------------------------
+int rtdm_munmap(void *ptr, size_t len);
+---------------------------------------
+
+- Additional memory mapping calls
+
+The following new routines are available to RTDM drivers, for mapping
+memory over a user address space. They are intended to be called from
+a ->mmap() handler:
+
+* rtdm_mmap_kmem() for mapping logical kernel memory (i.e. having
+  a direct physical mapping).
+
+* rtdm_mmap_vmem() for mapping purely virtual memory (i.e. with no
+  direct physical mapping).
+
+* rtdm_mmap_iomem() for mapping I/O memory.
+
+------------------------------------------------------------
+static int foo_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	...
+	switch (memory_type) {
+	case MEM_PHYSICAL:
+		ret = rtdm_mmap_iomem(vma, addr);
+		break;
+	case MEM_LOGICAL:
+		ret = rtdm_mmap_kmem(vma, (void *)addr);
+		break;
+	case MEM_VIRTUAL:
+		ret = rtdm_mmap_vmem(vma, (void *)addr);
+		break;
+	default:
+		return -EINVAL;
+	}
+	...
+}
+------------------------------------------------------------
+
+- The rtdm_nrtsig API has changed: the rtdm_nrtsig_init() function no
+  longer returns errors and has a void return type. The rtdm_nrtsig_t
+  type has changed from an integer to a structure. In consequence, the
+  first argument to the nrtsig handler is now a pointer to the
+  rtdm_nrtsig_t structure.
+
+.Rationale
+************************************************************************
+Recent versions of the I-pipe patch support an ipipe_post_work_root()
+service, which has the advantage over the VIRQ support that it does not
+require allocating one different VIRQ for each handler. As a
+consequence, drivers may use as many rtdm_nrtsig_t structures as they
+like; there is no chance of running out of VIRQs.
+************************************************************************
+
+The new relevant prototypes are therefore:
+
+-------------------------------------------------------------------------
+typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t *nrt_sig, void *arg);
+
+void rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
+		      rtdm_nrtsig_handler_t handler, void *arg);
+-------------------------------------------------------------------------
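+
+[normal]
+  A usage sketch follows; the handler runs from the Linux domain once
+  +rtdm_nrtsig_pend()+ has been called from real-time context (the
+  foo_* names are illustrative):
+
+---------------------------------------------------------------
+static rtdm_nrtsig_t foo_nrtsig;
+
+static void foo_nrtsig_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+{
+	/* Runs from the regular Linux domain. */
+}
+
+static int foo_init(void)	/* illustrative init path */
+{
+	rtdm_nrtsig_init(&foo_nrtsig, foo_nrtsig_handler, NULL);
+	return 0;
+}
+
+static void foo_rt_event(void)	/* called from primary mode */
+{
+	/* Defer the processing to the Linux domain. */
+	rtdm_nrtsig_pend(&foo_nrtsig);
+}
+---------------------------------------------------------------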
+
+- A new rtdm_schedule_nrt_work() call was added to allow scheduling a
+  Linux work queue from primary mode.
+
+.Rationale
+************************************************************************
+Scheduling a Linux workqueue may be a convenient way for a driver to
+recover from an error which requires synchronization with
+Linux. Typically, recovering from a PCI error may involve accessing the
+PCI config space, which requires access to a Linux spinlock, so it
+cannot be done from primary mode.
+************************************************************************
+
+The prototype of this new service is:
+
+------------------------------------------------------
+void rtdm_schedule_nrt_work(struct work_struct *work);
+------------------------------------------------------
+
+==== Adaptive syscalls ====
+
++ioctl()+, +read()+, +write()+, +recvmsg()+ and +sendmsg()+ have
+become conforming RTDM calls, which means that Xenomai threads running
+over the Cobalt core will be automatically switched to primary mode
+prior to running the driver handler for the corresponding request.
+
+.Rationale
+**********************************************************************
+Real-time handlers from RTDM drivers serve time-critical requests by
+definition, which makes them preferred targets of adaptive calls over
+non real-time handlers.
+**********************************************************************
+
+[NOTE]
+This behavior departs from Xenomai 2.x, which would run the call from
+the originating context instead (e.g. +ioctl_nrt()+ would be fired for
+a caller running in secondary mode, and conversely +ioctl_rt()+ would
+be called for a request issued from primary mode).
+
+[TIP]
+RTDM drivers implementing differentiated +ioctl()+ support for both
+domains should serve all real-time-only requests from +ioctl_rt()+,
+returning +-ENOSYS+ for any unrecognized request, which will cause the
+adaptive switch to take place automatically to the +ioctl_nrt()+
+handler. The +ioctl_nrt()+ handler should then implement all requests
+which may be valid from the regular Linux domain exclusively.
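+
+Such a differentiated setup could look like this (a sketch;
++FOO_RTIOC_START+ and foo_start() are made up for illustration):
+
+------------------------------------------------------------
+static int foo_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	switch (request) {
+	case FOO_RTIOC_START:	/* time-critical request */
+		return foo_start(fd, arg);
+	default:
+		/* Unknown here: adaptive switch to foo_ioctl_nrt(). */
+		return -ENOSYS;
+	}
+}
+------------------------------------------------------------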
+
+=== Application interface ===
+
+Unlike with Xenomai 2.x, named RTDM device nodes in Xenomai 3 are
+visible from the Linux device namespace. These nodes are automatically
+created by the _hotplug_ kernel facility. Applications must open these
+device nodes for interacting with RTDM drivers, as they would do with
+any regular _chrdev_ driver.
+
+All RTDM device nodes are created under the +rtdm/+ sub-root from the
+standard +/dev+ hierarchy, to eliminate potential name clashes with
+standard drivers.
+
+[IMPORTANT]
+Enabling DEVTMPFS in the target kernel is recommended so that the
+standard +/dev+ tree immediately reflects updates to the RTDM device
+namespace. You may want to enable CONFIG_DEVTMPFS and
+CONFIG_DEVTMPFS_MOUNT.
+
+.Opening a named device instance with Xenomai 2.x
+--------------------------------------------------
+fd = open("foo", O_RDWR);
+   or
+fd = open("/dev/foo", O_RDWR);
+--------------------------------------------------
+
+.Opening a named device instance with Xenomai 3
+-----------------------------------------------
+fd = open("/dev/rtdm/foo", O_RDWR);
+-----------------------------------------------
+
+[TIP]
+Applications can enable the CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE option
+in the kernel configuration to enable legacy pathnames for named RTDM
+devices. This compatibility option allows applications to open named
+RTDM devices using the legacy naming scheme used by Xenomai 2.x.
+
+==== Retrieving device information ====
+
+Device information can be retrieved via _sysfs_, instead of _procfs_
+as with Xenomai 2.x. As a result of this change, +/proc/xenomai/rtdm+
+disappeared entirely. Instead, the RTDM device information can now be
+reached as follows:
+
+- /sys/devices/virtual/rtdm contains entries for all RTDM devices
+present in the system (including named and protocol device types).
+This directory is aliased to /sys/class/rtdm.
+
+- each /sys/devices/virtual/rtdm/<device-name> directory gives access
+  to device information, available from virtual files:
+
+  * reading +profile+ returns the class and subclass ids.
+
+  * reading +refcount+ returns the current count of outstanding
+    connections to the device driver.
+
+  * reading +flags+ returns the device flags as defined by the device
+    driver.
+
+  * reading +type+ returns the device type (_named_ or _protocol_).
+
+=== Inter-Driver API ===
+
+The legacy (and redundant) rt_dev_*() API for calling the I/O services
+exposed by a RTDM driver from another driver was dropped, in favor of
+a direct use of the existing rtdm_*() API in kernel space. For
+instance, calls to +rt_dev_open()+ should be converted to
++rtdm_open()+, +rt_dev_socket()+ to +rtdm_socket()+ and so on.
+
+.Rationale
+******************************************************************
+Having two APIs for exactly the same purpose is needlessly confusing,
+particularly for kernel programming. Since the user-space version of
+the rt_dev_*() API was also dropped in favor of the regular POSIX I/O
+calls exposed by +libcobalt+, the choice was made to retain the most
+straightforward naming for the RTDM-to-RTDM API, keeping the +rtdm_+
+prefix.
+******************************************************************
+
+== Analogy interface changes ==
+
+=== Files renamed ===
+
+- DAQ drivers in kernel space now pull all Analogy core header files
+  from <rtdm/analogy/*.h>. In addition:
+
+[normal]
+analogy/analogy_driver.h -> rtdm/analogy/driver.h
+[normal]
+analogy/driver.h -> rtdm/analogy/driver.h
+[normal]
+analogy/analogy.h -> rtdm/analogy.h
+
+- DAQ drivers in kernel space should include <rtdm/analogy/device.h>
+  instead of <rtdm/analogy/driver.h>.
+
+- Applications need to include only a single file for pulling all
+  routine declarations and constant definitions required for invoking
+  the Analogy services from user-space, namely <rtdm/analogy.h>, i.e.
+
+[normal]
+analogy/types.h
+analogy/command.h
+analogy/device.h
+analogy/subdevice.h
+analogy/instruction.h
+analogy/ioctl.h -> all files merged into rtdm/analogy.h
+
+As a consequence of these changes, the former include/analogy/ file
+tree has been entirely removed.
+
+== RTnet changes ==
+
+RTnet is integrated into Xenomai 3, but some of its behavior and
+interfaces were changed in an attempt to simplify it.
+
+- a network driver kernel module cannot be unloaded as long as the
+  network interface it implements is up
+
+- the RTnet driver API changed, to make it simpler, and closer to
+  the mainline API
+
+  * module refcounting is now automatically done by the stack, no
+    call is necessary to RTNET_SET_MODULE_OWNER, RTNET_MOD_INC_USE_COUNT,
+    RTNET_MOD_DEC_USE_COUNT
+
+  * per-driver skb receive pools were removed from drivers; they are
+    now handled by the RTnet stack. In consequence, drivers now need
+    to pass an additional argument to the rt_alloc_etherdev() service:
+    the number of buffers in the pool.
+    The new prototype is:
+
+------------------------------------------------------------------------------------
+struct rtnet_device *rt_alloc_etherdev(unsigned sizeof_priv, unsigned rx_pool_size);
+------------------------------------------------------------------------------------
+
+  * in consequence, any explicit call to rtskb_pool_init() can be
+    removed. In addition, drivers should now use the
+    rtnetdev_alloc_rtskb() to allocate buffers from the network device
+    receive pool; much like its counterpart netdev_alloc_skb(), it takes
+    as first argument a pointer to a network device structure. Its
+    prototype is:
+
+--------------------------------------------------------------------------------
+struct rtskb *rtnetdev_alloc_rtskb(struct rtnet_device *dev, unsigned int size);
+--------------------------------------------------------------------------------
+
+  * for drivers which wish to explicitly handle skb pools, the
+    signature of rtskb_pool_init() has changed: it takes an additional
+    pointer to a structure containing callbacks called when the first
+    buffer is allocated and when the last buffer is returned, so that
+    the rtskb pool can implicitly lock a parent structure. The new
+    prototype is:
+
+-----------------------------------------------------------------------
+struct rtskb_pool_lock_ops {
+	int (*trylock)(void *cookie);
+	void (*unlock)(void *cookie);
+};
+
+unsigned int rtskb_pool_init(struct rtskb_pool *pool,
+			     unsigned int initial_size,
+			     const struct rtskb_pool_lock_ops *lock_ops,
+			     void *lock_cookie);
+-----------------------------------------------------------------------
+
+  * for the typical case where an skb pool locks the containing
+    module, the function rtskb_module_pool_init() was added, which has
+    the same interface as the old rtskb_pool_init() function. Its
+    prototype is:
+
+-----------------------------------------------------------------------
+unsigned int rtskb_module_pool_init(struct rtskb_pool *pool,
+				    unsigned int initial_size);
+-----------------------------------------------------------------------
+
+  * in order to ease the port of recent drivers, the following
+    services were added, which work much like their Linux counterpart:
+    rtnetdev_priv(), rtdev_emerg(), rtdev_alert(), rtdev_crit(),
+    rtdev_err(), rtdev_warn(), rtdev_notice(), rtdev_info(),
+    rtdev_dbg(), rtdev_vdbg(), RTDEV_TX_OK, RTDEV_TX_BUSY,
+    rtskb_tx_timestamp(). Their declarations are equivalent to:
+
+-----------------------------------------------------------------------
+#define RTDEV_TX_OK	0
+#define RTDEV_TX_BUSY	1
+
+void *rtnetdev_priv(struct rtnet_device *dev);
+
+void rtdev_emerg(struct rtnet_device *dev, const char *format, ...);
+void rtdev_alert(struct rtnet_device *dev, const char *format, ...);
+void rtdev_crit(struct rtnet_device *dev, const char *format, ...);
+void rtdev_err(struct rtnet_device *dev, const char *format, ...);
+void rtdev_warn(struct rtnet_device *dev, const char *format, ...);
+void rtdev_notice(struct rtnet_device *dev, const char *format, ...);
+void rtdev_info(struct rtnet_device *dev, const char *format, ...);
+void rtdev_dbg(struct rtnet_device *dev, const char *format, ...);
+void rtdev_vdbg(struct rtnet_device *dev, const char *format, ...);
+
+void rtskb_tx_timestamp(struct rtskb *skb);
+-----------------------------------------------------------------------
+
+
+== POSIX interface changes ==
+
+As mentioned earlier, the former *POSIX skin* is known as the *Cobalt
+API* in Xenomai 3.x, available as +libcobalt.{so,a}+.
+The Cobalt API also includes the code of the former +libxenomai+,
+which is no longer a standalone library.
+
++libcobalt+ exposes the set of POSIX and ISO/C standard features
+specifically implemented by Xenomai to honor real-time requirements
+using the Cobalt core.
+
+=== Interrupt management ===
+
+- The former +pthread_intr+ API once provided by Xenomai 2.x is gone.
+
+[[irqhandling]]
+
+.Rationale
+**********************************************************************
+Handling real-time interrupt events from user-space can be done safely
+only if some top-half code exists for acknowledging the issuing device
+request from kernel space, particularly when the interrupt line is
+shared. This should be done via a RTDM driver, exposing a +read(2)+ or
++ioctl(2)+ interface, for waiting for interrupt events from
+applications running in user-space.
+**********************************************************************
+
+Failing this, the low-level interrupt service code in user-space
+would be sensitive to external thread management actions, such as
+being stopped because of GDB/ptrace(2) interaction. Unfortunately,
+preventing the device acknowledge code from running upon interrupt
+request may cause unfixable breakage to happen (e.g. IRQ storm
+typically).
+
+Since the application should provide proper top-half code in a
+dedicated RTDM driver for synchronizing on IRQ receipt, the RTDM API
+available in user-space is sufficient.
+
+Removing the +pthread_intr+ API should be considered as a strong hint
+for keeping driver code in kernel space, where it naturally belongs.
+
+[TIP]
+[[userirqtip]]
+This said, in the rare cases where running a device driver in
+user-space is the best option, one may rely on the RTDM-based UDD
+framework shipped with Xenomai 3. UDD stands for _User-space Device
+Driver_, enabling interrupt control and I/O memory access interfaces
+to applications in a safe manner. It is reminiscent of the UIO
+framework available with the Linux kernel, adapted to the dual
+kernel Cobalt environment.
+
+=== Scheduling ===
+
+- Cobalt implements the following POSIX.1-2001 services not present in
+  Xenomai 2.x: +sched_setscheduler(2)+, +sched_getscheduler(2)+.
+
+- The +SCHED_FIFO+, +SCHED_RR+, +SCHED_SPORADIC+ and +SCHED_TP+
+  classes now support up to 256 priority levels, instead of 99 as
+  previously with Xenomai 2.x. However, +sched_get_priority_max(2)+
+  still returns 99. Only the Cobalt extended call forms
+  (e.g. +pthread_attr_setschedparam_ex()+, +pthread_create_ex()+)
+  recognize these additional levels.
+
+- The new +sched_get_priority_min_ex()+ and
+  +sched_get_priority_max_ex()+ services should be used for querying
+  the static priority range of Cobalt policies.
+
+- `pthread_setschedparam(3)` may cause a secondary mode switch for the
+  caller, but will not cause any mode switch for the target thread
+  unlike with Xenomai 2.x.
+
+[normal]
+  This is a requirement for maintaining both the *glibc* and the
+  Xenomai scheduler in sync, with respect to thread priorities, since
+  the former maintains a process-local priority cache for the threads
+  it knows about. Therefore, an explicit call to the regular
+  `pthread_setschedparam(3)` shall be issued upon each priority change
+  Xenomai-wise, for maintaining consistency.
+
+[normal]
+  In the Xenomai 2.x implementation, the thread being set a new
+  priority would receive a SIGSHADOW signal, triggering a call to
+  `pthread_setschedparam(3)` immediately.
+
+.Rationale
+**********************************************************************
+The target Xenomai thread may hold a mutex or any resource which may
+only be held in primary mode, in which case switching to secondary
+mode for applying the priority change at any random location over a
+signal handler may create a pathological issue. In addition,
+`pthread_setschedparam(3)` is not async-safe, which makes the former
+method fragile.
+**********************************************************************
+
+[normal]
+  Conversely, a thread which calls +pthread_setschedparam(3)+ does
+  know unambiguously whether the current calling context is safe for
+  the incurred migration.
+
+- A new SCHED_WEAK class is available to POSIX threads, which may be
+  optionally turned on using the +CONFIG_XENO_OPT_SCHED_WEAK+ kernel
+  configuration switch.
+
+[normal]
+  By this feature, Xenomai now accepts Linux real-time scheduling
+  policies (SCHED_FIFO, SCHED_RR) to be weakly scheduled by the Cobalt
+  core, within a low priority scheduling class (i.e. below the Xenomai
+  real-time classes, but still above the idle class).
+
+[normal]
+  Xenomai 2.x already had a limited form of such policy, based on
+  scheduling SCHED_OTHER threads at the special SCHED_FIFO,0 priority
+  level in the Xenomai core. SCHED_WEAK is a generalization of such
+  policy, which provides for 99 priority levels, to cope with the full
+  extent of the regular Linux SCHED_FIFO/RR priority range.
+
+[normal]
+  For instance, a (non real-time) Xenomai thread within the SCHED_WEAK
+  class at priority level 20 in the Cobalt core, may be scheduled with
+  policy SCHED_FIFO/RR at priority 20, by the Linux kernel. The code
+  fragment below would set the scheduling parameters accordingly,
+  assuming the Cobalt version of +pthread_setschedparam(3)+ is invoked:
+
+----------------------------------------------------------------------
+	struct sched_param param = {
+		.sched_priority = -20,
+	};
+
+	pthread_setschedparam(tid, SCHED_FIFO, &param);
+----------------------------------------------------------------------
+
+[normal]
+  Switching a thread to the SCHED_WEAK class can be done by negating
+  the priority level in the scheduling parameters sent to the Cobalt
+  core. For instance, SCHED_FIFO, prio=-7 would be scheduled as
+  SCHED_WEAK, prio=7 by the Cobalt core.
+
+[normal]
+  SCHED_OTHER for a Xenomai-enabled thread is scheduled as
+  SCHED_WEAK,0 by the Cobalt core. When the SCHED_WEAK support is
+  disabled in the kernel configuration, only SCHED_OTHER is available
+  for weak scheduling of threads by the Cobalt core.
+
+- A new SCHED_QUOTA class is available to POSIX threads, which may be
+  optionally turned on using the +CONFIG_XENO_OPT_SCHED_QUOTA+ kernel
+  configuration switch.
+
+[normal]
+  This policy enforces a limitation on the CPU consumption of
+  threads over a globally defined period, known as the quota
+  interval. This is done by pooling threads with common requirements
+  in groups, and giving each group a share of the global period (see
+  CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).
+
+[normal]
+  When threads have entirely consumed the quota allotted to the group
+  they belong to, the latter is suspended as a whole, until the next
+  quota interval starts. At this point, a new runtime budget is given
+  to each group, in accordance with its share.
+
+- When called from primary mode, sched_yield(2) now delays the caller
+  for a short while *only if* no context switch happened as a result
+  of the manual round-robin.
+
+[normal]
+  Typically, a Xenomai thread undergoing the SCHED_FIFO or SCHED_RR
+  policy with no contender at the same priority level would still be
+  delayed for a while.
+
+.Rationale
+**********************************************************************
+In most cases, it is unwanted that sched_yield(2) does not cause any
+context switch, since this service is commonly used for implementing a
+poor man's cooperative scheduling. A typical use case involves a
+Xenomai thread running in primary mode which needs to yield the CPU to
+another thread running in secondary mode. By waiting for a context
+switch to happen in the regular kernel, we guarantee that the manual
+round-robin takes place between both threads, despite the execution
+mode mismatch. By limiting the incurred delay, we prevent a regular
+high priority SCHED_FIFO thread stuck in a tight loop from locking
+out the delayed Xenomai thread indefinitely.
+**********************************************************************
+
+=== Thread management ===
+
+- The minimum and default stack size is set to `max(64k,
+  PTHREAD_STACK_MIN)`.
+
+- pthread_set_name_np() has been renamed to pthread_setname_np() with
+  the same arguments, to conform with the GNU extension equivalent.
+
+- pthread_set_mode_np() has been renamed to pthread_setmode_np() for
+  naming consistency with pthread_setname_np(). In addition, the call
+  introduces the PTHREAD_DISABLE_LOCKBREAK mode flag, which disallows
+  breaking the scheduler lock.
+
+[normal]
+  When unset (default case), a thread which holds the scheduler lock
+  drops it temporarily while sleeping. When set, any attempt to block
+  while holding the scheduler lock will cause a break condition to be
+  immediately raised, with the caller receiving EINTR.
+
+[WARNING]
+A Xenomai thread running with PTHREAD_DISABLE_LOCKBREAK and
+PTHREAD_LOCK_SCHED both set may enter a runaway loop when attempting
+to sleep on a resource or synchronization object (e.g. mutex or
+condition variable).
+
+=== Semaphores ===
+
+- With Cobalt, sem_wait(3), sem_trywait(3), sem_timedwait(3), and
+  sem_post(3) have gained fast acquisition/release operations not
+  requiring any system call, unless a contention exists on the
+  resource. As a consequence, those services may not systematically
+  switch callers executing in relaxed mode to real-time mode, unlike
+  with Xenomai 2.x.
+
+=== Process management ===
+
+- In a +fork(2)+ -> +exec(2)+ sequence, all Cobalt API objects created
+  by the child process before it calls +exec(2)+ are automatically
+  flushed by the Xenomai core.
+
+[[real-time-signals]]
+=== Real-time signals ===
+
+- Support for Xenomai real-time signals is available.
+
+[normal]
+Cobalt replacements for +sigwait(3)+, +sigwaitinfo(2)+,
++sigtimedwait(2)+, +sigqueue(3)+ and +kill(2)+ are
+available. +pthread_kill(3)+ was changed to send thread-directed
+Xenomai signals (instead of regular Linux signals).
+
+[normal]
+Cobalt-based signals are strictly real-time. Both the sender and
+receiver sides work exclusively from the primary domain. However, only
+synchronous handling is available, with a thread waiting explicitly
+for a set of signals, using one of the +sigwait+ calls. There is no
+support for asynchronous delivery of signals to handlers.
For this +reason, there is no provision in the Cobalt API for masking signals, +as Cobalt signals are implicitly blocked for a thread until the latter +invokes one of the +sigwait+ calls. + +[normal] +Signals from SIGRTMIN..SIGRTMAX are queued. + +[normal] +COBALT_DELAYMAX is defined as the maximum number of overruns which can +be reported by the Cobalt core in the siginfo.si_overrun field, for +any signal. + +- Cobalt's +kill(2)+ implementation supports group signaling. + +[normal] +Cobalt's implementation of kill(2) behaves identically to the regular +system call for non thread-directed signals (i.e. pid <= 0). In this +case, the caller switches to secondary mode. + +[normal] +Otherwise, Cobalt first attempts to deliver a thread-directed signal +to the thread whose kernel TID matches the given process id. If this +thread is not waiting for signals at the time of the call, kill(2) then +attempts to deliver the signal to a thread from the same process, +which currently waits for a signal. + +- +pthread_kill(3)+ is a conforming call. + +[normal] +When Cobalt's replacement for +pthread_kill(3)+ is invoked, a +Xenomai-enabled caller is automatically switched to primary mode on +its way to sending the signal, under the control of the real-time +co-kernel. Otherwise, the caller keeps running under the control of +the regular Linux kernel. + +[normal] +This behavior also applies to the new Cobalt-based replacement for the ++kill(2)+ system call. + +=== Timers === + +- POSIX timers are no longer dropped when the creator thread + exits. However, they are dropped when the container process exits. + +- If the thread signaled by a POSIX timer exits, the timer is + automatically stopped at the first subsequent timeout which fails + sending the notification. The timer lingers until it is deleted by a + call to +timer_delete(2)+ or when the process exits, whichever comes + first. + +- timer_settime(2) may be called from a regular thread (i.e. which is + not Xenomai-enabled). + +- EPERM is not returned anymore by POSIX timer calls. EINVAL is + substituted in the corresponding situation. + +- Cobalt replacements for +timerfd_create(2)+, +timerfd_settime(2)+ and ++timerfd_gettime(2)+ have been introduced. The implementation delivers +I/O notifications to RTDM file descriptors upon Cobalt-originated +real-time signals. + +- `pthread_make_periodic_np()` and `pthread_wait_np()` have been +removed from the API. + +.Rationale +********************************************************************** +With the introduction of services to support real-time signals, those +two non-portable calls have become redundant. Instead, Cobalt-based +applications should set up a periodic timer using the +`timer_create(2)`+`timer_settime(2)` call pair, then wait for release +points via `sigwaitinfo(2)`. Overruns can be detected by looking at the +siginfo.si_overrun field. + +Alternatively, applications may obtain a file descriptor referring to +a Cobalt timer via the `timerfd_create(2)` call, and `read(2)` from it to wait +for timeouts. + +In addition, applications may include a timer in a synchronous +multiplexing operation involving other event sources, by passing a +file descriptor returned by the `timerfd_create(2)` service to a `select(2)` +call. +********************************************************************** + +[TIP] +A limited emulation of the pthread_make_periodic_np() and +pthread_wait_np() calls is available from the <<trank,Transition +Kit>>. 
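+
+As an illustration of the replacement pattern named in the rationale
+above, the sketch below builds a periodic release point from the
++timer_create(2)+/+timer_settime(2)+ pair, then waits for ticks with
++sigwaitinfo(2)+. This is a minimal, non-normative example: the 1 ms
+period is arbitrary and error checking is omitted.
+
+----------------------------------------------------------------------
+#include <signal.h>
+#include <time.h>
+
+static void periodic_loop(void)
+{
+	struct sigevent sev = {
+		.sigev_notify = SIGEV_SIGNAL,
+		.sigev_signo = SIGRTMIN,	/* queued, as noted above */
+	};
+	struct itimerspec its = {
+		.it_value = { .tv_nsec = 1000000 },	/* first release point */
+		.it_interval = { .tv_nsec = 1000000 },	/* 1 ms period */
+	};
+	sigset_t set;
+	siginfo_t si;
+	timer_t tmid;
+
+	/* Over Cobalt, the signal is implicitly blocked until one of
+	 * the sigwait calls is invoked (see the signals section). */
+	sigemptyset(&set);
+	sigaddset(&set, SIGRTMIN);
+
+	timer_create(CLOCK_MONOTONIC, &sev, &tmid);
+	timer_settime(tmid, 0, &its, NULL);
+
+	for (;;) {
+		sigwaitinfo(&set, &si);	/* wait for the release point */
+		if (si.si_overrun > 0) {
+			/* some release points were missed */
+		}
+		/* ... do periodic work ... */
+	}
+}
+----------------------------------------------------------------------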
+ +=== Clocks === + +- The internal identifier of CLOCK_HOST_REALTIME has changed from 42 + to 8. + +[CAUTION] +This information should normally remain opaque to applications, as it +is subject to change with ABI revisions. + +=== Message queues === + +- +mq_open(3)+ default attributes align on the regular kernel values, + i.e. 10 msg x 8192 bytes (instead of 128 x 128). + +- +mq_send(3)+ now enforces a maximum priority value for messages + (32768). + +=== POSIX I/O services === + +- A Cobalt replacement for mmap(2) has been introduced. The + implementation invokes the <<rtdm-mmap, +.mmap+ operation handler>> + from the appropriate RTDM driver the file descriptor is connected + to. + +- A Cobalt replacement for fcntl(2) has been introduced. The + implementation currently deals with the O_NONBLOCK flag exclusively. + +- Cobalt's select(2) service is not automatically restarted anymore + upon Linux signal receipt, conforming to the POSIX standard (see man + signal(7)). In such an event, -1 is returned and errno is set to + EINTR. + +- The former +include/rtdk.h+ header is gone in Xenomai +3.x. Applications should include +include/stdio.h+ instead. +Similarly, the real-time suitable STDIO routines are now part of ++libcobalt+. + +== Alchemy interface (formerly _native API_) == + +=== General === + +- The API calls supporting a wait operation may return the -EIDRM +error code only when the target object was deleted while +pending. Otherwise, passing a deleted object identifier to an API call +will result in -EINVAL being returned. + +=== Interrupt management === + +- The +RT_INTR+ API is gone. Please see the <<irqhandling,rationale>> + for not handling low-level interrupt service code from user-space. + +[TIP] +It is still possible to have the application wait for interrupt +receipts, as explained <<userirqtip,here>>. + +=== I/O regions === + +- The RT_IOREGION API is gone. I/O memory resources should be + controlled from a RTDM driver instead. + +[TIP] +<<userirqtip,UDD>> provides a simple way to implement mini-drivers +exposing any kind of memory regions to applications in user-space, via +Cobalt's mmap(2) call. + +=== Timing services === + +- +rt_timer_tsc()+, +rt_timer_ns2tsc()+ and +rt_timer_tsc2ns()+ have + been removed from the API. + +.Rationale +********************************************************************** +Due to the accumulation of rounding errors, using raw timestamp values +from the underlying clock source hardware for measuring long +timespans may yield (increasingly) wrong results. + +Either we guarantee stable computations with counts of nanoseconds +from within the application, or with raw timestamps instead, +regardless of the clock source frequency, but we can't provide such +guarantee for both. From an API standpoint, the nanosecond unit is +definitely the best option as the meaning won't vary between clock +sources. + +Avoiding the overhead of the tsc->ns conversion as a justification to +use raw TSC counts does not fly anymore, as all architectures +implement fast arithmetics for this operation over Cobalt, and +Mercury's (virtual) timestamp counter is actually mapped over +CLOCK_MONOTONIC. +********************************************************************** + +[TIP] +Alchemy users should measure timespans (or get timestamps) as counts +of nanoseconds as returned by rt_timer_read() instead. + +- +rt_timer_inquire()+ has a void return type, instead of always + returning zero as previously. 
+  As a consequence of the previously documented change regarding TSC
+  values, the current TSC count is no longer returned in the
+  RT_TIMER_INFO structure.
+
+- +rt_timer_set_mode()+ is obsolete. The clock resolution has become a
+per-process setting, which should be set using the
++--alchemy-clock-resolution+ switch on the command line.
+
+[TIP]
+Tick-based timing can be obtained by setting the resolution of the
+Alchemy clock for the application, here to one millisecond (the
+argument expresses a count of nanoseconds per tick). As a result of
+this, all timeout and date values passed to Alchemy API calls will be
+interpreted as counts of milliseconds.
+----------------------------------------------------------
+# xenomai-application --alchemy-clock-resolution=1000000
+----------------------------------------------------------
+
+[normal]
+By default, the Alchemy API sets the clock resolution for the new
+process to one nanosecond (i.e. tickless, highest resolution).
+
+- TM_INFINITE also means infinite wait with all +rt_*_until()+ call
+  forms.
+
+- +rt_task_set_periodic()+ does not suspend the target task anymore.
+If a start date is specified, then +rt_task_wait_period()+ will apply
+the initial delay.
+
+.Rationale
+**********************************************************************
+A periodic Alchemy task has to call +rt_task_wait_period()+ from
+within its work loop for sleeping until the next release point is
+reached. Since waiting for the initial and subsequent release points
+will most often happen at the same code location in the application,
+the semantics of rt_task_set_periodic() can be simplified so that only
+rt_task_wait_period() may block the caller.
+**********************************************************************
+
+[TIP]
+In the unusual case where you do need to have the current task wait
+for the initial release point outside of its periodic work loop, you
+can issue a call to +rt_task_wait_period()+ separately, exclusively
+for this purpose, i.e.
+---------------------------------------------------------------
+	/* wait for the initial release point. */
+	ret = rt_task_wait_period(&overruns);
+	/* ...more preparation work... */
+	for (;;) {
+		/* wait for the next release point. */
+		ret = rt_task_wait_period(&overruns);
+		/* ...do periodic work... */
+	}
+---------------------------------------------------------------
+However, this workaround cannot be used if the caller is not the
+target task of rt_task_set_periodic(), which is fortunately unusual
+for most applications.
+
+[normal]
++rt_task_set_periodic()+ still switches the caller to primary mode
+over Cobalt, as previously. However, it does not return -EWOULDBLOCK
+anymore.
+
+- TM_ONESHOT was dropped, because the operation mode of the hardware
+  timer has no meaning for the application. The core Xenomai system
+  always operates the available timer chip in oneshot mode anyway.
+  A tickless clock has a period of one nanosecond.
+
+- Unlike with Xenomai 2.x, the target task of +rt_task_set_periodic()+
+  must be local to the current process.
+
+[TIP]
+A limited emulation of the deprecated rt_task_set_periodic() behavior
+is available from the <<trank,Transition Kit>>.
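+
+Tying back to the timing services section above: a minimal sketch of
+measuring a timespan as a count of nanoseconds with +rt_timer_read()+,
+in place of former +rt_timer_tsc()+-based code. The helper name is
+arbitrary:
+
+----------------------------------------------------------------------
+#include <alchemy/timer.h>
+
+static RTIME measure_work(void)
+{
+	RTIME start = rt_timer_read();
+
+	/* ... code to be measured ... */
+
+	return rt_timer_read() - start;	/* elapsed nanoseconds */
+}
+----------------------------------------------------------------------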
+
+=== Mutexes ===
+
+- For consistency with the standard glibc implementation, deleting a
+  RT_MUTEX object in locked state is no longer a valid operation.
+
+- +rt_mutex_inquire()+ does not return the count of waiters anymore.
+
+.Rationale
+**********************************************************************
+Obtaining the current count of waiters only makes sense for debugging
+purposes. Keeping it in the API would introduce a significant overhead
+to maintain internal consistency.
+**********************************************************************
+
+[normal]
+The +owner+ field of a RT_MUTEX_INFO structure now reports the owner's
+task handle, instead of its name. When the mutex is unlocked, a NULL
+handle is returned, which has the same meaning as a zero value in the
+former +locked+ field.
+
+=== Condition variables ===
+
+- For consistency with the standard glibc implementation, deleting a
+  RT_COND object currently pended by other tasks is no longer a valid
+  operation.
+
+- Like +rt_mutex_inquire()+, +rt_cond_inquire()+ does not return the
+count of waiting tasks anymore.
+
+=== Events ===
+
+- Event flags (RT_EVENT) are represented by a regular integer, instead
+  of a long integer as with Xenomai 2.x. This change impacts the
+  following calls:
+
+  * rt_event_create()
+  * rt_event_signal()
+  * rt_event_clear()
+  * rt_event_wait()
+  * rt_event_wait_until()
+
+.Rationale
+**********************************************************************
+Using long integers for representing event bit masks potentially
+creates a portability issue for applications between 32 and 64bit CPU
+architectures. This issue is solved by using 32bit integers on 32/64
+bit machines, which is normally more than enough for encoding the set
+of events received by a single RT_EVENT object.
+**********************************************************************
+
+[TIP]
+These changes are covered by the <<trank,Transition Kit>>.
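+
+A minimal sketch of the updated prototypes in use, with event masks
+carried by plain unsigned integers on 32 and 64bit platforms alike;
+the flag values and helper name are arbitrary:
+
+----------------------------------------------------------------------
+#include <alchemy/event.h>
+
+#define EV_READY	0x1
+#define EV_DATA		0x2
+
+static int wait_both_flags(RT_EVENT *group)
+{
+	unsigned int mask_r = 0;	/* int-sized, not long */
+
+	/* Block until both flags are set (conjunctive wait). */
+	return rt_event_wait(group, EV_READY | EV_DATA, &mask_r,
+			     EV_ALL, TM_INFINITE);
+}
+----------------------------------------------------------------------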
+
+=== Task management ===
+
+- +rt_task_notify()+ and +rt_task_catch()+ have been removed. They are
+  meaningless in a userland-only context.
+
+- As a consequence of the previous change, the T_NOSIG flag to
+  +rt_task_set_mode()+ was dropped in the same move.
+
+- T_SUSP cannot be passed to rt_task_create() or rt_task_spawn()
+  anymore.
+
+.Rationale
+**********************************************************************
+This behavior can be achieved by not calling +rt_task_start()+
+immediately after +rt_task_create()+, or by calling
++rt_task_suspend()+ before +rt_task_start()+.
+**********************************************************************
+
+- T_FPU is obsolete. FPU management is automatically enabled for
+  Alchemy tasks if the hardware supports it, disabled otherwise.
+
+- +rt_task_shadow()+ now accepts T_LOCK, T_WARNSW.
+
+- +rt_task_create()+ now accepts T_LOCK, T_WARNSW and T_JOINABLE.
+
+- The RT_TASK_INFO structure returned by +rt_task_inquire()+ has
+  changed:
+  * fields +relpoint+ and +cprio+ have been removed, since the
+    corresponding information is too short-lived to be valuable to
+    the caller. The task's base priority is still available from
+    the +prio+ field.
+  * new field +pid+ represents the Linux kernel task identifier for
+    the Alchemy task, as obtained from syscall(__NR_gettid).
+  * other fields which represent runtime statistics are now available
+    from a core-specific +stat+ field sub-structure.
+
+- New +rt_task_send_until()+, +rt_task_receive_until()+ calls are
+  available, as variants of +rt_task_send()+ and +rt_task_receive()+
+  respectively, with absolute timeout specification.
+
+- rt_task_receive() does not inherit the priority of the sender,
+although the requests will be queued by sender priority.
+
+[normal]
+The application decides about the server priority, instead of the
+real-time core applying implicit dynamic boosts.
+
+- +rt_task_slice()+ now returns -EINVAL if the caller currently holds
+  the scheduler lock, or attempts to change the round-robin settings
+  of a thread which does not belong to the current process.
+
+- T_CPU disappears from the +rt_task_create()+ mode flags. The new
+  +rt_task_set_affinity()+ service is available for setting the CPU
+  affinity of a task.
+
+[TIP]
+An emulation of rt_task_create() and rt_task_spawn() accepting the
+deprecated flags is available from the <<trank,Transition Kit>>.
+
+- +rt_task_sleep_until()+ does not return -ETIMEDOUT anymore. Waiting
+  for a date in the past blocks the caller indefinitely.
+
+=== Message queues ===
+
+- As Alchemy-based applications run in user-space, the following
+  +rt_queue_create()+ mode bits from the former _native_ API are
+  obsolete:
+
+  * Q_SHARED
+  * Q_DMA
+
+[TIP]
+Placeholders for those deprecated definitions are available from the
+<<trank,Transition Kit>>.
+
+=== Heaps ===
+
+- As Alchemy-based applications run in user-space, the following
+  +rt_heap_create()+ mode bits from the former _native_ API are
+  obsolete:
+
+  * H_MAPPABLE
+  * H_SHARED
+  * H_NONCACHED
+  * H_DMA
+
+[TIP]
+If you need to allocate a chunk of DMA-suitable memory, then you
+should create a RTDM driver for this purpose.
+
+- +rt_heap_alloc_until()+ is a new call for waiting for a memory
+  chunk, specifying an absolute timeout date.
+
+- With the removal of H_DMA, returning a physical address (phys_addr)
+  in +rt_heap_inquire()+ does not apply anymore.
+
+[TIP]
+Placeholders for those deprecated definitions are available from the
+<<trank,Transition Kit>>.
+
+=== Alarms ===
+
+- +rt_alarm_wait()+ has been removed.
+
+.Rationale
+**************************************************************
+An alarm handler can be passed to +rt_alarm_create()+ instead.
+**************************************************************
+
+- The RT_ALARM_INFO structure returned by +rt_alarm_inquire()+ has
+  changed:
+  * field +expiration+ has been removed, since the corresponding
+    information is too short-lived to be valuable to the caller.
+
+  * field +active+ has been added, to reflect the current state of
+    the alarm object. If non-zero, the alarm is enabled
+    (i.e. started).
+
+[TIP]
+An emulation of rt_alarm_wait() is available from the
+<<trank,Transition Kit>>.
+
+=== Message pipes ===
+
+- +rt_pipe_create()+ now returns the minor number assigned to the
+  connection, matching the /dev/rtp<minor> device usable by the
+  regular threads. As a consequence of this, any return value greater
+  than or equal to zero denotes a successful operation, while a
+  negative return denotes an error.
+
+- Writing to a message pipe is allowed from all contexts, including
+  from alarm handlers.
+
+- +rt_pipe_read_until()+ is a new call for waiting for input from a
+  pipe, specifying an absolute timeout date.
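+
+A minimal sketch of the new return convention, assuming P_MINOR_AUTO
+is used to let the core pick a free minor; the object name and pool
+size are arbitrary:
+
+----------------------------------------------------------------------
+#include <stdio.h>
+#include <alchemy/pipe.h>
+
+static int create_pipe(RT_PIPE *pipe)
+{
+	char path[32];
+	int minor;
+
+	minor = rt_pipe_create(pipe, "mypipe", P_MINOR_AUTO, 16384);
+	if (minor < 0)
+		return minor;	/* creation failed */
+
+	/* Regular threads reach us via the matching device node. */
+	snprintf(path, sizeof(path), "/dev/rtp%d", minor);
+
+	return 0;
+}
+----------------------------------------------------------------------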
+
+== pSOS interface changes ==
+
+=== Memory regions ===
+
+- +rn_create()+ may return ERR_NOSEG if the region control block
+  cannot be allocated internally.
+
+=== Scheduling ===
+
+- The emulator converts priority levels between the core POSIX and
+  pSOS scales using normalization (pSOS -> POSIX) and denormalization
+  (POSIX -> pSOS) handlers.
+
+[normal]
+Applications may override the default priority
+normalization/denormalization handlers, by implementing the following
+routines.
+
+------------------------------------------------------------
+int psos_task_normalize_priority(unsigned long psos_prio);
+
+unsigned long psos_task_denormalize_priority(int core_prio);
+------------------------------------------------------------
+
+[normal]
+Over Cobalt, the POSIX scale is extended to 257 levels, which allows
+mapping the pSOS scale 1:1 over the POSIX scale, leaving the
+normalization/denormalization handlers as no-ops by default.
+
+== VxWorks interface changes ==
+
+=== Task management ===
+
+- +WIND_*+ status bits are synced to the user-visible TCB only as a
+result of a call to +taskTcb()+ or +taskGetInfo()+.
+
+[normal]
+As a consequence of this change, any reference to a user-visible TCB
+should be refreshed by calling +taskTcb()+ anew, each time reading the
++status+ field is required.
+
+=== Scheduling ===
+
+- The emulator converts priority levels between the core POSIX and
+  VxWorks scales using normalization (VxWorks -> POSIX) and
+  denormalization (POSIX -> VxWorks) handlers.
+
+[normal]
+Applications may override the default priority
+normalization/denormalization handlers, by implementing the following
+routines.
+
+------------------------------------------------------------
+int wind_task_normalize_priority(int wind_prio);
+
+int wind_task_denormalize_priority(int core_prio);
+------------------------------------------------------------
+
+[[trank]]
+== Using the Transition Kit ==
+
+Xenomai 2 applications in user-space may use a library and a set of
+compatibility headers, aimed at easing the process of transitioning to
+Xenomai 3.
+
+Enabling this compatibility layer is done by passing specific
+compilation and linker flags when building the
+application. +xeno-config+ can retrieve those flags using the
++--cflags+ and +--ldflags+ switches as usual, with the addition of the
++--compat+ flag. Alternatively, passing the +--[skin=]native+ switch
+to +xeno-config+ implicitly turns on the compatibility mode for the
+Alchemy API.
+
+[NOTE]
+The transition kit does not cover _all_ the changes introduced in
+Xenomai 3 yet, but it does cover a significant subset of them.
+ +.A typical Makefile fragment implicitly turning on backward compatibility +------------------------------------------------------------ +PREFIX := /usr/xenomai +CONFIG_CMD := $(PREFIX)/bin/xeno-config +CFLAGS= $(shell $(CONFIG_CMD) --skin=native --cflags) -g +LDFLAGS= $(shell $(CONFIG_CMD) --skin=native --ldflags) +CC = $(shell $(CONFIG_CMD) --cc) +------------------------------------------------------------ + +.Another example for using with the POSIX API +------------------------------------------------------------ +PREFIX := /usr/xenomai +CONFIG_CMD := $(PREFIX)/bin/xeno-config +CFLAGS= $(shell $(CONFIG_CMD) --skin=posix --cflags --compat) -g +LDFLAGS= $(shell $(CONFIG_CMD) --skin=posix --ldflags --compat) +CC = $(shell $(CONFIG_CMD) --cc) +------------------------------------------------------------ diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/Makefile.am b/kernel/xenomai-v3.2.4/doc/asciidoc/Makefile.am new file mode 100644 index 0000000..2bacb7c --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/Makefile.am @@ -0,0 +1,136 @@ +HTML_DOCS = \ + html/MIGRATION \ + html/README.APPLICATIONS \ + html/README.INSTALL \ + html/TROUBLESHOOTING.COBALT \ + html/TROUBLESHOOTING.MERCURY \ + html/asciidoc-icons \ + html/asciidoc-icons/callouts \ + html/man1/autotune \ + html/man1/chkkconf \ + html/man1/clocktest \ + html/man1/corectl \ + html/man1/dohell \ + html/man1/latency \ + html/man1/rtcanconfig \ + html/man1/rtcanrecv \ + html/man1/rtcansend \ + html/man1/slackspot \ + html/man1/switchtest \ + html/man1/xeno \ + html/man1/xeno-config \ + html/man1/xeno-test + +PDF_DOCS = \ + MIGRATION.pdf \ + README.APPLICATIONS.pdf \ + README.INSTALL.pdf \ + TROUBLESHOOTING.COBALT.pdf \ + TROUBLESHOOTING.MERCURY.pdf + +TXT_DOCS = \ + MIGRATION.txt \ + README.APPLICATIONS.txt \ + README.INSTALL.txt \ + TROUBLESHOOTING.COBALT.txt \ + TROUBLESHOOTING.MERCURY.txt + +MAN1_DOCS = \ + man1/autotune.1 \ + man1/chkkconf.1 \ + man1/clocktest.1 \ + man1/corectl.1 \ + man1/cyclictest.1 \ + man1/dohell.1 \ + man1/latency.1 \ + man1/rtcanconfig.1 \ + man1/rtcanrecv.1 \ + man1/rtcansend.1 \ + man1/slackspot.1 \ + man1/switchtest.1 \ + man1/xeno-config.1 \ + man1/xeno-test.1 \ + man1/xeno.1 + +EXTRA_DIST := \ + MIGRATION.adoc \ + README.APPLICATIONS.adoc \ + README.INSTALL.adoc \ + TROUBLESHOOTING.COBALT.adoc \ + TROUBLESHOOTING.MERCURY.adoc \ + plaintext.conf \ + plaintext.xsl \ + plaintext_postproc.awk \ + $(MAN1_DOCS:%.1=%.adoc) + +if XENO_BUILD_DOC + +HTML_DOCSDIR = ./ +PDF_DOCSDIR = ./ +MAN_DOCSDIR = ./ + +ASCIIDOC_HTML_OPTS=-a icons -a iconsdir=../asciidoc-icons \ + -a toc -a toclevels=3 -a max-width=55em -a xenover=$(PACKAGE_VERSION) + +ASCIIDOC_PDF_OPTS=-a icons -a toc -a toclevels=3 -a xenover=$(PACKAGE_VERSION) + +ASCIIDOC_MAN_OPTS=-a xenover=$(PACKAGE_VERSION) + +ASCIIDOC_TXT_OPTS=-a xenover=$(PACKAGE_VERSION) -a encoding=ascii + +tmpdir=adoc_plaintext + +all-local: $(HTML_DOCS) $(PDF_DOCS) $(TXT_DOCS) $(MAN1_DOCS) + +html/%: %.adoc Makefile + @$(mkdir_p) $@ + $(ASCIIDOC) -n -b xhtml11 $(ASCIIDOC_HTML_OPTS) -o $@/index.html $< + +%.1: %.adoc Makefile + @$(mkdir_p) man1 + $(A2X) -f manpage -D man1 $(ASCIIDOC_MAN_OPTS) $< + +%.pdf: %.adoc Makefile + $(A2X) -f pdf -D . 
$(ASCIIDOC_PDF_OPTS) $< + +$(tmpdir)/%.txt: %.adoc Makefile plaintext.conf plaintext.xsl + @$(mkdir_p) $(tmpdir) + $(ASCIIDOC) --backend docbook -f $(srcdir)/plaintext.conf \ + --doctype article $(ASCIIDOC_TXT_OPTS) \ + --out-file $(tmpdir)/$*.xml $< + xsltproc --stringparam toc.section.depth 3 --nonet \ + --output $(tmpdir)/$*.html $(srcdir)/plaintext.xsl \ + $(tmpdir)/$*.xml + w3m -cols 80 -dump -T text/html -no-graph $(tmpdir)/$*.html > $@ + +%.txt: $(tmpdir)/%.txt Makefile plaintext_postproc.awk + awk -f $(srcdir)/plaintext_postproc.awk $(tmpdir)/$*.txt > $@ + +html/asciidoc-icons: + $(RM) -R asciidoc-icons + @if test -d /usr/share/doc/asciidoc/images/; then \ + cp -a /usr/share/doc/asciidoc/images/icons/ html/asciidoc-icons; \ + elif test -d /usr/share/asciidoc/images/icons/; then \ + cp -a /usr/share/asciidoc/images/icons/ html/asciidoc-icons; \ + else \ + cp -a /etc/asciidoc/images/icons/ html/asciidoc-icons; \ + fi + +html/asciidoc-icons/callouts: html/asciidoc-icons + +.PHONY: html/asciidoc-icons + +include $(top_srcdir)/doc/install.rules + +install-data-local: install-docs-local +uninstall-local: uninstall-docs + +else +install-data-local: +uninstall-local: +endif + +distclean-local: clean-local + +clean-local: + $(RM) -R $(HTML_DOCS) $(PDF_DOCS) $(TXT_DOCS) $(tmpdir) diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/README.APPLICATIONS.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/README.APPLICATIONS.adoc new file mode 100644 index 0000000..4447bc0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/README.APPLICATIONS.adoc @@ -0,0 +1,78 @@ +Running applications with Xenomai 3.x +===================================== + +Running a Xenomai 3 application +------------------------------- + +For _Cobalt_, you will need the real-time core built into the target +Linux kernel as described in link:installing-xenomai-3-x[this +document]. + +For _Mercury_, you need no Xenomai-specific kernel support so far, +beyond what your host Linux kernel already provides. Your kernel +should at least provide high resolution timer support +(+CONFIG_HIGH_RES_TIMERS+), and likely complete preemption +(_PREEMPT_RT_) if your application requires short and bounded +latencies. + +Any Xenomai-based application recognizes a set of standard options +that may be passed on the command line, described in +link:application-setup-and-init#Standard_Xenomai_command_line_options[this document]. + +In addition, the *Alchemy*, *pSOS (TM)* and *VxWorks (TM)* APIs running +over the Xenomai core can define the clock resolution to be used, +given as a count of nano-seconds, i.e. HZ=(1000000000 / ns), by the ++--{alchemy/psos/vxworks}-clock-resolution=<ns>+ option. + +If your application combines multiple APIs, you may pass several +clock-resolution switches to set them all. + +The default value depends on the API being considered. For instance, +the VxWorks (TM) and pSOS (TM) emulators default to millisecond clock +rates. The Alchemy API is tickless by default, +i.e. +--alchemy-clock-resolution=1+. + +[CAUTION] +Specifying a resolution greater than 1 nanosecond requires the low +resolution clock support to be available from the Xenomai libraries +(see the +--enable-lores-clock+ +link:installing-xenomai-3-x#Generic_configuration_options_both_cores[configuration +switch]). + +Valgrind support +---------------- + +Running Xenomai applications over _Valgrind_ is currently available to +the _Mercury_ core only. 
+ +When the Valgrind API is available to the application process, the +configuration symbol CONFIG_XENO_VALGRIND_API is defined at build +time, and may be tested for existence by the application code. See the +tool documentation at +http://valgrind.org/docs/manual/manual-core-adv.html#manual-core-adv.clientreq/[this address]. + +The Xenomai autoconf script will detect the Valgrind core header on +the build system automatically, and define this symbol accordingly +(i.e. /usr/include/valgrind/valgrind.h). + +[NOTE] +You may need to install the Valgrind development package on your build +system to provide for the core header files. For instance, such +package is called _valgrind-devel_ on Fedora. + +Available real-time APIs +------------------------ + +[horizontal] +*Alchemy*:: + This is a re-implementation from scratch of Xenomai's + 2.x _native_ API, fully rebased on the new RTOS + abstraction interface. + +*pSOS*:: + http://www.windriver.com[pSOS (TM)] is a registered + trademark of Wind River Systems, Inc. + +*VxWorks*:: + http://www.windriver.com[VxWorks (TM)] is a registered + trademark of Wind River Systems, Inc. diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/README.INSTALL.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/README.INSTALL.adoc new file mode 100644 index 0000000..da96686 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/README.INSTALL.adoc @@ -0,0 +1,879 @@ +Installing Xenomai 3.x +====================== + +Introduction +------------ + +Xenomai 3 is the new architecture of the Xenomai real-time framework, +which can run seamlessly side-by-side Linux as a co-kernel system, or +natively over mainline Linux kernels. In the latter case, the +mainline kernel can be supplemented by the +https://www.kernel.org/pub/linux/kernel/projects/rt/[PREEMPT-RT patch] +to meet stricter response time requirements than standard kernel +preemption would bring. + +One of the two available real-time cores is selected at build +time. The dual kernel core is codenamed _Cobalt_, the native Linux +implementation is called _Mercury_. + +[NOTE] +If you are looking for detailed information about installing a legacy +Xenomai 2.x release, please refer to link:installing-xenomai-2.x[this +document] instead. Please note that Xenomai 2.x is discontinued and +not maintained anymore. + +Installation steps +------------------ + +Xenomai follows a split source model, decoupling the kernel space +support from the user-space libraries. + +To this end, kernel and user-space Xenomai components are respectively +available under the `kernel/` and `lib/` sub-trees. Other top-level +directories, such as `scripts/`, `testsuite/` and `utils/`, provide +additional scripts and programs to be used on either the build host, +or the runtime target. + +The `kernel/` sub-tree which implements the in-kernel support code is +seen as a built-in extension of the Linux kernel. Therefore, the +standard Linux kernel configuration process should be used to define +the various settings for the Xenomai kernel components. All of the +kernel code Xenomai currently introduces implements the _Cobalt_ core +(i.e. dual kernel configuration). As of today, the _Mercury_ core +needs no Xenomai-specific code in kernel space. + +The `lib/` sub-tree contains the various user-space libraries exported +by the Xenomai framework to the applications. This tree is built +separately from the kernel support. Libraries are built in order to +support the selected core, either _Cobalt_ or _Mercury_. 
+
+[[cobalt-core-install]]
+Installing the _Cobalt_ core
+----------------------------
+Preparing the _Cobalt_ kernel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+_Xenomai/cobalt_ provides a real-time extension kernel seamlessly
+integrated with Linux, therefore the first step is to build it as part
+of the target kernel. To this end, `scripts/prepare-kernel.sh` is a
+shell script which sets up the target kernel properly. The syntax is
+as follows:
+
+------------------------------------------------------------------------------
+$ scripts/prepare-kernel.sh [--linux=<linux-srctree>]
+[--ipipe=<ipipe-patch>] [--arch=<target-arch>]
+------------------------------------------------------------------------------
+
+`--linux`:: specifies the path of the target kernel source tree. The
+  kernel tree may or may not be configured already; either case is
+  handled. This path defaults to $PWD.
+
+`--ipipe`:: specifies the path of the interrupt pipeline (aka I-pipe)
+  patch to apply against the kernel tree. Suitable patches are
+  available from the project's link:/downloads/ipipe/[download
+  area]. This parameter can be omitted if the I-pipe has already
+  been patched in; otherwise the script suggests an appropriate
+  one. The script will detect whether the interrupt pipeline code is
+  already present in the kernel tree, and skip this operation if
+  so.
+
+`--arch`:: tells the script about the target architecture. If
+  unspecified, the build host architecture is suggested as a
+  reasonable default.
+
+For instance, the following command would prepare the Linux tree
+located at `/home/me/linux-3.10` in order to patch the Xenomai
+support in:
+
+------------------------------------------------------------------------------
+$ cd xenomai-3
+$ scripts/prepare-kernel.sh --linux=/home/me/linux-3.10
+------------------------------------------------------------------------------
+
+Note: The script will infer the location of the Xenomai kernel code
+from its own location within the Xenomai source tree. For instance, if
+`/home/me/xenomai-3/scripts/prepare-kernel.sh` is executing, then
+the Xenomai kernel code available from
+`/home/me/xenomai-3/kernel/cobalt` will be patched in the target
+Linux kernel.
+
+
+Configuring and compiling the _Cobalt_ kernel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once prepared, the target kernel can be configured as usual. All
+Xenomai configuration options are available from the "Xenomai"
+toplevel Kconfig menu.
+
+There are several important kernel configuration options, documented
+in the link:troubleshooting-a-dual-kernel-configuration#kconf[TROUBLESHOOTING]
+guide.
+
+Once configured, the kernel can be compiled as usual.
+
+If you want several different configs/builds at hand, you may reuse
+the same source by adding `O=../build-<target>` to each make
+invocation.
+
+In order to cross-compile the Linux kernel, pass an ARCH and
+CROSS_COMPILE variable on the make command line. See sections
+<<cobalt-core-arm,"Building a _Cobalt/arm_ kernel">>,
+<<cobalt-core-powerpc,"Building a _Cobalt/powerpc_ kernel">>,
+<<cobalt-core-x86,"Building a _Cobalt/x86_ kernel">>,
+for examples.
+
+[[cobalt-kernel-parameters]]
+_Cobalt_ kernel parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Cobalt kernel accepts the following set of parameters, which
+should be passed on the kernel command line by the boot loader.
+ +[options="header",grid="cols",frame="topbot",cols="2,3,1"] +|============================================================================ +^|NAME ^|DESCRIPTION ^|DEFAULT + +|xenomai.allowed_group=<gid> | Enable non-root access to Xenomai +services from user-space. <gid> is the ID of the Linux user group +whose members should be allowed such access by the Cobalt core. | None + +|xenomai.sysheap_size=<kbytes> | Set the size of the memory heap used +internally by the Cobalt core to allocate runtime objects. This value +is expressed in kilo-bytes. | 256 + +|xenomai.state=<state> | Set the initial state of the Cobalt core at +boot up, which may be _enabled_, _stopped_ or _disabled_. See the +documentation about the +link:../documentation/xenomai-3/html/man1/corectl/index.html[corectl(1)] +utility for a description of these states. | enabled + +|xenomai.smi=<state> | *x86-specific*: Set the state of the SMI +workaround. The possible values are _disabled_, _detect_ and +_enabled_. See the discussion about link:dealing-with-x86-SMI[SMIs] +for a description of these states.| detect + +|xenomai.smi_mask=<source-mask> | *x86-specific*: Set of bits to mask +in the SMI control register. | 1 (=global disable) + +|============================================================================ + +[[cobalt-build-examples]] +Examples of building the _Cobalt_ kernel +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The examples in following sections use the following conventions: + +`$linux_tree`:: path to the target kernel sources +`$xenomai_root`:: path to the Xenomai sources + + +[[cobalt-core-x86]] +Building a _Cobalt/x86_ kernel (32/64bit) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Building _Xenomai/cobalt_ for x86 is almost the same for 32bit and 64bit +platforms. You should note, however, that it is not possible to run +Xenomai libraries compiled for x86_32 on a kernel compiled for x86_64, +and conversely. + +Assuming that you want to build natively for a x86_64 system (x86_32 +cross-build options from x86_64 appear between brackets), you would +typically run: + +------------------------------------------------------------------------------ +$ cd $linux_tree +$ $xenomai_root/scripts/prepare-kernel.sh --arch=x86 \ + --ipipe=ipipe-core-X.Y.Z-x86-NN.patch +$ make [ARCH=i386] xconfig/gconfig/menuconfig +------------------------------------------------------------------------------ +...configure the kernel (see also the recommended settings +link:configuring-for-x86-based-dual-kernels[here]). + +Enable Xenomai options, then build with: +------------------------------------------------------------------------------ +$ make [ARCH=i386] bzImage modules +------------------------------------------------------------------------------ + +Now, let's say that you really want to build Xenomai for a +Pentium-based x86 32bit platform, using the native host toolchain; the +typical steps would be as follows: + +------------------------------------------------------------------------------ +$ cd $linux_tree +$ $xenomai_root/scripts/prepare-kernel.sh --arch=i386 \ + --ipipe=ipipe-core-X.Y.Z-x86-NN.patch +$ make xconfig/gconfig/menuconfig +------------------------------------------------------------------------------ +...configure the kernel (see also the recommended settings +link:configuring-for-x86-based-dual-kernels[here]). 
+
+Enable Xenomai options, then build with:
+------------------------------------------------------------------------------
+$ make bzImage modules
+------------------------------------------------------------------------------
+
+Similarly, for a 64bit platform, you would use:
+
+------------------------------------------------------------------------------
+$ cd $linux_tree
+$ $xenomai_root/scripts/prepare-kernel.sh --arch=x86_64 \
+  --ipipe=ipipe-core-X.Y.Z-x86-NN.patch
+$ make xconfig/gconfig/menuconfig
+------------------------------------------------------------------------------
+...configure the kernel (see also the recommended settings
+link:configuring-for-x86-based-dual-kernels[here]).
+
+Enable Xenomai options, then build with:
+------------------------------------------------------------------------------
+$ make bzImage modules
+------------------------------------------------------------------------------
+
+The remaining examples illustrate how to cross-compile a
+_Cobalt_-enabled kernel for various architectures. Of course, you would
+have to install the proper cross-compilation toolchain for the target
+system first.
+
+[[cobalt-core-powerpc]]
+Building a _Cobalt/powerpc_ kernel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A typical cross-compilation setup, in order to build Xenomai for a
+ppc-6xx architecture running a 3.10.32 kernel. We use the DENX ELDK
+cross-compiler:
+
+------------------------------------------------------------------------------
+$ cd $linux_tree
+$ $xenomai_root/scripts/prepare-kernel.sh --arch=powerpc \
+  --ipipe=ipipe-core-3.10.32-powerpc-1.patch
+$ make ARCH=powerpc CROSS_COMPILE=ppc_6xx- xconfig/gconfig/menuconfig
+------------------------------------------------------------------------------
+...select the kernel and Xenomai options, save the configuration
+------------------------------------------------------------------------------
+$ make ARCH=powerpc CROSS_COMPILE=ppc_6xx- uImage modules
+------------------------------------------------------------------------------
+...manually install the kernel image and modules to the proper location
+
+[[cobalt-core-arm]]
+Building a _Cobalt/arm_ kernel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Using the CodeSourcery toolchain named `arm-none-linux-gnueabi-gcc` and
+compiling for a CSB637 board (AT91RM9200 based), a typical compilation
+will look like:
+
+------------------------------------------------------------------------------
+$ cd $linux_tree
+$ $xenomai_root/scripts/prepare-kernel.sh --arch=arm \
+  --ipipe=ipipe-core-X.Y.Z-arm-NN.patch
+$ mkdir -p $build_root/linux
+$ make ARCH=arm CROSS_COMPILE=arm-none-linux-gnueabi- O=$build_root/linux \
+  csb637_defconfig
+$ make ARCH=arm CROSS_COMPILE=arm-none-linux-gnueabi- O=$build_root/linux \
+  zImage modules
+------------------------------------------------------------------------------
+...manually install the kernel image, system map and modules to the proper location
+
+
+[[mercury-core-install]]
+Installing the _Mercury_ core
+-----------------------------
+
+For _Mercury_, you need no Xenomai-specific kernel support so far,
+beyond what your host Linux kernel already provides. Your kernel
+should at least provide high resolution timer support
+(`CONFIG_HIGH_RES_TIMERS`), and likely complete preemption
+(_PREEMPT_RT_) if your application requires short and bounded
+latencies.
+
+Kernels with no real-time support can be used too, likely for basic
+debugging tasks, and/or running applications which do not have strict
+response time requirements.
+
+Therefore, unlike with _Cobalt_, there are no additional steps for
+preparing and/or configuring the kernel for _Mercury_.
+
+[[library-install]]
+Installing the Xenomai libraries and tools
+------------------------------------------
+
+Prerequisites
+~~~~~~~~~~~~~
+
+Generic requirements (both cores)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- GCC must have support for legacy atomic builtins (__sync form).
+
+- GCC should preferably have sane/working support for TLS,
+although this is not mandatory if building with `--disable-tls`.
+
+- If you plan to enable the user-space registry support
+  (i.e. +--enable-registry+), then CONFIG_FUSE_FS must be enabled in
+  the target kernel running the real-time applications. In addition,
+  the FUSE development libraries must be available from the toolchain.
+
+- If you plan to build from the sources available from the Xenomai GIT
+  tree (git.xenomai.org), the autoconf (>= 2.62), automake and libtool
+  packages must be available on your build system. This is not
+  required when building from a source tree extracted from a
+  link:/downloads/xenomai/[release tarball].
+
+_Cobalt_-specific requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- The kernel version must be 3.10 or better.
+
+- An interrupt pipeline (I-pipe) patch must be available for your
+  target kernel. You can find the official patches issued by the
+  Xenomai project link:/downloads/ipipe/[there].
+  Only patches from the *ipipe-core* series are appropriate, legacy
+  patches from the *adeos-ipipe* series are not.
+
+- A timestamp counter (TSC) is required when running on x86_32
+  hardware. Unlike with Xenomai 2.x, TSC-emulation using a PIT
+  register is not available.
+
+_Mercury_-specific requirement
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- There is no particular requirement for Mercury setups, although
+  using a NPTL-based glibc or uClibc is recommended.
+
+Configuring
+~~~~~~~~~~~
+
+If building the source obtained from the Xenomai GIT tree
+(git.xenomai.org), the `configure` script and Makefiles must be
+generated in the Xenomai source tree. The recommended way is to run
+the automatic reconfiguration script shipped, from the top of the
+source tree:
+
+---------------------
+$ ./scripts/bootstrap
+---------------------
+
+If building from a link:/downloads/xenomai/[release tarball], a set of
+autoconf-generated files will be readily available from the extracted
+source tree, and therefore reconfiguring will not be required.
+
+When run, the generated `configure` script prepares for building the
+libraries and programs, for both the _Cobalt_ and _Mercury_ cores. The
+core-specific code which may be needed internally is automatically and
+transparently selected at compilation-time by the build process.
+
+The options listed below can be passed to this script.
+
+Generic configuration options (both cores)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+[horizontal]
+*--with-core=<type>*::
+
+    Indicates which real-time core you want to build the support
+    libraries for, namely _cobalt_ or _mercury_. This option
+    defaults to _cobalt_.
+
+*--prefix=<dir>*::
+
+    Specifies the root installation path for libraries, include
+    files, scripts and executables. Running `$ make install`
+    installs these files to `$DESTDIR/<dir>`. This directory
+    defaults to /usr/xenomai.
+
+*--enable-debug[=partial]*::
+
+    This switch controls the debug level. Three levels are
+    available, with varying overhead:
+
+  - _symbols_ enables debug symbols to be compiled in the
+    libraries and executables, still turning on the optimizer
+    (-O2). This option has no overhead; it is useful to get
+    meaningful backtraces using gdb while running the application
+    at nominal speed.
+
+  - _partial_ includes _symbols_, and also turns on internal
+    consistency checks within the Xenomai code (mostly present in
+    the Copperplate layer). The `CONFIG_XENO_DEBUG` macro is
+    defined, for both the Xenomai libraries and the applications
+    getting their C compilation flags from the `xeno-config`
+    script (i.e. `xeno-config --cflags`). The partial debug mode
+    implicitly turns on `--enable-assert`. A measurable overhead
+    is introduced by this level. This is the default level when
+    `--enable-debug` is mentioned with no level specification.
+
+  - _full_ includes _partial_ settings, but the optimizer is
+    disabled (-O0), and even more consistency checks may be
+    performed. In addition to `CONFIG_XENO_DEBUG`, the macro
+    `CONFIG_XENO_DEBUG_FULL` is defined. This level introduces the
+    most overhead, which may triple the worst-case latency, or
+    even more.
+
+[normal]
+    Over the _Mercury_ core, enabling _partial_ or _full_ debug
+    modes also causes the standard malloc interface to be used
+    internally instead of a fast real-time allocator (TLSF). This
+    allows debugging memory-related issues with the help of
+    _Valgrind_ or other dynamic memory analysers.
+
+*--disable-debug*::
+
+    Fully turns off all consistency checks and assertions, turns
+    on the optimizer and disables debug symbol generation.
+
+*--enable-assert*::
+
+    A number of debug assertion statements are present in the
+    Xenomai libraries, checking the internal consistency of the
+    runtime system dynamically (see _man assert(3)_). Passing
+    `--disable-assert` to the _configure_ script disables built-in
+    assertions unconditionally. By default, assertions are enabled
+    in partial or full debug modes, disabled otherwise.
+
+*--enable-pshared*::
+
+    Enable shared multi-processing. When enabled, this option
+    allows multiple processes to share real-time objects
+    (e.g. tasks, semaphores).
+
+*--enable-registry[=/registry-root-path]*::
+
+    Xenomai APIs can export their internal state through a
+    pseudo-filesystem, whose files may be read to obtain
+    information about the existing real-time objects, such as
+    tasks, semaphores, message queues and so on. This feature is
+    supported by http://fuse.sourceforge.net/[FUSE], which must be
+    available on the target system. Building the Xenomai libraries
+    with the registry support requires the FUSE development
+    libraries to be available from the toolchain. In addition,
+    CONFIG_FUSE_FS must be enabled in the target kernel.
+
+[normal]
+When this option is enabled, the system creates a file hierarchy at
+`<user>/<session>/<pid>` under the registry root path, where you
+can access the internal state of the active real-time objects. The
+session label is obtained from the --session runtime switch. If no
+session name is specified, `anon@<pid>` will be used. If not specified
+in the configuration switch, the registry root path will be
++/var/run/xenomai+. E.g. looking at the properties of a VxWorks task
+could be done as follows:
+
+--------------------------------------------------------------------
+$ cat /var/run/xenomai/root/anon@12656/12656/vxworks/tasks/windTask
+name = windTask
+errno = 0
+status = ready
+priority = 70
+lock_depth = 0
+--------------------------------------------------------------------
+
+[normal]
+    You may override the default root of the registry hierarchy
+    either statically at build time by passing the desired root
+    path to the --enable-registry configuration switch, or
+    dynamically by using the `--registry-root` runtime option
+    passed to the application.
+
+[NOTE]
+When running over _Xenomai/cobalt_, the `/proc/xenomai` interface is
+also available for inspecting the core system state.
+
+*--enable-lores-clock*::
+
+    Enables support for low resolution clocks. By default,
+    libraries are built with no support for tick-based timing. If
+    you need such support (e.g. for pSOS (TM) or VxWorks (TM)
+    APIs), then you can turn it on using this option.
+
+[NOTE]
+The POSIX API does not support tick-based timing. Alchemy may use it
+optionally.
+
+*--enable-clock-monotonic-raw*::
+
+    The Xenomai libraries require a monotonic clock to be
+    available from the underlying POSIX interface. When
+    `CLOCK_MONOTONIC_RAW` is available on your system, you may
+    want to pass this switch, otherwise `CLOCK_MONOTONIC` will be
+    used by default.
+
+[NOTE]
+The _Cobalt_ core implements `CLOCK_MONOTONIC_RAW`, so this switch is
+turned on by default when building with `--with-core=cobalt`. On the
+contrary, this option is turned off by default when building for the
+_Mercury_ core, since we don't know in advance whether this feature
+does exist on the target kernel.
+
+*--enable-tls*::
+
+    Xenomai can use GCC's thread local storage extension (TLS) to
+    speed up the retrieval of the per-thread information it uses
+    internally. This switch enables TLS; use the converse
+    `--disable-tls` to prevent this.
+
+[normal]
+    Due to GCC bugs regarding this feature with some
+    release/architecture combinations, whether TLS is turned on by
+    default is a per-architecture decision. Currently, this
+    feature is enabled for x86 and powerpc by default; other
+    architectures will require `--enable-tls` to be passed to the
+    _configure_ script explicitly.
+
+[normal]
+    Unless `--enable-dlopen-libs` is present, the _initial-exec_
+    TLS model is selected.
+
+[normal]
+    When TLS is disabled, POSIX's thread-specific data management
+    services are used internally (i.e. pthread_set/getspecific()).
+
+*--enable-dlopen-libs*::
+
+    This switch allows programs to load Xenomai-based libraries
+    dynamically, using the `dlopen(3)` routine. Enabling dynamic
+    loading introduces some overhead in TLS accesses when TLS is
+    enabled (see `--enable-tls`), which might be noticeable
+    depending on the architecture.
+
+[normal]
+    To support dynamic loading when `--enable-tls` is turned on,
+    the _global-dynamic_ TLS model is automatically selected.
+
+[normal]
+    Dynamic loading of Xenomai-based libraries is disabled by
+    default.
+
+*--enable-async-cancel*::
+
+    Enables fully asynchronous cancellation of Xenomai threads
+    created by the real-time APIs, making provision to protect the
+    Xenomai implementation code accordingly.
+[normal]
+    When disabled, Xenomai assumes that threads may exit due to
+    cancellation requests only when they reach cancellation points
+    (like system calls). Asynchronous cancellation is disabled
+    by default.
+
+[CAUTION]
+Fully asynchronous cancellation can easily lead to resource leakage,
+silent corruption, safety issues and all sorts of rampant bugs. The
+only reason to turn this feature on would be to cancel threads which
+run significantly long, syscall-less busy loops with no explicit exit
+condition, which should probably be revisited anyway.
+
+*--enable-smp*::
+
+    Turns on SMP support for Xenomai libraries.
+
+[CAUTION]
+SMP support must be enabled in Xenomai libraries when the
+client applications are running over a SMP-capable kernel.
+
+*--disable-sanity*::
+
+    Turns off the sanity checks performed at application startup
+    by the Xenomai libraries. This option sets a default, which
+    can later be overridden using the --[no-]sanity options passed
+    to a Copperplate-based Xenomai application. Sanity checks are
+    enabled by default when configuring.
+
+*--enable-fortify*::
+
+    Enables `_FORTIFY_SOURCE` when building the Xenomai code
+    unless --enable-debug=full is also given on the command line,
+    in which case --enable-fortify is silently ignored.
+
+*--disable-valgrind-client*::
+
+    Turns off the Valgrind client support, forcing
+    `CONFIG_XENO_VALGRIND_API` off in the Xenomai configuration
+    header.
+
+*--enable-doc-build*::
+
+    Causes the inline Xenomai documentation based on the
+    http://doxygen.org[Doxygen markup language] to be produced as
+    PDF and HTML documents. Additional documentation like manpages
+    based on the http://asciidoc.org/[Asciidoc markup language] is
+    produced too.
+
+_Cobalt_-specific configuration options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+[options="header",grid="cols",frame="topbot",cols="m,2*d"]
+|============================================================================
+^|NAME              ^|DESCRIPTION                            ^|DEFAULT
+|--enable-x86-vsyscall |Use the x86/vsyscall interface
+                        for issuing syscalls. If disabled,
+                        the legacy 0x80 vector will be used.
+                        Turning on this option requires NPTL.  |enabled
+
+|--enable-arm-tsc      |Enable ARM TSC emulation.
+                        footnote:[In the unusual
+                        situation where Xenomai
+                        does not support the kuser generic
+                        emulation for the target SOC, use
+                        this option to specify another tsc
+                        emulation method.
+                        See `--help` for a list of valid
+                        values.]                               |kuser
+
+|--enable-arm-quirks   |Enable quirks for specific ARM
+                        SOCs. Currently sa1100 and
+                        xscale3 are supported.                 |disabled
+|============================================================================
+
+_Mercury_-specific configuration options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+[options="header",grid="cols",frame="topbot",cols="m,2*d"]
+|============================================================================
+^|NAME              ^|DESCRIPTION                            ^|DEFAULT
+|--enable-condvar-workaround | Enable workaround for broken priority
+ inheritance with condition variables in glibc. This option
+ adds some overhead to RTOS API emulators.                     |disabled
+|============================================================================
+
+footnoteref:[disable,Each option enabled by default can be forcibly
+disabled by passing `--disable-<option>` to the _configure_ script]
+
+Cross-compilation
+~~~~~~~~~~~~~~~~~
+
+In order to cross-compile the Xenomai libraries and programs, you will
+need to pass a `--host` and `--build` option to the _configure_
+script. The `--host` option allows selecting the architecture for
+which the libraries and programs are built. The `--build` option
+allows choosing the architecture on which the compilation tools are
+run, i.e. the system running the _configure_ script.
+
+Since cross-compiling requires specific tools, such tools are
+generally prefixed with the host architecture name; for example, a
+compiler for the PowerPC architecture may be named
+`powerpc-linux-gcc`.
+
+When you pass `--host=powerpc-linux` to configure, it automatically
+uses `powerpc-linux-` as a prefix to all compilation tool names and
+infers the host architecture name from this prefix. If configure is
+unable to infer the architecture name from the cross-compilation tool
+prefix, you will have to pass the names of all compilation tools
+manually, using at least the CC and LD variables on the configure
+command line.
+
+The easiest way to build a GNU cross-compiler might involve using
+crosstool-ng, available http://crosstool-ng.org/[here].
+
+If you want to avoid building your own cross-compiler, you may find
+it easier to use the ELDK. It includes the GNU cross development
+tools, such as the compilers, binutils, gdb, etc., and a number of
+pre-built target tools and libraries required on the target
+system. See http://www.denx.de/wiki/DULG/ELDK[here] for further
+details.
+
+Some other pre-built toolchains:
+
+- Mentor Sourcery CodeBench Lite Edition, available
+http://www.mentor.com/embedded-software/sourcery-tools/sourcery-codebench/editions/lite-edition/[here];
+- Linaro toolchain (for the ARM architecture), available
+https://launchpad.net/linaro-toolchain-binaries[here].
+
+
+[[library-install-examples]]
+Examples of building the Xenomai libraries and tools
+----------------------------------------------------
+
+The examples in the following sections use these conventions:
+
+`$xenomai_root`:: path to the Xenomai sources
+`$build_root`:: path to a clean build directory
+`$staging_dir`:: path to a directory that will hold the installed
+  files temporarily before they are moved to their final location;
+  when used in a cross-compilation setup, it is usually an NFS mount
+  point from the target's root directory to the local build host, as
+  a consequence of which running
+  `make{nbsp}DESTDIR=$staging_dir{nbsp}install` on the host
+  immediately updates the target system with the installed programs
+  and libraries.
+
+CAUTION: In the examples below, make sure to add `--enable-smp` to
+the _configure_ script options if building for an SMP-enabled kernel.
+
+Building the x86 libraries (32/64bit)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Assuming that you want to build the _Mercury_ libraries natively for
+an x86_64/SMP system, enabling shared multi-processing support, you
+would typically run:
+
+------------------------------------------------------------------------------
+$ mkdir $build_root && cd $build_root
+$ $xenomai_root/configure --with-core=mercury --enable-smp --enable-pshared
+$ make install
+------------------------------------------------------------------------------
+
+Conversely, cross-building the _Cobalt_ libraries from x86_64 with
+the same feature set, for running on x86_32, could be:
+
+------------------------------------------------------------------------------
+$ mkdir $build_root && cd $build_root
+$ $xenomai_root/configure --with-core=cobalt --enable-smp --enable-pshared \
+  --host=i686-linux CFLAGS="-m32 -O2" LDFLAGS="-m32"
+$ make install
+------------------------------------------------------------------------------
+
+After installing the build tree (i.e. using "make install"), the
+installation root should be populated with the libraries, programs
+and header files you can use to build Xenomai-based real-time
+applications.
This directory path defaults to `/usr/xenomai`.
+
+The remaining examples illustrate how to cross-compile Xenomai for
+various architectures. Of course, you would have to install the
+proper cross-compilation toolchain for the target system first.
+
+Building the PPC32 libraries
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Here is a typical cross-compilation setup for building the _Cobalt_
+libraries for a ppc-6xx architecture. In this example, we want debug
+symbols to be generated for the executables, with no runtime overhead
+though. We use the DENX ELDK cross-compiler:
+
+------------------------------------------------------------------------------
+$ cd $build_root
+$ $xenomai_root/configure --host=powerpc-linux --with-core=cobalt \
+  --enable-debug=symbols
+$ make DESTDIR=$staging_dir install
+------------------------------------------------------------------------------
+
+Building the ARM libraries
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using the CodeSourcery toolchain named `arm-none-linux-gnueabi-gcc`
+and compiling for a CSB637 board (AT91RM9200 based), a typical
+cross-compilation from an x86_32 desktop would look like:
+
+------------------------------------------------------------------------------
+$ mkdir $build_root/xenomai && cd $build_root/xenomai
+$ $xenomai_root/configure CFLAGS="-march=armv4t" LDFLAGS="-march=armv4t" \
+  --build=i686-pc-linux-gnu --host=arm-none-linux-gnueabi --with-core=cobalt
+$ make DESTDIR=$staging_dir install
+------------------------------------------------------------------------------
+
+IMPORTANT: Unlike previous releases, Xenomai no longer passes any ARM
+architecture-specific flags, or FPU flags, to gcc; users are expected
+to pass them using the CFLAGS and LDFLAGS variables as demonstrated
+above, where the AT91RM9200 is based on the ARM920T core,
+implementing the `armv4` architecture. The following table summarizes
+the CFLAGS and configure options which were automatically passed in
+previous revisions and which now need to be passed explicitly to
+configure, for the supported SOCs:
+
+.ARM configure options and compilation flags
+[options="header",frame="topbot",grid="cols",cols="2*d,m"]
+|======================================================================
+^|SOC         ^| CFLAGS                       ^| configure options
+|at91rm9200   | `-march=armv4t -msoft-float`  |
+|at91sam9x    | `-march=armv5 -msoft-float`   |
+|imx1         | `-march=armv4t -msoft-float`  |
+|imx21        | `-march=armv5 -msoft-float`   |
+|imx31        | `-march=armv6 -mfpu=vfp`      |
+|imx51/imx53  | `-march=armv7-a -mfpu=vfp3`
+               footnoteref:[armv7,Depending on the
+               gcc versions the flag for armv7
+               may be `-march=armv7-a` or
+               `-march=armv7a`]                |
+|imx6q        | `-march=armv7-a -mfpu=vfp3` footnoteref:[armv7] | --enable-smp
+|ixp4xx       | `-march=armv5 -msoft-float`   | `--enable-arm-tsc=ixp4xx`
+|omap3        | `-march=armv7-a -mfpu=vfp3` footnoteref:[armv7] |
+|omap4        | `-march=armv7-a -mfpu=vfp3` footnoteref:[armv7] | --enable-smp
+|orion        | `-march=armv5 -mfpu=vfp`      |
+|pxa          | `-march=armv5 -msoft-float`   |
+|pxa3xx       | `-march=armv5 -msoft-float`   | --enable-arm-quirks=xscale3
+|s3c24xx      | `-march=armv4t -msoft-float`  |
+|sa1100       | `-march=armv4t -msoft-float`  | --enable-arm-quirks=sa1100
+|======================================================================
+
+If your toolchain does not support the target architecture version,
+it is possible to build for an older one (v6 instead of v7, or v4
+instead of v5), the only restriction being that if SMP is enabled,
+the architecture must not be older than v6.
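+
+For instance, combining the flags from the table above for an i.MX6Q
+board, and reusing the toolchain prefix from the previous example
+(the exact prefix depends on your setup), the configuration step
+could look like:
+
+------------------------------------------------------------------------------
+$ $xenomai_root/configure --host=arm-none-linux-gnueabi --with-core=cobalt \
+  --enable-smp CFLAGS="-march=armv7-a -mfpu=vfp3" LDFLAGS="-march=armv7-a"
+$ make DESTDIR=$staging_dir install
+------------------------------------------------------------------------------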
+
+
+Testing the installation
+------------------------
+
+Booting the _Cobalt_ kernel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to test the Xenomai installation over _Cobalt_, you should
+first try to boot the patched kernel. Check the kernel boot log for
+messages like these:
+
+------------------------------------------------------------------------------
+$ dmesg | grep -i xenomai
+I-pipe: head domain Xenomai registered.
+[Xenomai] Cobalt vX.Y.Z enabled
+------------------------------------------------------------------------------
+
+
+If the kernel fails to boot, or the log messages indicate an error
+status instead, see the
+link:troubleshooting-a-dual-kernel-configuration#Kernel_log_displays_Xenomai_or_I-pipe_error_messages[TROUBLESHOOTING]
+guide.
+
+
+Testing the real-time system (both cores)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+First, run the latency test:
+
+------------------------------------------------------------------------------
+$ /usr/xenomai/bin/latency
+------------------------------------------------------------------------------
+
+The latency test should display a message every second with minimum,
+maximum and average latency values. If this test displays an error
+message, hangs, or displays unexpected values, see the
+link:troubleshooting-a-dual-kernel-configuration#the_latency_test_shows_high_latencies[TROUBLESHOOTING]
+guide.
+
+If the latency test succeeds, you should next run `xeno-test` in
+order to assess the worst-case latency of your system. Try:
+
+------------------------------------------------------------------------------
+$ xeno-test --help
+------------------------------------------------------------------------------
+
+Calibrating the _Cobalt_ core timer
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The accuracy of the Cobalt timing services depends on proper
+calibration of its core timer. Sound factory-default calibration
+values are defined for each platform Xenomai supports, but it is
+recommended to calibrate the core timer specifically for the target
+system.
+
+See the documentation about the
+link:../documentation/xenomai-3/html/man1/autotune/index.html[autotune(1)]
+utility.
+
+Building and running Xenomai 3 applications
+-------------------------------------------
+
+Once the latency test behaves as expected on your target system, it
+is deemed ready to run real-time applications.
+
+You may want to have a look at
+link:building-applications-with-xenomai-3.x/[this
+document] for details about the application build process.
+
+In addition, you may refer to
+link:running-applications-with-xenomai-3.x/[this document] to learn
+about the command line options available with Xenomai 3 applications.
+
+Migrating applications to Xenomai 3
+-----------------------------------
+
+If you plan to port an existing application based on Xenomai 2.x to
+Xenomai 3.x, you should have a look at
+link:migrating-from-xenomai-2.x-to-3.x/[this migration guide].
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.COBALT.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.COBALT.adoc
new file mode 100644
index 0000000..778e74f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.COBALT.adoc
@@ -0,0 +1,573 @@
+Troubleshooting a dual kernel configuration
+===========================================
+
+This page is a troubleshooting guide enumerating known issues
+with dual kernel Xenomai configurations.
+
+[TIP]
+If running any release from the Xenomai 2 series, or a Xenomai 3
+release using the *Cobalt* real-time core, then you are using a dual
+kernel configuration, and this document was meant for you. Xenomai 3
+over the *Mercury* core stands for a single kernel configuration
+instead, for which you can find specific
+link:troubleshooting-a-single-kernel-configuration/[troubleshooting
+information here].
+
+== Kernel-related issues
+
+[[kconf]]
+=== Common kernel configuration issues
+
+When configuring the Linux kernel, some options should be avoided.
+
+CONFIG_CPU_FREQ:: This allows the CPU frequency to be modulated with
+the workload, but many CPUs also change the TSC counting frequency
+when the clock speed changes, which makes the TSC useless for
+accurate timing. Also, some CPUs can take several milliseconds to
+ramp up to full speed.
+
+CONFIG_CPU_IDLE:: Allows the CPU to enter deep sleep states,
+increasing the time it takes to get out of these sleep states, hence
+the latency of an idle system. Also, on some CPUs, entering these
+deep sleep states causes the timers used by Xenomai to stop
+functioning.
+
+CONFIG_KGDB:: This option should not be enabled, except on x86.
+
+CONFIG_CONTEXT_TRACKING_FORCE:: This option, which appeared in kernel
+3.8, is forced off by I-pipe patches from 3.14 onward, as it is
+incompatible with interrupt pipelining, and has no upside for regular
+users. However, you have to disable it manually for older kernels
+when present. Common effects observed with this feature enabled
+include RCU-related kernel warnings during real-time activities, and
+pathologically high latencies.
+
+=== Kernel hangs after "Uncompressing Linux... done, booting the kernel."
+
+This means that the kernel crashes before the console is enabled. You
+should enable the +CONFIG_EARLY_PRINTK+ option. For some
+architectures (x86, arm), enabling this option also requires passing
+the +earlyprintk+ parameter on the kernel command line. See
+'Documentation/kernel-parameters.txt' for possible values.
+
+For the ARM architecture, you have to enable +CONFIG_DEBUG_KERNEL+
+and +CONFIG_DEBUG_LL+ in order to be able to enable
++CONFIG_EARLY_PRINTK+.
+
+=== Kernel OOPSes
+
+Please make sure to check the <<kconf,"Kernel configuration">>
+section first.
+
+If nothing seems wrong there, try capturing the OOPS information
+using a _serial console_ or _netconsole_, then post it to the
+mailto:xenomai@xenomai.org[xenomai mailing list], along with the
+kernel configuration file (aka `.config`) matching the kernel build.
+
+=== Kernel boots but does not print any message
+
+Your distribution may be configured to pass the +quiet+ option on the
+kernel command line. In this case, the kernel does not print all the
+log messages; however, they are still available using the +dmesg+
+command.
+
+[[kerror]]
+=== Kernel log displays Xenomai or I-pipe error messages
+
+[[no-timer]]
+==== I-pipe: could not find timer for cpu #N
+
+The most probable reason is that no hardware timer chip is available
+for Xenomai timing operations.
+
+Check that you did not enable some of the conflicting options listed
+in the <<kconf,"Kernel configuration">> section.
+
+With AMD x86_64 CPUs:: You will most likely also see the following
+message:
+--------------------------------------------
+I-pipe: cannot use LAPIC as a tick device
+I-pipe: disable C1E power state in your BIOS
+--------------------------------------------
+The interrupt pipeline outputs this message if the C1E option is
+enabled in the BIOS.
To fix this issue, disable C1E support in the BIOS. In some Award
+BIOSes, this option is located in the +Advanced BIOS Features->+ menu
+(+AMD C1E Support+).
+
+[WARNING]
+Disabling the +AMD K8 Cool&Quiet+ feature in the BIOS will *NOT*
+solve this problem.
+
+With other CPU architectures:: The interrupt pipeline implementation
+may lack a registration for a hardware timer available to Xenomai
+timing operations (e.g. a call to +ipipe_timer_register()+).
+
+If you are working on porting the interrupt pipeline to some ARM SoC,
+you may want to have a look at this
+link:porting-xenomai-to-a-new-arm-soc/#The_general_case[detailed
+information].
+
+[[SMI]]
+==== SMI-enabled chipset found, but SMI workaround disabled
+
+You may have an issue with System Management Interrupts on your x86
+platform. You may want to look at
+link:dealing-with-x86-smi-troubles/[this document].
+
+=== Xenomai and Linux devices share the same IRQ vector
+
+This x86-specific issue might still happen on legacy hardware with no
+MSI support. See
+link:what-if-xenomai-and-linux-devices-share-the-same-IRQ[this
+article] from the Knowledge Base.
+
+=== Kernel issues specific to the Xenomai 2.x series
+
+==== system init failed, code -19
+
+See <<no-timer, this entry>>.
+
+==== Local APIC absent or disabled!
+
+The Xenomai 2.x _nucleus_ issues this warning if the kernel
+configuration enables the local APIC support
+(+CONFIG_X86_LOCAL_APIC+), but the processor status gathered at boot
+time by the kernel says that no local APIC support is available.
+There are two options for fixing this issue:
+
+* either your CPU really has _no_ local APIC hardware, in which case
+you need to rebuild a kernel with LAPIC support disabled.
+
+* or it does have a local APIC, but the kernel boot parameters did
+not activate it using the _lapic_ option. The latter is required
+since 2.6.9-rc4 for boxes whose APIC hardware is disabled by default
+by the BIOS. You may want to look at the file
+'Documentation/kernel-parameters.txt' from the Linux source tree, for
+more information about this parameter.
+
+== Application-level issues
+
+[[vsyscall]]
+=== --enable-x86-sep needs NPTL and Linux 2.6.x or higher
+or,
+
+=== --enable-x86-vsyscall requires NPTL ...
+
+This message may happen when starting a Xenomai 2.x or 3.x
+application respectively. On the x86 architecture, the configure
+script option mentioned allows Xenomai to use the _vsyscall_
+mechanism for issuing system calls, based on the most efficient
+method determined by the kernel for the current system. This
+mechanism is only available from NPTL-enabled glibc releases.
+
+Turn off this feature for other libc flavours.
+
+=== Cobalt core not enabled in kernel
+
+As mentioned in the message, the target kernel is lacking Cobalt
+support. See
+link:installing-xenomai-3-x/#Installing_the_Cobalt_core[this document]
+for detailed information about installing Cobalt.
+
+=== binding failed: Function not implemented
+
+Another symptom of the previous issue, i.e. the Cobalt core is not
+enabled in the target kernel.
+
+=== binding failed: Operation not permitted
+
+This is the result of an attempt to run a Xenomai application as an
+unprivileged user, which fails because invoking Xenomai services
+requires +CAP_SYS_NICE+. However, you may allow a specific group of
+users to access Xenomai services, by following the instructions on
+link:running-a-Xenomai-application-as-a-regular-user[this page].
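+
+For instance, assuming your Cobalt kernel supports the
++xenomai.allowed_group+ parameter (see the page above for details),
+you could grant a dedicated group of users access to Xenomai services
+along these lines:
+
+--------------------------------------------------------------------
+# groupadd xenomai               # create a dedicated group
+# usermod -a -G xenomai joe      # add user 'joe' to it
+# grep ^xenomai: /etc/group      # find out its group id
+xenomai:x:1001:joe
+--------------------------------------------------------------------
+
+then pass +xenomai.allowed_group=1001+ on the kernel command line, so
+that members of this group may invoke Xenomai services.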
+
+=== incompatible ABI revision level
+
+Same as below:
+
+=== ABI mismatch
+
+The ABI concerned by this message is the system call binary interface
+between the Xenomai libraries and the real-time kernel services it
+invokes (e.g. +libcobalt+ and the Cobalt kernel with Xenomai
+3.x). This ABI may evolve over time, though only between major
+Xenomai releases or release candidates (i.e. -rc series). When this
+happens, the ABI level required by an application linked against the
+Xenomai libraries may not match the ABI exposed by the Xenomai
+co-kernel implementation on the target machine, which is the
+situation this message reports.
+
+To fix this issue, just make sure to rebuild both the Xenomai kernel
+support and the user-space binaries for your target system. If
+however you did install the appropriate Xenomai binaries on your
+target system, chances are that stale files from a previous Xenomai
+installation still exist on your system, causing the mismatch.
+
+Each major Xenomai release (e.g. 2.1.x, 2.2.x ... 2.6.x, 3.0.x ...)
+defines such a kernel/user ABI, which remains stable across minor
+update releases (e.g. 2.6.0 -> 2.6.4). This guarantee makes partial
+updates possible with production systems (i.e. kernel and/or user
+support). For instance, any application built over the Xenomai 2.6.0
+binaries can run over a Xenomai 2.6.4 kernel support, and conversely.
+
+[TIP]
+Debian-based distributions (notably Ubuntu) may ship with
+pre-installed Xenomai libraries. Make sure that these files don't get
+in the way if you plan to install a more recent Xenomai kernel
+support.
+
+=== <program>: not found
+
+Although the program in question may be present, this message may
+happen on ARM platforms when a mismatch exists between the kernel and
+user library configurations with respect to EABI support. Typically,
+if the user libraries are compiled with a toolchain generating OABI
+code, the result won't run over a kernel not enabling the
++CONFIG_OABI_COMPAT+ option. Conversely, the product of a compilation
+with an EABI toolchain won't run on a kernel not enabling the
++CONFIG_AEABI+ option.
+
+=== incompatible feature set
+
+When a Xenomai application starts, the set of core features it
+requires is compared to the feature set the kernel provides. This
+message denotes a mismatch between the two sets, which can be solved
+by fixing the kernel and/or user build configuration. Further details
+are available from link:installing-xenomai-3-x[this page] for Xenomai
+3, and link:installing-xenomai-2-x[this page] for Xenomai 2.
+
+==== feature mismatch: missing="smp/nosmp"
+
+On SMP-capable architectures, both kernel and user-space components
+(i.e. the Xenomai libraries) must be compiled with the same setting
+with respect to SMP support.
+
+SMP support in the kernel is controlled via the +CONFIG_SMP+ option.
+The +--enable-smp+ configuration switch enables this feature for the
+Xenomai libraries (conversely, +--disable-smp+ disables it).
+
+[CAUTION]
+Using Xenomai libraries built for a single-processor configuration
+(i.e. +--disable-smp+) over an SMP kernel (i.e. +CONFIG_SMP=y+) is
+*NOT* valid. On the other hand, using Xenomai libraries built with
+SMP support enabled over a single-processor kernel is fine.
+
+=== Application-level issues specific to the Xenomai 2.x series
+
+The following feature mismatches can be detected with the 2.x series:
+
+==== feature mismatch: missing="kuser_tsc"
+
+See the <<arm-tsc, "ARM tsc emulation issues">> section.
+
+[NOTE]
+This issue does not affect Xenomai 3.x, as the latter requires a
+modern I-pipe series which must provide _KUSER_TSC_ support on the
+ARM architecture.
+
+==== feature mismatch: missing="sep"
+
+This error is specific to the x86 architecture on Xenomai 2.x, for
+pre-Pentium CPUs which do not provide the _sysenter/sysexit_
+instruction pair. See <<vsyscall, this section>>.
+
+[NOTE]
+This issue does not affect Xenomai 3.x as the latter does not
+support pre-Pentium systems in the first place.
+
+==== feature mismatch: missing="tsc"
+
+This error is specific to the x86 architecture on Xenomai 2.x, for
+pre-Pentium CPUs which do not provide the _rdtsc_ instruction. In
+this particular case, +--enable-x86-tsc+ cannot be mentioned in the
+configuration options for building the user libraries, since the
+processor does not support this feature.
+
+The rule of thumb is to pick the *exact* processor for your x86
+platform when configuring the kernel, or at the very least the most
+specific model which is close to the target CPU, not a generic
+placeholder such as _i586_, for which _rdtsc_ is not available.
+
+If your processor does not provide the _rdtsc_ instruction, you have
+to pass the +--disable-x86-tsc+ option to the configure script for
+building the user libraries. In this case, Xenomai will provide a
+(much slower) emulation of the hardware TSC.
+
+[NOTE]
+This issue does not affect Xenomai 3.x as the latter does not
+support pre-Pentium systems in the first place.
+
+[[arm-tsc]]
+==== ARM tsc emulation issues
+
+In order to allow applications to measure short durations with as
+little overhead as possible, Xenomai uses a 64-bit high-resolution
+counter. On x86, the counter used for this purpose is the time-stamp
+counter, readable by the dedicated _rdtsc_ instruction.
+
+ARM processors generally do not have a 64-bit high-resolution counter
+available in user-space, so this counter is emulated by reading
+whatever high-resolution counter is available on the processor (the
+same one used as clock source in kernel-space), and extending it to
+64 bits using data shared with the kernel. If the Xenomai libraries
+are compiled without emulated tsc support, system calls are used,
+which have a much higher overhead than the emulated tsc code.
+
+In recent versions of the I-pipe patch, SOCs generally select the
++CONFIG_IPIPE_ARM_KUSER_TSC+ option, which means that the code for
+reading this counter is provided by the kernel at a predetermined
+address (in the vector page, a page which is mapped at the same
+address in every process), and is the code used if you do not pass
+the +--enable-arm-tsc+ or +--disable-arm-tsc+ option to configure, or
+pass +--enable-arm-tsc=kuser+.
+
+This default should be fine with recent patches and most ARM
+SOCs.
+
+However, if you see the following message:
+-------------------------------------------------------------------------------
+incompatible feature set
+(userland requires "kuser_tsc...", kernel provides..., missing="kuser_tsc")
+-------------------------------------------------------------------------------
+
+it means that you are either using an old patch, or that the SOC you
+are using does not select the +CONFIG_IPIPE_ARM_KUSER_TSC+ option.
+
+So you should resort to what Xenomai did before branch 2.6: select
+the tsc emulation code when compiling the Xenomai user-space support
+by using the +--enable-arm-tsc+ option. The parameter passed to this
+option is the name of the SOC or SOC family for which you are
+compiling Xenomai.
+Typing:
+-------------------------------------------------------------------------------
+/path/to/xenomai/configure --help
+-------------------------------------------------------------------------------
+
+will return the list of valid values for this option.
+
+If, after having enabled this option and recompiled, you see the
+following message when starting the latency test:
+-------------------------------------------------------------------------------
+kernel/user tsc emulation mismatch
+-------------------------------------------------------------------------------
+or
+-------------------------------------------------------------------------------
+Hardware tsc is not a fast wrapping one
+-------------------------------------------------------------------------------
+
+it means that you selected the wrong SOC or SOC family; reconfigure
+the Xenomai user-space support by passing the right parameter to
++--enable-arm-tsc+ and recompile.
+
+The following message:
+-------------------------------------------------------------------------------
+Your board/configuration does not allow tsc emulation
+-------------------------------------------------------------------------------
+
+means that the kernel-space support for the SOC you are using does
+not provide support for tsc emulation in user-space. In that case,
+you should recompile the Xenomai user-space support passing the
++--disable-arm-tsc+ option.
+
+==== hardware tsc is not a fast wrapping one
+or,
+
+==== kernel/user tsc emulation mismatch
+or,
+
+==== board/configuration does not allow tsc emulation
+
+See the <<arm-tsc, "ARM tsc emulation issues">> section.
+
+==== native skin or CONFIG_XENO_OPT_PERVASIVE disabled
+
+Possible reasons for this error are:
+
+* you booted a kernel without Xenomai or I-pipe support; a kernel
+with I-pipe and Xenomai support should have '/proc/ipipe/version' and
+'/proc/xenomai/version' files;
+
+* the kernel you booted does not have the +CONFIG_XENO_SKIN_NATIVE+
+and +CONFIG_XENO_OPT_PERVASIVE+ options enabled;
+
+* Xenomai failed to start; check the <<kerror,"Xenomai or I-pipe
+error in the kernel log">> section;
+
+* you are trying to run Xenomai user-space support compiled for
+x86_32 on an x86_64 kernel.
+
+==== "warning: <service> is deprecated" while compiling kernel code
+
+Where <service> is a thread creation service, one of:
+
+* +cre_tsk+
+* +pthread_create+
+* +rt_task_create+
+* +sc_tecreate+ or +sc_tcreate+
+* +taskSpawn+ or +taskInit+
+* +t_create+
+
+Starting with Xenomai 3, APIs are not usable from kernel modules
+anymore, with the notable exception of the RTDM device driver API,
+which by essence must be used from kernel space for writing real-time
+device drivers. Those warnings are there to remind you that
+application code should run in user-space context instead, so that it
+can be ported to Xenomai 3.
+
+You may switch those warnings off by enabling the
++CONFIG_XENO_OPT_NOWARN_DEPRECATED+ option in your kernel
+configuration, but nevertheless, you have been *WARNED*.
+
+==== a Xenomai system call fails with code -38 (ENOSYS)
+
+Possible reasons for this error are:
+
+* you booted a kernel without Xenomai or I-pipe support; a kernel
+with I-pipe and Xenomai support should have '/proc/ipipe/version' and
+'/proc/xenomai/version' files;
+
+* the kernel you booted does not have the +CONFIG_XENO_SKIN_*+ option
+enabled for the skin you use, or +CONFIG_XENO_OPT_PERVASIVE+ is
+disabled;
+
+* Xenomai failed to start; check the <<kerror,"Xenomai or I-pipe
+error in the kernel log">> section;
+
+* you are trying to run Xenomai user-space support compiled for
+x86_32 on an x86_64 kernel.
+
+==== the application overconsumes system memory
+
+Your user-space application unexpectedly commits a lot of virtual
+memory, as reported by "+top+" or '/proc/<pid>/maps'. Sometimes OOM
+situations may even appear during runtime on systems with limited
+memory.
+
+The reason is that Xenomai threads are backed by regular POSIX
+threads, for which a large default amount of stack space memory is
+commonly reserved by the POSIX threading library (8MiB per thread
+with _glibc_). Therefore, the kernel will commit as much as
+_8MiB{nbsp}*{nbsp}nr_threads_ bytes to RAM space for the application,
+as a side-effect of calling the +mlockall()+ service to lock the
+process memory, as Xenomai requires.
+
+This behaviour can be controlled in two ways:
+
+- via the _stacksize_ parameter passed to the various thread creation
+routines, or +pthread_attr_setstacksize()+ directly when using the
+POSIX API;
+
+- by setting a lower user-limit for the initial stack allocation from
+the application's parent shell, which all threads from the child
+process inherit, as illustrated below:
+
+---------------------------------------------------------------------
+ulimit -s <initial-size-in-kbytes>
+---------------------------------------------------------------------
+
+==== freeze or machine lockup
+
+Possible reasons may be:
+
+- a stack space overflow biting some real-time kernel thread;
+
+- spurious delay/timeout values computed by the application
+(specifically: too short);
+
+- a system call invoked in a loop which keeps failing without its
+return value being properly checked.
+
+On x86, whenever the nucleus watchdog does not trigger, you may want
+to try disabling CONFIG_X86_UP_IOAPIC while keeping
+CONFIG_X86_UP_APIC, and arm the kernel NMI watchdog on the LAPIC
+(nmi_watchdog=2). You may be lucky and get a backtrace after the
+freeze. Enabling all the nucleus debug options may catch something
+too.
+
+== Issues when running Xenomai test programs
+
+[[latency]]
+=== Issues when running the _latency_ test
+
+The first test to run to see if Xenomai is running correctly on your
+platform is the latency test. The following sections describe the
+usual reasons for this test not to run correctly.
+
+==== failed to open benchmark device
+
+You have launched +latency -t 1+ or +latency -t 2+, which both
+require the kernel to have been configured with the
++CONFIG_XENO_DRIVERS_TIMERBENCH+ option.
+
+==== the _latency_ test hangs
+
+The most common reason for this issue is a too short period passed
+with the +-p+ option; try increasing the period. If you enable the
+watchdog (option +CONFIG_XENO_OPT_WATCHDOG+ in your kernel
+configuration), you should see the <<short-period, "watchdog
+triggered (period too short?)">> message.
+
+[[short-period]]
+==== watchdog triggered (period too short?)
+
+The built-in Xenomai watchdog has stopped the _latency_ test because
+it was using all the CPU in pure real-time mode (aka _primary
+mode_). This is likely due to a too short period. Run the _latency_
+test again, passing a longer period using the +-p+ option this time.
+
+==== the _latency_ test shows high latencies
+
+The _latency_ test runs, but you are seeing high latencies.
+
+* make sure that you carefully followed the <<kconf,"Kernel
+configuration">> section.
+
+* if running on a Raspberry Pi SBC, make sure you don't hit a
+firmware issue; see https://github.com/raspberrypi/firmware/issues/497.
+
+* if running on an x86 platform, make sure that you do not have an
+issue with SMIs; see the <<SMI, section about SMIs>>.
+
+* if running on an x86 platform with a _legacy USB_ switch available
+from the BIOS configuration, try disabling it.
+
+* if you do not have this option at BIOS configuration level, it does
+not necessarily mean that there is no support for it, thus no
+potential for high latencies; this support might just be forcibly
+enabled at boot time. To solve this, in case your machine has some
+USB controller hardware, make sure to enable the corresponding host
+controller driver support in your kernel configuration. For instance,
+UHCI-compliant hardware needs +CONFIG_USB_UHCI_HCD+. As part of its
+init chores, the driver should reset the host controller properly,
+kicking the BIOS off the hardware concerned, and deactivating USB
+legacy mode in the same move, if set.
+
+* if you observe high latencies while running X-window, try disabling
+hardware acceleration in the X-window server configuration file. With
+recent versions of X-window, try using the 'fbdev' driver. Install it
+(Debian package named 'xserver-xorg-video-fbdev' for instance), then
+modify the +Device+ section of '/etc/X11/xorg.conf' to use this
+driver, as in:
+-------------------------------------------------------------------------------
+Section "Device"
+	Identifier  "Card0"
+	Driver      "fbdev"
+EndSection
+-------------------------------------------------------------------------------
+With older versions of X-window, keep the existing driver, but
+add the following line to the +Device+ section:
+-------------------------------------------------------------------------------
+	Option "NoAccel"
+-------------------------------------------------------------------------------
+
+=== Issues when running the _switchtest_ program
+
+==== pthread_create: Resource temporarily unavailable
+
+The switchtest test creates many kernel threads, an operation which
+consumes memory taken from internal pools managed by the Xenomai
+real-time core.
+
+The Xenomai 2.x and 3.x series require +CONFIG_XENO_OPT_SYS_HEAPSZ+
+to be large enough in the kernel configuration settings, to cope with
+the allocation requests.
+
+Xenomai 2.x may also require increasing the
++CONFIG_XENO_OPT_SYS_STACKPOOLSZ+ setting.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc
new file mode 100644
index 0000000..a952e15
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc
@@ -0,0 +1,25 @@
+Troubleshooting a single kernel configuration
+=============================================
+
+This page is a troubleshooting guide enumerating known issues with
+single kernel Xenomai configurations.
+
+[TIP]
+If running Xenomai 3 over the *Mercury* core, then you are using a
+single kernel configuration, and this document was meant for
+you.
Otherwise, if you are running any release from the Xenomai 2
+series, or a Xenomai 3 release using the *Cobalt* real-time core,
+then you are using a dual kernel configuration, for which you can
+find specific
+link:troubleshooting-a-dual-kernel-configuration/[troubleshooting
+information here].
+
+*No entry yet.*
+
+== Application-level issues
+
+=== WARNING: [main] failed to lock memory
+
+Your application needs the CAP_SYS_NICE and CAP_IPC_LOCK capabilities
+to be granted access to Xenomai services (see
+capabilities(7)). Running the application with root privileges is a
+way to gain those capabilities.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/autotune.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/autotune.adoc
new file mode 100644
index 0000000..3462e6b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/autotune.adoc
@@ -0,0 +1,155 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for autotune
+//
+// Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+AUTOTUNE(1)
+===========
+:doctype: manpage
+:revdate: 2014/08/03
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+----
+autotune - Calibrate the Xenomai core clock timer
+
+SYNOPSIS
+---------
+*autotune* [ options ]
+
+DESCRIPTION
+------------
+*autotune* is a utility to determine the best calibration values (aka
+_gravity triplet_) for the core clock timer.
+
+The time spent traversing the kernel code from the low-level Xenomai
+timer handler until the kernel-based client handler is invoked, is
+shorter than the time required to schedule in a kernel thread
+instead. It takes even more time to switch in a user-space thread,
+which entails changing the current memory address space and
+performing potentially time-consuming MMU-related operations.
+
+For this reason, Xenomai differentiates timers by the target context
+they activate, among IRQ handlers, kernel threads and user threads,
+anticipating the next timer shot accordingly, so that such context is
+activated as close as possible to the ideal time. This anticipation
+is called the _gravity_ of the clock serving the timer, which is
+actually a triplet representing the three possible types of contexts
+the timer can activate.
+
+Therefore, the gravity is a static adjustment value to account for
+the basic latency of the target system for responding to timer
+events, as perceived by the client code waiting for the wake-up
+events. Such latency is increased by additional factors, such as:
+
+- bus or CPU cache latency,
+- delay required to program the timer chip for the next shot,
+- code running with interrupts disabled on the CPU to receive the IRQ,
+- inter-processor serialization (_spinlocks_).
+
+*autotune* runs a series of internal calibration tests for estimating
+the most appropriate gravity values for its real-time clock timer,
+retaining the final values.
+
+[IMPORTANT]
+*autotune* requires the *CONFIG_XENO_OPT_AUTOTUNE* option to be
+ enabled in the kernel configuration.
+
+OPTIONS
+--------
+*autotune* accepts the following options:
+
+*--irq*::
+Estimate the IRQ gravity value, which is the shortest time the
+platform needs to deliver an IRQ to a Xenomai interrupt handler in
+kernel space.
+
+*--kernel*::
+Estimate the kernel gravity value, which is the shortest time the
+platform needs to deliver an IRQ to an RTDM task running in kernel
+space. This delay includes the context switching time.
+
+*--user*::
+Estimate the user gravity value, which is the shortest time the
+platform needs to deliver an IRQ to a user-space task/thread running
+in a Xenomai application process. This delay includes the context
+switching time.
+
+*--period <ns>*::
+Set the sampling period to the given count of nanoseconds. The
+estimation is performed by measuring the jitter between the ideal
+time at which a timer tick should be received, and the actual time it
+is eventually received, for a series of ticks. This value expresses
+the delay between two of these ticks. If too short, a lockup might
+occur. A commonly observed result is that the larger the delay, the
+higher the latency, due to CPU cache effects (i.e. the real-time
+code/data are more likely to get evicted from the cache lines as the
+non real-time activity can slip in, treading over a larger address
+space).
+
+*--reset*::
+Reset the gravity values to their factory defaults. These defaults
+are statically defined by the Xenomai platform code.
+
+*--noload*::
+Disable load generation while auto-tuning. *autotune* runs a load
+generator internally in parallel to estimating the latency, in order
+to eliminate irregular delays which tend to appear on fully idle
+systems. Therefore, keeping the load generation enabled most often
+leads to a more accurate estimation.
+
+*--verbose[=level]*::
+Set verbosity to the desired level: 1 means almost quiet (default), 2
+means fully verbose.
+
+*--help*::
+Display a short help.
+
+If none of +--irq+, +--kernel+ or +--user+ is given, an estimation is
+done for each of them in sequence.
+
+VERSIONS
+--------
+*autotune* appeared in Xenomai 3.0 for the _Cobalt_ real-time core.
+
+NOTES
+-----
+The auto-tuning process may take some time to complete the
+estimation. Although this delay may vary across hardware platforms,
+running for 30 seconds is common.
+
+Once the gravity values are known for a particular piece of hardware,
+one may write them to +/proc/xenomai/clock/coreclck+ from some system
+init script to set up the Xenomai core clock accordingly, instead of
+running the auto-tuner after each boot, e.g.:
+
+------------------------------------------------------
+ /* change the user gravity to 1728 ns (default) */
+# echo 1728 > /proc/xenomai/clock/coreclck
+ /* change the IRQ gravity to 129 ns */
+# echo 129i > /proc/xenomai/clock/coreclck
+ /* change the user and kernel gravities to 1728 and 907 ns resp. */
+# echo "1728u 907k" > /proc/xenomai/clock/coreclck
+------------------------------------------------------
+
+Alternatively, the gravity values can be statically defined in the
+kernel configuration of the target kernel:
+
+- CONFIG_XENO_OPT_TIMING_SCHEDLAT should be assigned the user gravity
+  value.
+
+- CONFIG_XENO_OPT_TIMING_KSCHEDLAT should be assigned the kernel
+  gravity value.
+
+- CONFIG_XENO_OPT_TIMING_IRQLAT should be assigned the IRQ gravity
+  value.
+
+AUTHOR
+-------
+*autotune* was written by Philippe Gerum <rpm@xenomai.org>.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/chkkconf.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/chkkconf.adoc
new file mode 100644
index 0000000..5a3c9a6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/chkkconf.adoc
@@ -0,0 +1,117 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for chkkconf
+//
+// Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+CHKKCONF(1)
+===========
+:doctype: manpage
+:revdate: 2021/09/23
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+----
+chkkconf - Check kernel .config
+
+SYNOPSIS
+---------
+*chkkconf* [ options ]
+
+DESCRIPTION
+------------
+*chkkconf* is a utility which checks a kernel configuration against a
+specified checklist. The kernel configuration to verify is a regular
+.config file which contains all the settings for building a kernel
+image. The checklist contains a series of single-line assertions
+which are tested against the contents of the kernel
+configuration. The default checklist file, kconf-checklist, found
+under $datarootdir (/usr/xenomai/share/ by default), contains
+assertions on settings which may influence latency for Xenomai. In
+other words, with the default checklist, the utility checks a kernel
+configuration for common issues which may increase latency.
+
+
+OPTIONS
+--------
+*chkkconf* accepts the following options:
+
+*--file*:: Specify a regular .config file. If none is specified,
+the command defaults to reading /proc/config.gz on the current
+machine. If this fails because CONFIG_IKCONFIG or
+CONFIG_IKCONFIG_PROC was disabled in the running kernel, the
+command fails.
+
+*--check-list*:: Specify a file that contains a series of single-line
+assertions which are tested against the contents of the kernel
+configuration. If none is specified, a default checklist is loaded
+from $datarootdir/kconf-checklist (/usr/xenomai/share/kconf-checklist
+by default). Each assertion follows the BNF-like syntax below:
+
+- assertion : expr conditions
+	| "!" expr conditions
+
+- expr : symbol   /* matches =y and =m */
+	| symbol "=" tristate
+
+- tristate : "y"
+	| "m"
+	| "n"
+
+- conditions : dependency
+	| dependency arch
+
+- dependency : "if" symbol  /* true if set as y/m */
+
+- arch : "on" cputype
+
+- cputype : $(uname -m)
+
+For instance:
+
+- "CONFIG_FOO must be set whenever CONFIG_BAR is unset" can be
+written as CONFIG_FOO if !CONFIG_BAR.
+
+- "CONFIG_FOO must not be set" can be written as !CONFIG_FOO, or
+conversely CONFIG_FOO=n.
+
+- "CONFIG_FOO must be built as a module on aarch32 or aarch64" can be
+written as CONFIG_FOO=m on aarch.
+
+- "CONFIG_FOO must not be built-in on aarch64 if CONFIG_BAR is set"
+can be written as !CONFIG_FOO=y if CONFIG_BAR on aarch.
+
+Assertions in the checklist may apply to a particular CPU
+architecture. Normally, the command should be able to figure out
+which architecture the kernel configuration file applies to by
+inspecting its first lines, looking for the "Linux/" pattern.
+However, you might have to specify this information manually to the
+command using the -a option, if the file referred to by the -f option
+does not contain such information. The architecture name (cputype)
+should match the output of $(uname -m) or some abbreviated portion of
+it.
However, arm64 and arm are automatically
+translated to aarch64 and aarch32 when found in an assertion or
+passed to the -a option.
+
+*--arch*:: Specify the CPU architecture to check for.
+
+*--hash-size*:: Set the hash table size.
+
+*--quiet*:: Suppress output.
+
+*--help*::
+Display a short help.
+
+VERSIONS
+--------
+*chkkconf* appeared in Xenomai 3.2 for checking the kernel .config.
+
+AUTHOR
+-------
+*chkkconf* was written by Philippe Gerum <rpm@xenomai.org> and ported
+by Hongzhan Chen <hongzhan.chen@intel.com> from xenomai4.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/clocktest.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/clocktest.adoc
new file mode 100644
index 0000000..f82df79
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/clocktest.adoc
@@ -0,0 +1,53 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for clocktest
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+CLOCKTEST(1)
+============
+:doctype: manpage
+:revdate: 2008/04/01
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+----
+clocktest - Xenomai Clock Test
+
+SYNOPSIS
+--------
+*clocktest* ['OPTIONS']
+
+DESCRIPTION
+-----------
+*clocktest* is part of the Xenomai test suite and tests the clock. For
+each CPU, it repeatedly prints a time offset (compared to the
+reference gettimeofday()), a drift value, the number of warps and the
+maximum warp in microseconds.
+
+For this program to work, you need to run a suitable Xenomai-enabled
+kernel with the respective module (xeno_posix).
+
+OPTIONS
+-------
+*-C <clock_id>*::
+   clock to be tested, default=0 (CLOCK_REALTIME=0, CLOCK_MONOTONIC=1,
+CLOCK_HOST_REALTIME=42)
+
+*-T <test_duration_seconds>*::
+   default=0 (never stop, ^C to end)
+
+*-D*::
+   print extra diagnostics for CLOCK_HOST_REALTIME
+
+AUTHOR
+------
+*clocktest* was written by Jan Kiszka. This man page
+was written by Roland Stigge.
+
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/corectl.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/corectl.adoc
new file mode 100644
index 0000000..83f7758
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/corectl.adoc
@@ -0,0 +1,106 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for corectl
+//
+// Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+CORECTL(1)
+==========
+:doctype: manpage
+:revdate: 2015/02/14
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+----
+corectl - Cobalt core control interface
+
+SYNOPSIS
+---------
+*corectl* [ options ]
+
+DESCRIPTION
+------------
+*corectl* is a utility to control the state of the Cobalt core,
+switching its real-time services on or off, or retrieving its
+current status.
+
+OPTIONS
+--------
+*corectl* accepts the following options:
+
+*--stop [<grace-seconds>]*:: Stop the real-time services. The
+following actions are taken in sequence:
+
+- termination of all Xenomai threads running in user-space, waiting
+for them to exit for at most +grace-seconds+ if specified, or
+indefinitely otherwise.
+
+- active RTDM drivers are notified of the transition to the stopped
+state.
+
+- termination of lingering RTDM threads (i.e. running in kernel
+space), waiting for them to exit for at most 3 seconds.
+
+- deactivation of the real-time timing services; control of the
+hardware timer on all real-time CPUs is fully released to the host
+kernel.
+
+Once stopped, the Cobalt core rejects all connection requests from
+regular applications.
+
+*--start*:: Start the real-time services. The following actions are
+taken in sequence:
+
+- activation of the real-time timing services; the Cobalt core takes
+full control over the hardware timer on all real-time CPUs.
+
+- loaded RTDM drivers are notified of the transition to the running
+state.
+
+Once started, the Cobalt core accepts all connection requests from
+regular applications anew.
+
+*--status*:: Display the current Cobalt core status. The following
+statuses can be returned:
+
+- _disabled_ denotes a fully inoperative core. This state cannot be
+reached using the *corectl* command, but only by passing the
++xenomai.state=disabled+ option on the kernel command line. A
+disabled core cannot be started dynamically using *corectl*.
+
+- _stopped_ means that no further connection request will be accepted
+from applications; the real-time services are currently
+unavailable. The Cobalt core can be stopped at boot time by passing
+the +xenomai.state=stopped+ option on the kernel command line. A
+stopped core can be started dynamically using *corectl --start*,
+switching it to the _running_ state.
+
+- _running_ denotes an active state of the real-time core; application
+requests are processed normally. This is the default state entered at
+boot time, which corresponds to passing the +xenomai.state=enabled+
+option on the kernel command line.
+
+- _teardown_ denotes a real-time system in the process of stopping
+all services. This transient status should not be seen unless some
+threads are unexpectedly lingering although a termination request was
+issued.
+
+- _warmup_ denotes a real-time system in the process of starting all
+services. This transient status should not be seen unless an RTDM
+driver gets stuck while switching to active mode.
+
+*--help*::
+Display a short help.
+
+If no option is passed, +--status+ is assumed by default.
+
+VERSIONS
+--------
+*corectl* appeared in Xenomai 3.0 for the _Cobalt_ real-time core.
+
+AUTHOR
+-------
+*corectl* was written by Philippe Gerum <rpm@xenomai.org>.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/cyclictest.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/cyclictest.adoc
new file mode 100644
index 0000000..06e6dad
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/cyclictest.adoc
@@ -0,0 +1,80 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for cyclictest
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+CYCLICTEST(1)
+=============
+:doctype: manpage
+:revdate: 2008/04/01
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+cyclictest - Xenomai high resolution timer test
+
+SYNOPSIS
+---------
+// The general command line
+*cyclictest* [options]
+
+DESCRIPTION
+------------
+*cyclictest* is part of the Xenomai test suite and tests the POSIX
+skin of Xenomai with a cyclic timer test.
+
+For this program to work, you need to run a suitable Xenomai-enabled
+kernel with the respective module (xeno_posix).
+
+OPTIONS
+--------
+*cyclictest* accepts the following options:
+
+*-b USEC, --breaktrace=USEC*::
+send break trace command when latency > USEC
+
+*-c CLOCK, --clock=CLOCK*::
+select clock:
+0 = CLOCK_MONOTONIC (default)
+1 = CLOCK_REALTIME
+
+*-d DIST, --distance=DIST*::
+distance of thread intervals in us, default=500
+
+*-i INTV, --interval=INTV*::
+base interval of thread in us, default=1000
+
+*-l LOOPS, --loops=LOOPS*::
+number of loops: default=0 (endless)
+
+*-n, --nanosleep*::
+use clock_nanosleep
+
+*-p PRIO, --prio=PRIO*::
+priority of highest prio thread
+
+*-q, --quiet*::
+print only a summary on exit
+
+*-r, --relative*::
+use relative timer instead of absolute
+
+//.B -s, --system
+//use sys_nanosleep and sys_setitimer
+
+*-t NUM, --threads=NUM*::
+number of threads: default=1
+
+*-v, --verbose*::
+output values on stdout for statistics;
+format: n:c:v, where n=tasknum, c=count, v=value in us
+
+AUTHOR
+-------
+*cyclictest* was written by Thomas Gleixner. This man page
+was written by Roland Stigge.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/dohell.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/dohell.adoc
new file mode 100644
index 0000000..b37c358
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/dohell.adoc
@@ -0,0 +1,55 @@
+DOHELL(1)
+=========
+:doctype: manpage
+:revdate: 2013/08/25
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+dohell - Generate load, in parallel with the latency test
+
+SYNOPSIS
+---------
+*dohell* [ -b <path> ] [ -s <server> ] [ -p <port> ] [ -m <path> ] [ -l <path> | <duration> ]
+
+DESCRIPTION
+------------
+
+*dohell* generates some load, using commonly available commands, in
+parallel with the link:../latency/index.html[latency(1)] test or as
+part of link:../xeno-test/index.html[xeno-test(1)].
+
+OPTIONS
+--------
+
+*dohell* accepts the following options:
+
+*-b*:: run the hackbench test repeatedly during the run-time of the
+*dohell* script;
+
+*-s*:: run nc to continuously send data to a server over the network;
+
+*-p*:: if *-s* is used, specify the port to which to send data; if
+ not specified, port 9 (aka discard) is used;
+
+*-m <path>*:: run dd to write data to the *<path>* directory;
+
+*-l <path>*:: mutually exclusive with *<duration>*; the dohell script
+runs during two runs of the LTP script *runalltests.sh* found in the
+*<path>* directory;
+
+*<duration>*:: mutually exclusive with *-l*; run dohell for the given
+duration in seconds.
+
+SEE ALSO
+--------
+
+*link:../xeno-test/index.html[xeno-test(1)], link:../latency/index.html[latency(1)]*.
+
+EXAMPLE
+--------
+--------------------------------------------------------------------------------
+dohell -s 192.168.0.5 -m /mnt -l /ltp
+--------------------------------------------------------------------------------
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/gpiobench.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/gpiobench.adoc
new file mode 100644
index 0000000..bd32ea8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/gpiobench.adoc
@@ -0,0 +1,70 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for gpiobench
+//
+// Copyright (C) 2020 song chen <chensong@tj.kylinos.cn>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+GPIOBENCH(1)
+============
+:doctype: manpage
+:revdate: 2020/08/03
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+gpiobench - Xenomai gpio latency benchmark
+
+SYNOPSIS
+---------
+// The general command line
+*gpiobench* [ options ]
+
+DESCRIPTION
+------------
+*gpiobench* is part of the Xenomai test suite. It is a gpio latency
+benchmark program. The system must run a suitable Xenomai-enabled
+kernel with the respective module (xeno_timerbench).
+
+OPTIONS
+--------
+*gpiobench* accepts the following options:
+
+*-h <histogram-size>*::
+default = 100, increase if your last bucket is full
+
+*-l <num-of-loops>*::
+default = 1000, number of loops to run the test
+
+*-q <quiet>*::
+print only a summary on exit
+
+*-m <test-mode>*::
+0 = loopback (default), 1 = react
+
+*-c <pin-controller>*::
+name of the pin controller
+
+*-o <output-pin>*::
+number of the gpio pin used as output
+
+*-i <interrupt-pin>*::
+number of the gpio pin used as interrupt
+
+*-p <priority>*::
+default = 99, task priority
+
+*-b <breaktrace>*::
+default = 1000, send break trace command when latency > breaktrace
+
+
+
+AUTHOR
+-------
+*gpiobench* was written by Song Chen. This man page
+was written by Song Chen.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/latency.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/latency.adoc
new file mode 100644
index 0000000..42a1ae1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/latency.adoc
@@ -0,0 +1,85 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for latency
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+LATENCY(1)
+==========
+:doctype: manpage
+:revdate: 2008/04/19
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+latency - Xenomai timer latency benchmark
+
+SYNOPSIS
+---------
+// The general command line
+*latency* [ options ]
+
+DESCRIPTION
+------------
+*latency* is part of the Xenomai test suite. It is a timer latency
+benchmark program. The system must run a suitable Xenomai-enabled
+kernel with the respective module (xeno_timerbench).
+
+OPTIONS
+--------
+*latency* accepts the following options:
+
+*-h*::
+print histograms of min, avg, max latencies
+
+*-g <file>*::
+dump histogram to <file> in a format easily readable with gnuplot. An
+example script for gnuplot may be found in scripts/histo.gp in the
+Xenomai source distribution
+
+*-s*::
+print statistics of min, avg, max latencies
+
+*-H <histogram-size>*::
+default = 200, increase if your last bucket is full
+
+*-B <bucket-size>*::
+default = 1000ns, decrease for more resolution
+
+*-p <period_us>*::
+sampling period
+
+*-l <data-lines per header>*::
+default = 21, 0 to suppress headers
+
+*-T <test_duration_seconds>*::
+default = 0, so ^C to end
+
+*-q*::
+suppress RTD, RTH lines if -T is used
+
+*-t <test_mode>*::
+0 = user task (default), 1 = kernel task, 2 = timer IRQ
+
+*-f*::
+freeze trace for each new max latency
+
+*-c <cpu>*::
+pin the measuring task down to the given CPU
+
+*-P <priority>*::
+task priority (test modes 0 and 1 only)
+
+*-b*::
+break upon mode switch
+
+AUTHOR
+-------
+*latency* was written by Philippe Gerum. This man page
+was written by Roland Stigge.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanconfig.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanconfig.adoc new file mode 100644 index 0000000..4d3255e --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanconfig.adoc @@ -0,0 +1,63 @@ +// ** The above line should force tbl to be a preprocessor ** +// Man page for rtcanconfig +// +// Copyright (C) 2008 Roland Stigge <stigge@antcom.de> +// +// You may distribute under the terms of the GNU General Public +// License as specified in the file COPYING that comes with the +// Xenomai distribution. +// +// +RTCANCONFIG(1) +============== +:doctype: manpage +:revdate: 2008/04/19 +:man source: Xenomai +:man version: {xenover} +:man manual: Xenomai Manual + +NAME +----- +rtcanconfig - Xenomai tool for configuring the CAN controller + +SYNOPSIS +--------- +// The general command line +*rtcanconfig* <can-interface> [options] [up|down|start|stop|sleep] + +DESCRIPTION +------------ +*rtcanconfig* is part of Xenomai. It is used to configure the CAN +controller. The system must run a suitable Xenomai enabled kernel with the +respective module (CAN). + +OPTIONS +-------- +*rtcanconfig* accepts the following options: + +*-v, --verbose*:: +be verbose + +*-h, --help*:: +a usage description + +*-c, --ctrlmode=CTRLMODE*:: +listenonly, loopback or none + +*-b, --baudrate=BPS*:: +baudrate in bits/sec + +*-B, --bittime=BTR0:BTR1*:: +BTR or standard bit-time + +*-B, --bittime=BRP:PROP_SEG:PHASE_SEG1:PHASE_SEG2:SJW:SAM* + +SEE ALSO +-------- +*link:../rtcanrecv/index.html[rtcanrecv(1)], link:../rtcansend/index.html[rtcansend(1)]*. + +AUTHORS +-------- +*rtcanconfig* was written by Wolfgang Grandegger and +Sebastian Smolorz. This man page was +written by Roland Stigge. diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanrecv.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanrecv.adoc new file mode 100644 index 0000000..570e5b4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanrecv.adoc @@ -0,0 +1,72 @@ +// ** The above line should force tbl to be a preprocessor ** +// Man page for rtcanrecv +// +// Copyright (C) 2008 Roland Stigge <stigge@antcom.de> +// +// You may distribute under the terms of the GNU General Public +// License as specified in the file COPYING that comes with the +// Xenomai distribution. +// +// +RTCANRECV(1) +============ +:doctype: manpage +:revdate: 2008/04/19 +:man source: Xenomai +:man version: {xenover} +:man manual: Xenomai Manual + +NAME +----- +rtcanrecv - Xenomai tool for receiving CAN messages + +SYNOPSIS +--------- +// The general command line +*rtcanrecv* [<can-interface>] [Options] + +DESCRIPTION +------------ +*rtcanrecv* is part of Xenomai. It is used to receive messages via a CAN +interface. The system must run a suitable Xenomai enabled kernel with the +respective module (xeno_native and the CAN driver). + +OPTIONS +-------- +<can-interface> is the CAN interface file. + +*rtcanrecv* accepts the following options: + +*-f, --filter=id:mask[:id:mask]...*:: +apply filter + +*-e, --error=mask*:: +receive error messages + +*-t, --timeout=MS*:: +timeout in ms + +*-T, --timestamp*:: +with absolute timestamp + +*-R, --timestamp-rel*:: +with relative timestamp + +*-v, --verbose*:: +be verbose + +*-p, --print=MODULO*:: +print every MODULO message + +*-h, --help*:: +this help + +SEE ALSO +-------- +*link:../rtcanconfig/index.html[rtcanconfig(1)], link:../rtcansend/index.html[rtcansend(1)]*. 
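+ +EXAMPLE +-------- +A hypothetical session (the interface name and filter values are +examples only, not defaults): receive on rtcan0, keeping frames whose +identifier matches 0x123 under the given mask, printing absolute +timestamps: + +-------------------------------------------------------------------------------- +rtcanrecv rtcan0 -f 0x123:0x7ff -T +--------------------------------------------------------------------------------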
+ +AUTHORS +-------- +*rtcanrecv* was written by Wolfgang Grandegger, Jan +Kiszka and Philippe Gerum. This man page +was written by Roland Stigge. diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcansend.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcansend.adoc new file mode 100644 index 0000000..990f574 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcansend.adoc @@ -0,0 +1,86 @@ +// ** The above line should force tbl to be a preprocessor ** +// Man page for rtcansend +// +// Copyright (C) 2008 Roland Stigge <stigge@antcom.de> +// +// You may distribute under the terms of the GNU General Public +// License as specified in the file COPYING that comes with the +// Xenomai distribution. +// +// +RTCANSEND(1) +============ +:doctype: manpage +:revdate: 2008/04/19 +:man source: Xenomai +:man version: {xenover} +:man manual: Xenomai Manual + +NAME +----- +rtcansend - Xenomai tool for sending CAN messages + +SYNOPSIS +--------- +// The general command line +*rtcansend* <can-interface> [Options] <can-msg> + +DESCRIPTION +------------ +*rtcansend* is part of Xenomai. It is used to send messages via a CAN +interface. The system must run a suitable Xenomai enabled kernel with the +respective module (xeno_native and the CAN driver). + +OPTIONS +-------- +<can-interface> is the CAN interface file. + +<can-msg> can consist of up to 8 bytes given as a space-separated list. + +*rtcansend* accepts the following options: + +*-i, --identifier=ID*:: +CAN Identifier (default = 1) + +*-r, --rtr*:: +send remote request + +*-e, --extended*:: +send extended frame + +*-l, --loop=COUNT*:: +send message COUNT times + +*-c, --count*:: +message count in data[0-3] + +*-d, --delay=MS*:: +delay in ms (default = 1ms) + +*-s, --send*:: +use send instead of sendto + +*-t, --timeout=MS*:: +timeout in ms + +*-L, --loopback=0|1*:: +switch local loopback off or on + +*-v, --verbose*:: +be verbose + +*-p, --print=MODULO*:: +print every MODULO message + +*-h, --help*:: +a usage description + +SEE ALSO +-------- +*link:../rtcanconfig/index.html[rtcanconfig(1)], link:../rtcanrecv/index.html[rtcanrecv(1)]*. + +AUTHORS +-------- +*rtcansend* was written by Wolfgang Grandegger, Jan +Kiszka and Philippe Gerum. This man page +was written by Roland Stigge. diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/slackspot.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/slackspot.adoc new file mode 100644 index 0000000..b59b992 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/slackspot.adoc @@ -0,0 +1,138 @@ +// ** The above line should force tbl to be a preprocessor ** +// Man page for slackspot +// +// Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> +// +// You may distribute under the terms of the GNU General Public +// License as specified in the file COPYING that comes with the +// Xenomai distribution. +// +// +SLACKSPOT(1) +============ +:doctype: manpage +:revdate: 2014/06/26 +:man source: Xenomai +:man version: {xenover} +:man manual: Xenomai Manual + +NAME +---- +slackspot - Trace secondary mode switches + +SYNOPSIS +--------- +*slackspot* [ options ] + +DESCRIPTION +------------ +*slackspot* is a utility to decode the trace data collected by the +Cobalt core when CONFIG_XENO_OPT_DEBUG_TRACE_RELAX is enabled in the +kernel configuration. + +This data describes each call hierarchy causing migration to secondary +mode (i.e. _relaxes_) within the application. *slackspot* presents +such data in a human-readable format as symbolic stack backtraces, to +help debug spurious relaxes.
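+ +For instance, assuming both the trace data and the application +binaries are available locally on the system under test, a minimal +invocation could look like the following sketch (the directory list +passed to the *--path* option described below is only an example): + +--------------------------------------------------------------------------- +slackspot --path=/lib:/usr/lib < /proc/xenomai/debug/relax +--------------------------------------------------------------------------- + +A more elaborate scenario, including remote decoding with a +cross-compilation toolchain, is shown in the EXAMPLE section below.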
+ +OPTIONS +-------- +*slackspot* accepts the following options: + +*--file <trace-file>*:: +Read the trace information to decode from _trace-file_. By default, +trace data is read from +/proc/xenomai/debug/relax+ unless the +standard input stream was redirected, in which case +stdin+ is read. +In addition, the dash character "-" is interpreted as a placeholder +for +stdin+. + +*--path <dir[:dir...]>*:: +Search directory list for executables and dynamic +libraries. Directories are separated by a colon within the +list. Each directory may be scanned for binary executables when +resolving symbols found in stack backtraces. + +*--filter-in <name=exp[,name=...]>*:: +Only retain backtraces matching the given filters in the output. Each +filter is specified by a _name=<expr>_ pair, where _name_ identifies +the information field to be matched in the backtrace, and _expr_ is a +regular expression which should match such data. Filters are separated +by a comma within the list. The available filters are as follows: + + * _thread_ matches the thread name. + * _pid_ matches the kernel task identifier, i.e. per-task _pid_. + * _exe_ matches the name of the main executable being traced. + * _function_ matches the name of the function being traced. + * _file_ matches the path of the source file being traced. + * _map_ matches the path of the mapped executable being traced. + +*--filter <name=exp[,name=...]>*:: +A shorthand for *--filter-in*. + +*--filter-out <name=exp[,name=...]>*:: +Only retain backtraces NOT matching the given filters in the +output. This option inverts the sense of matching defined by +*--filter-in*. + +*CROSS_COMPILE=<toolchain-prefix>*:: +A cross-compilation toolchain prefix should be specified for decoding +the data obtained from a target system, on a build/development +machine. When present, the value of CROSS_COMPILE will be prepended to ++gcc+ and +addr2line+ for running the corresponding utilities on the +development system. + +VERSIONS +-------- + +*slackspot* appeared in Xenomai 3.0 for the _Cobalt_ real-time core. + +EXAMPLE +------- + +In the following scenario, the _target_ system, built with the +CONFIG_XENO_OPT_DEBUG_TRACE_RELAX feature enabled in the kernel +configuration, just ran the _/bin/relax_ program. + +This program caused a switch to secondary mode of the +current task (_Task 2_) as a result of calling +putchar()+. The Cobalt +core saved the corresponding backtrace information, which is now +available from +/proc/xenomai/debug/relax+ on the target system. + +Since the target system has limited horsepower and doesn't have +access to the binary utilities required for decoding the trace data, +we will send such data over the network to the _host_ system, in order +for the latter to do the decoding and display the call stacks. + +We use the standard +netcat+ utility to send and receive the contents +of +/proc/xenomai/debug/relax+ over the wire between the target and +host systems. The host will also have to specify where the +cross-compilation toolchain can be found, by setting the CROSS_COMPILE +variable appropriately. The example assumes that ++/opt/rootfs/MPC5200/lib+ is the host-based location of the system +libraries mounted over NFS onto the target file hierarchy.
+ +.On the target system: +--------------------------------------------------------------------------- +target> netcat -l -p 67676 -c < /proc/xenomai/debug/relax +--------------------------------------------------------------------------- +.On the host system: +--------------------------------------------------------------------------- +host> netcat target 67676 | CROSS_COMPILE=ppc_6xx- slackspot + --path=/opt/rootfs/MPC5200/lib:$HOME/frags/relax --filter thread=Task* +Thread[828] "Task 2" started by /bin/relax: + #0 0xfff00000 ??? + #1 0x000001bb ??? + #2 0x00064393 _IO_file_doallocate() in ??:? + #3 0x00073d6f _IO_doallocbuf() in ??:? + #4 0x00072d87 _IO_file_overflow() in ??:? + #5 0x00075f83 __overflow() in ??:? + #6 0x0006997b putchar() in ??:? + #7 0x100017db task2_func() in /home/rpm/frags/relax/relax.c:23 + #8 0x000078d7 task_entry() in /home/rpm/git/xenomai-forge/lib/alchemy/task.c:235 + #9 0x00005a6b start_thread() in pthread_create.c:? + #10 0x000d389f __clone() in ??:? +--------------------------------------------------------------------------- + +AUTHOR +------- +*slackspot* was written by Philippe Gerum <rpm@xenomai.org>. diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/switchtest.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/switchtest.adoc new file mode 100644 index 0000000..aa93a29 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/switchtest.adoc @@ -0,0 +1,106 @@ +// ** The above line should force tbl to be a preprocessor ** +// Man page for switchtest +// +// Copyright (C) 2008 Roland Stigge <stigge@antcom.de> +// +// You may distribute under the terms of the GNU General Public +// License as specified in the file COPYING that comes with the +// Xenomai distribution. +// +// +SWITCHTEST(1) +============= +:doctype: manpage +:revdate: 2008/04/19 +:man source: Xenomai +:man version: {xenover} +:man manual: Xenomai Manual + +NAME +----- +switchtest - Xenomai context switch test + +SYNOPSIS +--------- +// The general command line +*switchtest* [options] threadspec threadspec... + +DESCRIPTION +------------ +*switchtest* is part of Xenomai. It can be used to test thread context +switches. *switchtest* creates threads of various types and attempts to +switch context between these threads, printing the count of context switches +every second. A suitable Xenomai enabled kernel with the respective module +(xeno_posix) must be installed. + +OPTIONS +-------- +Each threadspec specifies the characteristics of a thread to be created: + +threadspec = (rtk|rtup|rtus|rtuo)(_fp|_ufpp|_ufps)\*[0-9]* + +*rtk*:: +for a kernel-space real-time thread + +*rtup*:: +for a user-space real-time thread running in primary mode + +*rtus*:: +for a user-space real-time thread running in secondary mode + +*rtuo*:: +for a user-space real-time thread oscillating between primary and secondary mode + +*_fp*:: +means that the created thread will have the XNFPU bit armed (only valid for rtk) + +*_ufpp*:: +means that the created thread will use the FPU when in primary mode (invalid for rtus) + +*_ufps*:: +means that the created thread will use the FPU when in secondary mode (invalid for rtk and rtup) + +*[0-9]*:: +specifies the ID of the CPU where the created thread will run, 0 if unspecified + +Passing no +*threadspec* +is equivalent to running: + +switchtest rtkN rtkN rtk_fpN rtk_fpN rtk_fp_ufppN rtk_fp_ufppN rtupN rtupN rtup_ufppN +rtup_ufppN rtusN rtusN rtus_ufpsN rtus_ufpsN rtuoN rtuoN rtuo_ufppN rtuo_ufppN rtuo_ufpsN +rtuo_ufpsN rtuo_ufpp_ufpsN rtuo_ufpp_ufpsN + +with N=1,...,nr_cpus, i.e. 
occurrences of all the arguments for each CPU + +Passing only the --nofpu or -n argument is equivalent to running: + +switchtest rtkN rtkN rtupN rtupN rtusN rtusN rtuoN rtuoN + +with N defined as in the previous example. + +*switchtest* accepts the following options: + +*--help, -h*:: +print usage information and exit + +*--lines <lines>, -l <lines>*:: +print headers every <lines> lines + +*--quiet, -q*:: +prevent this program from printing the count of context switches +every second + +*--really-quiet, -Q*:: +prevent this program from printing any output + +*--timeout <duration>, -T <duration>*:: +limit the test duration to <duration> seconds + +*--nofpu, -n*:: +disable any use of FPU instructions + +AUTHOR +------- +*switchtest* was written by Philippe Gerum and Gilles +Chanteperdrix. This man page was written by +Roland Stigge. diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-config.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-config.adoc new file mode 100644 index 0000000..81f67ad --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-config.adoc @@ -0,0 +1,217 @@ +// ** The above line should force tbl to be a preprocessor ** +// Man page for xeno-config +// +// Copyright (C) 2005, 2006 Romain Lenglet <rlenglet@users.forge.objectweb.org> +// Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> +// +// You may distribute under the terms of the GNU General Public +// License as specified in the file COPYING that comes with the +// Xenomai distribution. +// +// +XENO-CONFIG(1) +============== +:doctype: manpage +:revdate: 2014/08/03 +:man source: Xenomai +:man version: {xenover} +:man manual: Xenomai Manual + +NAME +----- +xeno-config - Retrieve Xenomai build flags and configuration + +SYNOPSIS +--------- +*xeno-config* + +*xeno-config* *--v* | *--verbose* + +*xeno-config* *--help* + +*xeno-config* *--info* + +*xeno-config* *--core* + +*xeno-config* *--version* + +*xeno-config* [*--cc*] [*--ccld*] [*--arch*] [*--prefix*] [*--posix|alchemy|rtdm|psos|vxworks|smokey*] [*--compat*] [*--auto-init*|*no-auto-init*] [*--auto-init-solib*] [*--mode-check*|*no-mode-check*] [*--cflags*] [*--ldflags*] [*--library-dir*|*libdir*|*user-libdir*] + +DESCRIPTION +------------ +*xeno-config* is a shell script aimed at retrieving the +Xenomai build configuration data, such as the compiler and linker +flags required for building applications. For this reason, +*xeno-config* is typically used in Makefiles. + +*xeno-config --verbose* dumps the build configuration data in a +human-readable format. + +Invoking *xeno-config* without any options is equivalent to running +*xeno-config --verbose --help*. + +OPTIONS +-------- +*--v, --verbose*:: +Output all configuration information in a human-readable format. + +*--help*:: Output the list of available command-line options. The +command exits immediately after completion. + +*--version*:: +Output the Xenomai version. + +*--cc*:: +Output the path to the C compiler command used to build the Xenomai +libraries and utilities, which is therefore suitable for compiling a +Xenomai application. + +*--ccld*:: +Output a C compiler command suitable for linking a Xenomai +application. + +*--arch*:: +Output the target CPU architecture Xenomai was compiled for, e.g. arm, +x86, powerpc etc. This may differ from the CPU architecture of the +current system, if cross-compiling. + +*--prefix*:: +Output the absolute path to the Xenomai installation directory.
+ +*--[skin=]{posix, alchemy, rtdm, psos, vxworks, smokey, cobalt}*:: +Select the API/skin for which *xeno-config* should print the +information required. The *skin=* prefix is optional and may be +omitted, e.g. *--posix* is equivalent to *--skin=posix*, selecting the +POSIX API. + +[NOTE] +*--native* and *--skin=native* are accepted for backward compatibility +purposes. They are strictly equivalent to passing *--alchemy --compat*. +Likewise, passing *--rtdm* or *--skin=rtdm* is strictly equivalent to +passing *--posix*, enabling POSIX I/O routines to be wrapped to their +respective Xenomai implementation. + +[CAUTION] +Over Cobalt, only *xeno-config --posix --ldflags* (or *--rtdm* as an +alias) returns the proper linker flags to cause POSIX routines invoked +by the application to be wrapped to their respective Xenomai +implementation. No other API will imply such wrapping. For this +reason, *--cobalt --ldflags* should be used for linking exclusively +against the Cobalt library (i.e. +libcobalt.so+) *without* symbol +wrapping. Conversely, mentioning *--posix* along with other API +switches with *--ldflags* will cause POSIX symbol wrapping to take +place, e.g. use *--posix --alchemy --ldflags* for mixed API support +with POSIX symbol wrapping. + +*--cflags*:: +Output the C compiler command-line options (_CFLAGS_) which are required +to compile applications based on the selected Xenomai API/skin. + +*--ldflags*:: +Output the C compiler command-line options (_LDFLAGS_) which are +required to link applications based on the selected Xenomai API/skin. + +*--library-dir, --libdir, --user-libdir*:: +These switches are synonyms, for retrieving the absolute path to the +Xenomai libraries. + +*--auto-init*:: +*--no-auto-init*:: + +By default, a process started from an executable linked with flags +returned by *xeno-config --ldflags* performs the Xenomai-related +initialization automatically, before the +main()+ routine is entered. + +Building with *--no-auto-init* disables such initialization. In this +case, the application code shall call the +xenomai_init()+ routine +manually, as part of its initialization chores on behalf of the ++main()+ routine, *before* any real-time service is invoked. See ++include/xenomai/init.h+. + +NOTE: This flag only makes sense when passed along with --ldflags. + +*--mode-check*:: +*--no-mode-check*:: + +Over Cobalt, a set of standard routines which may invoke regular Linux +system calls can trigger an assertion failure on entry, if the caller +must leave the real-time mode (aka "secondary mode switch") to execute +such a routine. + +The assertion failure is triggered if the calling thread has set the +PTHREAD_WARNSW flag by a call to +pthread_setmode_np()+. + +By default, the mode checking routines are substituted for the original +ones using the symbol wrapping mechanism also used for interposing on +POSIX services. *--no-mode-check* disables such substitution. + +These flags make sense when passed along with --ldflags only. + +*--auto-init-solib*:: + +This switch enables the auto-initialization feature described above +for a shared library target instead of a pure executable. The main +difference resides in position-independent (PIC) glue code being +used for bootstrapping the initialization. + +The bootstrap code runs when the shared library is attached to a +running executable, either because it appears in the static +dependencies of this executable, or when loaded dynamically via the ++dlopen()+ interface.
+ +*--core*:: +Output the name of the real-time core the current Xenomai installation +was built for. The possible values are _cobalt_ or _mercury_, +depending on the configuration switch *--with-core* used for building +the Xenomai libraries and utilities. + +*--compat*:: +Enable the Xenomai 2.x compatibility mode for the API/skin +selected. This switch affects the _Alchemy_ and POSIX APIs, turning on +a set of source-level compatibility wrappers when present. + +*--info*:: +Dump information about the running Xenomai-enabled system. Unlike most +other options, *--info* is aimed at being used on the target system +running Xenomai, for retrieving the current setup information. The +output of this command is valuable information when reporting any +runtime issue to mailto:xenomai@xenomai.org[the Xenomai mailing +list]. The command exits immediately after completion. + +ENVIRONMENT VARIABLES +--------------------- + +*DESTDIR*:: + +Xenomai's handling of *DESTDIR* conforms to the GNU coding and +installation standards, for generating pathnames rooted at some +staging area on the build system. Such staging area is commonly +NFS-mounted from the target system running Xenomai. + +If the *DESTDIR* variable is set in the environment of *xeno-config*, +its contents are prepended to all directory and file names based on the +Xenomai installation root which may be output by the command. + +If *DESTDIR* was set when installing Xenomai - typically after +cross-compiling - *DESTDIR* must be set to the same value before +calling *xeno-config* for accessing the target-based directories and +files from the build system. + +e.g. + +---------------------------------------------------------------------------- +$ configure --prefix=/usr --includedir=/usr/include/xenomai +$ make install DESTDIR=/nfsroot/target +$ DESTDIR=/nfsroot/target /nfsroot/target/bin/xeno-config --alchemy --cflags +-I/nfsroot/target/usr/include/xenomai/cobalt +-I/nfsroot/target/usr/include/xenomai -D_GNU_SOURCE +-D_REENTRANT -D__COBALT__ +-I/nfsroot/target/usr/include/xenomai/alchemy +---------------------------------------------------------------------------- + +EXIT STATUS +----------- + +*0*:: Success. + +*non-zero*:: Error. diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-test.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-test.adoc new file mode 100644 index 0000000..3c9eb54 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-test.adoc @@ -0,0 +1,47 @@ +XENO-TEST(1) +============ +:doctype: manpage +:revdate: 2013/08/25 +:man source: Xenomai +:man version: {xenover} +:man manual: Xenomai Manual + +NAME +----- +xeno-test - Run latency test under load + +SYNOPSIS +--------- +*xeno-test* [ -l loadscript ] [ latency options ] + +DESCRIPTION +------------ + +*xeno-test* runs a series of tests, finishing with the latency test run +along with a user script generating load, in order to measure the +best-case and worst-case latencies. The default command used to generate +load is "dohell 900". + +OPTIONS +-------- +*xeno-test* accepts the following options: + +*-l <loadscript>*:: +run <loadscript> while running latency, in order to measure latency +under load; the link:../dohell/index.html[dohell(1)] script is provided for +this purpose, see its link:../dohell/index.html[manual page] for more details. + +*other options*:: +are passed to the latency test, see link:../latency/index.html[latency(1)] +for the list of supported options.
+ +SEE ALSO +-------- + +*link:../dohell/index.html[dohell(1)], link:../latency/index.html[latency(1)]*. + +EXAMPLE +-------- +-------------------------------------------------------------------------------- +xeno-test -l "dohell -s 192.168.0.5 -m /mnt -l /ltp" -p 100 -g histo +-------------------------------------------------------------------------------- diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno.adoc new file mode 100644 index 0000000..5c5cd7c --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno.adoc @@ -0,0 +1,37 @@ +// ** The above line should force tbl to be a preprocessor ** +// Man page for xeno +// +// Copyright (C) 2010 Roland Stigge <stigge@antcom.de> +// +// You may distribute under the terms of the GNU General Public +// License as specified in the file COPYING that comes with the +// Xenomai distribution. +// +// +XENO(1) +======= +:doctype: manpage +:revdate: 2010/02/14 +:man source: Xenomai +:man version: {xenover} +:man manual: Xenomai Manual + +NAME +----- +xeno - Wrapper for Xenomai executables + +SYNOPSIS +--------- +*xeno* [xenomai command] + +DESCRIPTION +------------ +*xeno* +is a wrapper script that hides distribution-specific installation +prefixes when running standard Xenomai commands. + +AUTHOR +------- +The wrapper script xeno and this manpage were initially written by +Roland Stigge for the Debian project but may be +used elsewhere. diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.conf b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.conf new file mode 100644 index 0000000..3b1b6d6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.conf @@ -0,0 +1,12 @@ +[http-inlinemacro] +<ulink url="{name}:{target}">{0={name}:{target}}: </ulink>"{name}:{target}" +[https-inlinemacro] +<ulink url="{name}:{target}">{0={name}:{target}}: </ulink>"{name}:{target}" +[ftp-inlinemacro] +<ulink url="{name}:{target}">{0={name}:{target}}: </ulink>"{name}:{target}" +[irc-inlinemacro] +<ulink url="{name}:{target}">{0={name}:{target}}: </ulink>"{name}:{target}" +[mailto-inlinemacro] +<ulink url="mailto:{target}">{0={target}}: </ulink>"{target}" +[callto-inlinemacro] +<ulink url="{name}:{target}">{0={target}}: </ulink>"{target}" diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.xsl b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.xsl new file mode 100644 index 0000000..ddfb960 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.xsl @@ -0,0 +1,5 @@ +<?xml version='1.0'?> +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> +<xsl:import href="/etc/asciidoc/docbook-xsl/xhtml.xsl"/> +<xsl:output method="html" encoding="ascii" indent="no"/> +</xsl:stylesheet> diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext_postproc.awk b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext_postproc.awk new file mode 100644 index 0000000..a1f2a5a --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext_postproc.awk @@ -0,0 +1,64 @@ +BEGIN { + link_re="\"(http|file|https|ftp|irc|mailto):[^#\"]*$" +} + +/Table of Contents/ { + in_toc=1 + print $0 + next +} + +in_toc && /^([ \t]*[0-9]\.|$)/ { + print $0 + next +} + +in_toc { + in_toc=0 +} + +$0 ~ link_re { + i = match($0, link_re) + print substr($0, 1, i - 1) + unfinished_url=substr($0, i) + next +} + +unfinished_url && /"/ { + sub(/^[ \t]*/,"") + print unfinished_url$0 + unfinished_url=0 + next +} + +unfinished_url { + sub(/^[ \t]*/,"") + unfinished_url=unfinished_url$0 + next +} + +/^[0-9]\.[0-9.]*/
{ + title=$0 + next +} + +title && /^[ \t]*$/ { + print "" + print title + gsub(/./, "-", title) + print title + print $0 + title=0 + next +} + +title { + print title + print $0 + title=0 + next +} + +{ + print $0 +} diff --git a/kernel/xenomai-v3.2.4/doc/doxygen/Makefile.am b/kernel/xenomai-v3.2.4/doc/doxygen/Makefile.am new file mode 100644 index 0000000..964dfdf --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/doxygen/Makefile.am @@ -0,0 +1,43 @@ +HTML_DOCS = html/xeno3prm html/xeno3prm/search +PDF_DOCS = xeno3prm.pdf +EXTRA_DIST = xeno3prm-common.conf.in xeno3prm-html.conf.in xeno3prm-latex.conf.in + +if XENO_BUILD_DOC + +HTML_DOCSDIR = ./ +PDF_DOCSDIR = ./ + +all-local: html pdf + +html/xeno3prm/search: html/xeno3prm + +html: $(HTML_DOCS) + +pdf: $(PDF_DOCS) + +html/xeno3prm latex/xeno3prm: FORCE + @mkdir -p $@ + $(DOXYGEN) $(@F)-$(@D).conf + +%.pdf: latex/% + $(MAKE) -C $< refman.pdf + mv $</refman.pdf $@ + +distclean-local: + for dir in *-html *-latex; do \ + if test -d $$dir ; then $(RM) -R $$dir ; fi ; \ + done + +.PHONY: FORCE + +.DELETE_ON_ERROR: + +include $(top_srcdir)/doc/install.rules + +install-data-local: install-docs-local +uninstall-local: uninstall-docs + +else +install-data-local: +uninstall-local: +endif diff --git a/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-common.conf.in b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-common.conf.in new file mode 100644 index 0000000..cd77650 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-common.conf.in @@ -0,0 +1,879 @@ +# Doxyfile 1.3.4 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = "Xenomai" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = @PACKAGE_VERSION@ + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = . + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, +# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en +# (Japanese with English messages), Korean, Norwegian, Polish, Portuguese, +# Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, and Ukrainian. 
+ +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited +# members of a class in the documentation of that class as if those members were +# ordinary class members. Constructors, destructors and assignment operators of +# the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. It is allowed to use relative paths in the argument list. + +STRIP_FROM_PATH = @top_srcdir@/ + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names, as on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like the Qt-style comments (thus requiring an +# explicit @brief command for a brief description). + +JAVADOC_AUTOBRIEF = YES + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# reimplements. + +INHERIT_DOCS = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = YES + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that act +# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = \ + "coretags{1}=@par Tags^^@ref cobalt-core-tags \"\1\"" \ + "apitags{1}=@par Tags^^@ref api-tags \"\1\"" \ + "sideeffect=@par Side effects^^" + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources +# only. Doxygen will then generate output that is more tailored for Java. +# For instance, namespaces will be presented as packages, qualified scopes +# will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = YES + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = YES + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. 
+# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = YES + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = @DOXYGEN_SHOW_INCLUDE_FILES@ + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. 
+ +SHOW_USED_FILES = YES + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = NO + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp +# *.h++ *.idl *.odl *.cs *.php *.php3 *.inc + +FILE_PATTERNS = *.c \ + *.h \ + *.dox + +# The RECURSIVE tag can be used to specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should +# be excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or directories +# that are symbolic links (a Unix filesystem feature) are excluded from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. + +EXCLUDE_PATTERNS = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included.
+ +EXAMPLE_PATTERNS = *.c + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = YES + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command <filter> <input-file>, where <filter> +# is the value of the INPUT_FILTER tag, and <input-file> is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. + +INPUT_FILTER = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES (the default) +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES (the default) +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = YES + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = NO + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. 
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output. +# The RTF output is optimised for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path.
+ +XML_OUTPUT = xml + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. This is useful +# if you want to understand what is going on. On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = YES + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_PREDEFINED tags. + +EXPAND_ONLY_PREDEF = YES + +# If the SEARCH_INCLUDES tag is set to YES (the default) the include files +# in the INCLUDE_PATH (see below) will be searched if a #include is found. + +SEARCH_INCLUDES = YES + + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces).
If the definition and the = are +# omitted, =1 is assumed. + +PREDEFINED = DOXYGEN_CPP \ + CONFIG_SMP \ + "dref_type(T)=opaque" \ + "EXPORT_SYMBOL_GPL(symbol)=//" \ + "DECLARE_BITMAP(symbol, nr)=unsigned long symbol[BITS_TO_LONGS(nr)]" \ + "COBALT_IMPL(T,I,A)=T I A" \ + "COBALT_DECL(T,P)=T P" \ + "COBALT_SYSCALL(N,M,T,A)=T N A" \ + "COBALT_SYSCALL_DECL(N,T,A)=T N A" + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse the +# parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base or +# super classes. Setting the tag to NO turns the diagrams off. Note that this +# option is superseded by the HAVE_DOT option below. This is only a fallback. It is +# recommended to install and use dot, since it yields more powerful graphs. + +CLASS_DIAGRAMS = YES + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path.
This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default). + +HAVE_DOT = @DOXYGEN_HAVE_DOT@ + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force +# the CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will +# generate a call dependency graph for every global function or class method. +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable call graphs for selected +# functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will show a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif. +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found on the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lie further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that a graph may be further truncated if the graph's +# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH +# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), +# the graph is not depth-constrained.
+ +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, which results in a white background. +# Warning: Depending on the platform used, enabling this option may lead to +# badly anti-aliased labels on the edges of a graph (i.e. they become hard to +# read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = \ + @top_srcdir@/include \ + @top_srcdir@/kernel/cobalt \ + @top_srcdir@/kernel/drivers \ + @top_srcdir@/lib/cobalt \ + @top_srcdir@/lib/copperplate \ + @top_srcdir@/lib/smokey \ + @top_srcdir@/lib/analogy \ + @top_srcdir@/lib/alchemy \ + @top_srcdir@/lib/vxworks \ + @top_srcdir@/lib/psos \ + @top_srcdir@/lib/trank + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = \ + @top_srcdir@/demo/posix \ + @top_srcdir@/demo/alchemy \ + @top_srcdir@/utils + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = @top_srcdir@/include \ + @top_srcdir@/include/cobalt \ + @top_srcdir@/include/mercury diff --git a/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-html.conf.in b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-html.conf.in new file mode 100644 index 0000000..3936a13 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-html.conf.in @@ -0,0 +1,196 @@ +@INCLUDE = xeno3prm-common.conf + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html/xeno3prm + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer.
+ +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. + +HTML_STYLESHEET = + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output dir. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be +# generated containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet Explorer 5.0+, or Konqueror). Windows users are +# probably better off using the HTML help feature. + +GENERATE_TREEVIEW = YES + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +#--------------------------------------------------------------------------- +# Configuration::additions related to the search engine +#--------------------------------------------------------------------------- + +# The SEARCHENGINE tag specifies whether or not a search engine should be +# used. If set to NO the values of all tags below this one will be ignored. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a PHP enabled web server instead of at the web client +# using JavaScript. Doxygen will generate the search PHP script and index +# file to put on the web server. The advantage of the server +# based approach is that it scales better to large projects and allows +# full text search.
The disadvantage is that it is more difficult to set up +# and does not have live searching capabilities. + +SERVER_BASED_SEARCH = YES + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate LaTeX output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = pxfonts + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = @LATEX_BATCHMODE@ + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO diff --git a/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-latex.conf.in b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-latex.conf.in new file mode 100644 index 0000000..6a25bf7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-latex.conf.in @@ -0,0 +1,186 @@ +@INCLUDE = xeno3prm-common.conf + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = NO + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet + +HTML_STYLESHEET = + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output dir. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be +# generated containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are +# probably better off using the HTML help feature. + +GENERATE_TREEVIEW = YES + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. 
+ +TREEVIEW_WIDTH = 250 + +#--------------------------------------------------------------------------- +# Configuration::additions related to the search engine +#--------------------------------------------------------------------------- + +# The SEARCHENGINE tag specifies whether or not a search engine should be +# used. If set to NO the values of all tags below this one will be ignored. + +SEARCHENGINE = NO + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate LaTeX output. + +GENERATE_LATEX = YES + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex/xeno3prm + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = pxfonts + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = @LATEX_BATCHMODE@ + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output.
+ +LATEX_HIDE_INDICES = NO diff --git a/kernel/xenomai-v3.2.4/doc/gitdoc/Makefile.am b/kernel/xenomai-v3.2.4/doc/gitdoc/Makefile.am new file mode 100644 index 0000000..d1f605c --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/gitdoc/Makefile.am @@ -0,0 +1,43 @@ +if XENO_BUILD_DOC +git-src-check: FORCE + @if test \! -e $(top_srcdir)/.git ; then \ + echo "$@ wants $(top_srcdir) to be a GIT working tree." ; \ + /bin/false; \ + fi +else +git-src-check: +endif + +INPUT_DOCS = \ + asciidoc/pages/Installing_Xenomai_3.x.adoc \ + asciidoc/pages/Running_Apps_with_Xenomai_3.x.adoc \ + asciidoc/pages/Migrating_to_Xenomai_3.x.adoc \ + asciidoc/pages/Troubleshooting_dual_kernel.adoc \ + asciidoc/pages/Troubleshooting_single_kernel.adoc + +OUTPUT_DOCS = \ + doc/asciidoc/README.INSTALL.adoc \ + doc/asciidoc/README.APPLICATIONS.adoc \ + doc/asciidoc/MIGRATION.adoc \ + doc/asciidoc/TROUBLESHOOTING.COBALT.adoc \ + doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc + +all-local: git-src-check + @set -e; if test \! x$(XENO_DOC_GIT) = x; then \ + if test -d doc.git; then \ + (cd doc.git && git pull --quiet --force); \ + else \ + git clone --branch master --depth 1 --quiet \ + $(XENO_DOC_GIT) doc.git; \ + fi; \ + set -- $(OUTPUT_DOCS); \ + for doc in $(INPUT_DOCS); do \ + cp doc.git/$$doc $(top_srcdir)/$$1; \ + shift; \ + done; \ + fi + +clean-local: + $(RM) -R doc.git + +.PHONY: FORCE diff --git a/kernel/xenomai-v3.2.4/doc/install.rules b/kernel/xenomai-v3.2.4/doc/install.rules new file mode 100644 index 0000000..10eaed0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/doc/install.rules @@ -0,0 +1,56 @@ +# -*- makefile -*- +# Generic rules for installation and distribution of documentation. +# +# Parameters: +# HTML_DOCS list of html documentation directories +# PDF_DOCS list of pdf files +# MAN1_DOCS list of man1 files +# +# HTML_DOCSDIR: root of generated HTML files +# PDF_DOCSDIR: root of generated PDF files +# MAN_DOCSDIR: root of generated manN sub-directories + +install-htmldocs: $(HTML_DOCS:%=$(HTML_DOCSDIR)%) + docs="$(HTML_DOCS)"; abs_builddir=$$PWD; \ + for dir in $$docs; do \ + dest=$(DESTDIR)$(htmldir)/$$dir; \ + $(mkinstalldirs) $$dest; \ + abs_dest=`cd $$dest && pwd` ; \ + cd $(HTML_DOCSDIR)$$dir || exit 1; \ + for f in * ; do \ + case $$f in \ + *~|CVS|.svn|[mM]akefile*|GNUmakefile*);; \ + *) $(INSTALL_DATA) $$f $$abs_dest/$$f;; \ + esac; \ + done; cd $$abs_builddir; \ + done + +install-pdfdocs: $(PDF_DOCS:%=$(PDF_DOCSDIR)%) + docs="$^"; dest=$(DESTDIR)$(pdfdir); \ + $(mkinstalldirs) $$dest && \ + for f in $$docs; do \ + $(INSTALL_DATA) $$f $$dest; \ + done + +install-man1: $(MAN1_DOCS:%=$(MAN_DOCSDIR)%) + docs="$^"; dest=$(DESTDIR)$(mandir)/man1; \ + $(mkinstalldirs) $$dest && \ + for f in $$docs; do \ + $(INSTALL_DATA) $$f $$dest; \ + done + +install-mandocs: install-man1 + +install-docs-local: install-htmldocs install-pdfdocs install-mandocs + +# To make distcheck happy.
+uninstall-docs: + if test -n "$(HTML_DOCS)" -o -n "$(PDF_DOCS)" -o -n "$(MAN1_DOCS)"; then \ + targets="$(HTML_DOCS:%=$(DESTDIR)$(htmldir)/%) \ + $(PDF_DOCS:%=$(DESTDIR)$(pdfdir)/%) \ + $(MAN1_DOCS:%=$(DESTDIR)$(mandir)/%)"; \ + for t in $$targets; do \ + if test -d $$t; then $(RM) $$t/*; $(RM) -r $$t; \ + else $(RM) $$t; fi; \ + done; \ + fi diff --git a/kernel/xenomai-v3.2.4/include/COPYING b/kernel/xenomai-v3.2.4/include/COPYING new file mode 100644 index 0000000..e6afb50 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/COPYING @@ -0,0 +1,305 @@ + +As a special exception to the following license, the Xenomai +project gives permission for additional uses of the header files +contained in this directory. + +The exception is that, if you include these header files unmodified to +produce application programs executing in user-space that use +Xenomai services by normal Xenomai system calls, this does not +by itself cause the resulting executable to be covered by the GNU +General Public License. This is merely considered normal use of the +Xenomai system, and does not fall under the heading of "derived +work". + +This exception does not however invalidate any other reasons why the +executable file might be covered by the GNU General Public License. In +any case, this exception never applies when the application code is +built as a static or dynamically loadable portion of the Linux kernel. + +This exception applies only to the code released by the Xenomai +project under the name Xenomai and bearing this exception notice. +If you copy code from other sources into a copy of Xenomai, the +exception does not apply to the code that you add in this way. + +---------------------------------------------------------------------- + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. 
And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS diff --git a/kernel/xenomai-v3.2.4/include/Makefile.am b/kernel/xenomai-v3.2.4/include/Makefile.am new file mode 100644 index 0000000..1e9fe02 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/Makefile.am @@ -0,0 +1,31 @@ +nodist_include_HEADERS=$(CONFIG_HEADER) + +SUBDIRS = \ + boilerplate \ + copperplate \ + smokey \ + alchemy \ + psos \ + rtdm \ + trank \ + vxworks \ + xenomai + +if XENO_COBALT +SUBDIRS += cobalt +else +SUBDIRS += mercury +endif + +DIST_SUBDIRS = \ + alchemy \ + boilerplate \ + cobalt \ + copperplate \ + mercury \ + psos \ + rtdm \ + smokey \ + trank \ + vxworks \ + xenomai diff --git a/kernel/xenomai-v3.2.4/include/alchemy/Makefile.am b/kernel/xenomai-v3.2.4/include/alchemy/Makefile.am new file mode 100644 index 0000000..8cbc3b8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/Makefile.am @@ -0,0 +1,15 @@ +includesubdir = $(includedir)/alchemy + +includesub_HEADERS = \ + alarm.h \ + buffer.h \ + compat.h \ + cond.h \ + event.h \ + heap.h \ + mutex.h \ + pipe.h \ + queue.h \ + sem.h \ + task.h \ + timer.h diff --git a/kernel/xenomai-v3.2.4/include/alchemy/alarm.h b/kernel/xenomai-v3.2.4/include/alchemy/alarm.h new file mode 100644 index 0000000..b57197a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/alarm.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_ALARM_H +#define _XENOMAI_ALCHEMY_ALARM_H + +#include <stdint.h> +#include <alchemy/timer.h> +#include <alchemy/compat.h> + +/** + * @addtogroup alchemy_alarm + * @{ + */ + +struct RT_ALARM { + uintptr_t handle; +}; + +typedef struct RT_ALARM RT_ALARM; + +/** + * @brief Alarm status descriptor + * @anchor RT_ALARM_INFO + * + * This structure reports various static and runtime information about + * a real-time alarm, returned by a call to rt_alarm_inquire(). + */ +struct RT_ALARM_INFO { + /** + * Number of past expiries. + */ + unsigned long expiries; + /** + * Name of alarm object. + */ + char name[XNOBJECT_NAME_LEN]; + /** + * Active flag. 
+ */ + int active; +}; + +typedef struct RT_ALARM_INFO RT_ALARM_INFO; + +#ifdef __cplusplus +extern "C" { +#endif + +CURRENT_DECL(int, rt_alarm_create(RT_ALARM *alarm, + const char *name, + void (*handler)(void *arg), + void *arg)); + +CURRENT_DECL(int, rt_alarm_delete(RT_ALARM *alarm)); + +int rt_alarm_start(RT_ALARM *alarm, + RTIME value, + RTIME interval); + +int rt_alarm_stop(RT_ALARM *alarm); + +int rt_alarm_inquire(RT_ALARM *alarm, + RT_ALARM_INFO *info); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_ALARM_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/buffer.h b/kernel/xenomai-v3.2.4/include/alchemy/buffer.h new file mode 100644 index 0000000..9c0c4e6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/buffer.h @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_BUFFER_H +#define _XENOMAI_ALCHEMY_BUFFER_H + +#include <stdint.h> +#include <alchemy/timer.h> + +/** + * @addtogroup alchemy_buffer + * @{ + */ + +/** Creation flags. */ +#define B_PRIO 0x1 /* Pend by task priority order. */ +#define B_FIFO 0x0 /* Pend by FIFO order. */ + +struct RT_BUFFER { + uintptr_t handle; +}; + +typedef struct RT_BUFFER RT_BUFFER; + +/** + * @brief Buffer status descriptor + * @anchor RT_BUFFER_INFO + * + * This structure reports various static and runtime information about + * a real-time buffer, returned by a call to rt_buffer_inquire(). + */ +struct RT_BUFFER_INFO { + /** + * Number of tasks waiting on the read side of the buffer for + * input data. + */ + int iwaiters; + /** + * Number of tasks waiting on the write side of the buffer for + * sending out data. + */ + int owaiters; + /** + * Overall size of buffer (in bytes). + */ + size_t totalmem; + /** + * Amount of memory currently available for holding more data. + */ + size_t availmem; + /** + * Name of the buffer. 
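A minimal usage sketch for the alarm services declared above, assuming nanosecond-based RTIME values and a Cobalt/alchemy application context; the object name, handler, and periods below are hypothetical:

#include <alchemy/alarm.h>

static RT_ALARM demo_alarm;		/* hypothetical alarm object */
static unsigned long expiries;

/* Runs on each expiry; arg is the cookie given to rt_alarm_create(). */
static void on_expiry(void *arg)
{
	unsigned long *counter = arg;

	(*counter)++;
}

int demo_alarm_setup(void)
{
	int ret;

	ret = rt_alarm_create(&demo_alarm, "demo", on_expiry, &expiries);
	if (ret)
		return ret;

	/* First expiry after 1 ms, then periodically every 500 us. */
	return rt_alarm_start(&demo_alarm, 1000000, 500000);
}

Teardown would mirror this via rt_alarm_stop() and rt_alarm_delete().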
+ */ + char name[XNOBJECT_NAME_LEN]; +}; + +typedef struct RT_BUFFER_INFO RT_BUFFER_INFO; + +#ifdef __cplusplus +extern "C" { +#endif + +int rt_buffer_create(RT_BUFFER *bf, + const char *name, + size_t bufsz, + int mode); + +int rt_buffer_delete(RT_BUFFER *bf); + +ssize_t rt_buffer_write_timed(RT_BUFFER *bf, + const void *ptr, size_t size, + const struct timespec *abs_timeout); + +static inline +ssize_t rt_buffer_write_until(RT_BUFFER *bf, + const void *ptr, size_t size, + RTIME timeout) +{ + struct timespec ts; + return rt_buffer_write_timed(bf, ptr, size, + alchemy_abs_timeout(timeout, &ts)); +} + +static inline +ssize_t rt_buffer_write(RT_BUFFER *bf, + const void *ptr, size_t size, + RTIME timeout) +{ + struct timespec ts; + return rt_buffer_write_timed(bf, ptr, size, + alchemy_rel_timeout(timeout, &ts)); +} + +ssize_t rt_buffer_read_timed(RT_BUFFER *bf, + void *ptr, size_t size, + const struct timespec *abs_timeout); + +static inline +ssize_t rt_buffer_read_until(RT_BUFFER *bf, + void *ptr, size_t size, + RTIME timeout) +{ + struct timespec ts; + return rt_buffer_read_timed(bf, ptr, size, + alchemy_abs_timeout(timeout, &ts)); +} + +static inline +ssize_t rt_buffer_read(RT_BUFFER *bf, + void *ptr, size_t size, + RTIME timeout) +{ + struct timespec ts; + return rt_buffer_read_timed(bf, ptr, size, + alchemy_rel_timeout(timeout, &ts)); +} + +int rt_buffer_clear(RT_BUFFER *bf); + +int rt_buffer_inquire(RT_BUFFER *bf, + RT_BUFFER_INFO *info); + +int rt_buffer_bind(RT_BUFFER *bf, + const char *name, RTIME timeout); + +int rt_buffer_unbind(RT_BUFFER *bf); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_BUFFER_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/compat.h b/kernel/xenomai-v3.2.4/include/alchemy/compat.h new file mode 100644 index 0000000..0113879 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/compat.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_COMPAT_H +#define _XENOMAI_ALCHEMY_COMPAT_H + +#include <trank/trank.h> + +#endif /* _XENOMAI_ALCHEMY_COMPAT_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/cond.h b/kernel/xenomai-v3.2.4/include/alchemy/cond.h new file mode 100644 index 0000000..7043179 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/cond.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
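To show how the inline wrappers above pair rt_buffer_*_timed() with relative timeouts, here is a minimal round-trip sketch, assuming TM_INFINITE from <alchemy/timer.h> and a hypothetical 4 KB buffer:

#include <alchemy/buffer.h>

int demo_buffer_roundtrip(void)
{
	RT_BUFFER bf;
	char out[8];
	ssize_t n;
	int ret;

	ret = rt_buffer_create(&bf, "demo-buf", 4096, B_FIFO);
	if (ret)
		return ret;

	/* Block until enough space is free, then until data arrives. */
	n = rt_buffer_write(&bf, "ping", 5, TM_INFINITE);
	if (n >= 0)
		n = rt_buffer_read(&bf, out, sizeof(out), TM_INFINITE);

	rt_buffer_delete(&bf);

	return n < 0 ? (int)n : 0;
}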
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_COND_H +#define _XENOMAI_ALCHEMY_COND_H + +#include <stdint.h> +#include <alchemy/timer.h> +#include <alchemy/mutex.h> + +/** + * @addtogroup alchemy_cond + * @{ + */ + +struct RT_COND { + uintptr_t handle; +}; + +typedef struct RT_COND RT_COND; + +/** + * @brief Condition variable status descriptor + * @anchor RT_COND_INFO + * + * This structure reports various static and runtime information about + * a condition variable, returned by a call to rt_cond_inquire(). + */ +struct RT_COND_INFO { + /** + * Name of condition variable. + */ + char name[XNOBJECT_NAME_LEN]; +}; + +typedef struct RT_COND_INFO RT_COND_INFO; + +#ifdef __cplusplus +extern "C" { +#endif + +int rt_cond_create(RT_COND *cond, + const char *name); + +int rt_cond_delete(RT_COND *cond); + +int rt_cond_signal(RT_COND *cond); + +int rt_cond_broadcast(RT_COND *cond); + +int rt_cond_wait_timed(RT_COND *cond, + RT_MUTEX *mutex, + const struct timespec *abs_timeout); +static inline +int rt_cond_wait_until(RT_COND *cond, RT_MUTEX *mutex, RTIME timeout) +{ + struct timespec ts; + return rt_cond_wait_timed(cond, mutex, + alchemy_abs_timeout(timeout, &ts)); +} + +static inline +int rt_cond_wait(RT_COND *cond, RT_MUTEX *mutex, RTIME timeout) +{ + struct timespec ts; + return rt_cond_wait_timed(cond, mutex, + alchemy_rel_timeout(timeout, &ts)); +} + +int rt_cond_inquire(RT_COND *cond, + RT_COND_INFO *info); + +int rt_cond_bind(RT_COND *cond, + const char *name, RTIME timeout); + +int rt_cond_unbind(RT_COND *cond); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_COND_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/event.h b/kernel/xenomai-v3.2.4/include/alchemy/event.h new file mode 100644 index 0000000..1e8cb4d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/event.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_EVENT_H +#define _XENOMAI_ALCHEMY_EVENT_H + +#include <stdint.h> +#include <alchemy/timer.h> +#include <alchemy/compat.h> + +/** + * @addtogroup alchemy_event + * @{ + */ + +/** Creation flags. */ +#define EV_PRIO 0x1 /* Pend by task priority order. */ +#define EV_FIFO 0x0 /* Pend by FIFO order. */ + +/** Operation flags. */ +#define EV_ANY 0x1 /* Disjunctive wait.
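The condition-variable services declared above support the classic monitor idiom: hold the mutex, re-test the predicate in a loop, and block on the condition. A minimal sketch, assuming TM_INFINITE from <alchemy/timer.h> and that the hypothetical objects were set up elsewhere with rt_mutex_create()/rt_cond_create():

#include <alchemy/cond.h>
#include <alchemy/mutex.h>

static RT_MUTEX lock;
static RT_COND ready_cond;
static int ready;			/* predicate guarded by 'lock' */

void demo_wait_ready(void)
{
	rt_mutex_acquire(&lock, TM_INFINITE);
	/* Re-check the predicate: waits may wake up without it holding. */
	while (!ready)
		rt_cond_wait(&ready_cond, &lock, TM_INFINITE);
	rt_mutex_release(&lock);
}

void demo_set_ready(void)
{
	rt_mutex_acquire(&lock, TM_INFINITE);
	ready = 1;
	rt_cond_signal(&ready_cond);
	rt_mutex_release(&lock);
}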
*/ +#define EV_ALL 0x0 /* Conjunctive wait. */ + +struct RT_EVENT { + uintptr_t handle; +}; + +typedef struct RT_EVENT RT_EVENT; + +/** + * @brief Event status descriptor + * @anchor RT_EVENT_INFO + * + * This structure reports various static and runtime information about + * an event flag group, returned by a call to rt_event_inquire(). + */ +struct RT_EVENT_INFO { + /** + * Current value of the event flag group. + */ + unsigned int value; + /** + * Number of tasks currently waiting for events. + */ + int nwaiters; + /** + * Name of event flag group. + */ + char name[XNOBJECT_NAME_LEN]; +}; + +typedef struct RT_EVENT_INFO RT_EVENT_INFO; + +#ifdef __cplusplus +extern "C" { +#endif + +CURRENT_DECL(int, rt_event_create(RT_EVENT *event, + const char *name, + unsigned int ivalue, + int mode)); + +int rt_event_delete(RT_EVENT *event); + +CURRENT_DECL(int, rt_event_signal(RT_EVENT *event, + unsigned int mask)); + +int rt_event_wait_timed(RT_EVENT *event, + unsigned int mask, + unsigned int *mask_r, + int mode, + const struct timespec *abs_timeout); + +#ifndef __XENO_COMPAT__ + +static inline +int rt_event_wait_until(RT_EVENT *event, + unsigned int mask, unsigned int *mask_r, + int mode, RTIME timeout) +{ + struct timespec ts; + return rt_event_wait_timed(event, mask, mask_r, mode, + alchemy_abs_timeout(timeout, &ts)); +} + +static inline +int rt_event_wait(RT_EVENT *event, + unsigned int mask, unsigned int *mask_r, + int mode, RTIME timeout) +{ + struct timespec ts; + return rt_event_wait_timed(event, mask, mask_r, mode, + alchemy_rel_timeout(timeout, &ts)); +} + +#endif /* !__XENO_COMPAT__ */ + +CURRENT_DECL(int, rt_event_clear(RT_EVENT *event, + unsigned int mask, + unsigned int *mask_r)); + +int rt_event_inquire(RT_EVENT *event, + RT_EVENT_INFO *info); + +int rt_event_bind(RT_EVENT *event, + const char *name, RTIME timeout); + +int rt_event_unbind(RT_EVENT *event); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_EVENT_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/heap.h b/kernel/xenomai-v3.2.4/include/alchemy/heap.h new file mode 100644 index 0000000..ade2f47 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/heap.h @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_HEAP_H +#define _XENOMAI_ALCHEMY_HEAP_H + +#include <stdint.h> +#include <alchemy/timer.h> + +/** + * @addtogroup alchemy_heap + * @{ + */ + +/** Creation flags. */ +#define H_PRIO 0x1 /* Pend by task priority order. */ +#define H_FIFO 0x0 /* Pend by FIFO order. */ +#define H_SINGLE 0x4 /* Manage as single-block area. 
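A minimal sketch of the event-group services declared above, assuming TM_INFINITE from <alchemy/timer.h>; the flag bits and names are hypothetical:

#include <alchemy/event.h>

#define DEMO_RX 0x1
#define DEMO_TX 0x2

int demo_event_usage(void)
{
	RT_EVENT evgrp;
	unsigned int happened;
	int ret;

	ret = rt_event_create(&evgrp, "demo-ev", 0, EV_PRIO);
	if (ret)
		return ret;

	/* Typically another task would post the flags. */
	rt_event_signal(&evgrp, DEMO_RX);

	/* Wake up when any of the requested bits is set (disjunctive wait). */
	ret = rt_event_wait(&evgrp, DEMO_RX | DEMO_TX, &happened,
			    EV_ANY, TM_INFINITE);

	rt_event_delete(&evgrp);

	return ret;
}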
*/ + +struct RT_HEAP { + uintptr_t handle; +}; + +typedef struct RT_HEAP RT_HEAP; + +/** + * @brief Heap status descriptor + * @anchor RT_HEAP_INFO + * + * This structure reports various static and runtime information about + * a real-time heap, returned by a call to rt_heap_inquire(). + */ +struct RT_HEAP_INFO { + /** + * Number of tasks waiting for available memory in + * rt_heap_alloc(). + */ + int nwaiters; + /** + * Creation mode flags as given to rt_heap_create(). + */ + int mode; + /** + * Size of heap (in bytes) as given to rt_heap_create(). The + * maximum amount of memory available from this heap may be + * larger, due to internal padding. + */ + size_t heapsize; + /** + * Maximum amount of memory available from the heap. This + * value accounts for the overhead of internal data structures + * required to maintain the heap. + */ + size_t usablemem; + /** + * Amount of heap memory currently consumed. info.usablemem - + * info.usedmem computes the current amount of free memory in + * the relevant heap. + */ + size_t usedmem; + /** + * Name of heap. + */ + char name[XNOBJECT_NAME_LEN]; +}; + +typedef struct RT_HEAP_INFO RT_HEAP_INFO; + +#ifdef __cplusplus +extern "C" { +#endif + +int rt_heap_create(RT_HEAP *heap, + const char *name, + size_t heapsize, + int mode); + +int rt_heap_delete(RT_HEAP *heap); + +int rt_heap_alloc_timed(RT_HEAP *heap, + size_t size, + const struct timespec *abs_timeout, + void **blockp); + +static inline +int rt_heap_alloc_until(RT_HEAP *heap, + size_t size, RTIME timeout, void **blockp) +{ + struct timespec ts; + return rt_heap_alloc_timed(heap, size, + alchemy_abs_timeout(timeout, &ts), + blockp); +} + +static inline +int rt_heap_alloc(RT_HEAP *heap, + size_t size, RTIME timeout, void **blockp) +{ + struct timespec ts; + return rt_heap_alloc_timed(heap, size, + alchemy_rel_timeout(timeout, &ts), + blockp); +} + +int rt_heap_free(RT_HEAP *heap, + void *block); + +int rt_heap_inquire(RT_HEAP *heap, + RT_HEAP_INFO *info); + +int rt_heap_bind(RT_HEAP *heap, + const char *name, + RTIME timeout); + +int rt_heap_unbind(RT_HEAP *heap); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_HEAP_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/mutex.h b/kernel/xenomai-v3.2.4/include/alchemy/mutex.h new file mode 100644 index 0000000..2c4212f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/mutex.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
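To illustrate the allocation flow documented above, a minimal sketch, assuming TM_NONBLOCK from <alchemy/timer.h> and a hypothetical 8 KB pool:

#include <alchemy/heap.h>

int demo_heap_usage(void)
{
	RT_HEAP heap;
	void *block;
	int ret;

	ret = rt_heap_create(&heap, "demo-heap", 8192, H_PRIO);
	if (ret)
		return ret;

	/* Fail immediately instead of sleeping if no memory is available. */
	ret = rt_heap_alloc(&heap, 256, TM_NONBLOCK, &block);
	if (ret == 0)
		rt_heap_free(&heap, block);

	rt_heap_delete(&heap);

	return ret;
}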
+ */ +#ifndef _XENOMAI_ALCHEMY_MUTEX_H +#define _XENOMAI_ALCHEMY_MUTEX_H + +#include <stdint.h> +#include <alchemy/timer.h> +#include <alchemy/task.h> + +/** + * @addtogroup alchemy_mutex + * @{ + */ + +struct RT_MUTEX { + uintptr_t handle; +}; + +typedef struct RT_MUTEX RT_MUTEX; + +/** + * @brief Mutex status descriptor + * @anchor RT_MUTEX_INFO + * + * This structure reports various static and runtime information about + * a mutex, returned by a call to rt_mutex_inquire(). + */ +struct RT_MUTEX_INFO { + /** + * Current mutex owner, or NO_ALCHEMY_TASK if unlocked. This + * information is in essence transient, and may not be valid + * anymore once used by the caller. + */ + RT_TASK owner; + /** + * Name of mutex. + */ + char name[XNOBJECT_NAME_LEN]; +}; + +typedef struct RT_MUTEX_INFO RT_MUTEX_INFO; + +#ifdef __cplusplus +extern "C" { +#endif + +int rt_mutex_create(RT_MUTEX *mutex, + const char *name); + +int rt_mutex_delete(RT_MUTEX *mutex); + +int rt_mutex_acquire_timed(RT_MUTEX *mutex, + const struct timespec *abs_timeout); + +static inline +int rt_mutex_acquire_until(RT_MUTEX *mutex, RTIME timeout) +{ + struct timespec ts; + return rt_mutex_acquire_timed(mutex, + alchemy_abs_timeout(timeout, &ts)); +} + +static inline +int rt_mutex_acquire(RT_MUTEX *mutex, RTIME timeout) +{ + struct timespec ts; + return rt_mutex_acquire_timed(mutex, + alchemy_rel_timeout(timeout, &ts)); +} + +int rt_mutex_release(RT_MUTEX *mutex); + +int rt_mutex_inquire(RT_MUTEX *mutex, + RT_MUTEX_INFO *info); + +int rt_mutex_bind(RT_MUTEX *mutex, + const char *name, RTIME timeout); + +int rt_mutex_unbind(RT_MUTEX *mutex); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_MUTEX_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/pipe.h b/kernel/xenomai-v3.2.4/include/alchemy/pipe.h new file mode 100644 index 0000000..4ae24f9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/pipe.h @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_PIPE_H +#define _XENOMAI_ALCHEMY_PIPE_H + +#include <stdint.h> +#include <cobalt/uapi/kernel/pipe.h> +#include <alchemy/timer.h> +#include <alchemy/compat.h> + +/** + * @addtogroup alchemy_pipe + * @{ + */ + +/** Creation flags. */ +#define P_MINOR_AUTO XNPIPE_MINOR_AUTO + +/** Operation flags. 
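A short sketch of the mutex API above, with hypothetical names. The relative-timeout form rt_mutex_acquire() degrades gracefully: TM_INFINITE waits forever, while TM_NONBLOCK turns the call into a trylock, per the timeout conversion macros in alchemy/timer.h.

    #include <alchemy/mutex.h>

    static RT_MUTEX lock;

    int mutex_demo(void)
    {
            int ret = rt_mutex_create(&lock, "demo-lock");
            if (ret)
                    return ret;

            ret = rt_mutex_acquire(&lock, TM_INFINITE);
            if (ret == 0) {
                    /* ... critical section; RT_MUTEX_INFO.owner would
                     * now report the calling task ... */
                    rt_mutex_release(&lock);
            }
            return rt_mutex_delete(&lock);
    }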
*/ +#define P_URGENT XNPIPE_URGENT +#define P_NORMAL XNPIPE_NORMAL + +struct RT_PIPE { + uintptr_t handle; +}; + +typedef struct RT_PIPE RT_PIPE; + +#ifdef __cplusplus +extern "C" { +#endif + +CURRENT_DECL(int, rt_pipe_create(RT_PIPE *pipe, + const char *name, + int minor, size_t poolsize)); + +int rt_pipe_delete(RT_PIPE *pipe); + +ssize_t rt_pipe_read_timed(RT_PIPE *pipe, + void *buf, size_t size, + const struct timespec *abs_timeout); + +static inline +ssize_t rt_pipe_read_until(RT_PIPE *pipe, + void *buf, size_t size, RTIME timeout) +{ + struct timespec ts; + return rt_pipe_read_timed(pipe, buf, size, + alchemy_abs_timeout(timeout, &ts)); +} + +ssize_t rt_pipe_read(RT_PIPE *pipe, + void *buf, size_t size, RTIME timeout); + +ssize_t rt_pipe_write(RT_PIPE *pipe, + const void *buf, size_t size, int mode); + +ssize_t rt_pipe_stream(RT_PIPE *pipe, + const void *buf, size_t size); + +int rt_pipe_bind(RT_PIPE *pipe, + const char *name, RTIME timeout); + +int rt_pipe_unbind(RT_PIPE *pipe); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_PIPE_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/queue.h b/kernel/xenomai-v3.2.4/include/alchemy/queue.h new file mode 100644 index 0000000..4cd2d70 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/queue.h @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_QUEUE_H +#define _XENOMAI_ALCHEMY_QUEUE_H + +#include <stdint.h> +#include <alchemy/timer.h> + +/** + * @addtogroup alchemy_queue + * @{ + */ + +/** Creation flags. */ +#define Q_PRIO 0x1 /* Pend by task priority order. */ +#define Q_FIFO 0x0 /* Pend by FIFO order. */ + +#define Q_UNLIMITED 0 /* No size limit. */ + +/* + * Operation flags. + */ +#define Q_NORMAL 0x0 +#define Q_URGENT 0x1 +#define Q_BROADCAST 0x2 + +struct RT_QUEUE { + uintptr_t handle; +}; + +typedef struct RT_QUEUE RT_QUEUE; + +/** + * @brief Queue status descriptor + * @anchor RT_QUEUE_INFO + * + * This structure reports various static and runtime information about + * a real-time queue, returned by a call to rt_queue_inquire(). + */ +struct RT_QUEUE_INFO { + /** + * Number of tasks currently waiting on the queue for + * messages. + */ + int nwaiters; + /** + * Number of messages pending in queue. + */ + int nmessages; + /** + * Queue mode bits, as given to rt_queue_create(). + */ + int mode; + /** + * Maximum number of messages in queue, zero if unlimited. + */ + size_t qlimit; + /** + * Size of memory pool for holding message buffers (in bytes). + */ + size_t poolsize; + /** + * Amount of memory consumed from the buffer pool. + */ + size_t usedmem; + /** + * Name of message queue. 
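A sketch of the pipe API above, assuming the usual Xenomai message-pipe model: the real-time side writes messages that surface on a regular Linux character-device endpoint, and reads messages sent back from it; P_MINOR_AUTO lets the core pick the device minor. Names are hypothetical; the return-value check below tolerates either a zero or a minor-number success convention for rt_pipe_create().

    #include <alchemy/pipe.h>

    static RT_PIPE pipe;

    int pipe_demo(void)
    {
            char buf[64];
            ssize_t n;
            int ret;

            /* poolsize bounds the memory buffering in-flight messages. */
            ret = rt_pipe_create(&pipe, "demo-pipe", P_MINOR_AUTO, 16384);
            if (ret < 0)
                    return ret;

            /* Push a message toward the non-RT endpoint; P_URGENT would
             * queue it ahead of P_NORMAL traffic. */
            rt_pipe_write(&pipe, "ping", 4, P_NORMAL);

            /* Poll for data coming back from the non-RT endpoint. */
            n = rt_pipe_read(&pipe, buf, sizeof(buf), TM_NONBLOCK);
            (void)n;

            return rt_pipe_delete(&pipe);
    }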
+ */ + char name[XNOBJECT_NAME_LEN]; +}; + +typedef struct RT_QUEUE_INFO RT_QUEUE_INFO; + +#ifdef __cplusplus +extern "C" { +#endif + +int rt_queue_create(RT_QUEUE *queue, + const char *name, + size_t poolsize, size_t qlimit, int mode); + +int rt_queue_delete(RT_QUEUE *queue); + +void *rt_queue_alloc(RT_QUEUE *queue, + size_t size); + +int rt_queue_free(RT_QUEUE *queue, + void *buf); + +int rt_queue_send(RT_QUEUE *queue, + const void *buf, size_t size, int mode); + +int rt_queue_write(RT_QUEUE *queue, + const void *buf, size_t size, int mode); + +ssize_t rt_queue_receive_timed(RT_QUEUE *queue, + void **bufp, + const struct timespec *abs_timeout); + +static inline +ssize_t rt_queue_receive_until(RT_QUEUE *queue, + void **bufp, RTIME timeout) +{ + struct timespec ts; + return rt_queue_receive_timed(queue, bufp, + alchemy_abs_timeout(timeout, &ts)); +} + +static inline +ssize_t rt_queue_receive(RT_QUEUE *queue, + void **bufp, RTIME timeout) +{ + struct timespec ts; + return rt_queue_receive_timed(queue, bufp, + alchemy_rel_timeout(timeout, &ts)); +} + +ssize_t rt_queue_read_timed(RT_QUEUE *queue, + void *buf, size_t size, + const struct timespec *abs_timeout); + +static inline +ssize_t rt_queue_read_until(RT_QUEUE *queue, + void *buf, size_t size, RTIME timeout) +{ + struct timespec ts; + return rt_queue_read_timed(queue, buf, size, + alchemy_abs_timeout(timeout, &ts)); +} + +static inline +ssize_t rt_queue_read(RT_QUEUE *queue, + void *buf, size_t size, RTIME timeout) +{ + struct timespec ts; + return rt_queue_read_timed(queue, buf, size, + alchemy_rel_timeout(timeout, &ts)); +} + +int rt_queue_flush(RT_QUEUE *queue); + +int rt_queue_inquire(RT_QUEUE *queue, + RT_QUEUE_INFO *info); + +int rt_queue_bind(RT_QUEUE *queue, + const char *name, + RTIME timeout); + +int rt_queue_unbind(RT_QUEUE *queue); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_QUEUE_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/sem.h b/kernel/xenomai-v3.2.4/include/alchemy/sem.h new file mode 100644 index 0000000..8f86824 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/sem.h @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_SEM_H +#define _XENOMAI_ALCHEMY_SEM_H + +#include <stdint.h> +#include <alchemy/timer.h> + +/** + * @addtogroup alchemy_sem + * @{ + */ + +/** Creation flags. */ +#define S_PRIO 0x1 /* Pend by task priority order. */ +#define S_FIFO 0x0 /* Pend by FIFO order. */ +#define S_PULSE 0x2 /* Enable pulse mode. 
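The queue API above strongly suggests the native zero-copy protocol, and this sketch assumes it: the sender carves a message buffer from the queue's pool with rt_queue_alloc() and hands ownership over with rt_queue_send(); the receiver obtains the buffer pointer from rt_queue_receive() and releases it with rt_queue_free(). rt_queue_write()/rt_queue_read() are the copying variants. Names are hypothetical.

    #include <errno.h>
    #include <string.h>
    #include <alchemy/queue.h>

    static RT_QUEUE q;

    int queue_demo(void)
    {
            void *msg, *in;
            ssize_t len;
            int ret;

            ret = rt_queue_create(&q, "demo-q", 1024, Q_UNLIMITED, Q_FIFO);
            if (ret)
                    return ret;

            msg = rt_queue_alloc(&q, 16);        /* carve from the pool */
            if (msg == NULL)
                    return -ENOMEM;
            memcpy(msg, "ping", 5);
            rt_queue_send(&q, msg, 5, Q_NORMAL); /* ownership moves on */

            len = rt_queue_receive(&q, &in, TM_INFINITE);
            if (len >= 0)
                    rt_queue_free(&q, in);       /* receiver releases it */

            return rt_queue_delete(&q);
    }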
*/ + +struct RT_SEM { + uintptr_t handle; +}; + +typedef struct RT_SEM RT_SEM; + +/** + * @brief Semaphore status descriptor + * @anchor RT_SEM_INFO + * + * This structure reports various static and runtime information about + * a semaphore, returned by a call to rt_sem_inquire(). + */ +struct RT_SEM_INFO { + /** + * Current semaphore value. + */ + unsigned long count; + /** + * Number of tasks waiting on the semaphore. + */ + int nwaiters; + /** + * Name of semaphore. + */ + char name[XNOBJECT_NAME_LEN]; +}; + +typedef struct RT_SEM_INFO RT_SEM_INFO; + +#ifdef __cplusplus +extern "C" { +#endif + +int rt_sem_create(RT_SEM *sem, + const char *name, + unsigned long icount, + int mode); + +int rt_sem_delete(RT_SEM *sem); + +int rt_sem_p_timed(RT_SEM *sem, + const struct timespec *abs_timeout); + +static inline int rt_sem_p_until(RT_SEM *sem, RTIME timeout) +{ + struct timespec ts; + return rt_sem_p_timed(sem, alchemy_abs_timeout(timeout, &ts)); +} + +static inline int rt_sem_p(RT_SEM *sem, RTIME timeout) +{ + struct timespec ts; + return rt_sem_p_timed(sem, alchemy_rel_timeout(timeout, &ts)); +} + +int rt_sem_v(RT_SEM *sem); + +int rt_sem_broadcast(RT_SEM *sem); + +int rt_sem_inquire(RT_SEM *sem, + RT_SEM_INFO *info); + +int rt_sem_bind(RT_SEM *sem, + const char *name, RTIME timeout); + +int rt_sem_unbind(RT_SEM *sem); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_SEM_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/task.h b/kernel/xenomai-v3.2.4/include/alchemy/task.h new file mode 100644 index 0000000..685d478 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/task.h @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_TASK_H +#define _XENOMAI_ALCHEMY_TASK_H + +#include <sys/types.h> +#include <stdint.h> +#include <xeno_config.h> +#include <boilerplate/list.h> +#include <copperplate/threadobj.h> +#include <alchemy/timer.h> +#include <alchemy/compat.h> + +/** + * @addtogroup alchemy_task + * @{ + */ + +/** Task priorities. */ +#define T_LOPRIO 0 +#define T_HIPRIO 99 + +/** Task mode bits. */ +#define T_LOCK __THREAD_M_LOCK +/** Cobalt only, nop over Mercury. */ +#define T_WARNSW __THREAD_M_WARNSW +#define T_CONFORMING __THREAD_M_CONFORMING +#define T_JOINABLE __THREAD_M_SPARE0 + +struct RT_TASK { + uintptr_t handle; + pthread_t thread; +}; + +typedef struct RT_TASK RT_TASK; + +struct RT_TASK_MCB { + int flowid; + int opcode; + union { + dref_type(void *) __dref; + void *data; + }; + ssize_t size; +}; + +typedef struct RT_TASK_MCB RT_TASK_MCB; + +/** + * @brief Task status descriptor + * @anchor RT_TASK_INFO + * + * This structure reports various static and runtime information about + * a real-time task, returned by a call to rt_task_inquire(). 
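A counting-semaphore sketch for the API above: rt_sem_v() posts one unit, rt_sem_p() takes one, and TM_NONBLOCK makes the take a pure poll. S_PRIO wakes pending tasks in priority order. Names are hypothetical.

    #include <alchemy/sem.h>

    static RT_SEM sem;

    int sem_demo(void)
    {
            int ret = rt_sem_create(&sem, "demo-sem", 0, S_PRIO);
            if (ret)
                    return ret;

            rt_sem_v(&sem);                      /* count: 0 -> 1 */
            ret = rt_sem_p(&sem, TM_NONBLOCK);   /* succeeds, count back to 0 */
            ret = rt_sem_p(&sem, TM_NONBLOCK);   /* fails: nothing to take */
            (void)ret;

            return rt_sem_delete(&sem);
    }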
+ */ +struct RT_TASK_INFO { + /** + * Task priority. + */ + int prio; + /** + * Task status. + */ + struct threadobj_stat stat; + /** + * Name of task. + */ + char name[XNOBJECT_NAME_LEN]; + /** + * Host pid. + */ + pid_t pid; +}; + +typedef struct RT_TASK_INFO RT_TASK_INFO; + +#define NO_ALCHEMY_TASK ((RT_TASK){ 0, 0 }) + +#ifdef __cplusplus +extern "C" { +#endif + +CURRENT_DECL(int, rt_task_create(RT_TASK *task, + const char *name, + int stksize, + int prio, + int mode)); + +int rt_task_delete(RT_TASK *task); + +int rt_task_set_affinity(RT_TASK *task, + const cpu_set_t *cpus); + +int rt_task_start(RT_TASK *task, + void (*entry)(void *arg), + void *arg); + +CURRENT_DECL(int, rt_task_spawn(RT_TASK *task, const char *name, + int stksize, int prio, int mode, + void (*entry)(void *arg), + void *arg)); + +int rt_task_shadow(RT_TASK *task, + const char *name, + int prio, + int mode); + +int rt_task_join(RT_TASK *task); + +CURRENT_DECL(int, rt_task_set_periodic(RT_TASK *task, + RTIME idate, RTIME period)); + +int rt_task_wait_period(unsigned long *overruns_r); + +int rt_task_sleep(RTIME delay); + +int rt_task_sleep_until(RTIME date); + +int rt_task_same(RT_TASK *task1, RT_TASK *task2); + +int rt_task_suspend(RT_TASK *task); + +int rt_task_resume(RT_TASK *task); + +RT_TASK *rt_task_self(void); + +int rt_task_set_priority(RT_TASK *task, int prio); + +int rt_task_set_mode(int clrmask, int setmask, + int *mode_r); + +int rt_task_yield(void); + +int rt_task_unblock(RT_TASK *task); + +int rt_task_slice(RT_TASK *task, RTIME quantum); + +int rt_task_inquire(RT_TASK *task, + RT_TASK_INFO *info); + +ssize_t rt_task_send_timed(RT_TASK *task, + RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r, + const struct timespec *abs_timeout); + +static inline +ssize_t rt_task_send_until(RT_TASK *task, + RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r, + RTIME timeout) +{ + struct timespec ts; + return rt_task_send_timed(task, mcb_s, mcb_r, + alchemy_abs_timeout(timeout, &ts)); +} + +static inline +ssize_t rt_task_send(RT_TASK *task, + RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r, + RTIME timeout) +{ + struct timespec ts; + return rt_task_send_timed(task, mcb_s, mcb_r, + alchemy_rel_timeout(timeout, &ts)); +} + +int rt_task_receive_timed(RT_TASK_MCB *mcb_r, + const struct timespec *abs_timeout); + +static inline +int rt_task_receive_until(RT_TASK_MCB *mcb_r, RTIME timeout) +{ + struct timespec ts; + return rt_task_receive_timed(mcb_r, + alchemy_abs_timeout(timeout, &ts)); +} + +static inline +int rt_task_receive(RT_TASK_MCB *mcb_r, RTIME timeout) +{ + struct timespec ts; + return rt_task_receive_timed(mcb_r, + alchemy_rel_timeout(timeout, &ts)); +} + +int rt_task_reply(int flowid, + RT_TASK_MCB *mcb_s); + +int rt_task_bind(RT_TASK *task, + const char *name, RTIME timeout); + +int rt_task_unbind(RT_TASK *task); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _XENOMAI_ALCHEMY_TASK_H */ diff --git a/kernel/xenomai-v3.2.4/include/alchemy/timer.h b/kernel/xenomai-v3.2.4/include/alchemy/timer.h new file mode 100644 index 0000000..7b3837b --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/alchemy/timer.h @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
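A sketch of the typical periodic-task pattern built on the API above. Two conventions here are assumptions, not shown in this excerpt: passing NULL to rt_task_set_periodic() is taken to mean "the calling task", and a stack size of 0 is taken to select a reasonable default, both following the native Xenomai API convention. Names are hypothetical.

    #include <alchemy/task.h>

    static RT_TASK worker;

    static void worker_fn(void *arg)
    {
            unsigned long overruns;

            /* Period is in Alchemy clock ticks (1 ns by default): 1 ms. */
            rt_task_set_periodic(NULL, TM_NOW, 1000000);

            for (;;) {
                    /* Blocks until the next release point; a non-zero
                     * return is assumed to report missed periods via
                     * the overruns count. */
                    if (rt_task_wait_period(&overruns))
                            break;
                    /* ... one cycle of work ... */
            }
    }

    int task_demo(void)
    {
            int ret = rt_task_create(&worker, "worker", 0 /* default stack,
                                     assumed */, 50, T_JOINABLE);
            if (ret)
                    return ret;

            /* A T_JOINABLE task can later be reaped with rt_task_join(). */
            return rt_task_start(&worker, worker_fn, NULL);
    }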
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_ALCHEMY_TIMER_H +#define _XENOMAI_ALCHEMY_TIMER_H + +#include <stddef.h> +#include <copperplate/clockobj.h> + +/** + * @addtogroup alchemy_timer + * @{ + */ + +typedef ticks_t RTIME; + +typedef sticks_t SRTIME; + +#define TM_INFINITE 0 +#define TM_NOW 0 +#define TM_NONBLOCK ((RTIME)-1ULL) + +/** + * @brief Timer status descriptor + * @anchor RT_TIMER_INFO + * + * This structure reports information about the Alchemy clock, + * returned by a call to rt_timer_inquire(). + */ +typedef struct rt_timer_info { + /** + * Clock resolution in nanoseconds. + */ + RTIME period; + /** + * Current monotonic date expressed in clock ticks. The + * duration of a tick depends on the Alchemy clock resolution + * for the process (see --alchemy-clock-resolution option, + * defaults to 1 nanosecond). + */ + RTIME date; +} RT_TIMER_INFO; + +extern struct clockobj alchemy_clock; + +#define alchemy_abs_timeout(__t, __ts) \ + ({ \ + (__t) == TM_INFINITE ? NULL : \ + (__t) == TM_NONBLOCK ? \ + ({ (__ts)->tv_sec = (__ts)->tv_nsec = 0; (__ts); }) : \ + ({ clockobj_ticks_to_timespec(&alchemy_clock, (__t), (__ts)); \ + (__ts); }); \ + }) + +#define alchemy_rel_timeout(__t, __ts) \ + ({ \ + (__t) == TM_INFINITE ? NULL : \ + (__t) == TM_NONBLOCK ? \ + ({ (__ts)->tv_sec = (__ts)->tv_nsec = 0; (__ts); }) : \ + ({ clockobj_ticks_to_timeout(&alchemy_clock, (__t), (__ts)); \ + (__ts); }); \ + }) + +static inline +int alchemy_poll_mode(const struct timespec *abs_timeout) +{ + return abs_timeout && + abs_timeout->tv_sec == 0 && + abs_timeout->tv_nsec == 0; +} + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @fn RTIME rt_timer_read(void) + * @brief Return the current system time. + * + * Return the current time maintained by the Xenomai core clock. + * + * @return The current time expressed in clock ticks (see note). + * + * @apitags{unrestricted} + * + * @note The @a time value is a multiple of the Alchemy clock + * resolution (see --alchemy-clock-resolution option, defaults to 1 + * nanosecond). 
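The two conversion macros above implement the timeout convention used by every Alchemy wrapper in these headers: TM_INFINITE maps to a NULL timespec pointer (block forever), TM_NONBLOCK maps to a zeroed timespec (which alchemy_poll_mode() then recognizes as a poll), and any other value is converted through the Alchemy clock. A short illustration of the three cases:

    struct timespec ts;
    const struct timespec *t;

    t = alchemy_rel_timeout(TM_INFINITE, &ts);  /* t == NULL: wait forever */
    t = alchemy_rel_timeout(TM_NONBLOCK, &ts);  /* ts == {0, 0}: poll */
    /* alchemy_poll_mode(t) is non-zero only for the TM_NONBLOCK case. */
    t = alchemy_rel_timeout(1000000, &ts);      /* 1e6 ticks from now,
                                                   scaled by the Alchemy
                                                   clock resolution */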
+ */ +static inline RTIME rt_timer_read(void) +{ + return clockobj_get_time(&alchemy_clock); +} + +SRTIME rt_timer_ns2ticks(SRTIME ns); + +SRTIME rt_timer_ticks2ns(SRTIME ticks); + +RTIME rt_timer_read(void); + +void rt_timer_inquire(RT_TIMER_INFO *info); + +void rt_timer_spin(RTIME ns); + +#ifdef __cplusplus +} +#endif + +/** @} */ + +#endif /* _ALCHEMY_TIMER_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/Makefile.am b/kernel/xenomai-v3.2.4/include/boilerplate/Makefile.am new file mode 100644 index 0000000..0642560 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/Makefile.am @@ -0,0 +1,23 @@ +includesubdir = $(includedir)/boilerplate + +includesub_HEADERS = \ + ancillaries.h \ + atomic.h \ + avl.h \ + shavl.h \ + avl-inner.h \ + compiler.h \ + debug.h \ + hash.h \ + heapmem.h \ + libc.h \ + list.h \ + lock.h \ + namegen.h \ + obstack.h \ + private-list.h \ + scope.h \ + setup.h \ + shared-list.h \ + time.h \ + tunables.h diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/ancillaries.h b/kernel/xenomai-v3.2.4/include/boilerplate/ancillaries.h new file mode 100644 index 0000000..319d22f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/ancillaries.h @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _BOILERPLATE_ANCILLARIES_H +#define _BOILERPLATE_ANCILLARIES_H + +#include <stdarg.h> +#include <time.h> +#include <pthread.h> +#include <sched.h> +#include <string.h> + +struct error_frame; + +#define ONE_BILLION 1000000000 + +void __namecpy_requires_character_array_as_destination(void); + +#define namecpy(__dst, __src) \ + ({ \ + if (!__builtin_types_compatible_p(typeof(__dst), char[])) \ + __namecpy_requires_character_array_as_destination(); \ + strncpy((__dst), __src, sizeof(__dst)); \ + __dst[sizeof(__dst) - 1] = '\0'; \ + __dst; \ + }) + +#define early_panic(__fmt, __args...) \ + __early_panic(__func__, __fmt, ##__args) + +#define panic(__fmt, __args...) 
\ + __panic(__func__, __fmt, ##__args) + +#ifdef __cplusplus +extern "C" { +#endif + +void __printout(const char *name, + const char *header, + const char *fmt, va_list ap); + +void __noreturn __early_panic(const char *fn, + const char *fmt, ...); + +void __noreturn ___panic(const char *fn, + const char *name, + const char *fmt, va_list ap); + +void __noreturn __panic(const char *fn, + const char *fmt, ...); + +void __warning(const char *name, + const char *fmt, va_list ap); + +void early_warning(const char *fmt, ...); + +void warning(const char *fmt, ...); + +void __notice(const char *name, + const char *fmt, va_list ap); + +void early_notice(const char *fmt, ...); + +void notice(const char *fmt, ...); + +void __boilerplate_init(void); + +const char *symerror(int errnum); + +void error_hook(struct error_frame *ef); + +int get_static_cpu_count(void); + +int get_online_cpu_set(cpu_set_t *cpuset); + +int get_realtime_cpu_set(cpu_set_t *cpuset); + +int get_current_cpu(void); + +pid_t get_thread_pid(void); + +char *lookup_command(const char *cmd); + +size_t get_mem_size(const char *arg); + +extern const char *config_strings[]; + +extern pthread_mutex_t __printlock; + +#ifdef __cplusplus +} +#endif + +#endif /* _BOILERPLATE_ANCILLARIES_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/atomic.h b/kernel/xenomai-v3.2.4/include/boilerplate/atomic.h new file mode 100644 index 0000000..4ee5f39 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/atomic.h @@ -0,0 +1,89 @@ +/** + * Copyright © 2011 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * Copyright © 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
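The namecpy() macro in boilerplate/ancillaries.h above enforces at build time that its destination is a true character array: when handed a plain pointer, the type check leaves a call to the deliberately undefined function __namecpy_requires_character_array_as_destination() in place, so the misuse fails at link time. The copy itself is bounded by sizeof() and always NUL-terminated. A short illustration:

    char label[16];            /* a real array: accepted */
    namecpy(label, "sampler"); /* bounded copy, guaranteed terminator */

    char *p = label;
    /* namecpy(p, "sampler"); would not link: sizeof(p) is useless and
     * the guard function call survives into the object file. */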
+ */ +#ifndef _BOILERPLATE_ATOMIC_H +#define _BOILERPLATE_ATOMIC_H + +#include <xeno_config.h> + +typedef struct { int v; } atomic_t; + +typedef struct { long v; } atomic_long_t; + +#define ATOMIC_INIT(__n) { (__n) } + +static inline long atomic_long_read(const atomic_long_t *ptr) +{ + return ptr->v; +} + +static inline void atomic_long_set(atomic_long_t *ptr, long v) +{ + ptr->v = v; +} + +static inline int atomic_read(const atomic_t *ptr) +{ + return ptr->v; +} + +static inline void atomic_set(atomic_t *ptr, long v) +{ + ptr->v = v; +} + +#ifndef atomic_cmpxchg +#define atomic_cmpxchg(__ptr, __old, __new) \ + __sync_val_compare_and_swap(&(__ptr)->v, __old, __new) +#endif + +#ifndef atomic_sub_fetch +#define atomic_sub_fetch(__ptr, __n) \ + __sync_sub_and_fetch(&(__ptr)->v, __n) +#endif + +#ifndef atomic_add_fetch +#define atomic_add_fetch(__ptr, __n) \ + __sync_add_and_fetch(&(__ptr)->v, __n) +#endif + +#ifdef CONFIG_SMP +#ifndef smp_mb +#define smp_mb() __sync_synchronize() +#endif +#ifndef smp_rmb +#define smp_rmb() smp_mb() +#endif +#ifndef smp_wmb +#define smp_wmb() smp_mb() +#endif +#else /* !CONFIG_SMP */ +#define smp_mb() do { } while (0) +#define smp_rmb() do { } while (0) +#define smp_wmb() do { } while (0) +#endif /* !CONFIG_SMP */ + +#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x)) + +#define compiler_barrier() __asm__ __volatile__("": : :"memory") + +#ifndef cpu_relax +#define cpu_relax() __sync_synchronize() +#endif + +#endif /* _BOILERPLATE_ATOMIC_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/avl-inner.h b/kernel/xenomai-v3.2.4/include/boilerplate/avl-inner.h new file mode 100644 index 0000000..9c05762 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/avl-inner.h @@ -0,0 +1,522 @@ +/* + * Copyright (c) 2015 Gilles Chanteperdrix + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#if (!defined(_BOILERPLATE_AVL_INNER_H) && !defined(AVL_PSHARED)) || \ + (!defined(_BOILERPLATE_AVL_SHARED_INNER_H) && defined(AVL_PSHARED)) /* Yeah, well... */ + +#if !defined(_BOILERPLATE_AVL_H) && !defined(_BOILERPLATE_SHAVL_H) +#error "Do not include this file directly. Use <boilerplate/avl.h> or <boilerplate/shavl.h> instead." 
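The atomic helpers above are thin wrappers over the GCC __sync builtins; in particular atomic_cmpxchg() returns the value previously stored, which yields the classic compare-and-swap retry loop. A sketch of a try-get reference counter that refuses to resurrect a zero count (hypothetical helper name):

    static atomic_t refcnt = ATOMIC_INIT(1);

    static int refcnt_try_get(atomic_t *p)
    {
            int old;

            do {
                    old = atomic_read(p);
                    if (old == 0)
                            return 0;   /* already dropped, do not revive */
            } while (atomic_cmpxchg(p, old, old + 1) != old);

            return 1;
    }

    /* atomic_add_fetch()/atomic_sub_fetch() return the *new* value, so
     * the matching put side reads:
     *     if (atomic_sub_fetch(&refcnt, 1) == 0)
     *             destroy();
     */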
+#endif + +#include <stddef.h> +#include <stdio.h> + +#ifdef AVL_PSHARED +#define __AVL(__decl) shavl_ ## __decl +#define __AVLH(__decl) shavlh_ ## __decl +#define __AVL_T(__type) sh ## __type +#define _BOILERPLATE_AVL_SHARED_INNER_H +#else +#define __AVL(__decl) avl_ ## __decl +#define __AVLH(__decl) avlh_ ## __decl +#define __AVL_T(__type) __type +#define _BOILERPLATE_AVL_INNER_H +#endif + +struct __AVL_T(avlh) { +#define AVLH_APP_BITS 28 + unsigned int flags: AVLH_APP_BITS; + int type: 2; + int balance: 2; + union { + ptrdiff_t offset; + struct __AVL_T(avlh) *ptr; + } link[3]; +}; + +struct __AVL_T(avl); + +/* + * Comparison function: should return -1 if left is less than right, 0 + * if they are equal and 1 if left is greather than right. You can use + * the avl_sign function which will convert a difference to -1, 0, + * 1. Beware of overflow however. You can also use avl_cmp_sign() + * which should not have any such problems. + */ +typedef int __AVL_T(avlh_cmp_t)(const struct __AVL_T(avlh) *const, + const struct __AVL_T(avlh) *const); + +typedef struct __AVL_T(avlh) * +__AVL_T(avl_search_t)(const struct __AVL_T(avl) *, + const struct __AVL_T(avlh) *, int *, int); + +typedef int __AVL_T(avlh_prn_t)(char *, size_t, + const struct __AVL_T(avlh) *const); + +struct __AVL_T(avl_searchops) { + __AVL_T(avl_search_t) *search; + __AVL_T(avlh_cmp_t) *cmp; +}; + +struct __AVL_T(avl) { + struct __AVL_T(avlh) anchor; + union { + ptrdiff_t offset; + struct __AVL_T(avlh) *ptr; + } end[3]; + unsigned int count; + unsigned int height; +}; + +#define AVL_LEFT -1 +#define AVL_UP 0 +#define AVL_RIGHT 1 +/* maps AVL_LEFT to AVL_RIGHT and reciprocally. */ +#define avl_opposite(type) (-(type)) +/* maps AVL_LEFT and AVL_RIGHT to arrays index (or bit positions). */ +#define avl_type2index(type) ((type)+1) + +#define AVL_THR_LEFT (1 << avl_type2index(AVL_LEFT)) +#define AVL_THR_RIGHT (1 << avl_type2index(AVL_RIGHT)) + +#ifdef AVL_PSHARED + +static inline struct shavlh * +shavlh_link(const struct shavl *const avl, + const struct shavlh *const holder, unsigned int dir) +{ + ptrdiff_t offset = holder->link[avl_type2index(dir)].offset; + return offset == (ptrdiff_t)-1 ? NULL : (void *)avl + offset; +} + +static inline void +shavlh_set_link(struct shavl *const avl, struct shavlh *lhs, + int dir, struct shavlh *rhs) +{ + ptrdiff_t offset = rhs ? (void *)rhs - (void *)avl : (ptrdiff_t)-1; + lhs->link[avl_type2index(dir)].offset = offset; +} + +static inline +struct shavlh *shavl_end(const struct shavl *const avl, int dir) +{ + ptrdiff_t offset = avl->end[avl_type2index(dir)].offset; + return offset == (ptrdiff_t)-1 ? NULL : (void *)avl + offset; +} + +static inline void +shavl_set_end(struct shavl *const avl, int dir, struct shavlh *holder) +{ + ptrdiff_t offset = holder ? 
(void *)holder - (void *)avl : (ptrdiff_t)-1; + avl->end[avl_type2index(dir)].offset = offset; +} + +#define shavl_count(avl) ((avl)->count) +#define shavl_height(avl) ((avl)->height) +#define shavl_anchor(avl) (&(avl)->anchor) + +#define shavlh_up(avl, holder) \ + shavlh_link((avl), (holder), AVL_UP) +#define shavlh_left(avl, holder) \ + shavlh_link((avl), (holder), AVL_LEFT) +#define shavlh_right(avl, holder) \ + shavlh_link((avl), (holder), AVL_RIGHT) + +#define shavlh_thr_tst(avl, holder, side) \ + (shavlh_link(avl, holder, side) == NULL) +#define shavlh_child(avl, holder, side) \ + (shavlh_link((avl),(holder),(side))) +#define shavlh_has_child(avl, holder, side) \ + (!shavlh_thr_tst(avl, holder, side)) + +#define shavl_top(avl) (shavlh_right(avl, shavl_anchor(avl))) +#define shavl_head(avl) (shavl_end((avl), AVL_LEFT)) +#define shavl_tail(avl) (shavl_end((avl), AVL_RIGHT)) + +/* + * Search a node in a pshared AVL, return its parent if it could not + * be found. + */ +#define DECLARE_SHAVL_SEARCH(__search_fn, __cmp) \ + struct shavlh *__search_fn(const struct shavl *const avl, \ + const struct shavlh *const node, \ + int *const pdelta, int dir) \ + { \ + int delta = AVL_RIGHT; \ + struct shavlh *holder = shavl_top(avl), *next; \ + \ + if (holder == NULL) \ + goto done; \ + \ + for (;;) { \ + delta = __cmp(node, holder); \ + /* \ + * Handle duplicates keys here, according to \ + * "dir", if dir is: \ + * - AVL_LEFT, the leftmost node is returned, \ + * - AVL_RIGHT, the rightmost node is returned, \ + * - 0, the first match is returned. \ + */ \ + if (!(delta ?: dir)) \ + break; \ + next = shavlh_child(avl, holder, delta ?: dir); \ + if (next == NULL) \ + break; \ + holder = next; \ + } \ + \ + done: \ + *pdelta = delta; \ + return holder; \ + } + +#else /* !AVL_PSHARED */ + +#define avlh_link(avl, holder, dir) ((holder)->link[avl_type2index(dir)].ptr) + +#define avl_end(avl, dir) ((avl)->end[avl_type2index(dir)].ptr) + +static inline void +avlh_set_link(struct avl *const avl, struct avlh *lhs, int dir, struct avlh *rhs) +{ + avlh_link(avl, lhs, dir) = rhs; +} + +static inline void +avl_set_end(struct avl *const avl, int dir, struct avlh *holder) +{ + avl_end(avl, dir) = holder; +} + +#define avl_count(avl) ((avl)->count) +#define avl_height(avl) ((avl)->height) +#define avl_anchor(avl) (&(avl)->anchor) + +#define avlh_up(avl, holder) avlh_link((avl), (holder), AVL_UP) +#define avlh_left(avl, holder) avlh_link((avl), (holder), AVL_LEFT) +#define avlh_right(avl, holder) avlh_link((avl), (holder), AVL_RIGHT) + +#define avlh_thr_tst(avl, holder, side) (avlh_link(avl, holder, side) == NULL) +#define avlh_child(avl, holder, side) (avlh_link((avl),(holder),(side))) +#define avlh_has_child(avl, holder, side) (!avlh_thr_tst(avl, holder, side)) + +#define avl_top(avl) (avlh_right(avl, avl_anchor(avl))) +#define avl_head(avl) (avl_end((avl), AVL_LEFT)) +#define avl_tail(avl) (avl_end((avl), AVL_RIGHT)) + +/* + * Search a node in a private AVL, return its parent if it could not + * be found. 
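The DECLARE_AVL_SEARCH() macro just below generates the search routine that the insert/search entry points consume through struct avl_searchops. A sketch of how a caller might instantiate it for a hypothetical int-keyed node type (container_of() comes from boilerplate/compiler.h later in this patch, avl_cmp_sign() from this header):

    struct item {
            struct avlh link;          /* embedded node holder */
            int key;
    };

    static int item_cmp(const struct avlh *const l,
                        const struct avlh *const r)
    {
            return avl_cmp_sign(container_of(l, struct item, link)->key,
                                container_of(r, struct item, link)->key);
    }

    static DECLARE_AVL_SEARCH(item_search, item_cmp)

    static const struct avl_searchops item_ops = {
            .search = item_search,
            .cmp = item_cmp,
    };

    /* Then, assuming struct avl tree and struct item *it:
     *     avl_init(&tree);
     *     avlh_init(&it->link);
     *     avl_insert(&tree, &it->link, &item_ops);
     *     struct avlh *h = avl_search(&tree, &probe.link, &item_ops);
     */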
+ */ +#define DECLARE_AVL_SEARCH(__search_fn, __cmp) \ + struct avlh *__search_fn(const struct avl *const avl, \ + const struct avlh *const node, \ + int *const pdelta, int dir) \ + { \ + int delta = AVL_RIGHT; \ + struct avlh *holder = avl_top(avl), *next; \ + \ + if (holder == NULL) \ + goto done; \ + \ + for (;;) { \ + delta = __cmp(node, holder); \ + /* \ + * Handle duplicates keys here, according to \ + * "dir", if dir is: \ + * - AVL_LEFT, the leftmost node is returned, \ + * - AVL_RIGHT, the rightmost node is returned, \ + * - 0, the first match is returned. \ + */ \ + if (!(delta ?: dir)) \ + break; \ + next = avlh_child(avl, holder, delta ?: dir); \ + if (next == NULL) \ + break; \ + holder = next; \ + } \ + \ + done: \ + *pdelta = delta; \ + return holder; \ + } + +#endif /* !AVL_PSHARED */ + +/* + * From "Bit twiddling hacks", returns v < 0 ? -1 : (v > 0 ? 1 : 0) + */ +#define avl_sign(v) \ + ({ \ + typeof(v) _v = (v); \ + ((_v) > 0) - ((_v) < 0); \ + }) + +/* + * Variation on the same theme. + */ +#define avl_cmp_sign(l, r) \ + ({ \ + typeof(l) _l = (l); \ + typeof(r) _r = (r); \ + (_l > _r) - (_l < _r); \ + }) + +static inline struct __AVL_T(avlh) * +__AVL(search_inner)(const struct __AVL_T(avl) *const avl, + const struct __AVL_T(avlh) *n, int *delta, + const struct __AVL_T(avl_searchops) *ops) +{ + return ops->search(avl, n, delta, 0); +} + +static inline +struct __AVL_T(avlh) *__AVL(gettop)(const struct __AVL_T(avl) *const avl) +{ + return __AVL(top)(avl); +} + +static inline +struct __AVL_T(avlh) *__AVL(gethead)(const struct __AVL_T(avl) *const avl) +{ + return __AVL(head)(avl); +} + +static inline +struct __AVL_T(avlh) *__AVL(gettail)(const struct __AVL_T(avl) *const avl) +{ + return __AVL(tail)(avl); +} + +static inline +unsigned int __AVL(getcount)(const struct __AVL_T(avl) *const avl) +{ + return __AVL(count)(avl); +} + +struct __AVL_T(avlh) *__AVL(inorder)(const struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *holder, + const int dir); + +struct __AVL_T(avlh) *__AVL(postorder)(const struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder, + const int dir); + +struct __AVL_T(avlh) *__AVL(preorder)(const struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *holder, + const int dir); + +static inline struct __AVL_T(avlh) * +__AVL(next)(const struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder) +{ + return __AVL(inorder)(avl, holder, AVL_RIGHT); +} + +static inline struct __AVL_T(avlh) * +__AVL(prev)(const struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder) +{ + return __AVL(inorder)(avl, holder, AVL_LEFT); +} + +static inline struct __AVL_T(avlh) * +__AVL(postorder_next)(const struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder) +{ + return __AVL(postorder)(avl, holder, AVL_RIGHT); +} + +static inline struct __AVL_T(avlh) * +__AVL(postorder_prev)(const struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder) +{ + return __AVL(postorder)(avl, holder, AVL_LEFT); +} + +static inline struct __AVL_T(avlh) * +__AVL(preorder_next)(const struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder) +{ + return __AVL(preorder)(avl, holder, AVL_RIGHT); +} + +static inline struct __AVL_T(avlh) * +__AVL(preorder_prev)(const struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder) +{ + return __AVL(preorder)(avl, holder, AVL_LEFT); +} + +static inline void __AVLH(init)(struct __AVL_T(avlh) *const holder) +{ + holder->balance = 0; + holder->type = 0; +} + +static inline struct 
__AVL_T(avlh) * +__AVL(search)(const struct __AVL_T(avl) *const avl, + const struct __AVL_T(avlh) *node, + const struct __AVL_T(avl_searchops) *ops) +{ + struct __AVL_T(avlh) *holder; + int delta; + + holder = __AVL(search_inner)(avl, node, &delta, ops); + if (!delta) + return holder; + + return NULL; +} + +static inline struct __AVL_T(avlh) * +__AVL(search_nearest)(const struct __AVL_T(avl) *const avl, + const struct __AVL_T(avlh) *node, int dir, + const struct __AVL_T(avl_searchops) *ops) +{ + struct __AVL_T(avlh) *holder; + int delta; + + holder = __AVL(search_inner)(avl, node, &delta, ops); + if (!holder || delta != dir) + return holder; + + return __AVL(inorder)(avl, holder, dir); +} + +static inline struct __AVL_T(avlh) * +__AVL(search_le)(const struct __AVL_T(avl) *const avl, + const struct __AVL_T(avlh) *node, + const struct __AVL_T(avl_searchops) *ops) +{ + return __AVL(search_nearest)(avl, node, AVL_LEFT, ops); +} + +static inline struct __AVL_T(avlh) * +__AVL(search_ge)(const struct __AVL_T(avl) *const avl, + const struct __AVL_T(avlh) *node, + const struct __AVL_T(avl_searchops) *ops) +{ + return __AVL(search_nearest)(avl, node, AVL_RIGHT, ops); +} + +static inline struct __AVL_T(avlh) * +__AVL(search_multi)(const struct __AVL_T(avl) *const avl, + const struct __AVL_T(avlh) *node, int dir, + const struct __AVL_T(avl_searchops) *ops) +{ + struct __AVL_T(avlh) *holder; + int delta; + + holder = ops->search(avl, node, &delta, dir); + if (!delta) + return holder; + + if (!holder) + return NULL; + + return __AVL(inorder)(avl, holder, -dir); +} + +static inline struct __AVL_T(avlh) * +__AVL(search_first)(const struct __AVL_T(avl) *const avl, + const struct __AVL_T(avlh) *node, + const struct __AVL_T(avl_searchops) *ops) +{ + return __AVL(search_multi)(avl, node, AVL_LEFT, ops); +} + +static inline struct __AVL_T(avlh) * +__AVL(search_last)(const struct __AVL_T(avl) *const avl, + const struct __AVL_T(avlh) *node, + const struct __AVL_T(avl_searchops) *ops) +{ + return __AVL(search_multi)(avl, node, AVL_RIGHT, ops); +} + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +void __AVL(init)(struct __AVL_T(avl) *const avl); + +void __AVL(destroy)(struct __AVL_T(avl) *const avl); + +int __AVL(insert)(struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder, + const struct __AVL_T(avl_searchops) *ops); + +int __AVL(insert_front)(struct __AVL_T(avl) *avl, + struct __AVL_T(avlh) *holder, + const struct __AVL_T(avl_searchops) *ops); + +int __AVL(insert_back)(struct __AVL_T(avl) *avl, + struct __AVL_T(avlh) *holder, + const struct __AVL_T(avl_searchops) *ops); + +int __AVL(insert_at)(struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *parent, int dir, + struct __AVL_T(avlh) *child); + +int __AVL(prepend)(struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder, + const struct __AVL_T(avl_searchops) *ops); + +int __AVL(append)(struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder, + const struct __AVL_T(avl_searchops) *ops); + +int __AVL(delete)(struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *node); + +int __AVL(replace)(struct __AVL_T(avl) *avl, + struct __AVL_T(avlh) *oldh, + struct __AVL_T(avlh) *newh, + const struct __AVL_T(avl_searchops) *ops); + +struct __AVL_T(avlh) *__AVL(update)(struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder, + const struct __AVL_T(avl_searchops) *ops); + +struct __AVL_T(avlh) *__AVL(set)(struct __AVL_T(avl) *const avl, + struct __AVL_T(avlh) *const holder, + const struct 
__AVL_T(avl_searchops) *ops); + +void __AVL(clear)(struct __AVL_T(avl) *const avl, + void (*destruct)(struct __AVL_T(avlh) *)); + +int __AVL(check)(const struct __AVL_T(avl) *avl, + const struct __AVL_T(avl_searchops) *ops); + +void __AVL(dump)(FILE *file, const struct __AVL_T(avl) *const avl, + __AVL_T(avlh_prn_t) *prn, unsigned int indent, + unsigned int len); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#undef __AVL +#undef __AVLH +#undef __AVL_T + +#endif /* !_BOILERPLATE_AVL_INNER_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/avl.h b/kernel/xenomai-v3.2.4/include/boilerplate/avl.h new file mode 100644 index 0000000..57d8379 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/avl.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018 Philippe Gerum + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _BOILERPLATE_AVL_H +#define _BOILERPLATE_AVL_H + +#include <boilerplate/avl-inner.h> + +#endif /* !_BOILERPLATE_AVL_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/compiler.h b/kernel/xenomai-v3.2.4/include/boilerplate/compiler.h new file mode 100644 index 0000000..263af6b --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/compiler.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _BOILERPLATE_COMPILER_H +#define _BOILERPLATE_COMPILER_H + +#include <stddef.h> + +#define container_of(ptr, type, member) \ + ({ \ + const __typeof__(((type *)0)->member) *__mptr = (ptr); \ + (type *)((char *)__mptr - offsetof(type, member)); \ + }) + +#define __stringify_1(x...) #x +#define __stringify(x...) 
__stringify_1(x) + +#ifndef __noreturn +#define __noreturn __attribute__((__noreturn__)) +#endif + +#ifndef __must_check +#define __must_check __attribute__((__warn_unused_result__)) +#endif + +#ifndef __weak +#define __weak __attribute__((__weak__)) +#endif + +#ifndef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) +#endif + +#ifndef __aligned +#define __aligned(__n) __attribute__((aligned (__n))) +#endif + +#ifndef __deprecated +#define __deprecated __attribute__((__deprecated__)) +#endif + +#ifndef __packed +#define __packed __attribute__((__packed__)) +#endif + +#ifndef __alloc_size +#define __alloc_size(__args) __attribute__((__alloc_size__(__args))) +#endif + +#define __align_to(__size, __al) (((__size) + (__al) - 1) & (~((__al) - 1))) + +#ifdef __cplusplus +extern "C" { +#endif + +#define xenomai_count_trailing_zeros(x) \ + ((x) == 0 ? (int)(sizeof(x) * __CHAR_BIT__) \ + : sizeof(x) <= sizeof(unsigned int) ? \ + __builtin_ctz((unsigned int)x) \ + : sizeof(x) <= sizeof(unsigned long) ? \ + __builtin_ctzl((unsigned long)x) \ + : __builtin_ctzll(x)) + +#define xenomai_count_leading_zeros(x) \ + ((x) == 0 ? (int)(sizeof(x) * __CHAR_BIT__) \ + : sizeof(x) <= sizeof(unsigned int) ? \ + __builtin_clz((unsigned int)x) + \ + (int)(sizeof(unsigned int) - sizeof(x)) \ + : sizeof(x) <= sizeof(unsigned long) ? \ + __builtin_clzl((unsigned long)x) \ + : __builtin_clzll(x)) + +#ifdef __cplusplus +} +#endif + +#endif /* _BOILERPLATE_COMPILER_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/debug.h b/kernel/xenomai-v3.2.4/include/boilerplate/debug.h new file mode 100644 index 0000000..248cb90 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/debug.h @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
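The two bit-counting helpers above dispatch to the __builtin_ctz*/__builtin_clz* variant matching the operand width, and, unlike the raw builtins, give the all-zero case a defined result (the operand's bit width). A worked example:

    unsigned int v = 0x50;                        /* binary 1010000 */
    int tz = xenomai_count_trailing_zeros(v);     /* 4 */
    int lz = xenomai_count_leading_zeros(v);      /* 25 for a 32-bit int */
    int lg = (int)(sizeof(v) * 8) - 1 - lz;       /* 6 == floor(log2(0x50)) */
    int zz = xenomai_count_trailing_zeros(0U);    /* 32: defined here, not UB */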
+ */ +#ifndef _BOILERPLATE_DEBUG_H +#define _BOILERPLATE_DEBUG_H + +#include <stdint.h> +#include <stddef.h> +#include <xeno_config.h> + +#ifdef CONFIG_XENO_DEBUG + +#include <pthread.h> +#include <boilerplate/compiler.h> + +static inline int must_check(void) +{ + return 1; +} + +struct error_frame { + int retval; + int lineno; + const char *fn; + const char *file; + struct error_frame *next; +}; + +struct backtrace_data { + const char *name; + struct error_frame *inner; + pthread_mutex_t lock; + char eundef[16]; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +void backtrace_init_context(struct backtrace_data *btd, + const char *name); + +void backtrace_destroy_context(struct backtrace_data *btd); + +void backtrace_dump(struct backtrace_data *btd); + +void backtrace_log(int retval, const char *fn, + const char *file, int lineno); + +void backtrace_check(void); + +void __debug(const char *name, const char *fmt, ...); + +char *__get_error_buf(size_t *sizep); + +void debug_init(void); + +#ifdef __cplusplus +} +#endif + +#define __bt(__exp) \ + ({ \ + typeof(__exp) __ret = (__exp); \ + if (__ret < 0) \ + backtrace_log((int)__ret, __FUNCTION__, \ + __FILE__, __LINE__); \ + __ret; \ + }) + +#define __bterrno(__exp) \ + ({ \ + typeof(__exp) __ret = (__exp); \ + if (__ret < 0) \ + backtrace_log(-errno, __FUNCTION__, \ + __FILE__, __LINE__); \ + __ret; \ + }) + +#else /* !CONFIG_XENO_DEBUG */ + +static inline int must_check(void) +{ + return 0; +} + +struct backtrace_data { +}; + +#define __bt(__exp) (__exp) + +#define __bterrno(__exp) (__exp) + +#define backtrace_init_context(btd, name) \ + do { (void)(btd); (void)(name); } while (0) + +#define backtrace_destroy_context(btd) \ + do { (void)(btd); } while (0) + +#define backtrace_dump(btd) \ + do { (void)(btd); } while (0) + +#define backtrace_check() \ + do { } while (0) +/* + * XXX: We have no thread-private backtrace context in non-debug mode, + * so there is a potential race if multiple threads want to write to + * this buffer. This looks acceptable though, since this is primarily + * a debug feature, and the race won't damage the system anyway. + */ +#define __get_error_buf(sizep) \ + ({ \ + static char __buf[16]; \ + *(sizep) = sizeof(__buf); \ + __buf; \ + }) + +#define debug_init() do { } while (0) + +#endif /* !CONFIG_XENO_DEBUG */ + +static inline int bad_pointer(const void *ptr) +{ + return ptr == NULL || + ((intptr_t)ptr & (intptr_t)(sizeof(intptr_t)-1)) != 0; +} + +#endif /* _BOILERPLATE_DEBUG_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/hash.h b/kernel/xenomai-v3.2.4/include/boilerplate/hash.h new file mode 100644 index 0000000..ef62e13 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/hash.h @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
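The __bt()/__bterrno() macros above wrap an expression and, in CONFIG_XENO_DEBUG builds, log the failure point (function, file, line) whenever the result is negative; in non-debug builds they collapse to the bare expression. A sketch, where setup_device() is a hypothetical callee returning a negated errno value:

    #include <fcntl.h>

    int open_device(const char *path)
    {
            /* open() reports failure through errno, so use the
             * errno-aware wrapper; this logs -errno on failure. */
            int fd = __bterrno(open(path, O_RDWR));
            if (fd < 0)
                    return fd;   /* -1 here; the errno was recorded */

            /* Callees already returning -errno go through __bt(),
             * which records the value as it bubbles up. */
            return __bt(setup_device(fd));
    }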
+ */ + +#ifndef _BOILERPLATE_HASH_H +#define _BOILERPLATE_HASH_H + +#include <pthread.h> +#include <boilerplate/list.h> + +#define HASHSLOTS (1<<8) + +struct hashobj { + dref_type(const void *) key; +#ifdef CONFIG_XENO_PSHARED + char static_key[16]; +#endif + size_t len; + struct holder link; +}; + +struct hash_bucket { + struct listobj obj_list; +}; + +struct hash_table { + struct hash_bucket table[HASHSLOTS]; + pthread_mutex_t lock; +}; + +struct hash_operations { + int (*compare)(const void *l, + const void *r, + size_t len); +#ifdef CONFIG_XENO_PSHARED + int (*probe)(struct hashobj *oldobj); + void *(*alloc)(size_t len); + void (*free)(void *key); +#endif +}; + +typedef int (*hash_walk_op)(struct hash_table *t, + struct hashobj *obj, + void *arg); + +#ifdef CONFIG_XENO_PSHARED + +/* Private version - h-table is not shareable between processes. */ + +struct pvhashobj { + const void *key; + size_t len; + struct pvholder link; +}; + +struct pvhash_bucket { + struct pvlistobj obj_list; +}; + +struct pvhash_table { + struct pvhash_bucket table[HASHSLOTS]; + pthread_mutex_t lock; +}; + +struct pvhash_operations { + int (*compare)(const void *l, + const void *r, + size_t len); +}; + +typedef int (*pvhash_walk_op)(struct pvhash_table *t, + struct pvhashobj *obj, + void *arg); + +#else /* !CONFIG_XENO_PSHARED */ +#define pvhashobj hashobj +#define pvhash_bucket hash_bucket +#define pvhash_table hash_table +#define pvhash_walk_op hash_walk_op +#endif /* !CONFIG_XENO_PSHARED */ + +#ifdef __cplusplus +extern "C" { +#endif + +unsigned int __hash_key(const void *key, + size_t length, unsigned int c); + +void __hash_init(void *heap, struct hash_table *t); + +int __hash_enter(struct hash_table *t, + const void *key, size_t len, + struct hashobj *newobj, + const struct hash_operations *hops, + int nodup); + +static inline void hash_init(struct hash_table *t) +{ + __hash_init(__main_heap, t); +} + +void hash_destroy(struct hash_table *t); + +static inline int hash_enter(struct hash_table *t, + const void *key, size_t len, + struct hashobj *newobj, + const struct hash_operations *hops) +{ + return __hash_enter(t, key, len, newobj, hops, 1); +} + +static inline int hash_enter_dup(struct hash_table *t, + const void *key, size_t len, + struct hashobj *newobj, + const struct hash_operations *hops) +{ + return __hash_enter(t, key, len, newobj, hops, 0); +} + +int hash_remove(struct hash_table *t, struct hashobj *delobj, + const struct hash_operations *hops); + +struct hashobj *hash_search(struct hash_table *t, + const void *key, size_t len, + const struct hash_operations *hops); + +int hash_walk(struct hash_table *t, + hash_walk_op walk, void *arg); + +#ifdef CONFIG_XENO_PSHARED + +int __hash_enter_probe(struct hash_table *t, + const void *key, size_t len, + struct hashobj *newobj, + const struct hash_operations *hops, + int nodup); + +int __pvhash_enter(struct pvhash_table *t, + const void *key, size_t len, + struct pvhashobj *newobj, + const struct pvhash_operations *hops, + int nodup); + +static inline +int hash_enter_probe(struct hash_table *t, + const void *key, size_t len, + struct hashobj *newobj, + const struct hash_operations *hops) +{ + return __hash_enter_probe(t, key, len, newobj, hops, 1); +} + +static inline +int hash_enter_probe_dup(struct hash_table *t, + const void *key, size_t len, + struct hashobj *newobj, + const struct hash_operations *hops) +{ + return __hash_enter_probe(t, key, len, newobj, hops, 0); +} + +struct hashobj *hash_search_probe(struct hash_table *t, + const void *key, size_t 
len, + const struct hash_operations *hops); + +void pvhash_init(struct pvhash_table *t); + +static inline +int pvhash_enter(struct pvhash_table *t, + const void *key, size_t len, + struct pvhashobj *newobj, + const struct pvhash_operations *hops) +{ + return __pvhash_enter(t, key, len, newobj, hops, 1); +} + +static inline +int pvhash_enter_dup(struct pvhash_table *t, + const void *key, size_t len, + struct pvhashobj *newobj, + const struct pvhash_operations *hops) +{ + return __pvhash_enter(t, key, len, newobj, hops, 0); +} + +int pvhash_remove(struct pvhash_table *t, struct pvhashobj *delobj, + const struct pvhash_operations *hops); + +struct pvhashobj *pvhash_search(struct pvhash_table *t, + const void *key, size_t len, + const struct pvhash_operations *hops); + +int pvhash_walk(struct pvhash_table *t, + pvhash_walk_op walk, void *arg); + +#else /* !CONFIG_XENO_PSHARED */ +#define pvhash_init hash_init +#define pvhash_enter hash_enter +#define pvhash_enter_dup hash_enter_dup +#define pvhash_remove hash_remove +#define pvhash_search hash_search +#define pvhash_walk hash_walk +#define pvhash_operations hash_operations +#endif /* !CONFIG_XENO_PSHARED */ + +#ifdef __cplusplus +} +#endif + +#endif /* _BOILERPLATE_HASH_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/heapmem.h b/kernel/xenomai-v3.2.4/include/boilerplate/heapmem.h new file mode 100644 index 0000000..0ddd1ce --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/heapmem.h @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _BOILERPLATE_HEAPMEM_H +#define _BOILERPLATE_HEAPMEM_H + +#include <sys/types.h> +#include <stdint.h> +#include <limits.h> +#include <boilerplate/list.h> +#include <boilerplate/lock.h> +#include <boilerplate/avl.h> + +#define HEAPMEM_PAGE_SHIFT 9 /* 2^9 => 512 bytes */ +#define HEAPMEM_PAGE_SIZE (1UL << HEAPMEM_PAGE_SHIFT) +#define HEAPMEM_PAGE_MASK (~(HEAPMEM_PAGE_SIZE - 1)) +#define HEAPMEM_MIN_LOG2 4 /* 16 bytes */ +/* + * Use bucketed memory for sizes between 2^HEAPMEM_MIN_LOG2 and + * 2^(HEAPMEM_PAGE_SHIFT-1). + */ +#define HEAPMEM_MAX (HEAPMEM_PAGE_SHIFT - HEAPMEM_MIN_LOG2) +#define HEAPMEM_MIN_ALIGN (1U << HEAPMEM_MIN_LOG2) +/* Max size of an extent (4Gb - HEAPMEM_PAGE_SIZE). */ +#define HEAPMEM_MAX_EXTSZ (4294967295U - HEAPMEM_PAGE_SIZE + 1) +/* Bits we need for encoding a page # */ +#define HEAPMEM_PGENT_BITS (32 - HEAPMEM_PAGE_SHIFT) + +/* Each page is represented by a page map entry. */ +#define HEAPMEM_PGMAP_BYTES sizeof(struct heapmem_pgentry) + +struct heapmem_pgentry { + /* Linkage in bucket list. */ + unsigned int prev : HEAPMEM_PGENT_BITS; + unsigned int next : HEAPMEM_PGENT_BITS; + /* page_list or log2. 
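Typical use of the hash API above: embed a struct hashobj in the object, supply a compare callback through struct hash_operations, and link or look up by (key, length). This sketch uses hypothetical names and assumes the compare-only operations suffice (in CONFIG_XENO_PSHARED builds the probe/alloc/free hooks and the shared main heap also come into play):

    #include <string.h>

    struct service {
            struct hashobj hobj;       /* embedded hash linkage */
            int port;
    };

    static int svc_cmp(const void *l, const void *r, size_t len)
    {
            return memcmp(l, r, len);
    }

    static const struct hash_operations svc_hops = {
            .compare = svc_cmp,
    };

    static struct hash_table svc_table;

    /* hash_init(&svc_table);
     * hash_enter(&svc_table, "ntp", sizeof("ntp"), &svc->hobj,
     *            &svc_hops);                      -- rejects duplicates
     * struct hashobj *h = hash_search(&svc_table, "ntp", sizeof("ntp"),
     *                                 &svc_hops);
     * if (h)
     *         svc = container_of(h, struct service, hobj);
     */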
*/ + unsigned int type : 6; + /* + * We hold either a spatial map of busy blocks within the page + * for bucketed memory (up to 32 blocks per page), or the + * overall size of the multi-page block if entry.type == + * page_list. + */ + union { + uint32_t map; + uint32_t bsize; + }; +}; + +/* + * A range descriptor is stored at the beginning of the first page of + * a range of free pages. heapmem_range.size is nrpages * + * HEAPMEM_PAGE_SIZE. Ranges are indexed by address and size in AVL + * trees. + */ +struct heapmem_range { + struct avlh addr_node; + struct avlh size_node; + size_t size; +}; + +struct heapmem_extent { + struct pvholder next; + void *membase; /* Base of page array */ + void *memlim; /* Limit of page array */ + struct avl addr_tree; + struct avl size_tree; + struct heapmem_pgentry pagemap[0]; /* Start of page entries[] */ +}; + +struct heap_memory { + pthread_mutex_t lock; + struct pvlistobj extents; + size_t arena_size; + size_t usable_size; + size_t used_size; + /* Heads of page lists for log2-sized blocks. */ + uint32_t buckets[HEAPMEM_MAX]; +}; + +#define __HEAPMEM_MAP_SIZE(__nrpages) \ + ((__nrpages) * HEAPMEM_PGMAP_BYTES) + +#define __HEAPMEM_ARENA_SIZE(__size) \ + (__size + \ + __align_to(sizeof(struct heapmem_extent) + \ + __HEAPMEM_MAP_SIZE((__size) >> HEAPMEM_PAGE_SHIFT), \ + HEAPMEM_MIN_ALIGN)) + +/* + * Calculate the minimal size of the memory arena needed to contain a + * heap of __user_size bytes, including our meta data for managing it. + * Usable at build time if __user_size is constant. + */ +#define HEAPMEM_ARENA_SIZE(__user_size) \ + __HEAPMEM_ARENA_SIZE(__align_to(__user_size, HEAPMEM_PAGE_SIZE)) + +#ifdef __cplusplus +extern "C" { +#endif + +int heapmem_init(struct heap_memory *heap, + void *mem, size_t size); + +int heapmem_extend(struct heap_memory *heap, + void *mem, size_t size); + +void heapmem_destroy(struct heap_memory *heap); + +void *heapmem_alloc(struct heap_memory *heap, + size_t size) __alloc_size(2); + +int heapmem_free(struct heap_memory *heap, + void *block); + +static inline +size_t heapmem_arena_size(const struct heap_memory *heap) +{ + return heap->arena_size; +} + +static inline +size_t heapmem_usable_size(const struct heap_memory *heap) +{ + return heap->usable_size; +} + +static inline +size_t heapmem_used_size(const struct heap_memory *heap) +{ + return heap->used_size; +} + +ssize_t heapmem_check(struct heap_memory *heap, + void *block); + +#ifdef __cplusplus +} +#endif + +#endif /* _BOILERPLATE_HEAPMEM_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/libc.h b/kernel/xenomai-v3.2.4/include/boilerplate/libc.h new file mode 100644 index 0000000..44ddad5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/libc.h @@ -0,0 +1,296 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _BOILERPLATE_LIBC_H +#define _BOILERPLATE_LIBC_H + +#include <limits.h> + +#ifdef __IN_XENO__ +/* + * Quirks for dealing with outdated libc* issues. This header will be + * parsed by the Xenomai implementation only, applications based on it + * have to provide their own set of wrappers as they should decide by + * themselves what to do when a feature is missing. + */ +#include <xeno_config.h> +#include <errno.h> +#include <boilerplate/compiler.h> + +#if !HAVE_DECL_PTHREAD_PRIO_NONE +enum { + PTHREAD_PRIO_NONE, + PTHREAD_PRIO_INHERIT, + PTHREAD_PRIO_PROTECT +}; +#endif /* !HAVE_DECL_PTHREAD_PRIO_NONE */ + +#ifndef HAVE_FORK +static inline int fork(void) +{ + errno = ENOSYS; + return -1; +} +#endif + +#ifndef HAVE_PTHREAD_ATFORK +#ifndef HAVE_FORK +static inline +int pthread_atfork(void (*prepare)(void), void (*parent)(void), + void (*child)(void)) +{ + return 0; +} +#else +#error "fork() without pthread_atfork()" +#endif +#endif /* !HAVE_PTHREAD_ATFORK */ + +#ifndef HAVE_PTHREAD_GETATTR_NP +static inline +int pthread_getattr_np(pthread_t th, pthread_attr_t *attr) +{ + return ENOSYS; +} +#endif /* !HAVE_PTHREAD_GETATTR_NP */ + +#ifndef HAVE_PTHREAD_CONDATTR_SETCLOCK +static inline +int pthread_condattr_setclock(pthread_condattr_t *__restrict__ attr, + clockid_t clock_id) +{ + return clock_id == CLOCK_REALTIME ? 0 : ENOSYS; +} +#endif /* !HAVE_PTHREAD_CONDATTR_SETCLOCK */ + +#ifndef HAVE_PTHREAD_CONDATTR_GETCLOCK +static inline +int pthread_condattr_getclock(const pthread_condattr_t *__restrict__ attr, + clockid_t *__restrict__ clock_id) +{ + *clock_id = CLOCK_REALTIME; + + return 0; +} +#endif /* !HAVE_PTHREAD_CONDATTR_GETCLOCK */ + +#ifndef HAVE_PTHREAD_MUTEXATTR_SETPROTOCOL +static inline +int pthread_mutexattr_setprotocol(pthread_mutexattr_t *__restrict__ attr, + int protocol) +{ + return protocol == PTHREAD_PRIO_NONE ? 
0 : ENOSYS; +} +#endif /* !HAVE_PTHREAD_MUTEXATTR_SETPROTOCOL */ + +#ifndef HAVE_PTHREAD_MUTEXATTR_GETPROTOCOL +static inline +int pthread_mutexattr_getprotocol(const pthread_mutexattr_t * + __restrict__ attr, int *__restrict__ protocol) +{ + *protocol = PTHREAD_PRIO_NONE; + + return 0; +} +#endif /* !HAVE_PTHREAD_MUTEXATTR_GETPROTOCOL */ + +#ifndef HAVE_PTHREAD_MUTEXATTR_SETPRIOCEILING +static inline +int pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, + int prioceiling) +{ + return ENOSYS; +} +#endif /* !HAVE_PTHREAD_MUTEXATTR_SETPRIOCEILING */ + +#ifndef HAVE_PTHREAD_MUTEXATTR_GETPRIOCEILING +static inline +int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t * + __restrict attr, + int *__restrict prioceiling) +{ + return ENOSYS; +} +#endif /* !HAVE_PTHREAD_MUTEXATTR_GETPRIOCEILING */ + +#ifndef HAVE_PTHREAD_MUTEX_SETPRIOCEILING +static inline +int pthread_mutex_setprioceiling(pthread_mutex_t *__restrict attr, + int prioceiling, + int *__restrict old_ceiling) +{ + return ENOSYS; +} +#endif /* !HAVE_PTHREAD_MUTEXATTR_SETPRIOCEILING */ + +#ifndef HAVE_PTHREAD_MUTEX_GETPRIOCEILING +static inline +int pthread_mutex_getprioceiling(pthread_mutex_t *__restrict attr, + int *__restrict prioceiling) +{ + return ENOSYS; +} +#endif /* !HAVE_PTHREAD_MUTEXATTR_GETPRIOCEILING */ + +#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP +#include <sched.h> +static inline +int pthread_attr_setaffinity_np(pthread_attr_t *attr, + size_t cpusetsize, const cpu_set_t *cpuset) +{ + if (CPU_ISSET(0, cpuset) && CPU_COUNT(cpuset) == 1) + return 0; + return ENOSYS; +} +#endif /* !HAVE_PTHREAD_ATTR_SETAFFINITY_NP */ + +#ifndef HAVE_PTHREAD_SETAFFINITY_NP +static inline +int pthread_setaffinity_np(pthread_t thread, size_t cpusetsize, + const cpu_set_t *cpuset) +{ + if (CPU_ISSET(0, cpuset) && CPU_COUNT(cpuset) == 1) + return 0; + return ENOSYS; +} +#endif /* !HAVE_PTHREAD_SETAFFINITY_NP */ + +#ifndef HAVE_PTHREAD_SETSCHEDPRIO + +static inline +int pthread_setschedprio(pthread_t thread, int prio) +{ + struct sched_param param; + int policy, ret; + + ret = pthread_getschedparam(thread, &policy, ¶m); + if (ret) + return ret; + + param.sched_priority = prio; + + return pthread_setschedparam(thread, policy, ¶m); +} + +#endif /* !HAVE_PTHREAD_SETSCHEDPRIO */ + +#if !defined(HAVE_CLOCK_NANOSLEEP) && defined(CONFIG_XENO_MERCURY) +/* + * Best effort for a Mercury setup based on an outdated libc lacking + * "advanced" real-time support. Too bad if the system clock is set + * during sleep time. This is a non-issue for Cobalt, as the libcobalt + * implementation will always be picked instead. + */ +__weak int clock_nanosleep(clockid_t clock_id, int flags, + const struct timespec *request, + struct timespec *remain) +{ + struct timespec now, tmp; + + tmp = *request; + if (flags) { + clock_gettime(CLOCK_REALTIME, &now); + tmp.tv_sec -= now.tv_sec; + tmp.tv_nsec -= now.tv_nsec; + if (tmp.tv_nsec < 0) { + tmp.tv_sec--; + tmp.tv_nsec += 1000000000; + } + } + + return nanosleep(&tmp, remain); +} +#endif /* !HAVE_CLOCK_NANOSLEEP && MERCURY */ + +#ifndef HAVE_SCHED_GETCPU +/* + * Might be declared in uClibc headers but not actually implemented, + * so we make the placeholder a weak symbol. + */ +__weak int sched_getcpu(void) +{ + return 0; /* outdated uClibc: assume uniprocessor. 
*/ +} +#endif /* !HAVE_SCHED_GETCPU */ + +#ifndef HAVE_SHM_OPEN +__weak int shm_open(const char *name, int oflag, mode_t mode) +{ + errno = ENOSYS; + return -1; +} +#endif /* !HAVE_SHM_OPEN */ + +#ifndef HAVE_SHM_UNLINK +__weak int shm_unlink(const char *name) +{ + errno = ENOSYS; + return -1; +} +#endif /* !HAVE_SHM_UNLINK */ + +#ifndef HAVE_PTHREAD_MUTEXATTR_SETROBUST +#ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP +#define pthread_mutexattr_setrobust pthread_mutexattr_setrobust_np +#else +static inline +int pthread_mutexattr_setrobust(pthread_mutexattr_t *attr, + int robustness) +{ + return ENOSYS; +} +#endif /* !HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP */ +#endif /* !HAVE_PTHREAD_MUTEXATTR_SETROBUST */ + +#if !defined(HAVE_PTHREAD_SETNAME_NP) && defined(CONFIG_XENO_MERCURY) +static inline +int pthread_setname_np(pthread_t thread, const char *name) +{ + return ENOSYS; +} +#endif /* !HAVE_PTHREAD_SETNAME_NP && MERCURY */ + +#endif /* __IN_XENO__ */ + +#if defined(__COBALT_WRAP__) || defined(__IN_XENO__) +/* + * clock_nanosleep() and pthread_setname_np() must be declared when the libc + * does not declare them, both for compiling xenomai, and for compiling + * applications wrapping these symbols to the libcobalt versions. + */ +#ifndef HAVE_CLOCK_NANOSLEEP +int clock_nanosleep(clockid_t clock_id, int flags, + const struct timespec *request, + struct timespec *remain); +#endif /* !HAVE_CLOCK_NANOSLEEP */ + +#ifndef HAVE_PTHREAD_SETNAME_NP +int pthread_setname_np(pthread_t thread, const char *name); +#endif /* !HAVE_PTHREAD_SETNAME_NP */ +#endif /* __COBALT_WRAP__ || __IN_XENO__ */ + +#ifndef PTHREAD_STACK_DEFAULT +#define PTHREAD_STACK_DEFAULT \ + ({ \ + int __ret = PTHREAD_STACK_MIN; \ + if (__ret < 65536) \ + __ret = 65536; \ + __ret; \ + }) +#endif /* !PTHREAD_STACK_DEFAULT */ + +#endif /* _BOILERPLATE_LIBC_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/list.h b/kernel/xenomai-v3.2.4/include/boilerplate/list.h new file mode 100644 index 0000000..97fbc12 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/list.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _BOILERPLATE_LIST_H +#define _BOILERPLATE_LIST_H + +#include <assert.h> +#include <boilerplate/scope.h> +#include <boilerplate/compiler.h> +#include <boilerplate/shared-list.h> +#include <boilerplate/private-list.h> + +/* + * WARNING: ALL list services are assumed to be free from any POSIX + * cancellation points by callers, allowing the *_nocancel() locking + * forms to be used (see boilerplate/lock.h). + * + * Please think of this when adding any debug instrumentation invoking + * printf() and the like. 
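+ *
+ * (Per POSIX, stdio calls such as printf() may themselves act as
+ * cancellation points, which would silently break the *_nocancel()
+ * assumption stated above.)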
+ */ + +#endif /* !_BOILERPLATE_LIST_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/lock.h b/kernel/xenomai-v3.2.4/include/boilerplate/lock.h new file mode 100644 index 0000000..df3469d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/lock.h @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _BOILERPLATE_LOCK_H +#define _BOILERPLATE_LOCK_H + +#include <pthread.h> +#include <boilerplate/wrappers.h> +#include <boilerplate/debug.h> + +/* + * CANCEL_DEFER/RESTORE() should enclose any emulator code prior to + * holding a lock, or invoking inner boilerplate/copperplate services + * (which usually do so), to change the system state. A proper cleanup + * handler should be pushed prior to acquiring such a lock. + * + * These macros ensure that the cancellation type is switched to + * deferred mode while the section is traversed, then restored to its + * original value upon exit. + * + * WARNING: inner services MAY ASSUME that cancellability is deferred + * for the caller, so you really want to define protected sections as + * required in the higher interface layers.
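+ *
+ * A minimal sketch of the intended pattern (the caller-side names
+ * "svc" and "obj" are hypothetical):
+ *
+ *	struct service svc;
+ *	CANCEL_DEFER(svc);
+ *	push_cleanup_lock(&obj->lock);
+ *	write_lock(&obj->lock);
+ *	... code which may cross a cancellation point ...
+ *	write_unlock(&obj->lock);
+ *	pop_cleanup_lock(&obj->lock);
+ *	CANCEL_RESTORE(svc);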
+ */ +struct service { + int cancel_type; +}; + +#ifdef CONFIG_XENO_ASYNC_CANCEL + +#define CANCEL_DEFER(__s) \ + do { \ + pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, \ + &(__s).cancel_type); \ + } while (0) + +#define CANCEL_RESTORE(__s) \ + do { \ + pthread_setcanceltype((__s).cancel_type, NULL); \ + backtrace_check(); \ + } while (0) + +#else /* !CONFIG_XENO_ASYNC_CANCEL */ + +#define CANCEL_DEFER(__s) do { (void)(__s); } while (0) + +#define CANCEL_RESTORE(__s) do { } while (0) + +#endif /* !CONFIG_XENO_ASYNC_CANCEL */ + +struct cleanup_block { + pthread_mutex_t *lock; + void (*handler)(void *arg); + void *arg; +}; + +#define __push_cleanup_args(__cb, __lock, __fn, __arg) \ + ((__cb)->lock = (__lock)), \ + ((__cb)->handler = (void (*)(void *))(__fn)), \ + ((__cb)->arg = (__arg)) + +#define push_cleanup_handler(__cb, __lock, __fn, __arg) \ + pthread_cleanup_push((void (*)(void *))__run_cleanup_block, \ + (__push_cleanup_args(__cb, __lock, __fn, __arg), (__cb))) + +#define pop_cleanup_handler(__cb) \ + pthread_cleanup_pop(0) + +#define push_cleanup_lock(__lock) \ + pthread_cleanup_push((void (*)(void *))__RT(pthread_mutex_unlock), (__lock)) + +#define pop_cleanup_lock(__lock) \ + pthread_cleanup_pop(0) + +#ifdef CONFIG_XENO_DEBUG +int __check_cancel_type(const char *locktype); +#else +#define __check_cancel_type(__locktype) \ + ({ (void)__locktype; 0; }) +#endif + +#define __do_lock(__lock, __op) \ + ({ \ + int __ret; \ + __ret = -__RT(pthread_mutex_##__op(__lock)); \ + __ret; \ + }) + +#define __do_lock_nocancel(__lock, __type, __op) \ + ({ \ + __bt(__check_cancel_type(#__op "_nocancel")); \ + __do_lock(__lock, __op); \ + }) + +#define __do_unlock(__lock) \ + ({ \ + int __ret; \ + __ret = -__RT(pthread_mutex_unlock(__lock)); \ + __ret; \ + }) +/* + * Macros to enter/leave critical sections within inner + * routines. Actually, they are mainly aimed at self-documenting the + * code, by specifying basic assumption(s) about the code being + * traversed. In effect, they are currently aliases to the standard + * pthread_mutex_* API, except for the _safe form. + * + * The _nocancel suffix indicates that no cancellation point is + * traversed by the protected code, therefore we don't need any + * cleanup handler since we are guaranteed to run in deferred cancel + * mode after CANCEL_DEFER(). A runtime check is inserted in + * debug mode, which triggers when cancellability is not in deferred + * mode while an attempt is made to acquire a _nocancel lock. + * + * read/write_lock() forms must be enclosed within the scope of a + * cleanup handler since the protected code may reach cancellation + * points. push_cleanup_lock() is a simple shorthand to push + * pthread_mutex_unlock as the cleanup handler. 
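+ *
+ * For instance (hypothetical caller), once cancellability has been
+ * switched to deferred mode by CANCEL_DEFER():
+ *
+ *	read_lock_nocancel(&q->lock);
+ *	... strictly no cancellation point in this section ...
+ *	read_unlock(&q->lock);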
+ */ +#define read_lock(__lock) \ + __do_lock(__lock, lock) + +#define read_trylock(__lock) \ + __do_lock(__lock, trylock) + +#define read_lock_nocancel(__lock) \ + __do_lock_nocancel(__lock, read_lock, lock) + +#define read_trylock_nocancel(__lock) \ + __do_lock_nocancel(__lock, read_trylock, trylock) + +#define read_unlock(__lock) \ + __do_unlock(__lock) + +#define write_lock(__lock) \ + __do_lock(__lock, lock) + +#define write_trylock(__lock) \ + __do_lock(__lock, trylock) + +#define write_lock_nocancel(__lock) \ + __do_lock_nocancel(__lock, write_lock, lock) + +#define write_trylock_nocancel(__lock) \ + __do_lock_nocancel(__lock, write_trylock, trylock) + +#define write_unlock(__lock) \ + __do_unlock(__lock) + +#define __do_lock_safe(__lock, __state, __op) \ + ({ \ + int __ret, __oldstate; \ + __bt(__check_cancel_type(#__op "_safe")); \ + pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &__oldstate); \ + __ret = -__RT(pthread_mutex_##__op(__lock)); \ + if (__ret) \ + pthread_setcancelstate(__oldstate, NULL); \ + __state = __oldstate; \ + __ret; \ + }) + +#define __do_unlock_safe(__lock, __state) \ + ({ \ + int __ret, __restored_state = __state; \ + __ret = -__RT(pthread_mutex_unlock(__lock)); \ + pthread_setcancelstate(__restored_state, NULL); \ + __ret; \ + }) + +/* + * The _safe call form is available when undoing the changes from an + * update section upon cancellation using a cleanup handler is not an + * option (e.g. too complex), or in situations where the protected + * code shall fully run; in such cases, cancellation is disabled + * throughout the section. + */ + +#define write_lock_safe(__lock, __state) \ + __do_lock_safe(__lock, __state, lock) + +#define write_trylock_safe(__lock, __state) \ + __do_lock_safe(__lock, __state, trylock) + +#define write_unlock_safe(__lock, __state) \ + __do_unlock_safe(__lock, __state) + +#define read_lock_safe(__lock, __state) \ + __do_lock_safe(__lock, __state, lock) + +#define read_unlock_safe(__lock, __state) \ + __do_unlock_safe(__lock, __state) + +#ifdef CONFIG_XENO_DEBUG +#define mutex_type_attribute PTHREAD_MUTEX_ERRORCHECK +#else +#define mutex_type_attribute PTHREAD_MUTEX_NORMAL +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +void __run_cleanup_block(struct cleanup_block *cb); + +#ifdef __cplusplus +} +#endif + +#endif /* _BOILERPLATE_LOCK_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/namegen.h b/kernel/xenomai-v3.2.4/include/boilerplate/namegen.h new file mode 100644 index 0000000..2395869 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/namegen.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _BOILERPLATE_NAMEGEN_H +#define _BOILERPLATE_NAMEGEN_H + +#include <boilerplate/atomic.h> + +struct name_generator { + const char *radix; + int length; + atomic_t serial; +}; + +#define DEFINE_NAME_GENERATOR(__name, __radix, __type, __member) \ + struct name_generator __name = { \ + .radix = __radix, \ + .length = sizeof ((__type *)0)->__member, \ + .serial = ATOMIC_INIT(0), \ + } + +#ifdef __cplusplus +extern "C" { +#endif + +char *generate_name(char *buf, const char *radix, + struct name_generator *ngen); + +#ifdef __cplusplus +} +#endif + +#endif /* _BOILERPLATE_NAMEGEN_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/obstack.h b/kernel/xenomai-v3.2.4/include/boilerplate/obstack.h new file mode 100644 index 0000000..95eb792 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/obstack.h @@ -0,0 +1,515 @@ +/* obstack.h - object stack macros + Copyright (C) 1988-1994,1996-1999,2003,2004,2005 + Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* Summary: + +All the apparent functions defined here are macros. The idea +is that you would use these pre-tested macros to solve a +very specific set of problems, and they would run fast. +Caution: no side-effects in arguments please!! They may be +evaluated MANY times!! + +These macros operate a stack of objects. Each object starts life +small, and may grow to maturity. (Consider building a word syllable +by syllable.) An object can move while it is growing. Once it has +been "finished" it never changes address again. So the "top of the +stack" is typically an immature growing object, while the rest of the +stack is of mature, fixed size and fixed address objects. + +These routines grab large chunks of memory, using a function you +supply, called `obstack_chunk_alloc'. On occasion, they free chunks, +by calling `obstack_chunk_free'. You must define them and declare +them before using any obstack macros. + +Each independent stack is represented by a `struct obstack'. +Each of the obstack macros expects a pointer to such a structure +as the first argument. + +One motivation for this package is the problem of growing char strings +in symbol tables. Unless you are "fascist pig with a read-only mind" +--Gosper's immortal quote from HAKMEM item 154, out of context--you +would not like to put any arbitrary upper limit on the length of your +symbols. + +In practice this often means you will build many short symbols and a +few long symbols. At the time you are reading a symbol you don't know +how long it is. One traditional method is to read a symbol into a +buffer, realloc()ating the buffer every time you try to read a symbol +that is longer than the buffer. 
This is beaut, but you still will +want to copy the symbol from the buffer to a more permanent +symbol-table entry say about half the time. + +With obstacks, you can work differently. Use one obstack for all symbol +names. As you read a symbol, grow the name in the obstack gradually. +When the name is complete, finalize it. Then, if the symbol exists already, +free the newly read name. + +The way we do this is to take a large chunk, allocating memory from +low addresses. When you want to build a symbol in the chunk you just +add chars above the current "high water mark" in the chunk. When you +have finished adding chars, because you got to the end of the symbol, +you know how long the chars are, and you can create a new object. +Mostly the chars will not burst over the highest address of the chunk, +because you would typically expect a chunk to be (say) 100 times as +long as an average object. + +In case that isn't clear, when we have enough chars to make up +the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed) +so we just point to it where it lies. No moving of chars is +needed and this is the second win: potentially long strings need +never be explicitly shuffled. Once an object is formed, it does not +change its address during its lifetime. + +When the chars burst over a chunk boundary, we allocate a larger +chunk, and then copy the partly formed object from the end of the old +chunk to the beginning of the new larger chunk. We then carry on +accreting characters to the end of the object as we normally would. + +A special macro is provided to add a single char at a time to a +growing object. This allows the use of register variables, which +break the ordinary 'growth' macro. + +Summary: + We allocate large chunks. + We carve out one object at a time from the current chunk. + Once carved, an object never moves. + We are free to append data of any size to the currently + growing object. + Exactly one object is growing in an obstack at any one time. + You can run one obstack per control block. + You may have as many control blocks as you dare. + Because of the way we do it, you can `unwind' an obstack + back to a previous state. (You may remove objects much + as you would with a stack.) +*/ + + +/* Don't do the contents of this file more than once. */ + +#ifndef _BOILERPLATE_OBSTACK_H +#define _BOILERPLATE_OBSTACK_H 1 + +#ifdef HAVE_OBSTACK_H +#include_next <obstack.h> +#else + +#ifdef __cplusplus +extern "C" { +#endif + +/* We need the type of a pointer subtraction. If __PTRDIFF_TYPE__ is + defined, as with GNU C, use that; that way we don't pollute the + namespace with <stddef.h>'s symbols. Otherwise, include <stddef.h> + and use ptrdiff_t. */ + +#ifdef __PTRDIFF_TYPE__ +# define PTR_INT_TYPE __PTRDIFF_TYPE__ +#else +# include <stddef.h> +# define PTR_INT_TYPE ptrdiff_t +#endif + +/* If B is the base of an object addressed by P, return the result of + aligning P to the next multiple of A + 1. B and P must be of type + char *. A + 1 must be a power of 2. */ + +#define __BPTR_ALIGN(B, P, A) ((B) + (((P) - (B) + (A)) & ~(A))) + +/* Similiar to _BPTR_ALIGN (B, P, A), except optimize the common case + where pointers can be converted to integers, aligned as integers, + and converted back again. If PTR_INT_TYPE is narrower than a + pointer (e.g., the AS/400), play it safe and compute the alignment + relative to B. Otherwise, use the faster strategy of computing the + alignment relative to 0. 
*/ + +#define __PTR_ALIGN(B, P, A) \ + __BPTR_ALIGN (sizeof (PTR_INT_TYPE) < sizeof (void *) ? (B) : (char *) 0, \ + P, A) + +#include <string.h> + +struct _obstack_chunk /* Lives at front of each chunk. */ +{ + char *limit; /* 1 past end of this chunk */ + struct _obstack_chunk *prev; /* address of prior chunk or NULL */ + char contents[4]; /* objects begin here */ +}; + +struct obstack /* control current object in current chunk */ +{ + long chunk_size; /* preferred size to allocate chunks in */ + struct _obstack_chunk *chunk; /* address of current struct obstack_chunk */ + char *object_base; /* address of object we are building */ + char *next_free; /* where to add next char to current object */ + char *chunk_limit; /* address of char after current chunk */ + union + { + PTR_INT_TYPE tempint; + void *tempptr; + } temp; /* Temporary for some macros. */ + int alignment_mask; /* Mask of alignment for each object. */ + /* These prototypes vary based on `use_extra_arg', and we use + casts to the prototypeless function type in all assignments, + but having prototypes here quiets -Wstrict-prototypes. */ + struct _obstack_chunk *(*chunkfun) (void *, long); + void (*freefun) (void *, struct _obstack_chunk *); + void *extra_arg; /* first arg for chunk alloc/dealloc funcs */ + unsigned use_extra_arg:1; /* chunk alloc/dealloc funcs take extra arg */ + unsigned maybe_empty_object:1;/* There is a possibility that the current + chunk contains a zero-length object. This + prevents freeing the chunk if we allocate + a bigger chunk to replace it. */ + unsigned alloc_failed:1; /* No longer used, as we now call the failed + handler on error, but retained for binary + compatibility. */ +}; + +/* Declare the external functions we use; they are in obstack.c. */ + +extern void _obstack_newchunk (struct obstack *, int); +extern int _obstack_begin (struct obstack *, int, int, + void *(*) (long), void (*) (void *)); +extern int _obstack_begin_1 (struct obstack *, int, int, + void *(*) (void *, long), + void (*) (void *, void *), void *); +extern int _obstack_memory_used (struct obstack *); + +void obstack_free (struct obstack *obstack, void *block); + + +/* Error handler called when `obstack_chunk_alloc' failed to allocate + more memory. This can be set to a user defined function which + should either abort gracefully or use longjump - but shouldn't + return. The default action is to print a message and abort. */ +extern void (*obstack_alloc_failed_handler) (void); + +/* Exit value used when `print_and_abort' is used. */ +extern int obstack_exit_failure; + +/* Pointer to beginning of object being allocated or to be allocated next. + Note that this might not be the final address of the object + because a new chunk might be needed to hold the final size. */ + +#define obstack_base(h) ((void *) (h)->object_base) + +/* Size for allocating ordinary chunks. */ + +#define obstack_chunk_size(h) ((h)->chunk_size) + +/* Pointer to next byte not yet allocated in current chunk. */ + +#define obstack_next_free(h) ((h)->next_free) + +/* Mask specifying low bits that should be clear in address of an object. */ + +#define obstack_alignment_mask(h) ((h)->alignment_mask) + +/* To prevent prototype warnings provide complete argument list. 
*/ +#define obstack_init(h) \ + _obstack_begin ((h), 0, 0, \ + (void *(*) (long)) obstack_chunk_alloc, \ + (void (*) (void *)) obstack_chunk_free) + +#define obstack_begin(h, size) \ + _obstack_begin ((h), (size), 0, \ + (void *(*) (long)) obstack_chunk_alloc, \ + (void (*) (void *)) obstack_chunk_free) + +#define obstack_specify_allocation(h, size, alignment, chunkfun, freefun) \ + _obstack_begin ((h), (size), (alignment), \ + (void *(*) (long)) (chunkfun), \ + (void (*) (void *)) (freefun)) + +#define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \ + _obstack_begin_1 ((h), (size), (alignment), \ + (void *(*) (void *, long)) (chunkfun), \ + (void (*) (void *, void *)) (freefun), (arg)) + +#define obstack_chunkfun(h, newchunkfun) \ + ((h) -> chunkfun = (struct _obstack_chunk *(*)(void *, long)) (newchunkfun)) + +#define obstack_freefun(h, newfreefun) \ + ((h) -> freefun = (void (*)(void *, struct _obstack_chunk *)) (newfreefun)) + +#define obstack_1grow_fast(h,achar) (*((h)->next_free)++ = (achar)) + +#define obstack_blank_fast(h,n) ((h)->next_free += (n)) + +#define obstack_memory_used(h) _obstack_memory_used (h) + +#if defined __GNUC__ && defined __STDC__ && __STDC__ +/* NextStep 2.0 cc is really gcc 1.93 but it defines __GNUC__ = 2 and + does not implement __extension__. But that compiler doesn't define + __GNUC_MINOR__. */ +# if __GNUC__ < 2 || (__NeXT__ && !__GNUC_MINOR__) +# define __extension__ +# endif + +/* For GNU C, if not -traditional, + we can define these macros to compute all args only once + without using a global variable. + Also, we can avoid using the `temp' slot, to make faster code. */ + +# define obstack_object_size(OBSTACK) \ + __extension__ \ + ({ struct obstack const *__o = (OBSTACK); \ + (unsigned) (__o->next_free - __o->object_base); }) + +# define obstack_room(OBSTACK) \ + __extension__ \ + ({ struct obstack const *__o = (OBSTACK); \ + (unsigned) (__o->chunk_limit - __o->next_free); }) + +# define obstack_make_room(OBSTACK,length) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + int __len = (length); \ + if (__o->chunk_limit - __o->next_free < __len) \ + _obstack_newchunk (__o, __len); \ + (void) 0; }) + +# define obstack_empty_p(OBSTACK) \ + __extension__ \ + ({ struct obstack const *__o = (OBSTACK); \ + (__o->chunk->prev == 0 \ + && __o->next_free == __PTR_ALIGN ((char *) __o->chunk, \ + __o->chunk->contents, \ + __o->alignment_mask)); }) + +# define obstack_grow(OBSTACK,where,length) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + int __len = (length); \ + if (__o->next_free + __len > __o->chunk_limit) \ + _obstack_newchunk (__o, __len); \ + memcpy (__o->next_free, where, __len); \ + __o->next_free += __len; \ + (void) 0; }) + +# define obstack_grow0(OBSTACK,where,length) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + int __len = (length); \ + if (__o->next_free + __len + 1 > __o->chunk_limit) \ + _obstack_newchunk (__o, __len + 1); \ + memcpy (__o->next_free, where, __len); \ + __o->next_free += __len; \ + *(__o->next_free)++ = 0; \ + (void) 0; }) + +# define obstack_1grow(OBSTACK,datum) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + if (__o->next_free + 1 > __o->chunk_limit) \ + _obstack_newchunk (__o, 1); \ + obstack_1grow_fast (__o, datum); \ + (void) 0; }) + +/* These assume that the obstack alignment is good enough for pointers + or ints, and that the data added so far to the current object + shares that much alignment. 
*/ + +# define obstack_ptr_grow(OBSTACK,datum) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + if (__o->next_free + sizeof (void *) > __o->chunk_limit) \ + _obstack_newchunk (__o, sizeof (void *)); \ + obstack_ptr_grow_fast (__o, datum); }) \ + +# define obstack_int_grow(OBSTACK,datum) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + if (__o->next_free + sizeof (int) > __o->chunk_limit) \ + _obstack_newchunk (__o, sizeof (int)); \ + obstack_int_grow_fast (__o, datum); }) + +# define obstack_ptr_grow_fast(OBSTACK,aptr) \ +__extension__ \ +({ struct obstack *__o1 = (OBSTACK); \ + *(const void **) __o1->next_free = (aptr); \ + __o1->next_free += sizeof (const void *); \ + (void) 0; }) + +# define obstack_int_grow_fast(OBSTACK,aint) \ +__extension__ \ +({ struct obstack *__o1 = (OBSTACK); \ + *(int *) __o1->next_free = (aint); \ + __o1->next_free += sizeof (int); \ + (void) 0; }) + +# define obstack_blank(OBSTACK,length) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + int __len = (length); \ + if (__o->chunk_limit - __o->next_free < __len) \ + _obstack_newchunk (__o, __len); \ + obstack_blank_fast (__o, __len); \ + (void) 0; }) + +# define obstack_alloc(OBSTACK,length) \ +__extension__ \ +({ struct obstack *__h = (OBSTACK); \ + obstack_blank (__h, (length)); \ + obstack_finish (__h); }) + +# define obstack_copy(OBSTACK,where,length) \ +__extension__ \ +({ struct obstack *__h = (OBSTACK); \ + obstack_grow (__h, (where), (length)); \ + obstack_finish (__h); }) + +# define obstack_copy0(OBSTACK,where,length) \ +__extension__ \ +({ struct obstack *__h = (OBSTACK); \ + obstack_grow0 (__h, (where), (length)); \ + obstack_finish (__h); }) + +/* The local variable is named __o1 to avoid a name conflict + when obstack_blank is called. */ +# define obstack_finish(OBSTACK) \ +__extension__ \ +({ struct obstack *__o1 = (OBSTACK); \ + void *__value = (void *) __o1->object_base; \ + if (__o1->next_free == __value) \ + __o1->maybe_empty_object = 1; \ + __o1->next_free \ + = __PTR_ALIGN (__o1->object_base, __o1->next_free, \ + __o1->alignment_mask); \ + if (__o1->next_free - (char *)__o1->chunk \ + > __o1->chunk_limit - (char *)__o1->chunk) \ + __o1->next_free = __o1->chunk_limit; \ + __o1->object_base = __o1->next_free; \ + __value; }) + +# define obstack_free(OBSTACK, OBJ) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + void *__obj = (OBJ); \ + if (__obj > (void *)__o->chunk && __obj < (void *)__o->chunk_limit) \ + __o->next_free = __o->object_base = (char *)__obj; \ + else (obstack_free) (__o, __obj); }) + +#else /* not __GNUC__ or not __STDC__ */ + +# define obstack_object_size(h) \ + (unsigned) ((h)->next_free - (h)->object_base) + +# define obstack_room(h) \ + (unsigned) ((h)->chunk_limit - (h)->next_free) + +# define obstack_empty_p(h) \ + ((h)->chunk->prev == 0 \ + && (h)->next_free == __PTR_ALIGN ((char *) (h)->chunk, \ + (h)->chunk->contents, \ + (h)->alignment_mask)) + +/* Note that the call to _obstack_newchunk is enclosed in (..., 0) + so that we can avoid having void expressions + in the arms of the conditional expression. + Casting the third operand to void was tried before, + but some compilers won't accept it. */ + +# define obstack_make_room(h,length) \ +( (h)->temp.tempint = (length), \ + (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0)) + +# define obstack_grow(h,where,length) \ +( (h)->temp.tempint = (length), \ + (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit) \ + ? 
(_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0), \ + memcpy ((h)->next_free, where, (h)->temp.tempint), \ + (h)->next_free += (h)->temp.tempint) + +# define obstack_grow0(h,where,length) \ +( (h)->temp.tempint = (length), \ + (((h)->next_free + (h)->temp.tempint + 1 > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), (h)->temp.tempint + 1), 0) : 0), \ + memcpy ((h)->next_free, where, (h)->temp.tempint), \ + (h)->next_free += (h)->temp.tempint, \ + *((h)->next_free)++ = 0) + +# define obstack_1grow(h,datum) \ +( (((h)->next_free + 1 > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), 1), 0) : 0), \ + obstack_1grow_fast (h, datum)) + +# define obstack_ptr_grow(h,datum) \ +( (((h)->next_free + sizeof (char *) > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0), \ + obstack_ptr_grow_fast (h, datum)) + +# define obstack_int_grow(h,datum) \ +( (((h)->next_free + sizeof (int) > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), sizeof (int)), 0) : 0), \ + obstack_int_grow_fast (h, datum)) + +# define obstack_ptr_grow_fast(h,aptr) \ + (((const void **) ((h)->next_free += sizeof (void *)))[-1] = (aptr)) + +# define obstack_int_grow_fast(h,aint) \ + (((int *) ((h)->next_free += sizeof (int)))[-1] = (aint)) + +# define obstack_blank(h,length) \ +( (h)->temp.tempint = (length), \ + (((h)->chunk_limit - (h)->next_free < (h)->temp.tempint) \ + ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0), \ + obstack_blank_fast (h, (h)->temp.tempint)) + +# define obstack_alloc(h,length) \ + (obstack_blank ((h), (length)), obstack_finish ((h))) + +# define obstack_copy(h,where,length) \ + (obstack_grow ((h), (where), (length)), obstack_finish ((h))) + +# define obstack_copy0(h,where,length) \ + (obstack_grow0 ((h), (where), (length)), obstack_finish ((h))) + +# define obstack_finish(h) \ +( ((h)->next_free == (h)->object_base \ + ? (((h)->maybe_empty_object = 1), 0) \ + : 0), \ + (h)->temp.tempptr = (h)->object_base, \ + (h)->next_free \ + = __PTR_ALIGN ((h)->object_base, (h)->next_free, \ + (h)->alignment_mask), \ + (((h)->next_free - (char *) (h)->chunk \ + > (h)->chunk_limit - (char *) (h)->chunk) \ + ? ((h)->next_free = (h)->chunk_limit) : 0), \ + (h)->object_base = (h)->next_free, \ + (h)->temp.tempptr) + +# define obstack_free(h,obj) \ +( (h)->temp.tempint = (char *) (obj) - (char *) (h)->chunk, \ + ((((h)->temp.tempint > 0 \ + && (h)->temp.tempint < (h)->chunk_limit - (char *) (h)->chunk)) \ + ? (int) ((h)->next_free = (h)->object_base \ + = (h)->temp.tempint + (char *) (h)->chunk) \ + : (((obstack_free) ((h), (h)->temp.tempint + (char *) (h)->chunk), 0), 0))) + +#endif /* not __GNUC__ or not __STDC__ */ + +#ifdef __cplusplus +} /* C++ */ +#endif + +#endif /* !HAVE_OBSTACK_H */ + +#endif /* _BOILERPLATE_OBSTACK_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/private-list.h b/kernel/xenomai-v3.2.4/include/boilerplate/private-list.h new file mode 100644 index 0000000..72f1e4c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/private-list.h @@ -0,0 +1,217 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _BOILERPLATE_PRIVATE_LIST_H +#define _BOILERPLATE_PRIVATE_LIST_H + +#ifndef _BOILERPLATE_LIST_H +#error "Do not include this file directly. Use <boilerplate/list.h> instead." +#endif + +struct pvholder { + struct pvholder *next; + struct pvholder *prev; +}; + +struct pvlistobj { + struct pvholder head; +}; + +#define PRIVATE_LIST_INITIALIZER(__name) \ + { .head = { .next = &((__name).head), .prev = &((__name).head) } } + +#define DEFINE_PRIVATE_LIST(__name) \ + struct pvlistobj __name = PRIVATE_LIST_INITIALIZER(__name) + +static inline void initpvh(struct pvholder *holder) +{ + holder->next = holder; + holder->prev = holder; +} + +static inline void atpvh(struct pvholder *head, struct pvholder *holder) +{ + /* Inserts the new element right after the heading one. */ + holder->prev = head; + holder->next = head->next; + holder->next->prev = holder; + head->next = holder; +} + +static inline void dtpvh(struct pvholder *holder) +{ + holder->prev->next = holder->next; + holder->next->prev = holder->prev; +} + +static inline void pvlist_init(struct pvlistobj *list) +{ + initpvh(&list->head); +} + +static inline void pvholder_init(struct pvholder *holder) +{ + initpvh(holder); +} + +/* + * XXX: pvholder_init() is mandatory if you later want to use this + * predicate. + */ +static inline int pvholder_linked(const struct pvholder *holder) +{ + return !(holder->prev == holder->next && + holder->prev == holder); +} + +static inline void pvlist_prepend(struct pvholder *holder, struct pvlistobj *list) +{ + atpvh(&list->head, holder); +} + +static inline void pvlist_append(struct pvholder *holder, struct pvlistobj *list) +{ + atpvh(list->head.prev, holder); +} + +static inline void pvlist_insert(struct pvholder *next, struct pvholder *prev) +{ + atpvh(prev, next); +} + +static inline void pvlist_join(struct pvlistobj *lsrc, struct pvlistobj *ldst) +{ + struct pvholder *headsrc = lsrc->head.next; + struct pvholder *tailsrc = lsrc->head.prev; + struct pvholder *headdst = &ldst->head; + + headsrc->prev->next = tailsrc->next; + tailsrc->next->prev = headsrc->prev; + headsrc->prev = headdst; + tailsrc->next = headdst->next; + headdst->next->prev = tailsrc; + headdst->next = headsrc; +} + +static inline void pvlist_remove(struct pvholder *holder) +{ + dtpvh(holder); +} + +static inline void pvlist_remove_init(struct pvholder *holder) +{ + dtpvh(holder); + initpvh(holder); +} + +static inline int pvlist_empty(const struct pvlistobj *list) +{ + return list->head.next == &list->head; +} + +static inline struct pvholder *pvlist_pop(struct pvlistobj *list) +{ + struct pvholder *holder = list->head.next; + pvlist_remove(holder); + return holder; +} + +static inline int pvlist_heading_p(const struct pvholder *holder, + const struct pvlistobj *list) +{ + return list->head.next == holder; +} + +#define pvlist_entry(ptr, type, member) \ + container_of(ptr, type, member) + +#define pvlist_first_entry(list, type, member) \ + pvlist_entry((list)->head.next, type, member) + +#define pvlist_last_entry(list, type, member) \ + 
pvlist_entry((list)->head.prev, type, member) + +#define pvlist_prev_entry(pos, list, member) \ + ({ \ + typeof(*pos) *__prev = NULL; \ + if ((list)->head.next != &(pos)->member) \ + __prev = pvlist_entry((pos)->member.prev, \ + typeof(*pos), member); \ + __prev; \ + }) + +#define pvlist_next_entry(pos, list, member) \ + ({ \ + typeof(*pos) *__next = NULL; \ + if ((list)->head.prev != &(pos)->member) \ + __next = pvlist_entry((pos)->member.next, \ + typeof(*pos), member); \ + __next; \ + }) + +#define pvlist_pop_entry(list, type, member) ({ \ + struct pvholder *__holder = pvlist_pop(list); \ + pvlist_entry(__holder, type, member); }) + +#define pvlist_for_each(pos, list) \ + for (pos = (list)->head.next; \ + pos != &(list)->head; pos = (pos)->next) + +#define pvlist_for_each_reverse(pos, list) \ + for (pos = (list)->head.prev; \ + pos != &(list)->head; pos = (pos)->prev) + +#define pvlist_for_each_safe(pos, tmp, list) \ + for (pos = (list)->head.next, \ + tmp = (pos)->next; \ + pos != &(list)->head; \ + pos = tmp, tmp = (pos)->next) + +#define pvlist_for_each_entry(pos, list, member) \ + for (pos = pvlist_entry((list)->head.next, \ + typeof(*pos), member); \ + &(pos)->member != &(list)->head; \ + pos = pvlist_entry((pos)->member.next, \ + typeof(*pos), member)) + +#define pvlist_for_each_entry_safe(pos, tmp, list, member) \ + for (pos = pvlist_entry((list)->head.next, \ + typeof(*pos), member), \ + tmp = pvlist_entry((pos)->member.next, \ + typeof(*pos), member); \ + &(pos)->member != &(list)->head; \ + pos = tmp, tmp = pvlist_entry((pos)->member.next, \ + typeof(*pos), member)) + +#define pvlist_for_each_entry_reverse(pos, list, member) \ + for (pos = pvlist_entry((list)->head.prev, \ + typeof(*pos), member); \ + &pos->member != &(list)->head; \ + pos = pvlist_entry(pos->member.prev, \ + typeof(*pos), member)) + +#define pvlist_for_each_entry_reverse_safe(pos, tmp, list, member) \ + for (pos = pvlist_entry((list)->head.prev, \ + typeof(*pos), member), \ + tmp = pvlist_entry((pos)->member.prev, \ + typeof(*pos), member); \ + &(pos)->member != &(list)->head; \ + pos = tmp, tmp = pvlist_entry((pos)->member.prev, \ + typeof(*pos), member)) + +#endif /* !_BOILERPLATE_PRIVATE_LIST_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/scope.h b/kernel/xenomai-v3.2.4/include/boilerplate/scope.h new file mode 100644 index 0000000..ded6a2c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/scope.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _BOILERPLATE_SCOPE_H +#define _BOILERPLATE_SCOPE_H + +#include <sys/types.h> +#include <stddef.h> +#include <stdint.h> +#include <xeno_config.h> + +typedef uintptr_t memoff_t; + +#ifdef CONFIG_XENO_PSHARED + +extern void *__main_heap; + +int pshared_check(void *heap, void *addr); + +#define dref_type(t) memoff_t + +#define __memoff(__base, __addr) ((memoff_t)((caddr_t)(__addr) - (caddr_t)(__base))) +#define __memptr(__base, __off) ((void *)((caddr_t)(__base) + (__off))) +#define __memchk(__base, __addr) pshared_check(__base, __addr) + +#define __moff(__p) __memoff(__main_heap, __p) +#define __moff_nullable(__p) (__p ? __memoff(__main_heap, __p) : 0) +#define __mptr(__off) __memptr(__main_heap, __off) +#define __mptr_nullable(__off) (__off ? __memptr(__main_heap, __off) : NULL) +#define __mchk(__p) __memchk(__main_heap, __p) + +#define mutex_scope_attribute PTHREAD_PROCESS_SHARED +#define sem_scope_attribute 1 +#ifdef CONFIG_XENO_COBALT +#define monitor_scope_attribute COBALT_MONITOR_SHARED +#define event_scope_attribute COBALT_EVENT_SHARED +#endif + +#else /* !CONFIG_XENO_PSHARED */ + +#define __main_heap NULL + +#define dref_type(t) __typeof__(t) + +#define __memoff(__base, __addr) (__addr) +#define __memptr(__base, __off) (__off) +#define __memchk(__base, __addr) 1 + +#define __moff(__p) (__p) +#define __moff_nullable(__p) (__p) +#define __mptr(__off) (__off) +#define __mptr_nullable(__off) (__off) +#define __mchk(__p) 1 + +#define mutex_scope_attribute PTHREAD_PROCESS_PRIVATE +#define sem_scope_attribute 0 +#ifdef CONFIG_XENO_COBALT +#define monitor_scope_attribute 0 +#define event_scope_attribute 0 +#endif + +#endif /* !CONFIG_XENO_PSHARED */ + +#endif /* _BOILERPLATE_SCOPE_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/setup.h b/kernel/xenomai-v3.2.4/include/boilerplate/setup.h new file mode 100644 index 0000000..7df3cfe --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/setup.h @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _BOILERPLATE_SETUP_H +#define _BOILERPLATE_SETUP_H + +#include <boilerplate/list.h> +#include <boilerplate/wrappers.h> +#include <string.h> +#include <sched.h> + +struct base_setup_data { + cpu_set_t cpu_affinity; + int no_mlock; + int no_sanity; + int verbosity_level; + int trace_level; + const char *arg0; +}; + +struct option; + +struct setup_descriptor { + const char *name; + int (*tune)(void); + int (*parse_option)(int optnum, const char *optarg); + void (*help)(void); + int (*init)(void); + const struct option *options; + struct { + int id; + int opt_start; + int opt_end; + struct pvholder next; + int done; + } __reserved; +}; + +/* + * We have three pre-defined constructor priorities: + * + * - One for setup calls (__setup_ctor), which are guaranteed to run + * prior to the bootstrap code. You should use setup calls for + * implementing initialization hooks which depend on a particular call + * order. Each Xenomai interface layer is initialized via a dedicated + * setup call. + * + * - A second priority is assigned to early init calls (__early_ctor), + * which are also guaranteed to run prior to the bootstrap + * code. Whether such early code runs before OR after any setup code + * is __UNSPECIFIED__. By design, such code may not invoke any Xenomai + * service, and generally speaking, should have no dependencies on + * anything else. + * + * - The last priority level is used for the bootstrap code + * (__bootstrap_ctor), which is guaranteed to run after any + * setup/early code, provided such bootstrap code is part of the main + * executable. + * + * The guarantees on the init order don't go beyond what is stated + * here, do NOT assume more than this. + */ +#define __setup_ctor __attribute__ ((constructor(200))) +#define __early_ctor __attribute__ ((constructor(210))) +#define __bootstrap_ctor __attribute__ ((constructor(220))) + +#define __setup_call(__name, __id) \ +static __setup_ctor void __declare_ ## __name(void) \ +{ \ + __register_setup_call(&(__name), __id); \ +} + +#define core_setup_call(__name) __setup_call(__name, 0) +#define boilerplate_setup_call(__name) __setup_call(__name, 1) +#define copperplate_setup_call(__name) __setup_call(__name, 2) +#define interface_setup_call(__name) __setup_call(__name, 3) +#define post_setup_call(__name) __setup_call(__name, 4) +#define user_setup_call(__name) __setup_call(__name, 5) + +#ifdef __cplusplus +extern "C" { +#endif + +void __register_setup_call(struct setup_descriptor *p, int id); + +extern pid_t __node_id; + +extern int __config_done; + +extern struct base_setup_data __base_setup_data; + +const char *get_program_name(void); + +void __trace_me(const char *fmt, ...); + +#define trace_me(__fmt, __args...) \ + do { \ + if (__base_setup_data.trace_level > 0) \ + __trace_me(__fmt, ##__args); \ + } while (0) + +#ifdef __cplusplus +} +#endif + +#endif /* !_BOILERPLATE_SETUP_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/shared-list.h b/kernel/xenomai-v3.2.4/include/boilerplate/shared-list.h new file mode 100644 index 0000000..741dfc0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/shared-list.h @@ -0,0 +1,346 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _BOILERPLATE_SHARED_LIST_H +#define _BOILERPLATE_SHARED_LIST_H + +#ifndef _BOILERPLATE_LIST_H +#error "Do not include this file directly. Use <boilerplate/list.h> instead." +#endif + +#define __hoff(h, a) __memoff(h, a) +#define __hptr(h, v) ((struct holder *)__memptr(h, v)) +#define __hchk(h, a) __memchk(h, a) + +struct holder { + dref_type(struct holder *) next; + dref_type(struct holder *) prev; +}; + +struct listobj { + struct holder head; +}; + +static inline void __inith_nocheck(void *heap, struct holder *holder) +{ + holder->next = __hoff(heap, holder); + holder->prev = __hoff(heap, holder); +} + +static inline void __inith(void *heap, struct holder *holder) +{ + assert(__hchk(heap, holder)); + __inith_nocheck(heap, holder); +} + +static inline void inith(struct holder *holder) +{ + __inith(__main_heap, holder); +} + +static inline void __ath(void *heap, struct holder *head, + struct holder *holder) +{ + /* Inserts the new element right after the heading one. */ + holder->prev = __hoff(heap, head); + holder->next = head->next; + __hptr(heap, holder->next)->prev = __hoff(heap, holder); + head->next = __hoff(heap, holder); +} + +static inline void ath(struct holder *head, struct holder *holder) +{ + __ath(__main_heap, head, holder); +} + +static inline void __dth(void *heap, struct holder *holder) +{ + __hptr(heap, holder->prev)->next = holder->next; + __hptr(heap, holder->next)->prev = holder->prev; +} + +static inline void dth(struct holder *holder) +{ + __dth(__main_heap, holder); +} + +static inline void __list_init(void *heap, struct listobj *list) +{ + __inith(heap, &list->head); +} + +static inline void __list_init_nocheck(void *heap, struct listobj *list) +{ + __inith_nocheck(heap, &list->head); +} + +static inline void list_init(struct listobj *list) +{ + __list_init(__main_heap, list); +} + +static inline void __holder_init(void *heap, struct holder *holder) +{ + __inith(heap, holder); +} + +static inline void __holder_init_nocheck(void *heap, struct holder *holder) +{ + __inith_nocheck(heap, holder); +} + +static inline void holder_init(struct holder *holder) +{ + inith(holder); +} + +static inline int __holder_linked(void *heap, const struct holder *holder) +{ + return !(holder->prev == holder->next && + holder->prev == __hoff(heap, holder)); +} + +/* + * XXX: holder_init() is mandatory if you later want to use this + * predicate. 
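+ *
+ * Rationale: the predicate below merely checks for the self-pointing
+ * next/prev offsets which holder_init() installs; applying it to a
+ * holder that was never initialized would read indeterminate fields.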
+ */ +static inline int holder_linked(const struct holder *holder) +{ + return __holder_linked(__main_heap, holder); +} + +static inline void __list_prepend(void *heap, struct holder *holder, + struct listobj *list) +{ + __ath(heap, &list->head, holder); +} + +static inline void list_prepend(struct holder *holder, struct listobj *list) +{ + __list_prepend(__main_heap, holder, list); +} + +static inline void __list_append(void *heap, struct holder *holder, + struct listobj *list) +{ + __ath(heap, __hptr(heap, list->head.prev), holder); +} + +static inline void list_append(struct holder *holder, struct listobj *list) +{ + __list_append(__main_heap, holder, list); +} + +static inline void __list_insert(void *heap, struct holder *next, struct holder *prev) +{ + __ath(heap, prev, next); +} + +static inline void list_insert(struct holder *next, struct holder *prev) +{ + __list_insert(__main_heap, next, prev); +} + +static inline void __list_join(void *heap, struct listobj *lsrc, + struct listobj *ldst) +{ + struct holder *headsrc = __hptr(heap, lsrc->head.next); + struct holder *tailsrc = __hptr(heap, lsrc->head.prev); + struct holder *headdst = &ldst->head; + + __hptr(heap, headsrc->prev)->next = tailsrc->next; + __hptr(heap, tailsrc->next)->prev = headsrc->prev; + headsrc->prev = __hoff(heap, headdst); + tailsrc->next = headdst->next; + __hptr(heap, headdst->next)->prev = __hoff(heap, tailsrc); + headdst->next = __hoff(heap, headsrc); +} + +static inline void list_join(struct listobj *lsrc, struct listobj *ldst) +{ + __list_join(__main_heap, lsrc, ldst); +} + +static inline void __list_remove(void *heap, struct holder *holder) +{ + __dth(heap, holder); +} + +static inline void list_remove(struct holder *holder) +{ + __list_remove(__main_heap, holder); +} + +static inline void __list_remove_init(void *heap, struct holder *holder) +{ + __dth(heap, holder); + __inith(heap, holder); +} + +static inline void list_remove_init(struct holder *holder) +{ + __list_remove_init(__main_heap, holder); +} + +static inline int __list_empty(void *heap, const struct listobj *list) +{ + return list->head.next == __hoff(heap, &list->head); +} + +static inline int list_empty(const struct listobj *list) +{ + return __list_empty(__main_heap, list); +} + +static inline struct holder *__list_pop(void *heap, struct listobj *list) +{ + struct holder *holder = __hptr(heap, list->head.next); + __list_remove(heap, holder); + return holder; +} + +static inline struct holder *list_pop(struct listobj *list) +{ + return __list_pop(__main_heap, list); +} + +static inline int __list_heading_p(void *heap, const struct holder *holder, + const struct listobj *list) +{ + return list->head.next == __hoff(heap, holder); +} + +static inline int list_heading_p(const struct holder *holder, + const struct listobj *list) +{ + return __list_heading_p(__main_heap, holder, list); +} + +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +#define __list_first_entry(heap, list, type, member) \ + list_entry(__hptr((heap), (list)->head.next), type, member) + +#define list_first_entry(list, type, member) \ + __list_first_entry(__main_heap, list, type, member) + +#define __list_last_entry(heap, list, type, member) \ + list_entry(__hptr((heap), (list)->head.prev), type, member) + +#define list_last_entry(list, type, member) \ + __list_last_entry(__main_heap, list, type, member) + +#define __list_prev_entry(heap, pos, list, member) \ + ({ \ + typeof(*pos) *__prev = NULL; \ + if ((list)->head.next != __hoff(heap, 
&(pos)->member)) \ + __prev = list_entry(__hptr((heap), \ + (pos)->member.prev), typeof(*pos), member); \ + __prev; \ + }) + +#define list_prev_entry(pos, list, member) \ + __list_prev_entry(__main_heap, pos, list, member) + +#define __list_next_entry(heap, pos, list, member) \ + ({ \ + typeof(*pos) *__next = NULL; \ + if ((list)->head.prev != __hoff(heap, &(pos)->member)) \ + __next = list_entry(__hptr((heap), \ + (pos)->member.next), typeof(*pos), member); \ + __next; \ + }) + +#define list_next_entry(pos, list, member) \ + __list_next_entry(__main_heap, pos, list, member) + +#define __list_pop_entry(heap, list, type, member) ({ \ + struct holder *__holder = __list_pop((heap), list); \ + list_entry(__holder, type, member); }) + +#define list_pop_entry(list, type, member) \ + __list_pop_entry(__main_heap, list, type, member) + +#define __list_for_each(heap, pos, list) \ + for (pos = __hptr((heap), (list)->head.next); \ + pos != &(list)->head; pos = __hptr((heap), (pos)->next)) + +#define list_for_each(pos, list) \ + __list_for_each(__main_heap, pos, list) + +#define __list_for_each_reverse(heap, pos, list) \ + for (pos = __hptr((heap), (list)->head.prev); \ + pos != &(list)->head; pos = __hptr((heap), (pos)->prev)) + +#define list_for_each_reverse(pos, list) \ + __list_for_each_reverse(__main_heap, pos, list) + +#define __list_for_each_safe(heap, pos, tmp, list) \ + for (pos = __hptr((heap), (list)->head.next), \ + tmp = __hptr((heap), (pos)->next); \ + pos != &(list)->head; \ + pos = tmp, tmp = __hptr((heap), (pos)->next)) + +#define list_for_each_safe(pos, tmp, list) \ + __list_for_each_safe(__main_heap, pos, tmp, list) + +#define __list_for_each_entry(heap, pos, list, member) \ + for (pos = list_entry(__hptr((heap), (list)->head.next), \ + typeof(*pos), member); \ + &(pos)->member != &(list)->head; \ + pos = list_entry(__hptr((heap), (pos)->member.next), \ + typeof(*pos), member)) + +#define list_for_each_entry(pos, list, member) \ + __list_for_each_entry(__main_heap, pos, list, member) + +#define __list_for_each_entry_safe(heap, pos, tmp, list, member) \ + for (pos = list_entry(__hptr((heap), (list)->head.next), \ + typeof(*pos), member), \ + tmp = list_entry(__hptr((heap), (pos)->member.next), \ + typeof(*pos), member); \ + &(pos)->member != &(list)->head; \ + pos = tmp, tmp = list_entry(__hptr((heap), (pos)->member.next), \ + typeof(*pos), member)) + +#define __list_for_each_entry_reverse_safe(heap, pos, tmp, list, member) \ + for (pos = list_entry(__hptr((heap), (list)->head.prev), \ + typeof(*pos), member), \ + tmp = list_entry(__hptr((heap), (pos)->member.prev), \ + typeof(*pos), member); \ + &(pos)->member != &(list)->head; \ + pos = tmp, tmp = list_entry(__hptr((heap), (pos)->member.prev), \ + typeof(*pos), member)) + +#define list_for_each_entry_safe(pos, tmp, list, member) \ + __list_for_each_entry_safe(__main_heap, pos, tmp, list, member) + +#define __list_for_each_entry_reverse(heap, pos, list, member) \ + for (pos = list_entry(__hptr((heap), (list)->head.prev), \ + typeof(*pos), member); \ + &pos->member != &(list)->head; \ + pos = list_entry(__hptr((heap), pos->member.prev), \ + typeof(*pos), member)) + +#define list_for_each_entry_reverse(pos, list, member) \ + __list_for_each_entry_reverse(__main_heap, pos, list, member) + +#define list_for_each_entry_reverse_safe(pos, tmp, list, member) \ + __list_for_each_entry_reverse_safe(__main_heap, pos, tmp, list, member) + +#endif /* !_BOILERPLATE_SHARED_LIST_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/shavl.h 
b/kernel/xenomai-v3.2.4/include/boilerplate/shavl.h new file mode 100644 index 0000000..a15a4db --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/shavl.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018 Philippe Gerum + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _BOILERPLATE_SHAVL_H +#define _BOILERPLATE_SHAVL_H + +#define AVL_PSHARED + +#include <boilerplate/avl-inner.h> + +#endif /* !_BOILERPLATE_SHAVL_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/time.h b/kernel/xenomai-v3.2.4/include/boilerplate/time.h new file mode 100644 index 0000000..8d317e7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/time.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
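A note on the shared list primitives above (_BOILERPLATE_SHARED_LIST_H): holders chain heap-relative offsets (dref_type) rather than raw pointers, so two processes mapping the same shared heap at different base addresses still agree on the links. A minimal user-space sketch of that round trip, with stand-in macros assumed to behave like __memoff()/__memptr():

	#include <assert.h>
	#include <stddef.h>

	/* Hypothetical stand-ins for __memoff()/__memptr(). */
	#define memoff(heap, addr)	((size_t)((char *)(addr) - (char *)(heap)))
	#define memptr(heap, off)	((void *)((char *)(heap) + (off)))

	struct node { size_t next; };	/* offset link, like struct holder */

	int main(void)
	{
		char heap[256];		/* stands for a shared heap mapping */
		struct node *a = (struct node *)heap;
		struct node *b = (struct node *)(heap + 64);

		a->next = memoff(heap, b);		/* store offset, not pointer */
		assert(memptr(heap, a->next) == b);	/* resolve in this mapping */
		return 0;
	}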
+ */ +#ifndef _BOILERPLATE_TIME_H +#define _BOILERPLATE_TIME_H + +#include <time.h> + +typedef unsigned long long ticks_t; + +typedef long long sticks_t; + +#ifdef __cplusplus +extern "C" { +#endif + +void timespec_sub(struct timespec *__restrict r, + const struct timespec *__restrict t1, + const struct timespec *__restrict t2); + +void timespec_subs(struct timespec *__restrict r, + const struct timespec *__restrict t1, + sticks_t t2); + +void timespec_add(struct timespec *__restrict r, + const struct timespec *__restrict t1, + const struct timespec *__restrict t2); + +void timespec_adds(struct timespec *__restrict r, + const struct timespec *__restrict t1, + sticks_t t2); + +void timespec_sets(struct timespec *__restrict r, + ticks_t ns); + +#ifdef __cplusplus +} +#endif + +static inline sticks_t timespec_scalar(const struct timespec *__restrict t) +{ + return t->tv_sec * 1000000000LL + t->tv_nsec; +} + +static inline int __attribute__ ((always_inline)) +timespec_before(const struct timespec *__restrict t1, + const struct timespec *__restrict t2) +{ + if (t1->tv_sec < t2->tv_sec) + return 1; + + if (t1->tv_sec == t2->tv_sec && + t1->tv_nsec < t2->tv_nsec) + return 1; + + return 0; +} + +static inline int __attribute__ ((always_inline)) +timespec_before_or_same(const struct timespec *__restrict t1, + const struct timespec *__restrict t2) +{ + if (t1->tv_sec < t2->tv_sec) + return 1; + + if (t1->tv_sec == t2->tv_sec && + t1->tv_nsec <= t2->tv_nsec) + return 1; + + return 0; +} + +static inline int __attribute__ ((always_inline)) +timespec_after(const struct timespec *__restrict t1, + const struct timespec *__restrict t2) +{ + return !timespec_before_or_same(t1, t2); +} + +static inline int __attribute__ ((always_inline)) +timespec_after_or_same(const struct timespec *__restrict t1, + const struct timespec *__restrict t2) +{ + return !timespec_before(t1, t2); +} + +#endif /* _BOILERPLATE_TIME_H */ diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/tunables.h b/kernel/xenomai-v3.2.4/include/boilerplate/tunables.h new file mode 100644 index 0000000..397ffc8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/boilerplate/tunables.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
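The timespec helpers declared just above (their definitions live elsewhere in lib/boilerplate) are expected to keep tv_nsec normalized within [0, 1000000000). A user-space sketch of the borrow-based subtraction timespec_sub() performs, under that assumption:

	#include <stdio.h>
	#include <time.h>

	/* Illustrative re-implementation: r = t1 - t2, nsec normalized. */
	static void demo_timespec_sub(struct timespec *r,
				      const struct timespec *t1,
				      const struct timespec *t2)
	{
		r->tv_sec = t1->tv_sec - t2->tv_sec;
		r->tv_nsec = t1->tv_nsec - t2->tv_nsec;
		if (r->tv_nsec < 0) {
			r->tv_sec--;		/* borrow one second */
			r->tv_nsec += 1000000000L;
		}
	}

	int main(void)
	{
		struct timespec t1 = { .tv_sec = 2, .tv_nsec = 100000000 };
		struct timespec t2 = { .tv_sec = 1, .tv_nsec = 900000000 };
		struct timespec r;

		demo_timespec_sub(&r, &t1, &t2);
		printf("%ld.%09ld\n", (long)r.tv_sec, r.tv_nsec); /* 0.200000000 */
		return 0;
	}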
+ */ +#ifndef _BOILERPLATE_TUNABLES_H +#define _BOILERPLATE_TUNABLES_H + +#include <assert.h> +#include <boilerplate/setup.h> + +#ifdef __cplusplus +extern "C" { +#endif + +static inline int __may_change_config_tunable(void) +{ + return !__config_done; +} + +#define __tunable_set_call(__name, __scope) \ + __assign_ ## __name ## _ ## __scope + +#define __tunable_get_call(__name, __scope) \ + __read_ ## __name ## _ ## __scope + +#define __define_tunable(__name, __type, __val, __scope) \ + void __tunable_set_call(__name, __scope)(__typeof__(__type) __val) + +#define __read_tunable(__name, __type, __scope) \ + __typeof__(__type) __tunable_get_call(__name, __scope)(void) + +#define define_config_tunable(__name, __type, __val) \ + __define_tunable(__name, __type, __val, config) + +#define define_runtime_tunable(__name, __type, __val) \ + __define_tunable(__name, __type, __val, runtime) + +#define read_config_tunable(__name, __type) \ + __read_tunable(__name, __type, config) + +#define read_runtime_tunable(__name, __type) \ + __read_tunable(__name, __type, runtime) + +#define set_config_tunable(__name, __val) \ + do { \ + assert(__may_change_config_tunable()); \ + __tunable_set_call(__name, config)(__val); \ + } while (0) + +#define get_config_tunable(__name) \ + __tunable_get_call(__name, config)() + +#define set_runtime_tunable(__name, __val) \ + __tunable_set_call(__name, runtime)(__val) + +#define get_runtime_tunable(__name) \ + __tunable_get_call(__name, runtime)() + +static inline define_config_tunable(cpu_affinity, cpu_set_t, cpus) +{ + __base_setup_data.cpu_affinity = cpus; +} + +static inline read_config_tunable(cpu_affinity, cpu_set_t) +{ + return __base_setup_data.cpu_affinity; +} + +static inline define_config_tunable(no_mlock, int, nolock) +{ + __base_setup_data.no_mlock = nolock; +} + +static inline read_config_tunable(no_mlock, int) +{ + return __base_setup_data.no_mlock; +} + +static inline define_config_tunable(no_sanity, int, nosanity) +{ + __base_setup_data.no_sanity = nosanity; +} + +static inline read_config_tunable(no_sanity, int) +{ + return __base_setup_data.no_sanity; +} + +static inline define_runtime_tunable(verbosity_level, int, level) +{ + __base_setup_data.verbosity_level = level; +} + +static inline read_runtime_tunable(verbosity_level, int) +{ + return __base_setup_data.verbosity_level; +} + +static inline define_runtime_tunable(trace_level, int, level) +{ + __base_setup_data.trace_level = level; +} + +static inline read_runtime_tunable(trace_level, int) +{ + return __base_setup_data.trace_level; +} + +#ifdef __cplusplus +} +#endif + +#endif /* !_BOILERPLATE_TUNABLES_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/Makefile.am new file mode 100644 index 0000000..19e9611 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/Makefile.am @@ -0,0 +1,27 @@ +includesubdir = $(includedir)/cobalt + +includesub_HEADERS = \ + fcntl.h \ + mqueue.h \ + pthread.h \ + sched.h \ + semaphore.h \ + signal.h \ + stdio.h \ + stdlib.h \ + syslog.h \ + ticks.h \ + time.h \ + trace.h \ + tunables.h \ + unistd.h \ + wrappers.h + +noinst_HEADERS = \ + arith.h + +SUBDIRS = \ + boilerplate \ + kernel \ + sys \ + uapi diff --git a/kernel/xenomai-v3.2.4/include/cobalt/arith.h b/kernel/xenomai-v3.2.4/include/cobalt/arith.h new file mode 100644 index 0000000..6313924 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/arith.h @@ -0,0 +1,45 @@ +/** + * Generic arithmetic/conversion routines. + * Copyright © 2005 Stelian Pop. 
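Spelled out, the token pasting in tunables.h above turns a tunable name plus scope into accessor names: define_config_tunable(no_mlock, int, nolock) declares void __assign_no_mlock_config(int nolock), and the call-site macros expand roughly as follows (a sketch of the preprocessor output, not extra API):

	/* set_config_tunable(no_mlock, 1) becomes: */
	do {
		assert(__may_change_config_tunable());	/* config phase only */
		__assign_no_mlock_config(1);
	} while (0);

	/* get_runtime_tunable(verbosity_level) becomes: */
	__read_verbosity_level_runtime();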
+ * Copyright © 2005 Gilles Chanteperdrix. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_ARITH_H +#define _COBALT_ARITH_H + +#include <stddef.h> +#include <endian.h> + +#if __BYTE_ORDER == __BIG_ENDIAN +#define endianstruct { unsigned int _h; unsigned int _l; } +#else /* __BYTE_ORDER == __LITTLE_ENDIAN */ +#define endianstruct { unsigned int _l; unsigned int _h; } +#endif /* __BYTE_ORDER == __LITTLE_ENDIAN */ + +static inline unsigned xnarch_do_div(unsigned long long *a, unsigned d) +{ + unsigned int r = *a % d; + *a /= d; + + return r; +} + +#define do_div(a, d) xnarch_do_div(&(a), (d)) + +#include <asm/xenomai/features.h> +#include <asm/xenomai/uapi/arith.h> + +#endif /* !_COBALT_ARITH_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/Makefile.am new file mode 100644 index 0000000..ec3c2fe --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/Makefile.am @@ -0,0 +1,8 @@ +includesubdir = $(includedir)/cobalt/boilerplate + +includesub_HEADERS = \ + sched.h \ + limits.h \ + signal.h \ + trace.h \ + wrappers.h diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/limits.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/limits.h new file mode 100644 index 0000000..ae49324 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/limits.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_BOILERPLATE_LIMITS_H +#define _COBALT_BOILERPLATE_LIMITS_H + +#include <cobalt/uapi/kernel/limits.h> + +#endif /* _COBALT_BOILERPLATE_LIMITS_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/sched.h new file mode 100644 index 0000000..d23a8da --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/sched.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. 
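Unlike a plain C division, the do_div() defined in cobalt/arith.h above follows the kernel contract: it divides the 64-bit dividend in place and returns the remainder. A standalone sketch:

	#include <assert.h>

	static inline unsigned demo_do_div(unsigned long long *a, unsigned d)
	{
		unsigned r = *a % d;	/* remainder out */
		*a /= d;		/* quotient replaces the dividend */
		return r;
	}

	int main(void)
	{
		unsigned long long ns = 2750000000ULL;	/* 2.75 s in ns */
		unsigned rem = demo_do_div(&ns, 1000000000U);

		assert(ns == 2 && rem == 750000000U);
		return 0;
	}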
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_BOILERPLATE_SCHED_H +#define _COBALT_BOILERPLATE_SCHED_H + +#include <cobalt/sched.h> + +#endif /* _COBALT_BOILERPLATE_SCHED_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/signal.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/signal.h new file mode 100644 index 0000000..40c6c78 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/signal.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_BOILERPLATE_SIGNAL_H +#define _COBALT_BOILERPLATE_SIGNAL_H + +#include <cobalt/signal.h> + +/* Generates reserved signal numbers for Boilerplate/Copperplate. */ +#define __SIGRSVD(n) (SIGRTMIN + 8 + (n)) + +#define SIGAGENT __SIGRSVD(0) /* Request to remote agent */ +#define SIGPERIOD __SIGRSVD(1) /* Periodic signal */ + +/* Generates private signal numbers for clients, up to SIGRTMAX. */ +#define __SIGPRIV(n) __SIGRSVD(8 + (n)) + +#define SIGSAFE_LOCK_ENTRY(__safelock) \ + do { \ + push_cleanup_lock(__safelock); \ + write_lock(__safelock); + +#define SIGSAFE_LOCK_EXIT(__safelock) \ + write_unlock(__safelock); \ + pop_cleanup_lock(&__safelock); \ + } while (0) + +#endif /* _COBALT_BOILERPLATE_SIGNAL_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/trace.h new file mode 100644 index 0000000..c6cff8b --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/trace.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_BOILERPLATE_TRACE_H +#define _COBALT_BOILERPLATE_TRACE_H + +#include <cobalt/trace.h> + +#endif /* _COBALT_BOILERPLATE_TRACE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/wrappers.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/wrappers.h new file mode 100644 index 0000000..fc3a59d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/wrappers.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_BOILERPLATE_WRAPPERS_H +#define _COBALT_BOILERPLATE_WRAPPERS_H + +#include <cobalt/wrappers.h> + +#endif /* !_COBALT_BOILERPLATE_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/fcntl.h b/kernel/xenomai-v3.2.4/include/cobalt/fcntl.h new file mode 100644 index 0000000..f1052c2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/fcntl.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
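Note the deliberately unbalanced braces in the SIGSAFE_LOCK_ENTRY/EXIT pair from boilerplate/signal.h above: ENTRY opens a do { scope that only EXIT closes, so the compiler enforces their pairing within one lexical block. A hypothetical call site, assuming lock is whatever push_cleanup_lock()/write_lock() expect:

	void update_shared_state(void)
	{
		SIGSAFE_LOCK_ENTRY(lock);  /* push cleanup, then write_lock(lock) */
		/* ... critical section, safe against async cancellation ... */
		SIGSAFE_LOCK_EXIT(lock);   /* write_unlock(lock), pop cleanup */
	}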
+ */ +#pragma GCC system_header +#include_next <fcntl.h> + +#ifndef _COBALT_FCNTL_H +#define _COBALT_FCNTL_H + +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(int, open(const char *path, int oflag, ...)); + +COBALT_DECL(int, open64(const char *path, int oflag, ...)); + +COBALT_DECL(int, __open_2(const char *path, int oflag)); + +COBALT_DECL(int, __open64_2(const char *path, int oflag)); + +COBALT_DECL(int, fcntl(int fd, int cmd, ...)); + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_FCNTL_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/kernel/Makefile.am new file mode 100644 index 0000000..6413481 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/Makefile.am @@ -0,0 +1,37 @@ + +noinst_HEADERS = \ + ancillaries.h \ + arith.h \ + assert.h \ + bufd.h \ + clock.h \ + compat.h \ + heap.h \ + init.h \ + intr.h \ + list.h \ + lock.h \ + map.h \ + pipe.h \ + ppd.h \ + registry.h \ + sched.h \ + sched-idle.h \ + schedparam.h \ + schedqueue.h \ + sched-quota.h \ + sched-rt.h \ + sched-sporadic.h \ + sched-tp.h \ + sched-weak.h \ + select.h \ + stat.h \ + synch.h \ + thread.h \ + timer.h \ + trace.h \ + tree.h \ + vdso.h \ + vfile.h + +SUBDIRS = rtdm diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h new file mode 100644 index 0000000..b957310 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_ANCILLARIES_H +#define _COBALT_KERNEL_ANCILLARIES_H + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/uidgid.h> +#include <cobalt/uapi/kernel/limits.h> + +#define ksformat(__dst, __len, __fmt, __args...) \ + ({ \ + size_t __ret; \ + __ret = snprintf(__dst, __len, __fmt, ##__args); \ + if (__ret >= __len) \ + __dst[__len-1] = '\0'; \ + __ret; \ + }) + +#define kasformat(__fmt, __args...) 
\ + ({ \ + kasprintf(GFP_KERNEL, __fmt, ##__args); \ + }) + +#define kvsformat(__dst, __len, __fmt, __ap) \ + ({ \ + size_t __ret; \ + __ret = vsnprintf(__dst, __len, __fmt, __ap); \ + if (__ret >= __len) \ + __dst[__len-1] = '\0'; \ + __ret; \ + }) + +#define kvasformat(__fmt, __ap) \ + ({ \ + kvasprintf(GFP_KERNEL, __fmt, __ap); \ + }) + +void __knamecpy_requires_character_array_as_destination(void); + +#define knamecpy(__dst, __src) \ + ({ \ + if (!__builtin_types_compatible_p(typeof(__dst), char[])) \ + __knamecpy_requires_character_array_as_destination(); \ + strncpy((__dst), __src, sizeof(__dst)); \ + __dst[sizeof(__dst) - 1] = '\0'; \ + __dst; \ + }) + +#define get_current_uuid() from_kuid_munged(current_user_ns(), current_uid()) + +#endif /* !_COBALT_KERNEL_ANCILLARIES_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h new file mode 100644 index 0000000..a343dfd --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h @@ -0,0 +1,35 @@ +/* + * Generic arithmetic/conversion routines. + * Copyright © 2005 Stelian Pop. + * Copyright © 2005 Gilles Chanteperdrix. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_ARITH_H +#define _COBALT_KERNEL_ARITH_H + +#include <asm/byteorder.h> +#include <asm/div64.h> + +#ifdef __BIG_ENDIAN +#define endianstruct { unsigned int _h; unsigned int _l; } +#else /* __LITTLE_ENDIAN */ +#define endianstruct { unsigned int _l; unsigned int _h; } +#endif + +#include <asm/xenomai/uapi/arith.h> + +#endif /* _COBALT_KERNEL_ARITH_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h new file mode 100644 index 0000000..98218ce --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2006 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
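The string helpers in ancillaries.h above tighten the stock kernel calls: ksformat()/kvsformat() force NUL termination whenever snprintf()/vsnprintf() report truncation, and knamecpy() rejects, at link time, any destination that is not a true char array, since sizeof() would otherwise measure a pointer. A sketch of the intended pattern, assuming XNOBJECT_NAME_LEN from cobalt/uapi/kernel/limits.h:

	static void name_object(const char *label, int id)
	{
		char name[XNOBJECT_NAME_LEN];

		ksformat(name, sizeof(name), "%s/%d", label, id); /* always terminated */
		knamecpy(name, "default");	/* char[] destination: accepted */
		/*
		 * With a char * destination instead, knamecpy() would
		 * reference the undefined helper
		 * __knamecpy_requires_character_array_as_destination()
		 * and linking would fail.
		 */
	}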
+ */ +#ifndef _COBALT_KERNEL_ASSERT_H +#define _COBALT_KERNEL_ASSERT_H + +#include <linux/kconfig.h> + +#define XENO_INFO KERN_INFO "[Xenomai] " +#define XENO_WARNING KERN_WARNING "[Xenomai] " +#define XENO_ERR KERN_ERR "[Xenomai] " + +#define XENO_DEBUG(__subsys) \ + IS_ENABLED(CONFIG_XENO_OPT_DEBUG_##__subsys) +#define XENO_ASSERT(__subsys, __cond) \ + (!WARN_ON(XENO_DEBUG(__subsys) && !(__cond))) +#define XENO_BUG(__subsys) \ + BUG_ON(XENO_DEBUG(__subsys)) +#define XENO_BUG_ON(__subsys, __cond) \ + BUG_ON(XENO_DEBUG(__subsys) && (__cond)) +#define XENO_WARN(__subsys, __cond, __fmt...) \ + WARN(XENO_DEBUG(__subsys) && (__cond), __fmt) +#define XENO_WARN_ON(__subsys, __cond) \ + WARN_ON(XENO_DEBUG(__subsys) && (__cond)) +#define XENO_WARN_ON_ONCE(__subsys, __cond) \ + WARN_ON_ONCE(XENO_DEBUG(__subsys) && (__cond)) +#ifdef CONFIG_SMP +#define XENO_BUG_ON_SMP(__subsys, __cond) \ + XENO_BUG_ON(__subsys, __cond) +#define XENO_WARN_ON_SMP(__subsys, __cond) \ + XENO_WARN_ON(__subsys, __cond) +#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond) \ + XENO_WARN_ON_ONCE(__subsys, __cond) +#else +#define XENO_BUG_ON_SMP(__subsys, __cond) \ + do { } while (0) +#define XENO_WARN_ON_SMP(__subsys, __cond) \ + do { } while (0) +#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond) \ + do { } while (0) +#endif + +#define primary_mode_only() XENO_BUG_ON(CONTEXT, is_secondary_domain()) +#define secondary_mode_only() XENO_BUG_ON(CONTEXT, !is_secondary_domain()) +#define interrupt_only() XENO_BUG_ON(CONTEXT, !xnsched_interrupt_p()) +#define realtime_cpu_only() XENO_BUG_ON(CONTEXT, !xnsched_supported_cpu(raw_smp_processor_id())) +#define thread_only() XENO_BUG_ON(CONTEXT, xnsched_interrupt_p()) +#define irqoff_only() XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0) +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING +#define atomic_only() XENO_BUG_ON(CONTEXT, (xnlock_is_owner(&nklock) && hard_irqs_disabled()) == 0) +#define preemptible_only() XENO_BUG_ON(CONTEXT, xnlock_is_owner(&nklock) || hard_irqs_disabled()) +#else +#define atomic_only() XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0) +#define preemptible_only() XENO_BUG_ON(CONTEXT, hard_irqs_disabled() != 0) +#endif + +#endif /* !_COBALT_KERNEL_ASSERT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h new file mode 100644 index 0000000..92a4078 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
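Since XENO_DEBUG() is just IS_ENABLED(CONFIG_XENO_OPT_DEBUG_<subsys>), every check above folds to a compile-time constant and disappears when the matching debug switch is off. A hypothetical use against the COBALT debug subsystem:

	static int validate(struct xnthread *thread)
	{
		/* Active only with CONFIG_XENO_OPT_DEBUG_COBALT=y. */
		if (!XENO_ASSERT(COBALT, thread != NULL))
			return -EINVAL;	/* WARN_ON() already fired; recover */

		return 0;
	}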
+ */ +#ifndef _COBALT_KERNEL_BUFD_H +#define _COBALT_KERNEL_BUFD_H + +#include <linux/types.h> + +/** + * @addtogroup cobalt_core_bufd + * + * @{ + */ + +struct mm_struct; + +struct xnbufd { + caddr_t b_ptr; /* src/dst buffer address */ + size_t b_len; /* total length of buffer */ + off_t b_off; /* # of bytes read/written */ + struct mm_struct *b_mm; /* src/dst address space */ + caddr_t b_carry; /* pointer to carry over area */ + char b_buf[64]; /* fast carry over area */ +}; + +void xnbufd_map_umem(struct xnbufd *bufd, + void __user *ptr, size_t len); + +static inline void xnbufd_map_uread(struct xnbufd *bufd, + const void __user *ptr, size_t len) +{ + xnbufd_map_umem(bufd, (void __user *)ptr, len); +} + +static inline void xnbufd_map_uwrite(struct xnbufd *bufd, + void __user *ptr, size_t len) +{ + xnbufd_map_umem(bufd, ptr, len); +} + +ssize_t xnbufd_unmap_uread(struct xnbufd *bufd); + +ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd); + +void xnbufd_map_kmem(struct xnbufd *bufd, + void *ptr, size_t len); + +static inline void xnbufd_map_kread(struct xnbufd *bufd, + const void *ptr, size_t len) +{ + xnbufd_map_kmem(bufd, (void *)ptr, len); +} + +static inline void xnbufd_map_kwrite(struct xnbufd *bufd, + void *ptr, size_t len) +{ + xnbufd_map_kmem(bufd, ptr, len); +} + +ssize_t xnbufd_unmap_kread(struct xnbufd *bufd); + +ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd); + +ssize_t xnbufd_copy_to_kmem(void *ptr, + struct xnbufd *bufd, size_t len); + +ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, + void *from, size_t len); + +void xnbufd_invalidate(struct xnbufd *bufd); + +static inline void xnbufd_reset(struct xnbufd *bufd) +{ + bufd->b_off = 0; +} + +/** @} */ + +#endif /* !_COBALT_KERNEL_BUFD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h new file mode 100644 index 0000000..2f7b714 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h @@ -0,0 +1,360 @@ +/* + * Copyright (C) 2006,2007 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_CLOCK_H +#define _COBALT_KERNEL_CLOCK_H + +#include <pipeline/pipeline.h> +#include <pipeline/clock.h> +#include <cobalt/kernel/list.h> +#include <cobalt/kernel/vfile.h> +#include <cobalt/uapi/kernel/types.h> +#include <asm/xenomai/wrappers.h> + +/** + * @addtogroup cobalt_core_clock + * @{ + */ + +struct xnsched; +struct xntimerdata; +struct __kernel_timex; + +struct xnclock_gravity { + unsigned long irq; + unsigned long kernel; + unsigned long user; +}; + +struct xnclock { + /** (ns) */ + xnsticks_t wallclock_offset; + /** (ns) */ + xnticks_t resolution; + /** (raw clock ticks). */ + struct xnclock_gravity gravity; + /** Clock name. 
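Stepping back to the buffer descriptor API above: an xnbufd hides whether the backing memory is a user or kernel buffer behind one copy interface. A kernel-side sketch of the read path, where u_buf/len are hypothetical arguments handed down by a syscall layer:

	static ssize_t drain(const void __user *u_buf, size_t len)
	{
		char tmp[64];
		struct xnbufd bufd;
		ssize_t ret;

		xnbufd_map_uread(&bufd, u_buf, len);	/* wrap the source */
		ret = xnbufd_copy_to_kmem(tmp, &bufd, min(len, sizeof(tmp)));
		xnbufd_unmap_uread(&bufd);	/* finalize the descriptor */

		return ret;
	}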
*/ + const char *name; + struct { +#ifdef CONFIG_XENO_OPT_EXTCLOCK + xnticks_t (*read_raw)(struct xnclock *clock); + xnticks_t (*read_monotonic)(struct xnclock *clock); + int (*set_time)(struct xnclock *clock, + const struct timespec64 *ts); + xnsticks_t (*ns_to_ticks)(struct xnclock *clock, + xnsticks_t ns); + xnsticks_t (*ticks_to_ns)(struct xnclock *clock, + xnsticks_t ticks); + xnsticks_t (*ticks_to_ns_rounded)(struct xnclock *clock, + xnsticks_t ticks); + void (*program_local_shot)(struct xnclock *clock, + struct xnsched *sched); + void (*program_remote_shot)(struct xnclock *clock, + struct xnsched *sched); +#endif + int (*adjust_time)(struct xnclock *clock, + struct __kernel_timex *tx); + int (*set_gravity)(struct xnclock *clock, + const struct xnclock_gravity *p); + void (*reset_gravity)(struct xnclock *clock); +#ifdef CONFIG_XENO_OPT_VFILE + void (*print_status)(struct xnclock *clock, + struct xnvfile_regular_iterator *it); +#endif + } ops; + /* Private section. */ + struct xntimerdata *timerdata; + int id; +#ifdef CONFIG_SMP + /** Possible CPU affinity of clock beat. */ + cpumask_t affinity; +#endif +#ifdef CONFIG_XENO_OPT_STATS + struct xnvfile_snapshot timer_vfile; + struct xnvfile_rev_tag timer_revtag; + struct list_head timerq; + int nrtimers; +#endif /* CONFIG_XENO_OPT_STATS */ +#ifdef CONFIG_XENO_OPT_VFILE + struct xnvfile_regular vfile; +#endif +}; + +struct xnclock_ratelimit_state { + xnticks_t interval; + xnticks_t begin; + int burst; + int printed; + int missed; +}; + +extern struct xnclock nkclock; + +int xnclock_register(struct xnclock *clock, + const cpumask_t *affinity); + +void xnclock_deregister(struct xnclock *clock); + +void xnclock_tick(struct xnclock *clock); + +void xnclock_core_local_shot(struct xnsched *sched); + +void xnclock_core_remote_shot(struct xnsched *sched); + +xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns); + +xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks); + +xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks); + +xnticks_t xnclock_core_read_monotonic(void); + +static inline xnticks_t xnclock_core_read_raw(void) +{ + return pipeline_read_cycle_counter(); +} + +/* We use the Linux defaults */ +#define XN_RATELIMIT_INTERVAL 5000000000LL +#define XN_RATELIMIT_BURST 10 + +int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func); + +#define xnclock_ratelimit() ({ \ + static struct xnclock_ratelimit_state __state = { \ + .interval = XN_RATELIMIT_INTERVAL, \ + .burst = XN_RATELIMIT_BURST, \ + }; \ + __xnclock_ratelimit(&__state, __func__); \ +}) + +#ifdef CONFIG_XENO_OPT_EXTCLOCK + +static inline void xnclock_program_shot(struct xnclock *clock, + struct xnsched *sched) +{ + if (likely(clock == &nkclock)) + xnclock_core_local_shot(sched); + else if (clock->ops.program_local_shot) + clock->ops.program_local_shot(clock, sched); +} + +static inline void xnclock_remote_shot(struct xnclock *clock, + struct xnsched *sched) +{ +#ifdef CONFIG_SMP + if (likely(clock == &nkclock)) + xnclock_core_remote_shot(sched); + else if (clock->ops.program_remote_shot) + clock->ops.program_remote_shot(clock, sched); +#endif +} + +static inline xnticks_t xnclock_read_raw(struct xnclock *clock) +{ + if (likely(clock == &nkclock)) + return xnclock_core_read_raw(); + + return clock->ops.read_raw(clock); +} + +static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock, + xnsticks_t ns) +{ + if (likely(clock == &nkclock)) + return xnclock_core_ns_to_ticks(ns); + + return clock->ops.ns_to_ticks(clock, ns); +} + +static inline 
xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock, + xnsticks_t ticks) +{ + if (likely(clock == &nkclock)) + return xnclock_core_ticks_to_ns(ticks); + + return clock->ops.ticks_to_ns(clock, ticks); +} + +static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock, + xnsticks_t ticks) +{ + if (likely(clock == &nkclock)) + return xnclock_core_ticks_to_ns_rounded(ticks); + + return clock->ops.ticks_to_ns_rounded(clock, ticks); +} + +static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock) +{ + if (likely(clock == &nkclock)) + return xnclock_core_read_monotonic(); + + return clock->ops.read_monotonic(clock); +} + +static inline int xnclock_set_time(struct xnclock *clock, + const struct timespec64 *ts) +{ + if (likely(clock == &nkclock)) + return -EINVAL; + + return clock->ops.set_time(clock, ts); +} + +#else /* !CONFIG_XENO_OPT_EXTCLOCK */ + +static inline void xnclock_program_shot(struct xnclock *clock, + struct xnsched *sched) +{ + xnclock_core_local_shot(sched); +} + +static inline void xnclock_remote_shot(struct xnclock *clock, + struct xnsched *sched) +{ +#ifdef CONFIG_SMP + xnclock_core_remote_shot(sched); +#endif +} + +static inline xnticks_t xnclock_read_raw(struct xnclock *clock) +{ + return xnclock_core_read_raw(); +} + +static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock, + xnsticks_t ns) +{ + return xnclock_core_ns_to_ticks(ns); +} + +static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock, + xnsticks_t ticks) +{ + return xnclock_core_ticks_to_ns(ticks); +} + +static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock, + xnsticks_t ticks) +{ + return xnclock_core_ticks_to_ns_rounded(ticks); +} + +static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock) +{ + return xnclock_core_read_monotonic(); +} + +static inline int xnclock_set_time(struct xnclock *clock, + const struct timespec64 *ts) +{ + /* + * There is no way to change the core clock's idea of time. + */ + return -EINVAL; +} + +#endif /* !CONFIG_XENO_OPT_EXTCLOCK */ + +static inline int xnclock_adjust_time(struct xnclock *clock, + struct __kernel_timex *tx) +{ + if (clock->ops.adjust_time == NULL) + return -EOPNOTSUPP; + + return clock->ops.adjust_time(clock, tx); +} + +static inline xnticks_t xnclock_get_offset(struct xnclock *clock) +{ + return clock->wallclock_offset; +} + +static inline xnticks_t xnclock_get_resolution(struct xnclock *clock) +{ + return clock->resolution; /* ns */ +} + +static inline void xnclock_set_resolution(struct xnclock *clock, + xnticks_t resolution) +{ + clock->resolution = resolution; /* ns */ +} + +static inline int xnclock_set_gravity(struct xnclock *clock, + const struct xnclock_gravity *gravity) +{ + if (clock->ops.set_gravity) + return clock->ops.set_gravity(clock, gravity); + + return -EINVAL; +} + +static inline void xnclock_reset_gravity(struct xnclock *clock) +{ + if (clock->ops.reset_gravity) + clock->ops.reset_gravity(clock); +} + +#define xnclock_get_gravity(__clock, __type) ((__clock)->gravity.__type) + +static inline xnticks_t xnclock_read_realtime(struct xnclock *clock) +{ + if (likely(clock == &nkclock)) + return pipeline_read_wallclock(); + /* + * Return an adjusted value of the monotonic time with the + * translated system wallclock offset. 
+ */ + return xnclock_read_monotonic(clock) + xnclock_get_offset(clock); +} + +void xnclock_apply_offset(struct xnclock *clock, + xnsticks_t delta_ns); + +void xnclock_set_wallclock(xnticks_t epoch_ns); + +unsigned long long xnclock_divrem_billion(unsigned long long value, + unsigned long *rem); + +#ifdef CONFIG_XENO_OPT_VFILE + +void xnclock_init_proc(void); + +void xnclock_cleanup_proc(void); + +static inline void xnclock_print_status(struct xnclock *clock, + struct xnvfile_regular_iterator *it) +{ + if (clock->ops.print_status) + clock->ops.print_status(clock, it); +} + +#else +static inline void xnclock_init_proc(void) { } +static inline void xnclock_cleanup_proc(void) { } +#endif + +int xnclock_init(void); + +void xnclock_cleanup(void); + +/** @} */ + +#endif /* !_COBALT_KERNEL_CLOCK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h new file mode 100644 index 0000000..275735d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
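Each clock accessor above takes the likely() fast path for the core nkclock and only indirects through the per-clock ops table when CONFIG_XENO_OPT_EXTCLOCK is enabled. A hedged sketch of what plugging in an external clock could look like (myclk and read_board_counter() are hypothetical, and a real driver must fill in every op the core may invoke):

	static xnticks_t myclk_read_raw(struct xnclock *clock)
	{
		return read_board_counter();	/* assumed hardware helper */
	}

	static struct xnclock myclk = {
		.name = "myclk",
		.resolution = 1,	/* ns */
		.ops = {
			.read_raw = myclk_read_raw,
		},
	};

	static int myclk_init(void)
	{
		return xnclock_register(&myclk, cpu_possible_mask);
	}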
+ */ +#ifndef _COBALT_KERNEL_COMPAT_H +#define _COBALT_KERNEL_COMPAT_H + +#ifdef CONFIG_XENO_ARCH_SYS3264 + +#include <linux/compat.h> +#include <net/compat.h> +#include <asm/xenomai/wrappers.h> +#include <cobalt/uapi/sched.h> + +struct mq_attr; + +struct __compat_sched_ss_param { + int __sched_low_priority; + struct old_timespec32 __sched_repl_period; + struct old_timespec32 __sched_init_budget; + int __sched_max_repl; +}; + +struct __compat_sched_rr_param { + struct old_timespec32 __sched_rr_quantum; +}; + +struct compat_sched_param_ex { + int sched_priority; + union { + struct __compat_sched_ss_param ss; + struct __compat_sched_rr_param rr; + struct __sched_tp_param tp; + struct __sched_quota_param quota; + } sched_u; +}; + +struct compat_mq_attr { + compat_long_t mq_flags; + compat_long_t mq_maxmsg; + compat_long_t mq_msgsize; + compat_long_t mq_curmsgs; +}; + +struct compat_sched_tp_window { + struct old_timespec32 offset; + struct old_timespec32 duration; + int ptid; +}; + +struct __compat_sched_config_tp { + int op; + int nr_windows; + struct compat_sched_tp_window windows[0]; +}; + +union compat_sched_config { + struct __compat_sched_config_tp tp; + struct __sched_config_quota quota; +}; + +#define compat_sched_tp_confsz(nr_win) \ + (sizeof(struct __compat_sched_config_tp) + nr_win * sizeof(struct compat_sched_tp_window)) + +typedef struct { + compat_ulong_t fds_bits[__FD_SETSIZE / (8 * sizeof(compat_long_t))]; +} compat_fd_set; + +struct compat_rtdm_mmap_request { + u64 offset; + compat_size_t length; + int prot; + int flags; +}; + +int sys32_get_timespec(struct timespec64 *ts, + const struct old_timespec32 __user *cts); + +int sys32_put_timespec(struct old_timespec32 __user *cts, + const struct timespec64 *ts); + +int sys32_get_itimerspec(struct itimerspec64 *its, + const struct old_itimerspec32 __user *cits); + +int sys32_put_itimerspec(struct old_itimerspec32 __user *cits, + const struct itimerspec64 *its); + +int sys32_get_timeval(struct __kernel_old_timeval *tv, + const struct old_timeval32 __user *ctv); + +int sys32_put_timeval(struct old_timeval32 __user *ctv, + const struct __kernel_old_timeval *tv); + +int sys32_get_timex(struct __kernel_timex *tx, + const struct old_timex32 __user *ctx); + +int sys32_put_timex(struct old_timex32 __user *ctx, + const struct __kernel_timex *tx); + +int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds, + size_t cfdsize); + +int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds, + size_t fdsize); + +int sys32_get_param_ex(int policy, + struct sched_param_ex *p, + const struct compat_sched_param_ex __user *u_cp); + +int sys32_put_param_ex(int policy, + struct compat_sched_param_ex __user *u_cp, + const struct sched_param_ex *p); + +int sys32_get_mqattr(struct mq_attr *ap, + const struct compat_mq_attr __user *u_cap); + +int sys32_put_mqattr(struct compat_mq_attr __user *u_cap, + const struct mq_attr *ap); + +int sys32_get_sigevent(struct sigevent *ev, + const struct compat_sigevent *__user u_cev); + +int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset); + +int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set); + +int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval); + +int sys32_put_siginfo(void __user *u_si, const struct siginfo *si, + int overrun); + +int sys32_get_msghdr(struct user_msghdr *msg, + const struct compat_msghdr __user *u_cmsg); + +int sys32_get_mmsghdr(struct mmsghdr *mmsg, + const struct compat_mmsghdr __user *u_cmmsg); + +int sys32_put_msghdr(struct 
compat_msghdr __user *u_cmsg, + const struct user_msghdr *msg); + +int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg, + const struct mmsghdr *mmsg); + +int sys32_get_iovec(struct iovec *iov, + const struct compat_iovec __user *ciov, + int ciovlen); + +int sys32_put_iovec(struct compat_iovec __user *u_ciov, + const struct iovec *iov, + int iovlen); + +#endif /* CONFIG_XENO_ARCH_SYS3264 */ + +#endif /* !_COBALT_KERNEL_COMPAT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h new file mode 100644 index 0000000..cdb0dac --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_CLOCK_H +#define _COBALT_KERNEL_DOVETAIL_CLOCK_H + +#include <cobalt/uapi/kernel/types.h> +#include <cobalt/kernel/assert.h> +#include <linux/ktime.h> +#include <linux/errno.h> + +struct timespec64; + +static inline u64 pipeline_read_cycle_counter(void) +{ + /* + * With Dovetail, our idea of time is directly based on a + * refined count of nanoseconds since the epoch, the hardware + * time counter is transparent to us. For this reason, + * xnclock_ticks_to_ns() and xnclock_ns_to_ticks() are + * idempotent when building for Dovetail. + */ + return ktime_get_mono_fast_ns(); +} + +static inline xnticks_t pipeline_read_wallclock(void) +{ + return ktime_get_real_fast_ns(); +} + +static inline int pipeline_set_wallclock(xnticks_t epoch_ns) +{ + return -EOPNOTSUPP; +} + +void pipeline_set_timer_shot(unsigned long cycles); + +const char *pipeline_timer_name(void); + +static inline const char *pipeline_clock_name(void) +{ + return "<Linux clocksource>"; +} + +static inline int pipeline_get_host_time(struct timespec64 *tp) +{ + /* Convert ktime_get_real_fast_ns() to timespec. */ + *tp = ktime_to_timespec64(ktime_get_real_fast_ns()); + + return 0; +} + +static inline void pipeline_init_clock(void) +{ + /* N/A */ +} + +static inline xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks) +{ + return ticks; +} + +static inline xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks) +{ + return ticks; +} + +static inline xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns) +{ + return ns; +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_CLOCK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h new file mode 100644 index 0000000..af3d70f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H +#define _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H + +#include <linux/irq_work.h> + +/* + * This field must be named inband_work and appear first in the + * container work struct. 
+ */ +struct pipeline_inband_work { + struct irq_work work; +}; + +#define PIPELINE_INBAND_WORK_INITIALIZER(__work, __handler) \ + { \ + .work = IRQ_WORK_INIT((void (*)(struct irq_work *))__handler), \ + } + +#define pipeline_post_inband_work(__work) \ + irq_work_queue(&(__work)->inband_work.work) + +#endif /* !_COBALT_KERNEL_DOVETAIL_INBAND_WORK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h new file mode 100644 index 0000000..55d9b8f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_IRQ_H +#define _COBALT_KERNEL_DOVETAIL_IRQ_H + +static inline void xnintr_init_proc(void) +{ + /* N/A */ +} + +static inline void xnintr_cleanup_proc(void) +{ + /* N/A */ +} + +static inline int xnintr_mount(void) +{ + /* N/A */ + return 0; +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_IRQ_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h new file mode 100644 index 0000000..fa47f03 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_KEVENTS_H +#define _COBALT_KERNEL_DOVETAIL_KEVENTS_H + +#define KEVENT_PROPAGATE 0 +#define KEVENT_STOP 1 + +struct cobalt_process; +struct cobalt_thread; + +static inline +int pipeline_attach_process(struct cobalt_process *process) +{ + return 0; +} + +static inline +void pipeline_detach_process(struct cobalt_process *process) +{ } + +int pipeline_prepare_current(void); + +void pipeline_attach_current(struct xnthread *thread); + +int pipeline_trap_kevents(void); + +void pipeline_enable_kevents(void); + +void pipeline_cleanup_process(void); + +#endif /* !_COBALT_KERNEL_DOVETAIL_KEVENTS_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h new file mode 100644 index 0000000..8866c92 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_LOCK_H +#define _COBALT_KERNEL_DOVETAIL_LOCK_H + +#include <linux/spinlock.h> + +typedef hard_spinlock_t pipeline_spinlock_t; + +#define PIPELINE_SPIN_LOCK_UNLOCKED(__name) __HARD_SPIN_LOCK_INITIALIZER(__name) + +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING +/* Disable UP-over-SMP kernel optimization in debug mode. 
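Per the constraint stated in inband_work.h above, the wrapper must be the first member of its container and be named inband_work: the handler receives the irq_work pointer, which then doubles as a pointer to the container. A hypothetical container and trigger:

	struct my_event {
		struct pipeline_inband_work inband_work; /* first, fixed name */
		int payload;
	};

	static void my_handler(struct my_event *ev)
	{
		/* Runs on the in-band stage; regular kernel services OK. */
	}

	static struct my_event ev = {
		.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(ev, my_handler),
		.payload = 42,
	};

	static void kick_from_oob(void)
	{
		pipeline_post_inband_work(&ev);	/* defers my_handler(&ev) */
	}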
*/ +#define __locking_active__ 1 + +#else + +#ifdef CONFIG_SMP +#define __locking_active__ 1 +#else +#define __locking_active__ IS_ENABLED(CONFIG_SMP) +#endif + +#endif + +#endif /* !_COBALT_KERNEL_DOVETAIL_LOCK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h new file mode 100644 index 0000000..4f3dd95 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_MACHINE_H +#define _COBALT_KERNEL_DOVETAIL_MACHINE_H + +#include <linux/percpu.h> + +#ifdef CONFIG_FTRACE +#define boot_lat_trace_notice "[LTRACE]" +#else +#define boot_lat_trace_notice "" +#endif + +struct vm_area_struct; + +struct cobalt_machine { + const char *name; + int (*init)(void); + int (*late_init)(void); + void (*cleanup)(void); + void (*prefault)(struct vm_area_struct *vma); + const char *const *fault_labels; +}; + +extern struct cobalt_machine cobalt_machine; + +struct cobalt_machine_cpudata { + unsigned int faults[32]; +}; + +DECLARE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata); + +struct cobalt_pipeline { +#ifdef CONFIG_SMP + cpumask_t supported_cpus; +#endif +}; + +int pipeline_init(void); + +int pipeline_late_init(void); + +void pipeline_cleanup(void); + +extern struct cobalt_pipeline cobalt_pipeline; + +#endif /* !_COBALT_KERNEL_IPIPE_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h new file mode 100644 index 0000000..2ee7b32 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_PIPELINE_H +#define _COBALT_KERNEL_DOVETAIL_PIPELINE_H + +#include <linux/irq_pipeline.h> +#include <linux/cpumask.h> +#include <cobalt/kernel/assert.h> +#include <asm/xenomai/features.h> +#include <asm/xenomai/syscall.h> +#include <asm/syscall.h> +#include <pipeline/machine.h> + +typedef unsigned long spl_t; + +/* + * We only keep the LSB when testing in SMP mode in order to strip off + * the recursion marker (0x2) the nklock may store there. + */ +#define splhigh(x) ((x) = oob_irq_save() & 1) +#ifdef CONFIG_SMP +#define splexit(x) oob_irq_restore(x & 1) +#else /* !CONFIG_SMP */ +#define splexit(x) oob_irq_restore(x) +#endif /* !CONFIG_SMP */ +#define splmax() oob_irq_disable() +#define splnone() oob_irq_enable() +#define spltest() oob_irqs_disabled() + +#define is_secondary_domain() running_inband() +#define is_primary_domain() running_oob() + +#ifdef CONFIG_SMP + +irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id); + +static inline int pipeline_request_resched_ipi(void (*handler)(void)) +{ + if (num_possible_cpus() == 1) + return 0; + + /* Trap the out-of-band rescheduling interrupt. */ + return __request_percpu_irq(RESCHEDULE_OOB_IPI, + pipeline_reschedule_ipi_handler, + IRQF_OOB, + "Xenomai reschedule", + &cobalt_machine_cpudata); +} + +static inline void pipeline_free_resched_ipi(void) +{ + if (num_possible_cpus() > 1) + /* Release the out-of-band rescheduling interrupt. 
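As the comment above the spl* macros notes, the nklock may encode a recursion marker (0x2) in the saved state word, which is why splhigh() keeps only bit 0. These macros implement the classic spl-style bracket:

	spl_t s;

	splhigh(s);	/* disable out-of-band irqs, save prior state */
	/* ... short primary-domain critical section ... */
	splexit(s);	/* restore the saved interrupt state */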
*/ + free_percpu_irq(RESCHEDULE_OOB_IPI, &cobalt_machine_cpudata); +} + +static inline void pipeline_send_resched_ipi(const struct cpumask *dest) +{ + /* + * Trigger the out-of-band rescheduling interrupt on remote + * CPU(s). + */ + irq_send_oob_ipi(RESCHEDULE_OOB_IPI, dest); +} + +static inline void pipeline_send_timer_ipi(const struct cpumask *dest) +{ + /* + * Trigger the out-of-band timer interrupt on remote CPU(s). + */ + irq_send_oob_ipi(TIMER_OOB_IPI, dest); +} + +#else /* !CONFIG_SMP */ + +static inline int pipeline_request_resched_ipi(void (*handler)(void)) +{ + return 0; +} + + +static inline void pipeline_free_resched_ipi(void) +{ +} + +#endif /* CONFIG_SMP */ + +static inline void pipeline_prepare_panic(void) +{ + /* N/A */ +} + +static inline void pipeline_collect_features(struct cobalt_featinfo *f) +{ + f->clock_freq = 0; /* N/A */ +} + +#ifndef pipeline_get_syscall_args +static inline void pipeline_get_syscall_args(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + syscall_get_arguments(task, regs, args); +} +#endif /* !pipeline_get_syscall_args */ + +#endif /* !_COBALT_KERNEL_DOVETAIL_PIPELINE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h new file mode 100644 index 0000000..45512b9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_SCHED_H +#define _COBALT_KERNEL_DOVETAIL_SCHED_H + +#include <cobalt/kernel/lock.h> + +struct xnthread; +struct xnsched; +struct task_struct; + +void pipeline_init_shadow_tcb(struct xnthread *thread); + +void pipeline_init_root_tcb(struct xnthread *thread); + +int ___xnsched_run(struct xnsched *sched); + +static inline int pipeline_schedule(struct xnsched *sched) +{ + return run_oob_call((int (*)(void *))___xnsched_run, sched); +} + +static inline void pipeline_prep_switch_oob(struct xnthread *root) +{ + /* N/A */ +} + +bool pipeline_switch_to(struct xnthread *prev, + struct xnthread *next, + bool leaving_inband); + +int pipeline_leave_inband(void); + +int pipeline_leave_oob_prepare(void); + +static inline void pipeline_leave_oob_unlock(void) +{ + /* + * We may not re-enable hard irqs due to the specifics of + * stage escalation via run_oob_call(), to prevent breaking + * the (virtual) interrupt state. + */ + xnlock_put(&nklock); +} + +void pipeline_leave_oob_finish(void); + +static inline +void pipeline_finalize_thread(struct xnthread *thread) +{ + /* N/A */ +} + +void pipeline_raise_mayday(struct task_struct *tsk); + +void pipeline_clear_mayday(void); + +#endif /* !_COBALT_KERNEL_DOVETAIL_SCHED_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h new file mode 100644 index 0000000..1da9d13 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_SIRQ_H +#define _COBALT_KERNEL_DOVETAIL_SIRQ_H + +#include <linux/irq_pipeline.h> +#include <cobalt/kernel/assert.h> + +/* + * Wrappers to create "synthetic IRQs" the Dovetail way. 
Those + * interrupt channels can only be triggered by software, in order to run + * a handler on the in-band execution stage. + */ + +static inline +int pipeline_create_inband_sirq(irqreturn_t (*handler)(int irq, void *dev_id)) +{ + /* + * Allocate an IRQ from the synthetic interrupt domain then + * trap it to @handler, to be fired from the in-band stage. + */ + int sirq, ret; + + sirq = irq_create_direct_mapping(synthetic_irq_domain); + if (sirq == 0) + return -EAGAIN; + + ret = __request_percpu_irq(sirq, + handler, + IRQF_NO_THREAD, + "Inband sirq", + &cobalt_machine_cpudata); + + if (ret) { + irq_dispose_mapping(sirq); + return ret; + } + + return sirq; +} + +static inline +void pipeline_delete_inband_sirq(int sirq) +{ + /* + * Free the synthetic IRQ then deallocate it to its + * originating domain. + */ + free_percpu_irq(sirq, + &cobalt_machine_cpudata); + + irq_dispose_mapping(sirq); +} + +static inline void pipeline_post_sirq(int sirq) +{ + /* Trigger the synthetic IRQ */ + irq_post_inband(sirq); +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_SIRQ_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h new file mode 100644 index 0000000..1e6b0f0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_THREAD_H +#define _COBALT_KERNEL_DOVETAIL_THREAD_H + +#include <linux/dovetail.h> + +struct xnthread; + +#define cobalt_threadinfo oob_thread_state + +static inline struct cobalt_threadinfo *pipeline_current(void) +{ + return dovetail_current_state(); +} + +static inline +struct xnthread *pipeline_thread_from_task(struct task_struct *p) +{ + return dovetail_task_state(p)->thread; +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h new file mode 100644 index 0000000..372d832 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_TICK_H +#define _COBALT_KERNEL_DOVETAIL_TICK_H + +int pipeline_install_tick_proxy(void); + +void pipeline_uninstall_tick_proxy(void); + +struct xnsched; + +bool pipeline_must_force_program_tick(struct xnsched *sched); + +#endif /* !_COBALT_KERNEL_DOVETAIL_TICK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h new file mode 100644 index 0000000..306dd54 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
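A hypothetical lifecycle sketch tying the three synthetic IRQ helpers above together (names invented, only the helpers defined in this header assumed):

    /* Hypothetical synthetic IRQ: handler runs on the in-band stage. */
    static irqreturn_t example_sirq_handler(int sirq, void *dev_id)
    {
        return IRQ_HANDLED;
    }

    static int example_sirq = -1;

    static int example_sirq_setup(void)
    {
        example_sirq = pipeline_create_inband_sirq(example_sirq_handler);
        if (example_sirq < 0)
            return example_sirq;

        pipeline_post_sirq(example_sirq); /* fire it once */
        return 0;
    }

    static void example_sirq_teardown(void)
    {
        pipeline_delete_inband_sirq(example_sirq);
    }
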
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_DOVETAIL_TRACE_H +#define _COBALT_KERNEL_DOVETAIL_TRACE_H + +#include <linux/types.h> +#include <linux/kconfig.h> +#include <cobalt/uapi/kernel/trace.h> +#include <trace/events/cobalt-core.h> +#include <cobalt/kernel/assert.h> + +static inline int xntrace_max_begin(unsigned long v) +{ + return -ENOSYS; +} + +static inline int xntrace_max_end(unsigned long v) +{ + return -ENOSYS; +} + +static inline int xntrace_max_reset(void) +{ + return -ENOSYS; +} + +static inline int xntrace_user_start(void) +{ + trace_cobalt_trigger("user-start"); + return 0; +} + +static inline int xntrace_user_stop(unsigned long v) +{ + trace_cobalt_trace_longval(0, v); + trace_cobalt_trigger("user-stop"); + return 0; +} + +static inline int xntrace_user_freeze(unsigned long v, int once) +{ + trace_cobalt_trace_longval(0, v); + trace_cobalt_trigger("user-freeze"); + return 0; +} + +static inline void xntrace_latpeak_freeze(int delay) +{ + trace_cobalt_latpeak(delay); + trace_cobalt_trigger("latency-freeze"); +} + +static inline int xntrace_special(unsigned char id, unsigned long v) +{ + trace_cobalt_trace_longval(id, v); + return 0; +} + +static inline int xntrace_special_u64(unsigned char id, + unsigned long long v) +{ + trace_cobalt_trace_longval(id, v); + return 0; +} + +static inline int xntrace_pid(pid_t pid, short prio) +{ + trace_cobalt_trace_pid(pid, prio); + return 0; +} + +static inline int xntrace_tick(unsigned long delay_ticks) /* ns */ +{ + trace_cobalt_tick_shot(delay_ticks); + return 0; +} + +static inline bool xntrace_enabled(void) +{ + return IS_ENABLED(CONFIG_DOVETAIL_TRACE); +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_TRACE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h new file mode 100644 index 0000000..07de643 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org> + * Copyright (c) Siemens AG, 2021 + */ + +#ifndef _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H +#define _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H + +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/timer.h> +#include <xenomai/posix/clock.h> + +#define is_clock_gettime(__nr) ((__nr) == __NR_clock_gettime) + +#ifndef __NR_clock_gettime64 +#define is_clock_gettime64(__nr) 0 +#else +#define is_clock_gettime64(__nr) ((__nr) == __NR_clock_gettime64) +#endif + +static __always_inline bool +pipeline_handle_vdso_fallback(int nr, struct pt_regs *regs) +{ + struct __kernel_old_timespec __user *u_old_ts; + struct __kernel_timespec uts, __user *u_uts; + struct __kernel_old_timespec old_ts; + struct timespec64 ts64; + int clock_id, ret = 0; + unsigned long args[6]; + + if (!is_clock_gettime(nr) && !is_clock_gettime64(nr)) + return false; + + /* + * We need to fetch the args again because not all archs use the same + * calling convention for Linux and Xenomai syscalls. 
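The wrappers above funnel into the cobalt-core tracepoints; a hypothetical instrumentation point, assuming only the xntrace_* helpers defined in this header:

    /* Hypothetical instrumentation of a latency-sensitive path. */
    static void example_trace_point(unsigned long lat_ns)
    {
        if (!xntrace_enabled())
            return;

        xntrace_special(0x42, lat_ns);        /* arbitrary marker id */
        if (lat_ns > 100000)                  /* threshold: 100 us */
            xntrace_user_freeze(lat_ns, 1);   /* freeze once */
    }
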
+ */ + syscall_get_arguments(current, regs, args); + + clock_id = (int)args[0]; + switch (clock_id) { + case CLOCK_MONOTONIC: + ns2ts(&ts64, xnclock_read_monotonic(&nkclock)); + break; + case CLOCK_REALTIME: + ns2ts(&ts64, xnclock_read_realtime(&nkclock)); + break; + default: + return false; + } + + if (is_clock_gettime(nr)) { + old_ts.tv_sec = (__kernel_old_time_t)ts64.tv_sec; + old_ts.tv_nsec = ts64.tv_nsec; + u_old_ts = (struct __kernel_old_timespec __user *)args[1]; + if (raw_copy_to_user(u_old_ts, &old_ts, sizeof(old_ts))) + ret = -EFAULT; + } else if (is_clock_gettime64(nr)) { + uts.tv_sec = ts64.tv_sec; + uts.tv_nsec = ts64.tv_nsec; + u_uts = (struct __kernel_timespec __user *)args[1]; + if (raw_copy_to_user(u_uts, &uts, sizeof(uts))) + ret = -EFAULT; + } + + __xn_status_return(regs, ret); + + return true; +} + +#endif /* !_COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h new file mode 100644 index 0000000..133aaca --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ +#ifndef _COBALT_KERNEL_DOVETAIL_WRAPPERS_H +#define _COBALT_KERNEL_DOVETAIL_WRAPPERS_H + +/* No wrapper needed so far. */ + +#endif /* !_COBALT_KERNEL_DOVETAIL_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h new file mode 100644 index 0000000..09c982f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_HEAP_H +#define _COBALT_KERNEL_HEAP_H + +#include <linux/string.h> +#include <linux/rbtree.h> +#include <cobalt/kernel/lock.h> +#include <cobalt/kernel/list.h> +#include <cobalt/uapi/kernel/types.h> +#include <cobalt/uapi/kernel/heap.h> + +/** + * @addtogroup cobalt_core_heap + * @{ + */ + +#define XNHEAP_PAGE_SHIFT 9 /* 2^9 => 512 bytes */ +#define XNHEAP_PAGE_SIZE (1UL << XNHEAP_PAGE_SHIFT) +#define XNHEAP_PAGE_MASK (~(XNHEAP_PAGE_SIZE - 1)) +#define XNHEAP_MIN_LOG2 4 /* 16 bytes */ +/* + * Use bucketed memory for sizes between 2^XNHEAP_MIN_LOG2 and + * 2^(XNHEAP_PAGE_SHIFT-1). + */ +#define XNHEAP_MAX_BUCKETS (XNHEAP_PAGE_SHIFT - XNHEAP_MIN_LOG2) +#define XNHEAP_MIN_ALIGN (1U << XNHEAP_MIN_LOG2) +/* Maximum size of a heap (4Gb - PAGE_SIZE). */ +#define XNHEAP_MAX_HEAPSZ (4294967295U - PAGE_SIZE + 1) +/* Bits we need for encoding a page # */ +#define XNHEAP_PGENT_BITS (32 - XNHEAP_PAGE_SHIFT) +/* Each page is represented by a page map entry. 
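The net effect of pipeline_handle_vdso_fallback() is that a plain clock_gettime() call which misses the vDSO fast path is still served from the Xenomai clock rather than being demoted to a regular in-band syscall. A hypothetical user-space view, using standard POSIX only:

    /* User-space view: nothing Xenomai-specific is needed. */
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec ts;

        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
            printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);

        return 0;
    }
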
*/ +#define XNHEAP_PGMAP_BYTES sizeof(struct xnheap_pgentry) + +struct xnheap_pgentry { + /* Linkage in bucket list. */ + unsigned int prev : XNHEAP_PGENT_BITS; + unsigned int next : XNHEAP_PGENT_BITS; + /* page_list or log2. */ + unsigned int type : 6; + /* + * We hold either a spatial map of busy blocks within the page + * for bucketed memory (up to 32 blocks per page), or the + * overall size of the multi-page block if entry.type == + * page_list. + */ + union { + u32 map; + u32 bsize; + }; +}; + +/* + * A range descriptor is stored at the beginning of the first page of + * a range of free pages. xnheap_range.size is nrpages * + * XNHEAP_PAGE_SIZE. Ranges are indexed by address and size in + * rbtrees. + */ +struct xnheap_range { + struct rb_node addr_node; + struct rb_node size_node; + size_t size; +}; + +struct xnheap { + void *membase; + struct rb_root addr_tree; + struct rb_root size_tree; + struct xnheap_pgentry *pagemap; + size_t usable_size; + size_t used_size; + u32 buckets[XNHEAP_MAX_BUCKETS]; + char name[XNOBJECT_NAME_LEN]; + DECLARE_XNLOCK(lock); + struct list_head next; +}; + +extern struct xnheap cobalt_heap; + +#define xnmalloc(size) xnheap_alloc(&cobalt_heap, size) +#define xnfree(ptr) xnheap_free(&cobalt_heap, ptr) + +static inline void *xnheap_get_membase(const struct xnheap *heap) +{ + return heap->membase; +} + +static inline +size_t xnheap_get_size(const struct xnheap *heap) +{ + return heap->usable_size; +} + +static inline +size_t xnheap_get_used(const struct xnheap *heap) +{ + return heap->used_size; +} + +static inline +size_t xnheap_get_free(const struct xnheap *heap) +{ + return heap->usable_size - heap->used_size; +} + +int xnheap_init(struct xnheap *heap, + void *membase, size_t size); + +void xnheap_destroy(struct xnheap *heap); + +void *xnheap_alloc(struct xnheap *heap, size_t size); + +void xnheap_free(struct xnheap *heap, void *block); + +ssize_t xnheap_check_block(struct xnheap *heap, void *block); + +void xnheap_set_name(struct xnheap *heap, + const char *name, ...); + +void *xnheap_vmalloc(size_t size); + +void xnheap_vfree(void *p); + +static inline void *xnheap_zalloc(struct xnheap *heap, size_t size) +{ + void *p; + + p = xnheap_alloc(heap, size); + if (p) + memset(p, 0, size); + + return p; +} + +static inline char *xnstrdup(const char *s) +{ + char *p; + + p = xnmalloc(strlen(s) + 1); + if (p == NULL) + return NULL; + + return strcpy(p, s); +} + +#ifdef CONFIG_XENO_OPT_VFILE +void xnheap_init_proc(void); +void xnheap_cleanup_proc(void); +#else /* !CONFIG_XENO_OPT_VFILE */ +static inline void xnheap_init_proc(void) { } +static inline void xnheap_cleanup_proc(void) { } +#endif /* !CONFIG_XENO_OPT_VFILE */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_HEAP_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h new file mode 100644 index 0000000..41dd531 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
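A hypothetical private-heap setup composing the declarations above (xnheap_vmalloc(), xnheap_init(), xnheap_set_name(), xnheap_destroy()); error handling kept minimal for brevity:

    /* Hypothetical setup of a private heap backed by vmalloc memory. */
    static struct xnheap example_heap;

    static int example_heap_setup(size_t size)
    {
        void *mem = xnheap_vmalloc(size);
        int ret;

        if (mem == NULL)
            return -ENOMEM;

        ret = xnheap_init(&example_heap, mem, size);
        if (ret) {
            xnheap_vfree(mem);
            return ret;
        }

        xnheap_set_name(&example_heap, "example");
        return 0;
    }

    static void example_heap_cleanup(void)
    {
        void *mem = xnheap_get_membase(&example_heap);

        xnheap_destroy(&example_heap);
        xnheap_vfree(mem);
    }
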
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_INIT_H +#define _COBALT_KERNEL_INIT_H + +#include <linux/atomic.h> +#include <linux/notifier.h> +#include <cobalt/uapi/corectl.h> + +extern atomic_t cobalt_runstate; + +static inline enum cobalt_run_states realtime_core_state(void) +{ + return atomic_read(&cobalt_runstate); +} + +static inline int realtime_core_enabled(void) +{ + return atomic_read(&cobalt_runstate) != COBALT_STATE_DISABLED; +} + +static inline int realtime_core_running(void) +{ + return atomic_read(&cobalt_runstate) == COBALT_STATE_RUNNING; +} + +static inline void set_realtime_core_state(enum cobalt_run_states state) +{ + atomic_set(&cobalt_runstate, state); +} + +void cobalt_add_state_chain(struct notifier_block *nb); + +void cobalt_remove_state_chain(struct notifier_block *nb); + +void cobalt_call_state_chain(enum cobalt_run_states newstate); + +#endif /* !_COBALT_KERNEL_INIT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h new file mode 100644 index 0000000..393ad96 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_INTR_H +#define _COBALT_KERNEL_INTR_H + +#include <linux/spinlock.h> +#include <cobalt/kernel/stat.h> +#include <pipeline/irq.h> + +/** + * @addtogroup cobalt_core_irq + * @{ + */ + +/* Possible return values of a handler. */ +#define XN_IRQ_NONE 0x1 +#define XN_IRQ_HANDLED 0x2 +#define XN_IRQ_STATMASK (XN_IRQ_NONE|XN_IRQ_HANDLED) +#define XN_IRQ_PROPAGATE 0x100 +#define XN_IRQ_DISABLE 0x200 + +/* Init flags. */ +#define XN_IRQTYPE_SHARED 0x1 +#define XN_IRQTYPE_EDGE 0x2 + +/* Status bits. */ +#define XN_IRQSTAT_ATTACHED 0 +#define _XN_IRQSTAT_ATTACHED (1 << XN_IRQSTAT_ATTACHED) +#define XN_IRQSTAT_DISABLED 1 +#define _XN_IRQSTAT_DISABLED (1 << XN_IRQSTAT_DISABLED) + +struct xnintr; +struct xnsched; + +typedef int (*xnisr_t)(struct xnintr *intr); + +typedef void (*xniack_t)(unsigned irq, void *arg); + +struct xnirqstat { + /** Number of handled receipts since attachment. */ + xnstat_counter_t hits; + /** Runtime accounting entity */ + xnstat_exectime_t account; + /** Accumulated accounting entity */ + xnstat_exectime_t sum; +}; + +struct xnintr { +#ifdef CONFIG_XENO_OPT_SHIRQ + /** Next object in the IRQ-sharing chain. */ + struct xnintr *next; +#endif + /** Number of consecutive unhandled interrupts */ + unsigned int unhandled; + /** Interrupt service routine. 
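A hypothetical consumer of the run-state notifier chain declared above; only cobalt_add_state_chain() and the COBALT_STATE_RUNNING value referenced in this header are assumed:

    /* Hypothetical notifier reacting to Cobalt run-state changes. */
    static int example_state_change(struct notifier_block *nb,
                                    unsigned long state, void *arg)
    {
        if (state != COBALT_STATE_RUNNING) {
            /* ... quiesce any real-time activity ... */
        }

        return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
        .notifier_call = example_state_change,
    };

    static void example_watch_runstate(void)
    {
        cobalt_add_state_chain(&example_nb);
    }
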
*/ + xnisr_t isr; + /** User-defined cookie value. */ + void *cookie; + /** runtime status */ + unsigned long status; + /** Creation flags. */ + int flags; + /** IRQ number. */ + unsigned int irq; + /** Interrupt acknowledge routine. */ + xniack_t iack; + /** Symbolic name. */ + const char *name; + /** Descriptor maintenance lock. */ + raw_spinlock_t lock; +#ifdef CONFIG_XENO_OPT_STATS_IRQS + /** Statistics. */ + struct xnirqstat *stats; +#endif +}; + +struct xnintr_iterator { + int cpu; /** Current CPU in iteration. */ + unsigned long hits; /** Current hit counter. */ + xnticks_t exectime_period; /** Used CPU time in current accounting period. */ + xnticks_t account_period; /** Length of accounting period. */ + xnticks_t exectime_total; /** Overall CPU time consumed. */ + int list_rev; /** System-wide xnintr list revision (internal use). */ + struct xnintr *prev; /** Previously visited xnintr object (internal use). */ +}; + +void xnintr_core_clock_handler(void); + +void xnintr_host_tick(struct xnsched *sched); + + /* Public interface. */ + +int xnintr_init(struct xnintr *intr, + const char *name, + unsigned irq, + xnisr_t isr, + xniack_t iack, + int flags); + +void xnintr_destroy(struct xnintr *intr); + +int xnintr_attach(struct xnintr *intr, + void *cookie, const cpumask_t *cpumask); + +void xnintr_detach(struct xnintr *intr); + +void xnintr_enable(struct xnintr *intr); + +void xnintr_disable(struct xnintr *intr); + +int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask); + +#ifdef CONFIG_XENO_OPT_STATS_IRQS + +int xnintr_query_init(struct xnintr_iterator *iterator); + +int xnintr_get_query_lock(void); + +void xnintr_put_query_lock(void); + +int xnintr_query_next(int irq, struct xnintr_iterator *iterator, + char *name_buf); + +#else /* !CONFIG_XENO_OPT_STATS_IRQS */ + +static inline int xnintr_query_init(struct xnintr_iterator *iterator) +{ + return 0; +} + +static inline int xnintr_get_query_lock(void) +{ + return 0; +} + +static inline void xnintr_put_query_lock(void) {} +#endif /* !CONFIG_XENO_OPT_STATS_IRQS */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_INTR_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/clock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/clock.h new file mode 100644 index 0000000..a06d1aa --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/clock.h @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_IPIPE_CLOCK_H +#define _COBALT_KERNEL_IPIPE_CLOCK_H + +#include <linux/ipipe_tickdev.h> +#include <cobalt/uapi/kernel/types.h> + +struct timespec64; + +static inline u64 pipeline_read_cycle_counter(void) +{ + u64 t; + ipipe_read_tsc(t); + return t; +} + +xnticks_t pipeline_read_wallclock(void); + +int pipeline_set_wallclock(xnticks_t epoch_ns); + +static inline void pipeline_set_timer_shot(unsigned long cycles) +{ + ipipe_timer_set(cycles); +} + +static inline const char *pipeline_timer_name(void) +{ + return ipipe_timer_name(); +} + +static inline const char *pipeline_clock_name(void) +{ + return ipipe_clock_name(); +} + +int pipeline_get_host_time(struct timespec64 *tp); + +void pipeline_update_clock_freq(unsigned long long freq); + +void pipeline_init_clock(void); + +#endif /* !_COBALT_KERNEL_IPIPE_CLOCK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/inband_work.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/inband_work.h new file mode 100644 index 0000000..12ef07b --- /dev/null +++ 
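The init/attach/enable sequence implied by the prototypes above reads as follows in a hypothetical driver (names invented, only the xnintr_* interface above assumed):

    /* Hypothetical driver hooking a device interrupt through xnintr. */
    static int example_isr(struct xnintr *intr)
    {
        /* ... acknowledge the device ... */
        return XN_IRQ_HANDLED;
    }

    static struct xnintr example_intr;

    static int example_hook_irq(unsigned int irq, void *cookie)
    {
        int ret;

        ret = xnintr_init(&example_intr, "example", irq,
                          example_isr, NULL, 0);
        if (ret)
            return ret;

        ret = xnintr_attach(&example_intr, cookie, NULL);
        if (ret) {
            xnintr_destroy(&example_intr);
            return ret;
        }

        xnintr_enable(&example_intr);
        return 0;
    }
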
b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/inband_work.h @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_IPIPE_INBAND_WORK_H +#define _COBALT_KERNEL_IPIPE_INBAND_WORK_H + +#include <linux/ipipe.h> + +/* + * This field must be named inband_work and appear first in the + * container work struct. + */ +struct pipeline_inband_work { + struct ipipe_work_header work; +}; + +#define PIPELINE_INBAND_WORK_INITIALIZER(__work, __handler) \ + { \ + .work = { \ + .size = sizeof(__work), \ + .handler = (void (*)(struct ipipe_work_header *)) \ + __handler, \ + }, \ + } + +#define pipeline_post_inband_work(__work) \ + ipipe_post_work_root(__work, inband_work.work) + +#endif /* !_COBALT_KERNEL_IPIPE_INBAND_WORK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/irq.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/irq.h new file mode 100644 index 0000000..a2db772 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/irq.h @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_IPIPE_IRQ_H +#define _COBALT_KERNEL_IPIPE_IRQ_H + +void xnintr_init_proc(void); + +void xnintr_cleanup_proc(void); + +int xnintr_mount(void); + +#endif /* !_COBALT_KERNEL_IPIPE_IRQ_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/kevents.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/kevents.h new file mode 100644 index 0000000..f3f0c2f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/kevents.h @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_IPIPE_KEVENTS_H +#define _COBALT_KERNEL_IPIPE_KEVENTS_H + +#define KEVENT_PROPAGATE 0 +#define KEVENT_STOP 1 + +struct cobalt_process; +struct cobalt_thread; + +static inline +int pipeline_attach_process(struct cobalt_process *process) +{ + return 0; +} + +static inline +void pipeline_detach_process(struct cobalt_process *process) +{ } + +int pipeline_prepare_current(void); + +void pipeline_attach_current(struct xnthread *thread); + +int pipeline_trap_kevents(void); + +void pipeline_enable_kevents(void); + +void pipeline_cleanup_process(void); + +#endif /* !_COBALT_KERNEL_IPIPE_KEVENTS_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/lock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/lock.h new file mode 100644 index 0000000..f33b041 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/lock.h @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_IPIPE_LOCK_H +#define _COBALT_KERNEL_IPIPE_LOCK_H + +#include <pipeline/pipeline.h> + +typedef ipipe_spinlock_t pipeline_spinlock_t; + +#define PIPELINE_SPIN_LOCK_UNLOCKED(__name) IPIPE_SPIN_LOCK_UNLOCKED + +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING +/* Disable UP-over-SMP kernel optimization in debug mode. 
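A hypothetical deferred work item honoring the layout constraint stated above (the pipeline_inband_work field placed first); only the initializer and post macros from this header are assumed:

    /* Hypothetical in-band work deferral. */
    struct example_work {
        struct pipeline_inband_work inband_work; /* must be first */
        int payload;
    };

    static void example_work_handler(struct example_work *w)
    {
        /* Runs on the in-band (root) stage. */
    }

    static struct example_work example_work = {
        .inband_work = PIPELINE_INBAND_WORK_INITIALIZER(example_work,
                                example_work_handler),
        .payload = 42,
    };

    static void example_defer(void)
    {
        pipeline_post_inband_work(&example_work);
    }
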
*/ +#define __locking_active__ 1 +#else +#define __locking_active__ ipipe_smp_p +#endif + +#endif /* !_COBALT_KERNEL_IPIPE_LOCK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/machine.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/machine.h new file mode 100644 index 0000000..062722a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/machine.h @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_IPIPE_MACHINE_H +#define _COBALT_KERNEL_IPIPE_MACHINE_H + +#include <linux/ipipe.h> +#include <linux/percpu.h> + +#ifdef CONFIG_IPIPE_TRACE +#define boot_lat_trace_notice "[LTRACE]" +#else +#define boot_lat_trace_notice "" +#endif + +struct vm_area_struct; + +struct cobalt_machine { + const char *name; + int (*init)(void); + int (*late_init)(void); + void (*cleanup)(void); + void (*prefault)(struct vm_area_struct *vma); + const char *const *fault_labels; +}; + +extern struct cobalt_machine cobalt_machine; + +struct cobalt_machine_cpudata { + unsigned int faults[IPIPE_NR_FAULTS]; +}; + +DECLARE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata); + +struct cobalt_pipeline { + struct ipipe_domain domain; + unsigned long clock_freq; + unsigned int escalate_virq; +#ifdef CONFIG_SMP + cpumask_t supported_cpus; +#endif +}; + +int pipeline_init(void); + +int pipeline_late_init(void); + +void pipeline_cleanup(void); + +extern struct cobalt_pipeline cobalt_pipeline; + +#endif /* !_COBALT_KERNEL_IPIPE_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/pipeline.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/pipeline.h new file mode 100644 index 0000000..ac9c92b --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/pipeline.h @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_IPIPE_PIPELINE_H +#define _COBALT_KERNEL_IPIPE_PIPELINE_H + +#ifdef CONFIG_IPIPE_LEGACY +#error "CONFIG_IPIPE_LEGACY must be switched off" +#endif + +#include <pipeline/machine.h> +#include <asm/xenomai/features.h> +#include <asm/xenomai/syscall.h> + +#define xnsched_primary_domain cobalt_pipeline.domain + +#define PIPELINE_NR_IRQS IPIPE_NR_IRQS + +typedef unsigned long spl_t; + +#define splhigh(x) ((x) = ipipe_test_and_stall_head() & 1) +#ifdef CONFIG_SMP +#define splexit(x) ipipe_restore_head(x & 1) +#else /* !CONFIG_SMP */ +#define splexit(x) ipipe_restore_head(x) +#endif /* !CONFIG_SMP */ +#define splmax() ipipe_stall_head() +#define splnone() ipipe_unstall_head() +#define spltest() ipipe_test_head() + +#define is_secondary_domain() ipipe_root_p +#define is_primary_domain() (!ipipe_root_p) + +#ifdef CONFIG_SMP + +static inline int pipeline_request_resched_ipi(void (*handler)(void)) +{ + return ipipe_request_irq(&cobalt_pipeline.domain, + IPIPE_RESCHEDULE_IPI, + (ipipe_irq_handler_t)handler, + NULL, NULL); +} + +static inline void pipeline_free_resched_ipi(void) +{ + ipipe_free_irq(&cobalt_pipeline.domain, + IPIPE_RESCHEDULE_IPI); +} + +static inline void pipeline_send_resched_ipi(const struct cpumask *dest) +{ + ipipe_send_ipi(IPIPE_RESCHEDULE_IPI, *dest); +} + +static inline void pipeline_send_timer_ipi(const struct cpumask *dest) +{ + ipipe_send_ipi(IPIPE_HRTIMER_IPI, *dest); +} + +#else /* !CONFIG_SMP */ + +static inline int pipeline_request_resched_ipi(void (*handler)(void)) +{ + return 0; +} + + +static inline void 
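A hypothetical arch plugin filling in the machine descriptor above; the callbacks and labels are invented placeholders for whatever a real port would provide:

    /* Hypothetical per-arch definition of the machine descriptor. */
    static int example_arch_init(void)
    {
        /* ... probe arch-specific timer/IRQ facilities ... */
        return 0;
    }

    static void example_arch_cleanup(void)
    {
    }

    static const char *const example_fault_labels[] = {
        "Data or instruction access",
        NULL,
    };

    struct cobalt_machine cobalt_machine = {
        .name = "example-arch",
        .init = example_arch_init,
        .late_init = NULL,
        .cleanup = example_arch_cleanup,
        .prefault = NULL,
        .fault_labels = example_fault_labels,
    };
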
pipeline_free_resched_ipi(void) +{ +} + +#endif /* CONFIG_SMP */ + +static inline void pipeline_prepare_panic(void) +{ + ipipe_prepare_panic(); +} + +static inline void pipeline_collect_features(struct cobalt_featinfo *f) +{ + f->clock_freq = cobalt_pipeline.clock_freq; +} + +static inline void pipeline_get_syscall_args(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + *args++ = __xn_reg_arg1(regs); + *args++ = __xn_reg_arg2(regs); + *args++ = __xn_reg_arg3(regs); + *args++ = __xn_reg_arg4(regs); + *args = __xn_reg_arg5(regs); +} + +#endif /* !_COBALT_KERNEL_IPIPE_PIPELINE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sched.h new file mode 100644 index 0000000..9d7bf88 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sched.h @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_IPIPE_SCHED_H +#define _COBALT_KERNEL_IPIPE_SCHED_H + +#include <cobalt/kernel/lock.h> + +struct xnthread; +struct xnsched; +struct task_struct; + +void pipeline_init_shadow_tcb(struct xnthread *thread); + +void pipeline_init_root_tcb(struct xnthread *thread); + +int pipeline_schedule(struct xnsched *sched); + +void pipeline_prep_switch_oob(struct xnthread *root); + +bool pipeline_switch_to(struct xnthread *prev, + struct xnthread *next, + bool leaving_inband); + +int pipeline_leave_inband(void); + +int pipeline_leave_oob_prepare(void); + +static inline void pipeline_leave_oob_unlock(void) +{ + /* + * Introduce an opportunity for interrupt delivery right + * before switching context, which shortens the + * uninterruptible code path. + * + * We have to shut irqs off before __xnsched_run() is called + * next though: if an interrupt could preempt us right after + * xnarch_escalate() is passed but before the nklock is + * grabbed, we would enter the critical section in + * ___xnsched_run() from the root domain, which would defeat + * the purpose of escalating the request. + */ + xnlock_clear_irqon(&nklock); + splmax(); +} + +void pipeline_leave_oob_finish(void); + +void pipeline_finalize_thread(struct xnthread *thread); + +void pipeline_raise_mayday(struct task_struct *tsk); + +void pipeline_clear_mayday(void); + +#endif /* !_COBALT_KERNEL_IPIPE_SCHED_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sirq.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sirq.h new file mode 100644 index 0000000..1a16776 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sirq.h @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_IPIPE_SIRQ_H +#define _COBALT_KERNEL_IPIPE_SIRQ_H + +#include <linux/ipipe.h> +#include <pipeline/machine.h> + +/* + * Wrappers to create "synthetic IRQs" the I-pipe way (used to be + * called "virtual IRQs" there). Those interrupt channels can only be + * triggered by software; they have per-CPU semantics. We use them to + * schedule handlers to be run on the in-band execution stage, meaning + * "secondary mode" in the Cobalt jargon. 
+ */ + +static inline +int pipeline_create_inband_sirq(irqreturn_t (*handler)(int irq, void *dev_id)) +{ + int sirq, ret; + + sirq = ipipe_alloc_virq(); + if (sirq == 0) + return -EAGAIN; + + /* + * ipipe_irq_handler_t is close enough to the signature of a + * regular IRQ handler: use the latter in the generic code + * shared with Dovetail. The extraneous return code will be + * ignored by the I-pipe core. + */ + ret = ipipe_request_irq(ipipe_root_domain, sirq, + (ipipe_irq_handler_t)handler, + NULL, NULL); + if (ret) { + ipipe_free_virq(sirq); + return ret; + } + + return sirq; +} + +static inline +void pipeline_delete_inband_sirq(int sirq) +{ + ipipe_free_irq(ipipe_root_domain, sirq); + ipipe_free_virq(sirq); +} + +static inline void pipeline_post_sirq(int sirq) +{ + ipipe_post_irq_root(sirq); +} + +#endif /* !_COBALT_KERNEL_IPIPE_SIRQ_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/thread.h new file mode 100644 index 0000000..30a8853 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/thread.h @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_KERNEL_IPIPE_THREAD_H +#define _COBALT_KERNEL_IPIPE_THREAD_H + +#include <linux/ipipe.h> +#include <linux/sched.h> + +struct xnthread; + +#define cobalt_threadinfo ipipe_threadinfo + +static inline struct cobalt_threadinfo *pipeline_current(void) +{ + return ipipe_current_threadinfo(); +} + +static inline struct xnthread *pipeline_thread_from_task(struct task_struct *p) +{ + return ipipe_task_threadinfo(p)->thread; +} + +#endif /* !_COBALT_KERNEL_IPIPE_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/tick.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/tick.h new file mode 100644 index 0000000..41347f7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/tick.h @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_IPIPE_TICK_H +#define _COBALT_KERNEL_IPIPE_TICK_H + +int pipeline_install_tick_proxy(void); + +void pipeline_uninstall_tick_proxy(void); + +struct xnsched; +static inline bool pipeline_must_force_program_tick(struct xnsched *sched) +{ + return false; +} + +#endif /* !_COBALT_KERNEL_IPIPE_TICK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/trace.h new file mode 100644 index 0000000..a28b83a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/trace.h @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
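Hypothetical helpers showing the intended use of the TCB accessors above; the thread backlink may be NULL for tasks never attached to the Cobalt core:

    /* Hypothetical wrappers around the pipeline TCB accessors. */
    static struct xnthread *example_current_xnthread(void)
    {
        return pipeline_current()->thread;
    }

    static struct xnthread *example_task_xnthread(struct task_struct *p)
    {
        /* NULL if @p was never attached to the Cobalt core. */
        return pipeline_thread_from_task(p);
    }
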
+ */ +#ifndef _COBALT_KERNEL_IPIPE_TRACE_H +#define _COBALT_KERNEL_IPIPE_TRACE_H + +#include <linux/types.h> +#include <linux/kconfig.h> +#include <linux/ipipe_trace.h> +#include <cobalt/uapi/kernel/trace.h> + +static inline int xntrace_max_begin(unsigned long v) +{ + ipipe_trace_begin(v); + return 0; +} + +static inline int xntrace_max_end(unsigned long v) +{ + ipipe_trace_end(v); + return 0; +} + +static inline int xntrace_max_reset(void) +{ + ipipe_trace_max_reset(); + return 0; +} + +static inline int xntrace_user_start(void) +{ + return ipipe_trace_frozen_reset(); +} + +static inline int xntrace_user_stop(unsigned long v) +{ + ipipe_trace_freeze(v); + return 0; +} + +static inline int xntrace_user_freeze(unsigned long v, int once) +{ + int ret = 0; + + if (!once) + ret = ipipe_trace_frozen_reset(); + + ipipe_trace_freeze(v); + + return ret; +} + +static inline void xntrace_latpeak_freeze(int delay) +{ + xntrace_user_freeze(delay, 0); +} + +static inline int xntrace_special(unsigned char id, unsigned long v) +{ + ipipe_trace_special(id, v); + return 0; +} + +static inline int xntrace_special_u64(unsigned char id, + unsigned long long v) +{ + ipipe_trace_special(id, (unsigned long)(v >> 32)); + ipipe_trace_special(id, (unsigned long)(v & 0xFFFFFFFF)); + return 0; +} + +static inline int xntrace_pid(pid_t pid, short prio) +{ + ipipe_trace_pid(pid, prio); + return 0; +} + +static inline int xntrace_tick(unsigned long delay_ticks) +{ + ipipe_trace_event(0, delay_ticks); + return 0; +} + +static inline int xntrace_panic_freeze(void) +{ + ipipe_trace_panic_freeze(); + return 0; +} + +static inline int xntrace_panic_dump(void) +{ + ipipe_trace_panic_dump(); + return 0; +} + +static inline bool xntrace_enabled(void) +{ + return IS_ENABLED(CONFIG_IPIPE_TRACE); +} + +#endif /* !_COBALT_KERNEL_IPIPE_TRACE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/vdso_fallback.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/vdso_fallback.h new file mode 100644 index 0000000..f9ea388 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/vdso_fallback.h @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) Siemens AG, 2021 + */ + +#ifndef _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H +#define _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H + +static __always_inline bool +pipeline_handle_vdso_fallback(int nr, struct pt_regs *regs) +{ + return false; +} + +#endif /* !_COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/wrappers.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/wrappers.h new file mode 100644 index 0000000..dcf021e --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/wrappers.h @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ +#ifndef _COBALT_KERNEL_IPIPE_WRAPPERS_H +#define _COBALT_KERNEL_IPIPE_WRAPPERS_H + +#include <linux/ipipe.h> + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) +#define cobalt_set_task_state(tsk, state_value) \ + set_task_state(tsk, state_value) +#else +/* + * The co-kernel can still set the current task state safely if it + * runs on the head stage. 
+ */ +#define cobalt_set_task_state(tsk, state_value) \ + smp_store_mb((tsk)->state, (state_value)) +#endif + +#ifndef ipipe_root_nr_syscalls +#define ipipe_root_nr_syscalls(ti) NR_syscalls +#endif + +#endif /* !_COBALT_KERNEL_IPIPE_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h new file mode 100644 index 0000000..ec029ef --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_LIST_H +#define _COBALT_KERNEL_LIST_H + +#include <linux/list.h> + +#define __list_add_pri(__new, __head, __member_pri, __member_next, __relop) \ +do { \ + typeof(*__new) *__pos; \ + if (list_empty(__head)) \ + list_add(&(__new)->__member_next, __head); \ + else { \ + list_for_each_entry_reverse(__pos, __head, __member_next) { \ + if ((__new)->__member_pri __relop __pos->__member_pri) \ + break; \ + } \ + list_add(&(__new)->__member_next, &__pos->__member_next); \ + } \ +} while (0) + +#define list_add_priff(__new, __head, __member_pri, __member_next) \ + __list_add_pri(__new, __head, __member_pri, __member_next, <=) + +#define list_add_prilf(__new, __head, __member_pri, __member_next) \ + __list_add_pri(__new, __head, __member_pri, __member_next, <) + +#define list_get_entry(__head, __type, __member) \ + ({ \ + __type *__item; \ + __item = list_first_entry(__head, __type, __member); \ + list_del(&__item->__member); \ + __item; \ + }) + +#define list_get_entry_init(__head, __type, __member) \ + ({ \ + __type *__item; \ + __item = list_first_entry(__head, __type, __member); \ + list_del_init(&__item->__member); \ + __item; \ + }) + +#ifndef list_next_entry +#define list_next_entry(__item, __member) \ + list_entry((__item)->__member.next, typeof(*(__item)), __member) +#endif + +#endif /* !_COBALT_KERNEL_LIST_H_ */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h new file mode 100644 index 0000000..185f6e7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h @@ -0,0 +1,248 @@ +/* + * Copyright (C) 2001-2008,2012 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
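A hypothetical priority-ordered wait queue built on the list helpers above; with list_add_priff(), insertion is FIFO among entries of equal priority:

    /* Hypothetical priority queue (higher prio value sorts first). */
    struct example_waiter {
        int prio;
        struct list_head next;
    };

    static LIST_HEAD(example_queue);

    static void example_enqueue(struct example_waiter *w)
    {
        list_add_priff(w, &example_queue, prio, next);
    }

    static struct example_waiter *example_dequeue(void)
    {
        if (list_empty(&example_queue))
            return NULL;

        return list_get_entry(&example_queue, struct example_waiter, next);
    }
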
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_LOCK_H +#define _COBALT_KERNEL_LOCK_H + +#include <pipeline/lock.h> +#include <linux/percpu.h> +#include <cobalt/kernel/assert.h> +#include <pipeline/pipeline.h> + +/** + * @addtogroup cobalt_core_lock + * + * @{ + */ +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING + +struct xnlock { + unsigned owner; + arch_spinlock_t alock; + const char *file; + const char *function; + unsigned int line; + int cpu; + unsigned long long spin_time; + unsigned long long lock_date; +}; + +struct xnlockinfo { + unsigned long long spin_time; + unsigned long long lock_time; + const char *file; + const char *function; + unsigned int line; +}; + +#define XNARCH_LOCK_UNLOCKED (struct xnlock) { \ + ~0, \ + __ARCH_SPIN_LOCK_UNLOCKED, \ + NULL, \ + NULL, \ + 0, \ + -1, \ + 0LL, \ + 0LL, \ +} + +#define XNLOCK_DBG_CONTEXT , __FILE__, __LINE__, __FUNCTION__ +#define XNLOCK_DBG_CONTEXT_ARGS \ + , const char *file, int line, const char *function +#define XNLOCK_DBG_PASS_CONTEXT , file, line, function + +void xnlock_dbg_prepare_acquire(unsigned long long *start); +void xnlock_dbg_prepare_spin(unsigned int *spin_limit); +void xnlock_dbg_acquired(struct xnlock *lock, int cpu, + unsigned long long *start, + const char *file, int line, + const char *function); +int xnlock_dbg_release(struct xnlock *lock, + const char *file, int line, + const char *function); + +DECLARE_PER_CPU(struct xnlockinfo, xnlock_stats); + +#else /* !CONFIG_XENO_OPT_DEBUG_LOCKING */ + +struct xnlock { + unsigned owner; + arch_spinlock_t alock; +}; + +#define XNARCH_LOCK_UNLOCKED \ + (struct xnlock) { \ + ~0, \ + __ARCH_SPIN_LOCK_UNLOCKED, \ + } + +#define XNLOCK_DBG_CONTEXT +#define XNLOCK_DBG_CONTEXT_ARGS +#define XNLOCK_DBG_PASS_CONTEXT + +static inline +void xnlock_dbg_prepare_acquire(unsigned long long *start) +{ +} + +static inline +void xnlock_dbg_prepare_spin(unsigned int *spin_limit) +{ +} + +static inline void +xnlock_dbg_acquired(struct xnlock *lock, int cpu, + unsigned long long *start) +{ +} + +static inline int xnlock_dbg_release(struct xnlock *lock) +{ + return 0; +} + +#endif /* !CONFIG_XENO_OPT_DEBUG_LOCKING */ + +#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING) + +#define xnlock_get(lock) __xnlock_get(lock XNLOCK_DBG_CONTEXT) +#define xnlock_put(lock) __xnlock_put(lock XNLOCK_DBG_CONTEXT) +#define xnlock_get_irqsave(lock,x) \ + ((x) = __xnlock_get_irqsave(lock XNLOCK_DBG_CONTEXT)) +#define xnlock_put_irqrestore(lock,x) \ + __xnlock_put_irqrestore(lock,x XNLOCK_DBG_CONTEXT) +#define xnlock_clear_irqoff(lock) xnlock_put_irqrestore(lock, 1) +#define xnlock_clear_irqon(lock) xnlock_put_irqrestore(lock, 0) + +static inline void xnlock_init (struct xnlock *lock) +{ + *lock = XNARCH_LOCK_UNLOCKED; +} + +#define DECLARE_XNLOCK(lock) struct xnlock lock +#define DECLARE_EXTERN_XNLOCK(lock) extern struct xnlock lock +#define DEFINE_XNLOCK(lock) struct xnlock lock = XNARCH_LOCK_UNLOCKED +#define DEFINE_PRIVATE_XNLOCK(lock) static DEFINE_XNLOCK(lock) + +static inline int ____xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + int cpu = raw_smp_processor_id(); + unsigned long long start; + + if (lock->owner == cpu) + return 2; + + xnlock_dbg_prepare_acquire(&start); + + arch_spin_lock(&lock->alock); + lock->owner = cpu; + + xnlock_dbg_acquired(lock, cpu, &start /*, */ 
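A hypothetical sketch of the canonical critical section built on the macros above; nesting on the owning CPU is allowed, the recursion bit (2) in the saved mask ensuring that only the outermost put actually releases the lock (nklock itself is declared at the end of this header):

    /* Hypothetical sketch: the canonical nklock critical section. */
    static void example_nklock_section(void)
    {
        spl_t s;

        xnlock_get_irqsave(&nklock, s);
        /* ... atomically update core scheduler state ... */
        xnlock_put_irqrestore(&nklock, s);
    }
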
XNLOCK_DBG_PASS_CONTEXT); + + return 0; +} + +static inline void ____xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + if (xnlock_dbg_release(lock /*, */ XNLOCK_DBG_PASS_CONTEXT)) + return; + + lock->owner = ~0U; + arch_spin_unlock(&lock->alock); +} + +#ifndef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK +#define ___xnlock_get ____xnlock_get +#define ___xnlock_put ____xnlock_put +#else /* out of line xnlock */ +int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS); + +void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS); +#endif /* out of line xnlock */ + +static inline spl_t +__xnlock_get_irqsave(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + unsigned long flags; + + splhigh(flags); + + if (__locking_active__) + flags |= ___xnlock_get(lock /*, */ XNLOCK_DBG_PASS_CONTEXT); + + return flags; +} + +static inline void __xnlock_put_irqrestore(struct xnlock *lock, spl_t flags + /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + /* Only release the lock if we didn't take it recursively. */ + if (__locking_active__ && !(flags & 2)) + ___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT); + + splexit(flags & 1); +} + +static inline int xnlock_is_owner(struct xnlock *lock) +{ + if (__locking_active__) + return lock->owner == raw_smp_processor_id(); + + return 1; +} + +static inline int __xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + if (__locking_active__) + return ___xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT); + + return 0; +} + +static inline void __xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + if (__locking_active__) + ___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT); +} + +#undef __locking_active__ + +#else /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */ + +#define xnlock_init(lock) do { } while(0) +#define xnlock_get(lock) do { } while(0) +#define xnlock_put(lock) do { } while(0) +#define xnlock_get_irqsave(lock,x) splhigh(x) +#define xnlock_put_irqrestore(lock,x) splexit(x) +#define xnlock_clear_irqoff(lock) splmax() +#define xnlock_clear_irqon(lock) splnone() +#define xnlock_is_owner(lock) 1 + +#define DECLARE_XNLOCK(lock) +#define DECLARE_EXTERN_XNLOCK(lock) +#define DEFINE_XNLOCK(lock) +#define DEFINE_PRIVATE_XNLOCK(lock) + +#endif /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */ + +DECLARE_EXTERN_XNLOCK(nklock); + +/** @} */ + +#endif /* !_COBALT_KERNEL_LOCK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h new file mode 100644 index 0000000..a402df5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_KERNEL_MAP_H +#define _COBALT_KERNEL_MAP_H + +#include <asm/bitsperlong.h> + +/** + * @addtogroup cobalt_core_map + * @{ + */ + +#define XNMAP_MAX_KEYS (BITS_PER_LONG * BITS_PER_LONG) + +struct xnmap { + int nkeys; + int ukeys; + int offset; + unsigned long himask; + unsigned long himap; +#define __IDMAP_LONGS ((XNMAP_MAX_KEYS+BITS_PER_LONG-1)/BITS_PER_LONG) + unsigned long lomap[__IDMAP_LONGS]; +#undef __IDMAP_LONGS + void *objarray[1]; +}; + +struct xnmap *xnmap_create(int nkeys, + int reserve, + int offset); + +void xnmap_delete(struct xnmap *map); + +int xnmap_enter(struct xnmap *map, + int key, + void *objaddr); + +int xnmap_remove(struct xnmap *map, + int key); + +static inline void *xnmap_fetch_nocheck(struct xnmap *map, int key) +{ + int ofkey = key - map->offset; + return map->objarray[ofkey]; +} + +static inline void *xnmap_fetch(struct xnmap *map, int key) +{ + int ofkey = key - map->offset; + + if (ofkey < 0 || ofkey >= map->nkeys) + return NULL; + + return map->objarray[ofkey]; +} + +/** @} */ + +#endif /* !_COBALT_KERNEL_MAP_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h new file mode 100644 index 0000000..8a82c7b --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA + * 02139, USA; either version 2 of the License, or (at your option) + * any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
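A hypothetical descriptor table built on the xnmap interface above; key-allocation conventions beyond explicit keys are left to the implementation and not relied upon here:

    /* Hypothetical table: 256 keys, none reserved, keys based at 1. */
    static struct xnmap *example_map;

    static int example_map_setup(void)
    {
        example_map = xnmap_create(256, 0, 1);

        return example_map ? 0 : -ENOMEM;
    }

    static int example_map_bind(int key, void *obj)
    {
        return xnmap_enter(example_map, key, obj);
    }

    static void *example_map_lookup(int key)
    {
        return xnmap_fetch(example_map, key);
    }
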
+ */ +#ifndef _COBALT_KERNEL_PIPE_H +#define _COBALT_KERNEL_PIPE_H + +#include <linux/types.h> +#include <linux/poll.h> +#include <cobalt/kernel/synch.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/uapi/kernel/pipe.h> + +#define XNPIPE_NDEVS CONFIG_XENO_OPT_PIPE_NRDEV +#define XNPIPE_DEV_MAJOR 150 + +#define XNPIPE_KERN_CONN 0x1 +#define XNPIPE_KERN_LCLOSE 0x2 +#define XNPIPE_USER_CONN 0x4 +#define XNPIPE_USER_SIGIO 0x8 +#define XNPIPE_USER_WREAD 0x10 +#define XNPIPE_USER_WREAD_READY 0x20 +#define XNPIPE_USER_WSYNC 0x40 +#define XNPIPE_USER_WSYNC_READY 0x80 +#define XNPIPE_USER_LCONN 0x100 + +#define XNPIPE_USER_ALL_WAIT \ +(XNPIPE_USER_WREAD|XNPIPE_USER_WSYNC) + +#define XNPIPE_USER_ALL_READY \ +(XNPIPE_USER_WREAD_READY|XNPIPE_USER_WSYNC_READY) + +struct xnpipe_mh { + size_t size; + size_t rdoff; + struct list_head link; +}; + +struct xnpipe_state; + +struct xnpipe_operations { + void (*output)(struct xnpipe_mh *mh, void *xstate); + int (*input)(struct xnpipe_mh *mh, int retval, void *xstate); + void *(*alloc_ibuf)(size_t size, void *xstate); + void (*free_ibuf)(void *buf, void *xstate); + void (*free_obuf)(void *buf, void *xstate); + void (*release)(void *xstate); +}; + +struct xnpipe_state { + struct list_head slink; /* Link on sleep queue */ + struct list_head alink; /* Link on async queue */ + + struct list_head inq; /* From user-space to kernel */ + int nrinq; + struct list_head outq; /* From kernel to user-space */ + int nroutq; + struct xnsynch synchbase; + struct xnpipe_operations ops; + void *xstate; /* Extra state managed by caller */ + + /* Linux kernel part */ + unsigned long status; + struct fasync_struct *asyncq; + wait_queue_head_t readq; /* open/read/poll waiters */ + wait_queue_head_t syncq; /* sync waiters */ + int wcount; /* number of waiters on this minor */ + size_t ionrd; +}; + +extern struct xnpipe_state xnpipe_states[]; + +#define xnminor_from_state(s) (s - xnpipe_states) + +#ifdef CONFIG_XENO_OPT_PIPE +int xnpipe_mount(void); +void xnpipe_umount(void); +#else /* !CONFIG_XENO_OPT_PIPE */ +static inline int xnpipe_mount(void) { return 0; } +static inline void xnpipe_umount(void) { } +#endif /* !CONFIG_XENO_OPT_PIPE */ + +/* Entry points of the kernel interface. 
*/ + +int xnpipe_connect(int minor, + struct xnpipe_operations *ops, void *xstate); + +int xnpipe_disconnect(int minor); + +ssize_t xnpipe_send(int minor, + struct xnpipe_mh *mh, size_t size, int flags); + +ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size); + +ssize_t xnpipe_recv(int minor, + struct xnpipe_mh **pmh, xnticks_t timeout); + +int xnpipe_flush(int minor, int mode); + +int xnpipe_pollstate(int minor, unsigned int *mask_r); + +static inline unsigned int __xnpipe_pollstate(int minor) +{ + struct xnpipe_state *state = xnpipe_states + minor; + unsigned int mask = POLLOUT; + + if (!list_empty(&state->inq)) + mask |= POLLIN; + + return mask; +} + +static inline char *xnpipe_m_data(struct xnpipe_mh *mh) +{ + return (char *)(mh + 1); +} + +#define xnpipe_m_size(mh) ((mh)->size) + +#define xnpipe_m_rdoff(mh) ((mh)->rdoff) + +#endif /* !_COBALT_KERNEL_PIPE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h new file mode 100644 index 0000000..f0079fe --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h @@ -0,0 +1,42 @@ +/* + * Copyright © 2006 Gilles Chanteperdrix <gch@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_PPD_H +#define _COBALT_KERNEL_PPD_H + +#include <linux/types.h> +#include <linux/atomic.h> +#include <linux/rbtree.h> +#include <cobalt/kernel/heap.h> + +struct cobalt_umm { + struct xnheap heap; + atomic_t refcount; + void (*release)(struct cobalt_umm *umm); +}; + +struct cobalt_ppd { + struct cobalt_umm umm; + atomic_t refcnt; + char *exe_path; + struct rb_root fds; +}; + +extern struct cobalt_ppd cobalt_kernel_ppd; + +#endif /* _COBALT_KERNEL_PPD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h new file mode 100644 index 0000000..a459da5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
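The refcount/release pairing in struct cobalt_umm above suggests the usual get/put discipline; a hypothetical put helper, assuming only the fields shown in this header:

    /* Hypothetical put helper for a refcounted UMM segment. */
    static void example_umm_put(struct cobalt_umm *umm)
    {
        if (atomic_dec_and_test(&umm->refcount) && umm->release)
            umm->release(umm);
    }
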
*/ +#ifndef _COBALT_KERNEL_REGISTRY_H +#define _COBALT_KERNEL_REGISTRY_H + +#include <cobalt/kernel/list.h> +#include <cobalt/kernel/synch.h> +#include <cobalt/kernel/vfile.h> + +/** + * @addtogroup cobalt_core_registry + * + * @{ + */ +struct xnpnode; + +struct xnobject { + void *objaddr; + const char *key; /* !< Hash key. May be NULL if anonymous. */ + unsigned long cstamp; /* !< Creation stamp. */ +#ifdef CONFIG_XENO_OPT_VFILE + struct xnpnode *pnode; /* !< v-file information class. */ + union { + struct { + struct xnvfile_rev_tag tag; + struct xnvfile_snapshot file; + } vfsnap; /* !< virtual snapshot file. */ + struct xnvfile_regular vfreg; /* !< virtual regular file */ + struct xnvfile_link link; /* !< virtual link. */ + } vfile_u; + struct xnvfile *vfilp; +#endif /* CONFIG_XENO_OPT_VFILE */ + struct hlist_node hlink; /* !< Link in h-table */ + struct list_head link; +}; + +int xnregistry_init(void); + +void xnregistry_cleanup(void); + +#ifdef CONFIG_XENO_OPT_VFILE + +#define XNOBJECT_EXPORT_SCHEDULED ((struct xnvfile *)1L) +#define XNOBJECT_EXPORT_INPROGRESS ((struct xnvfile *)2L) +#define XNOBJECT_EXPORT_ABORTED ((struct xnvfile *)3L) + +struct xnptree { + const char *dirname; + /* hidden */ + int entries; + struct xnvfile_directory vdir; +}; + +#define DEFINE_XNPTREE(__var, __name) \ + struct xnptree __var = { \ + .dirname = __name, \ + .entries = 0, \ + .vdir = xnvfile_nodir, \ + } + +struct xnpnode_ops { + int (*export)(struct xnobject *object, struct xnpnode *pnode); + void (*unexport)(struct xnobject *object, struct xnpnode *pnode); + void (*touch)(struct xnobject *object); +}; + +struct xnpnode { + const char *dirname; + struct xnptree *root; + struct xnpnode_ops *ops; + /* hidden */ + int entries; + struct xnvfile_directory vdir; +}; + +struct xnpnode_snapshot { + struct xnpnode node; + struct xnvfile_snapshot_template vfile; +}; + +struct xnpnode_regular { + struct xnpnode node; + struct xnvfile_regular_template vfile; +}; + +struct xnpnode_link { + struct xnpnode node; + char *(*target)(void *obj); +}; + +#else /* !CONFIG_XENO_OPT_VFILE */ + +#define DEFINE_XNPTREE(__var, __name); + +/* Placeholders. */ + +struct xnpnode { + const char *dirname; +}; + +struct xnpnode_snapshot { + struct xnpnode node; +}; + +struct xnpnode_regular { + struct xnpnode node; +}; + +struct xnpnode_link { + struct xnpnode node; +}; + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +/* Public interface. */ + +extern struct xnobject *registry_obj_slots; + +static inline struct xnobject *xnregistry_validate(xnhandle_t handle) +{ + struct xnobject *object; + /* + * Careful: a removed object which is still in flight to be + * unexported carries a NULL objaddr, so we have to check this + * as well. + */ + handle = xnhandle_get_index(handle); + if (likely(handle && handle < CONFIG_XENO_OPT_REGISTRY_NRSLOTS)) { + object = &registry_obj_slots[handle]; + return object->objaddr ? object : NULL; + } + + return NULL; +} + +static inline const char *xnregistry_key(xnhandle_t handle) +{ + struct xnobject *object = xnregistry_validate(handle); + return object ? 
object->key : NULL; +} + +int xnregistry_enter(const char *key, + void *objaddr, + xnhandle_t *phandle, + struct xnpnode *pnode); + +static inline int +xnregistry_enter_anon(void *objaddr, xnhandle_t *phandle) +{ + return xnregistry_enter(NULL, objaddr, phandle, NULL); +} + +int xnregistry_bind(const char *key, + xnticks_t timeout, + int timeout_mode, + xnhandle_t *phandle); + +int xnregistry_remove(xnhandle_t handle); + +static inline +void *xnregistry_lookup(xnhandle_t handle, + unsigned long *cstamp_r) +{ + struct xnobject *object = xnregistry_validate(handle); + + if (object == NULL) + return NULL; + + if (cstamp_r) + *cstamp_r = object->cstamp; + + return object->objaddr; +} + +int xnregistry_unlink(const char *key); + +unsigned xnregistry_hash_size(void); + +extern struct xnpnode_ops xnregistry_vfsnap_ops; + +extern struct xnpnode_ops xnregistry_vlink_ops; + +extern struct xnpnode_ops xnregistry_vfreg_ops; + +/** @} */ + +#endif /* !_COBALT_KERNEL_REGISTRY_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/Makefile.am new file mode 100644 index 0000000..fe2e4d5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/Makefile.am @@ -0,0 +1,17 @@ + +noinst_HEADERS = \ + autotune.h \ + can.h \ + cobalt.h \ + compat.h \ + driver.h \ + fd.h \ + gpio.h \ + ipc.h \ + net.h \ + rtdm.h \ + serial.h \ + testing.h \ + udd.h + +SUBDIRS = analogy diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/Makefile.am new file mode 100644 index 0000000..9b2b34f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/Makefile.am @@ -0,0 +1,12 @@ + +noinst_HEADERS = \ + buffer.h \ + channel_range.h \ + command.h \ + context.h \ + device.h \ + driver.h \ + instruction.h \ + rtdm_helpers.h \ + subdevice.h \ + transfer.h diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h new file mode 100644 index 0000000..e1a0cc9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h @@ -0,0 +1,461 @@ +/* + * Analogy for Linux, buffer related features + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+#ifndef _COBALT_RTDM_ANALOGY_BUFFER_H
+#define _COBALT_RTDM_ANALOGY_BUFFER_H
+
+#include <linux/version.h>
+#include <linux/mm.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/context.h>
+#include <rtdm/analogy/command.h>
+#include <rtdm/analogy/subdevice.h>
+
+/* --- Events bits / flags --- */
+
+#define A4L_BUF_EOBUF_NR 0
+#define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR)
+
+#define A4L_BUF_ERROR_NR 1
+#define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR)
+
+#define A4L_BUF_EOA_NR 2
+#define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR)
+
+/* --- Status bits / flags --- */
+
+#define A4L_BUF_BULK_NR 8
+#define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR)
+
+#define A4L_BUF_MAP_NR 9
+#define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR)
+
+
+/* Buffer descriptor structure */
+struct a4l_buffer {
+
+	/* Added by the structure update */
+	struct a4l_subdevice *subd;
+
+	/* Buffer's first virtual page pointer */
+	void *buf;
+
+	/* Buffer's global size */
+	unsigned long size;
+	/* Tab containing buffer's pages pointers */
+	unsigned long *pg_list;
+
+	/* RT/NRT synchronization element */
+	struct a4l_sync sync;
+
+	/* Counters needed for transfer */
+	unsigned long end_count;
+	unsigned long prd_count;
+	unsigned long cns_count;
+	unsigned long tmp_count;
+
+	/* Status + events occurring during transfer */
+	unsigned long flags;
+
+	/* Command in progress */
+	struct a4l_cmd_desc *cur_cmd;
+
+	/* Munge counter */
+	unsigned long mng_count;
+
+	/* Threshold below which the user process should not be
+	   awakened */
+	unsigned long wake_count;
+};
+
+static inline void __dump_buffer_counters(struct a4l_buffer *buf)
+{
+	__a4l_dbg(1, core_dbg, "a4l_buffer=0x%p, p=0x%p \n", buf, buf->buf);
+	__a4l_dbg(1, core_dbg, "end=%06ld, prd=%06ld, cns=%06ld, tmp=%06ld \n",
+		  buf->end_count, buf->prd_count, buf->cns_count, buf->tmp_count);
+}
+
+/* --- Static inline functions related with
+   user<->kernel data transfers --- */
+
+/* The function __produce is an inline function which copies data into
+   the asynchronous buffer and takes care of the non-contiguous issue
+   when looping. This function is used in read and write operations */
+static inline int __produce(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pin, unsigned long count)
+{
+	unsigned long start_ptr = (buf->prd_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(buf->buf + start_ptr, pin, blk_size);
+		else
+			ret = rtdm_safe_copy_from_user(fd,
+						       buf->buf + start_ptr,
+						       pin, blk_size);
+
+		/* Update pointers/counts */
+		pin += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The function __consume is an inline function which copies data from
+   the asynchronous buffer and takes care of the non-contiguous issue
+   when looping.
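+   It mirrors __produce above: data flows out of the ring buffer
+   instead of into it, with the same wrap-around handling.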
+   This function is used in read and write operations */
+static inline int __consume(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pout, unsigned long count)
+{
+	unsigned long start_ptr = (buf->cns_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(pout, buf->buf + start_ptr, blk_size);
+		else
+			ret = rtdm_safe_copy_to_user(fd,
+						     pout,
+						     buf->buf + start_ptr,
+						     blk_size);
+
+		/* Update pointers/counts */
+		pout += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The function __munge is an inline function which calls the
+   subdevice specific munge callback on contiguous windows within the
+   whole buffer. This function is used in read and write operations */
+static inline void __munge(struct a4l_subdevice * subd,
+			   void (*munge) (struct a4l_subdevice *,
+					  void *, unsigned long),
+			   struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long start_ptr = (buf->mng_count % buf->size);
+	unsigned long tmp_cnt = count;
+
+	while (tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the munge operation */
+		munge(subd, buf->buf + start_ptr, blk_size);
+
+		/* Update the start pointer and the count */
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+}
+
+/* The function __handle_event can only be called from process context
+   (not interrupt service routine). It allows the client process to
+   retrieve the buffer status which has been updated by the driver */
+static inline int __handle_event(struct a4l_buffer * buf)
+{
+	int ret = 0;
+
+	/* The event "End of acquisition" must not be cleared
+	   before the complete flush of the buffer */
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
+		ret = -ENOENT;
+
+	if (test_bit(A4L_BUF_ERROR_NR, &buf->flags))
+		ret = -EPIPE;
+
+	return ret;
+}
+
+/* --- Counters management functions --- */
+
+/* Here, we may wonder why we need more than two counters / pointers.
+
+   Theoretically, we only need two counters (or two pointers):
+   - one which tells where the reader should be within the buffer
+   - one which tells where the writer should be within the buffer
+
+   With these two counters (or pointers), we just have to check that
+   the writer does not overtake the reader inside the ring buffer
+   BEFORE any read / write operations.
+
+   However, if one element is a DMA controller, we have to be more
+   careful. Generally a DMA transfer occurs like this:
+   DMA shot
+      |-> then DMA interrupt
+      |-> then DMA soft handler which checks the counter
+
+   So, the checks occur AFTER the write operations.
+
+   Let's take an example: the reader is a software task and the writer
+   is a DMA controller. At the end of the DMA shot, the write counter
+   is higher than the read counter. Unfortunately, a read operation
+   occurs between the DMA shot and the DMA interrupt, so the handler
+   will not notice that an overflow occurred.
+
+   That is why tmp_count comes into play: tmp_count records the
+   read/consumer current counter before the next DMA shot and once the
+   next DMA shot is done, we check that the updated writer/producer
+   counter has not moved past tmp_count by more than one buffer size.
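+   For instance (hypothetical numbers), take a 1000-byte buffer and
+   suppose tmp_count recorded a consumer count of 200 before the DMA
+   shot; if the shot then reports a producer count of 1500, the check
+   in __pre_abs_put() sees 1500 - 200 = 1300 > 1000 and flags the
+   overflow.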
+   Thus we are sure that the DMA
+   writer has not overtaken the reader because it was not able to
+   overtake the n-1 value. */
+
+static inline int __pre_abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	if (count - buf->tmp_count > buf->size) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+	buf->tmp_count = buf->cns_count;
+
+	return 0;
+}
+
+static inline int __pre_put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_put(buf, buf->tmp_count + count);
+}
+
+static inline int __pre_abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	/* The first time, we expect the buffer to be properly filled
+	   before the trigger occurrence; in any case, we need tmp_count
+	   to have been initialized, and tmp_count is updated right here */
+	if (buf->tmp_count == 0 || buf->cns_count == 0)
+		goto out;
+
+	/* At the end of the acquisition, the user application has
+	   written the defined amount of data into the buffer; so the
+	   last time, the DMA channel can easily overtake the tmp
+	   frontier because no more data were sent from user space;
+	   therefore no useless alarm should be sent */
+	if (buf->end_count != 0 && (long)(count - buf->end_count) > 0)
+		goto out;
+
+	/* Once these exceptional cases are handled, we check that the
+	   DMA transfer has not overtaken the last record of the production
+	   count (tmp_count was updated with prd_count the last time
+	   __pre_abs_get was called). We must understand that we cannot
+	   compare the current DMA count with the current production
+	   count because even if, right now, the production count is
+	   higher than the DMA count, it does not mean that the DMA count
+	   was not greater a few cycles before; in such a case, the DMA
+	   channel would have retrieved the wrong data */
+	if ((long)(count - buf->tmp_count) > 0) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+out:
+	buf->tmp_count = buf->prd_count;
+
+	return 0;
+}
+
+static inline int __pre_get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_get(buf, buf->tmp_count + count);
+}
+
+static inline int __abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->prd_count;
+
+	if ((long)(buf->prd_count - count) >= 0)
+		return -EINVAL;
+
+	buf->prd_count = count;
+
+	if ((old / buf->size) != (count / buf->size))
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_put(buf, buf->prd_count + count);
+}
+
+static inline int __abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->cns_count;
+
+	if ((long)(buf->cns_count - count) >= 0)
+		return -EINVAL;
+
+	buf->cns_count = count;
+
+	if ((old / buf->size) != (count / buf->size))
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_get(buf, buf->cns_count + count);
+}
+
+static inline unsigned long __count_to_put(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	if ((long) (buf->size + buf->cns_count - buf->prd_count) > 0)
+		ret = buf->size + buf->cns_count - buf->prd_count;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static inline unsigned long __count_to_get(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	/* If the acquisition is unlimited (end_count
== 0), we must + not take into account end_count */ + if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0) + ret = buf->prd_count; + else + ret = buf->end_count; + + if ((long)(ret - buf->cns_count) > 0) + ret -= buf->cns_count; + else + ret = 0; + + return ret; +} + +static inline unsigned long __count_to_end(struct a4l_buffer * buf) +{ + unsigned long ret = buf->end_count - buf->cns_count; + + if (buf->end_count == 0) + return ULONG_MAX; + + return ((long)ret) < 0 ? 0 : ret; +} + +/* --- Buffer internal functions --- */ + +int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size); + +void a4l_free_buffer(struct a4l_buffer *buf_desc); + +void a4l_init_buffer(struct a4l_buffer * buf_desc); + +void a4l_cleanup_buffer(struct a4l_buffer * buf_desc); + +int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd); + +void a4l_cancel_buffer(struct a4l_device_context *cxt); + +int a4l_buf_prepare_absput(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_commit_absput(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_prepare_put(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_commit_put(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_put(struct a4l_subdevice *subd, + void *bufdata, unsigned long count); + +int a4l_buf_prepare_absget(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_commit_absget(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_prepare_get(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_commit_get(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_get(struct a4l_subdevice *subd, + void *bufdata, unsigned long count); + +int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts); + +unsigned long a4l_buf_count(struct a4l_subdevice *subd); + +/* --- Current Command management function --- */ + +static inline struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice *subd) +{ + return (subd->buf) ? subd->buf->cur_cmd : NULL; +} + +/* --- Munge related function --- */ + +int a4l_get_chan(struct a4l_subdevice *subd); + +/* --- IOCTL / FOPS functions --- */ + +int a4l_ioctl_mmap(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg); +ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes); +ssize_t a4l_write_buffer(struct a4l_device_context * cxt, const void *bufdata, size_t nbytes); +int a4l_select(struct a4l_device_context *cxt, + rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned fd_index); + +#endif /* !_COBALT_RTDM_ANALOGY_BUFFER_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h new file mode 100644 index 0000000..2a16e30 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h @@ -0,0 +1,272 @@ +/** + * @file + * Analogy for Linux, channel, range related features + * + * Copyright (C) 1997-2000 David A. 
Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+#define _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+
+#include <rtdm/uapi/analogy.h>
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_channel_range Channels and ranges
+ *
+ * Channels
+ *
+ * According to the Analogy nomenclature, the channel is the elementary
+ * acquisition entity. One channel is supposed to acquire one data item
+ * at a time. A channel can be:
+ * - an analog input or an analog output;
+ * - a digital input or a digital output;
+ *
+ * Channels are defined by their type and by some other
+ * characteristics like:
+ * - their resolutions for analog channels (which usually range from
+   8 to 32 bits);
+ * - their references;
+ *
+ * Such parameters must be declared for each channel composing a
+ * subdevice. The structure a4l_channel (struct a4l_channel) is used to
+ * define one channel.
+ *
+ * Another structure named a4l_channels_desc (struct a4l_channels_desc)
+ * gathers all channels for a specific subdevice. This latter
+ * structure also stores:
+ * - the channels count;
+ * - the channels declaration mode (A4L_CHAN_GLOBAL_CHANDESC or
+   A4L_CHAN_PERCHAN_CHANDESC): if all the channels composing a
+   subdevice are identical, there is no need to declare the
+   parameters for each channel; the global declaration mode eases
+   the structure composition.
+ *
+ * Usually the channels descriptor looks like this:
+ * <tt> @verbatim
+struct a4l_channels_desc example_chan = {
+	mode: A4L_CHAN_GLOBAL_CHANDESC, -> Global declaration
+					   mode is set
+	length: 8, -> 8 channels
+	chans: {
+		{A4L_CHAN_AREF_GROUND, 16}, -> Each channel is 16 bits
+					       wide with the ground as
+					       reference
+	},
+};
+@endverbatim </tt>
+ *
+ * Ranges
+ *
+ * So as to perform conversion from logical values acquired by the
+ * device to physical units, some range structure(s) must be declared
+ * on the driver side.
+ *
+ * Such structures contain:
+ * - the physical unit type (Volt, Ampere, none);
+ * - the minimal and maximal values;
+ *
+ * These range structures must be associated with the channels at
+ * subdevice registration time as a channel can work with many
+ * ranges. At configuration time (thanks to an Analogy command), one
+ * range will be selected for each enabled channel.
+ *
+ * Consequently, for each channel, the developer must declare all the
+ * possible ranges in a structure called struct a4l_rngtab. Here is an
+ * example:
+ * <tt> @verbatim
+struct a4l_rngtab example_tab = {
+	length: 2,
+	rngs: {
+		RANGE_V(-5,5),
+		RANGE_V(-10,10),
+	},
+};
+@endverbatim </tt>
+ *
+ * For each subdevice, a specific structure is designed to gather all
+ * the ranges tabs of all the channels. In this structure, called
+ * struct a4l_rngdesc, three fields must be filled:
+ * - the declaration mode (A4L_RNG_GLOBAL_RNGDESC or
+ *   A4L_RNG_PERCHAN_RNGDESC);
+ * - the number of ranges tabs;
+ * - the tab of pointers to the ranges tabs;
+ *
+ * Most of the time, the channels which belong to the same subdevice
+ * use the same set of ranges. So, there is no need to declare the
+ * same ranges for each channel. A macro is defined to prevent
+ * redundant declarations: RNG_GLOBAL().
+ *
+ * Here is an example:
+ * <tt> @verbatim
+struct a4l_rngdesc example_rng = RNG_GLOBAL(example_tab);
+@endverbatim </tt>
+ *
+ * @{
+ */
+
+
+/* --- Channel section --- */
+
+/*!
+ * @anchor A4L_CHAN_AREF_xxx @name Channel reference
+ * @brief Flags to define the channel's reference
+ * @{
+ */
+
+/**
+ * Ground reference
+ */
+#define A4L_CHAN_AREF_GROUND 0x1
+/**
+ * Common reference
+ */
+#define A4L_CHAN_AREF_COMMON 0x2
+/**
+ * Differential reference
+ */
+#define A4L_CHAN_AREF_DIFF 0x4
+/**
+ * Misc reference
+ */
+#define A4L_CHAN_AREF_OTHER 0x8
+
+	/*! @} A4L_CHAN_AREF_xxx */
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_CHAN_GLOBAL 0x10
+
+/*!
+ * @brief Structure describing some channel's characteristics
+ */
+
+struct a4l_channel {
+	unsigned long flags; /*!< Channel flags to define the reference. */
+	unsigned long nb_bits; /*!< Channel resolution. */
+};
+
+/*!
+ * @anchor A4L_CHAN_xxx @name Channels declaration mode
+ * @brief Constant to define whether the channels in a descriptor are
+ * identical
+ * @{
+ */
+
+/**
+ * Global declaration, the set contains channels with similar
+ * characteristics
+ */
+#define A4L_CHAN_GLOBAL_CHANDESC 0
+/**
+ * Per channel declaration, the descriptor gathers different channels
+ */
+#define A4L_CHAN_PERCHAN_CHANDESC 1
+
+	/*! @} A4L_CHAN_xxx */
+
+/*!
+ * @brief Structure describing a channels set
+ */
+
+struct a4l_channels_desc {
+	unsigned long mode; /*!< Declaration mode (global or per channel) */
+	unsigned long length; /*!< Channels count */
+	struct a4l_channel chans[]; /*!< Channels tab */
+};
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_RNG_GLOBAL 0x8
+
+/*!
+ * @brief Structure describing a (unique) range
+ */
+
+struct a4l_range {
+	long min; /*!< Minimal value */
+	long max; /*!< Maximal value */
+	unsigned long flags; /*!< Range flags (unit, etc.)
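+			       The unit is one of the A4L_RNG_*_UNIT
+			       flags set by the RANGE(), RANGE_V(),
+			       RANGE_mA() and RANGE_ext() macros below.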
*/ +}; + +/** + * Macro to declare a (unique) range with no unit defined + */ +#define RANGE(x,y) {(x * A4L_RNG_FACTOR), (y * A4L_RNG_FACTOR), \ + A4L_RNG_NO_UNIT} +/** + * Macro to declare a (unique) range in Volt + */ +#define RANGE_V(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \ + A4L_RNG_VOLT_UNIT} +/** + * Macro to declare a (unique) range in milliAmpere + */ +#define RANGE_mA(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \ + A4L_RNG_MAMP_UNIT} +/** + * Macro to declare a (unique) range in some external reference + */ +#define RANGE_ext(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \ + A4L_RNG_EXT_UNIT} + + +/* Ranges tab descriptor */ +struct a4l_rngtab { + unsigned char length; + struct a4l_range rngs[]; +}; + +/** + * Constant to define a ranges descriptor as global (inter-channel) + */ +#define A4L_RNG_GLOBAL_RNGDESC 0 +/** + * Constant to define a ranges descriptor as specific for a channel + */ +#define A4L_RNG_PERCHAN_RNGDESC 1 + +/* Global ranges descriptor */ +struct a4l_rngdesc { + unsigned char mode; + unsigned char length; + struct a4l_rngtab *rngtabs[]; +}; + +/** + * Macro to declare a ranges global descriptor in one line + */ +#define RNG_GLOBAL(x) { \ + .mode = A4L_RNG_GLOBAL_RNGDESC, \ + .length = 1, \ + .rngtabs = {&(x)}, \ +} + +extern struct a4l_rngdesc a4l_range_bipolar10; +extern struct a4l_rngdesc a4l_range_bipolar5; +extern struct a4l_rngdesc a4l_range_unipolar10; +extern struct a4l_rngdesc a4l_range_unipolar5; +extern struct a4l_rngdesc a4l_range_unknown; +extern struct a4l_rngdesc a4l_range_fake; + +#define range_digital a4l_range_unipolar5 + +/*! @} channelrange */ + +#endif /* !_COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h new file mode 100644 index 0000000..89f7cca --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h @@ -0,0 +1,35 @@ +/** + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_RTDM_ANALOGY_COMMAND_H +#define _COBALT_RTDM_ANALOGY_COMMAND_H + +#include <rtdm/uapi/analogy.h> +#include <rtdm/analogy/context.h> + +#define CR_CHAN(a) CHAN(a) +#define CR_RNG(a) (((a)>>16)&0xff) +#define CR_AREF(a) (((a)>>24)&0xf) + +/* --- Command related function --- */ +void a4l_free_cmddesc(struct a4l_cmd_desc * desc); + +/* --- Upper layer functions --- */ +int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc); +int a4l_ioctl_cmd(struct a4l_device_context * cxt, void *arg); + +#endif /* !_COBALT_RTDM_ANALOGY_COMMAND_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h new file mode 100644 index 0000000..f619f9c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h @@ -0,0 +1,48 @@ +/* + * Analogy for Linux, context structure / macros declarations + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_CONTEXT_H +#define _COBALT_RTDM_ANALOGY_CONTEXT_H + +#include <rtdm/driver.h> + +struct a4l_device; +struct a4l_buffer; + +struct a4l_device_context { + /* The adequate device pointer + (retrieved thanks to minor at open time) */ + struct a4l_device *dev; + + /* The buffer structure contains everything to transfer data + from asynchronous acquisition operations on a specific + subdevice */ + struct a4l_buffer *buffer; +}; + +static inline int a4l_get_minor(struct a4l_device_context *cxt) +{ + /* Get a pointer on the container structure */ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + /* Get the minor index */ + return rtdm_fd_minor(fd); +} + +#endif /* !_COBALT_RTDM_ANALOGY_CONTEXT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h new file mode 100644 index 0000000..93ecf66 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h @@ -0,0 +1,67 @@ +/* + * Analogy for Linux, device related features + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_DEVICE_H +#define _COBALT_RTDM_ANALOGY_DEVICE_H + +#include <rtdm/analogy/rtdm_helpers.h> +#include <rtdm/analogy/transfer.h> +#include <rtdm/analogy/driver.h> + +#define A4L_NB_DEVICES 10 + +#define A4L_DEV_ATTACHED_NR 0 + +struct a4l_device { + + /* Spinlock for global device use */ + rtdm_lock_t lock; + + /* Device specific flags */ + unsigned long flags; + + /* Driver assigned to this device thanks to attaching + procedure */ + struct a4l_driver *driver; + + /* Hidden description stuff */ + struct list_head subdvsq; + + /* Context-dependent stuff */ + struct a4l_transfer transfer; + + /* Private data useful for drivers functioning */ + void *priv; +}; + +/* --- Devices tab related functions --- */ +void a4l_init_devs(void); +int a4l_check_cleanup_devs(void); +int a4l_rdproc_devs(struct seq_file *p, void *data); + +/* --- Context related function / macro --- */ +void a4l_set_dev(struct a4l_device_context *cxt); +#define a4l_get_dev(x) ((x)->dev) + +/* --- Upper layer functions --- */ +int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg); + +#endif /* !_COBALT_RTDM_ANALOGY_DEVICE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h new file mode 100644 index 0000000..08a7546 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h @@ -0,0 +1,74 @@ +/** + * @file + * Analogy for Linux, driver facilities + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_DRIVER_H +#define _COBALT_RTDM_ANALOGY_DRIVER_H + +#include <linux/list.h> +#include <rtdm/analogy/rtdm_helpers.h> +#include <rtdm/analogy/context.h> +#include <rtdm/analogy/buffer.h> + +struct seq_file; +struct a4l_link_desc; +struct a4l_device; + +/** Structure containing driver declaration data. 
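+ *
+ * A minimal declaration sketch is shown below; the example_* names
+ * are hypothetical and only meant for illustration:
+ * <tt> @verbatim
+static struct a4l_driver example_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "example-board",
+	.driver_name = "example_drv",
+	.privdata_size = 0,
+	.attach = example_attach,
+	.detach = example_detach,
+};
+@endverbatim </tt>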
+ *
+ * @see a4l_register_drv()
+ */
+/* Analogy driver descriptor */
+struct a4l_driver {
+
+	/* List stuff */
+	struct list_head list;
+	/**< List stuff */
+
+	/* Visible description stuff */
+	struct module *owner;
+	/**< Pointer to module containing the code */
+	unsigned int flags;
+	/**< Type / status driver's flags */
+	char *board_name;
+	/**< Board name */
+	char *driver_name;
+	/**< Driver name */
+	int privdata_size;
+	/**< Size of the driver's private data */
+
+	/* Init/destroy procedures */
+	int (*attach) (struct a4l_device *, struct a4l_link_desc *);
+	/**< Attach procedure */
+	int (*detach) (struct a4l_device *);
+	/**< Detach procedure */
+
+};
+
+/* Driver list related functions */
+
+int a4l_register_drv(struct a4l_driver * drv);
+int a4l_unregister_drv(struct a4l_driver * drv);
+int a4l_lct_drv(char *pin, struct a4l_driver ** pio);
+#ifdef CONFIG_PROC_FS
+int a4l_rdproc_drvs(struct seq_file *p, void *data);
+#endif /* CONFIG_PROC_FS */
+
+#endif /* !_COBALT_RTDM_ANALOGY_DRIVER_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h
new file mode 100644
index 0000000..2e8245a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h
@@ -0,0 +1,45 @@
+/*
+ * Analogy for Linux, instruction related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+#define _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+
+struct a4l_kernel_instruction {
+	unsigned int type;
+	unsigned int idx_subd;
+	unsigned int chan_desc;
+	unsigned int data_size;
+	void *data;
+	void *__udata;
+};
+
+struct a4l_kernel_instruction_list {
+	unsigned int count;
+	struct a4l_kernel_instruction *insns;
+	a4l_insn_t *__uinsns;
+};
+
+/* Instruction related functions */
+
+/* Upper layer functions */
+int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_INSTRUCTION_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h
new file mode 100644
index 0000000..1de219f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h
@@ -0,0 +1,143 @@
+/*
+ * Analogy for Linux, operating system facilities
+ *
+ * Copyright (C) 1997-2000 David A.
Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H +#define _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H + +#include <linux/fs.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/time.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/uaccess.h> +#include <rtdm/driver.h> + +/* --- Trace section --- */ +#define A4L_PROMPT "Analogy: " + +#define RTDM_SUBCLASS_ANALOGY 0 + +#define __a4l_err(fmt, args...) rtdm_printk(KERN_ERR A4L_PROMPT fmt, ##args) +#define __a4l_warn(fmt, args...) rtdm_printk(KERN_WARNING A4L_PROMPT fmt, ##args) + +#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE +#define __a4l_info(fmt, args...) trace_printk(fmt, ##args) +#else +#define __a4l_info(fmt, args...) \ + rtdm_printk(KERN_INFO A4L_PROMPT "%s: " fmt, __FUNCTION__, ##args) +#endif + +#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG +#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE +#define __a4l_dbg(level, debug, fmt, args...) \ + do { \ + if ((debug) >= (level)) \ + trace_printk(fmt, ##args); \ + } while (0) +#else +#define __a4l_dbg(level, debug, fmt, args...) \ + do { \ + if ((debug) >= (level)) \ + rtdm_printk(KERN_DEBUG A4L_PROMPT "%s: " fmt, __FUNCTION__ , ##args); \ + } while (0) +#endif + +#define core_dbg CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_LEVEL +#define drv_dbg CONFIG_XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL + +#else /* !CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */ + +#define __a4l_dbg(level, debug, fmt, args...) + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */ + +#define __a4l_dev_name(dev) \ + (dev->driver == NULL) ? "unattached dev" : dev->driver->board_name + +#define a4l_err(dev, fmt, args...) \ + __a4l_err("%s: " fmt, __a4l_dev_name(dev), ##args) + +#define a4l_warn(dev, fmt, args...) \ + __a4l_warn("%s: " fmt, __a4l_dev_name(dev), ##args) + +#define a4l_info(dev, fmt, args...) \ + __a4l_info("%s: " fmt, __a4l_dev_name(dev), ##args) + +#define a4l_dbg(level, debug, dev, fmt, args...) 
\ + __a4l_dbg(level, debug, "%s: " fmt, __a4l_dev_name(dev), ##args) + + +/* --- Time section --- */ +static inline void a4l_udelay(unsigned int us) +{ + rtdm_task_busy_sleep(((nanosecs_rel_t) us) * 1000); +} + +/* Function which gives absolute time */ +nanosecs_abs_t a4l_get_time(void); + +/* Function for setting up the absolute time recovery */ +void a4l_init_time(void); + +/* --- IRQ section --- */ +#define A4L_IRQ_DISABLED 0 + +typedef int (*a4l_irq_hdlr_t) (unsigned int irq, void *d); + +struct a4l_irq_descriptor { + /* These fields are useful to launch the IRQ trampoline; + that is the reason why a structure has been defined */ + a4l_irq_hdlr_t handler; + unsigned int irq; + void *cookie; + rtdm_irq_t rtdm_desc; +}; + +int __a4l_request_irq(struct a4l_irq_descriptor * dsc, + unsigned int irq, + a4l_irq_hdlr_t handler, + unsigned long flags, void *cookie); +int __a4l_free_irq(struct a4l_irq_descriptor * dsc); + +/* --- Synchronization section --- */ +#define __NRT_WAITER 1 +#define __RT_WAITER 2 +#define __EVT_PDING 3 + +struct a4l_sync { + unsigned long status; + rtdm_event_t rtdm_evt; + rtdm_nrtsig_t nrt_sig; + wait_queue_head_t wq; +}; + +#define a4l_select_sync(snc, slr, type, fd) \ + rtdm_event_select(&((snc)->rtdm_evt), slr, type, fd) + +int a4l_init_sync(struct a4l_sync * snc); +void a4l_cleanup_sync(struct a4l_sync * snc); +void a4l_flush_sync(struct a4l_sync * snc); +int a4l_wait_sync(struct a4l_sync * snc, int rt); +int a4l_timedwait_sync(struct a4l_sync * snc, + int rt, unsigned long long ns_timeout); +void a4l_signal_sync(struct a4l_sync * snc); + +#endif /* !_COBALT_RTDM_ANALOGY_RTDM_HELPERS_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h new file mode 100644 index 0000000..21c09df --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h @@ -0,0 +1,118 @@ +/** + * @file + * Analogy for Linux, subdevice related features + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_SUBDEVICE_H +#define _COBALT_RTDM_ANALOGY_SUBDEVICE_H + +#include <linux/list.h> +#include <rtdm/analogy/instruction.h> +#include <rtdm/analogy/command.h> +#include <rtdm/analogy/channel_range.h> + +/* --- Subdevice descriptor structure --- */ + +struct a4l_device; +struct a4l_buffer; + +/*! + * @brief Structure describing the subdevice + * @see a4l_add_subd() + */ + +struct a4l_subdevice { + + struct list_head list; + /**< List stuff */ + + struct a4l_device *dev; + /**< Containing device */ + + unsigned int idx; + /**< Subdevice index */ + + struct a4l_buffer *buf; + /**< Linked buffer */ + + /* Subdevice's status (busy, linked?) 
*/ + unsigned long status; + /**< Subdevice's status */ + + /* Descriptors stuff */ + unsigned long flags; + /**< Type flags */ + struct a4l_channels_desc *chan_desc; + /**< Tab of channels descriptors pointers */ + struct a4l_rngdesc *rng_desc; + /**< Tab of ranges descriptors pointers */ + struct a4l_cmd_desc *cmd_mask; + /**< Command capabilities mask */ + + /* Functions stuff */ + int (*insn_read) (struct a4l_subdevice *, struct a4l_kernel_instruction *); + /**< Callback for the instruction "read" */ + int (*insn_write) (struct a4l_subdevice *, struct a4l_kernel_instruction *); + /**< Callback for the instruction "write" */ + int (*insn_bits) (struct a4l_subdevice *, struct a4l_kernel_instruction *); + /**< Callback for the instruction "bits" */ + int (*insn_config) (struct a4l_subdevice *, struct a4l_kernel_instruction *); + /**< Callback for the configuration instruction */ + int (*do_cmd) (struct a4l_subdevice *, struct a4l_cmd_desc *); + /**< Callback for command handling */ + int (*do_cmdtest) (struct a4l_subdevice *, struct a4l_cmd_desc *); + /**< Callback for command checking */ + void (*cancel) (struct a4l_subdevice *); + /**< Callback for asynchronous transfer cancellation */ + void (*munge) (struct a4l_subdevice *, void *, unsigned long); + /**< Callback for munge operation */ + int (*trigger) (struct a4l_subdevice *, lsampl_t); + /**< Callback for trigger operation */ + + char priv[0]; + /**< Private data */ +}; + +/* --- Subdevice related functions and macros --- */ + +struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice * sb, int idx); +struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice * sb, int chidx, int rngidx); +int a4l_check_chanlist(struct a4l_subdevice * subd, + unsigned char nb_chan, unsigned int *chans); + +#define a4l_subd_is_input(x) ((A4L_SUBD_MASK_READ & (x)->flags) != 0) +/* The following macro considers that a DIO subdevice is firstly an + output subdevice */ +#define a4l_subd_is_output(x) \ + ((A4L_SUBD_MASK_WRITE & (x)->flags) != 0 || \ + (A4L_SUBD_DIO & (x)->flags) != 0) + +/* --- Upper layer functions --- */ + +struct a4l_subdevice * a4l_get_subd(struct a4l_device *dev, int idx); +struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv, + void (*setup)(struct a4l_subdevice *)); +int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice * subd); +int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg); + +#endif /* !_COBALT_RTDM_ANALOGY_SUBDEVICE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h new file mode 100644 index 0000000..c62c22a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h @@ -0,0 +1,78 @@ +/* + * Analogy for Linux, transfer related features + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_TRANSFER_H +#define _COBALT_RTDM_ANALOGY_TRANSFER_H + +#include <rtdm/analogy/buffer.h> + +/* IRQ types */ +#define A4L_IRQ_DISABLED 0 + +/* Fields init values */ +#define A4L_IRQ_UNUSED (unsigned int)((unsigned short)(~0)) +#define A4L_IDX_UNUSED (unsigned int)(~0) + +/* TODO: IRQ handling must leave transfer for os_facilities */ + +struct a4l_device; +/* Analogy transfer descriptor */ +struct a4l_transfer { + + /* Subdevices desc */ + unsigned int nb_subd; + struct a4l_subdevice **subds; + + /* Buffer stuff: the default size */ + unsigned int default_bufsize; + + /* IRQ in use */ + /* TODO: irq_desc should vanish */ + struct a4l_irq_descriptor irq_desc; +}; + +/* --- Proc function --- */ + +int a4l_rdproc_transfer(struct seq_file *p, void *data); + +/* --- Upper layer functions --- */ + +void a4l_presetup_transfer(struct a4l_device_context * cxt); +int a4l_setup_transfer(struct a4l_device_context * cxt); +int a4l_precleanup_transfer(struct a4l_device_context * cxt); +int a4l_cleanup_transfer(struct a4l_device_context * cxt); +int a4l_reserve_transfer(struct a4l_device_context * cxt, int idx_subd); +int a4l_init_transfer(struct a4l_device_context * cxt, struct a4l_cmd_desc * cmd); +int a4l_cancel_transfer(struct a4l_device_context * cxt, int idx_subd); +int a4l_cancel_transfers(struct a4l_device_context * cxt); + +ssize_t a4l_put(struct a4l_device_context * cxt, void *buf, size_t nbytes); +ssize_t a4l_get(struct a4l_device_context * cxt, void *buf, size_t nbytes); + +int a4l_request_irq(struct a4l_device *dev, + unsigned int irq, + a4l_irq_hdlr_t handler, + unsigned long flags, void *cookie); +int a4l_free_irq(struct a4l_device *dev, unsigned int irq); +unsigned int a4l_get_irq(struct a4l_device *dev); + +int a4l_ioctl_cancel(struct a4l_device_context * cxt, void *arg); + +#endif /* !_COBALT_RTDM_ANALOGY_TRANSFER_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h new file mode 100644 index 0000000..885a237 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+#ifndef _COBALT_RTDM_AUTOTUNE_H
+#define _COBALT_RTDM_AUTOTUNE_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/autotune.h>
+
+#endif /* !_COBALT_RTDM_AUTOTUNE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h
new file mode 100644
index 0000000..73268e3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ * <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_CAN_H
+#define _COBALT_RTDM_CAN_H
+
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/can.h>
+
+#endif /* _COBALT_RTDM_CAN_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h
new file mode 100644
index 0000000..d60cfc5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h
@@ -0,0 +1,33 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ +#ifndef _COBALT_RTDM_COBALT_H +#define _COBALT_RTDM_COBALT_H + +#include <xenomai/posix/process.h> +#include <xenomai/posix/extension.h> +#include <xenomai/posix/thread.h> +#include <xenomai/posix/signal.h> +#include <xenomai/posix/timer.h> +#include <xenomai/posix/clock.h> +#include <xenomai/posix/event.h> +#include <xenomai/posix/monitor.h> +#include <xenomai/posix/corectl.h> + +#endif /* !_COBALT_RTDM_COBALT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h new file mode 100644 index 0000000..2c81a33 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_COMPAT_H +#define _COBALT_RTDM_COMPAT_H + +#ifdef CONFIG_XENO_ARCH_SYS3264 + +#include <cobalt/kernel/compat.h> +#include <rtdm/rtdm.h> + +struct compat_rtdm_getsockopt_args { + int level; + int optname; + compat_uptr_t optval; + compat_uptr_t optlen; +}; + +struct compat_rtdm_setsockopt_args { + int level; + int optname; + const compat_uptr_t optval; + socklen_t optlen; +}; + +struct compat_rtdm_getsockaddr_args { + compat_uptr_t addr; + compat_uptr_t addrlen; +}; + +struct compat_rtdm_setsockaddr_args { + const compat_uptr_t addr; + socklen_t addrlen; +}; + +#define _RTIOC_GETSOCKOPT_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x20, \ + struct compat_rtdm_getsockopt_args) +#define _RTIOC_SETSOCKOPT_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x21, \ + struct compat_rtdm_setsockopt_args) +#define _RTIOC_BIND_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x22, \ + struct compat_rtdm_setsockaddr_args) +#define _RTIOC_CONNECT_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x23, \ + struct compat_rtdm_setsockaddr_args) +#define _RTIOC_ACCEPT_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x25, \ + struct compat_rtdm_getsockaddr_args) +#define _RTIOC_GETSOCKNAME_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x26, \ + struct compat_rtdm_getsockaddr_args) +#define _RTIOC_GETPEERNAME_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x27, \ + struct compat_rtdm_getsockaddr_args) + +#define __COMPAT_CASE(__op) : case __op + +#else /* !CONFIG_XENO_ARCH_SYS3264 */ + +#define __COMPAT_CASE(__op) + +#endif /* !CONFIG_XENO_ARCH_SYS3264 */ + +#define COMPAT_CASE(__op) case __op __COMPAT_CASE(__op ## _COMPAT) + +#endif /* !_COBALT_RTDM_COMPAT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h new file mode 100644 index 0000000..2a68c3e --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h @@ -0,0 +1,1361 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, driver API header + * + * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de> + * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net> + * Copyright (C) 2008 
Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * @ingroup driverapi + */ +#ifndef _COBALT_RTDM_DRIVER_H +#define _COBALT_RTDM_DRIVER_H + +#include <asm/atomic.h> +#include <linux/cpumask.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/cdev.h> +#include <linux/wait.h> +#include <linux/notifier.h> +#include <pipeline/lock.h> +#include <pipeline/inband_work.h> +#include <xenomai/version.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/intr.h> +#include <cobalt/kernel/synch.h> +#include <cobalt/kernel/select.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/init.h> +#include <cobalt/kernel/ancillaries.h> +#include <cobalt/kernel/tree.h> +#include <rtdm/fd.h> +#include <rtdm/rtdm.h> + +/* debug support */ +#include <cobalt/kernel/assert.h> +#include <trace/events/cobalt-rtdm.h> +#ifdef CONFIG_PCI +#include <asm-generic/xenomai/pci_ids.h> +#endif /* CONFIG_PCI */ +#include <asm/xenomai/syscall.h> + +struct class; +typedef struct xnselector rtdm_selector_t; +enum rtdm_selecttype; + +/*! + * @addtogroup rtdm_device_register + * @{ + */ + +/*! + * @anchor dev_flags @name Device Flags + * Static flags describing a RTDM device + * @{ + */ +/** If set, only a single instance of the device can be requested by an + * application. */ +#define RTDM_EXCLUSIVE 0x0001 + +/** + * Use fixed minor provided in the rtdm_device description for + * registering. If this flag is absent, the RTDM core assigns minor + * numbers to devices managed by a driver in order of registration. + */ +#define RTDM_FIXED_MINOR 0x0002 + +/** If set, the device is addressed via a clear-text name. */ +#define RTDM_NAMED_DEVICE 0x0010 + +/** If set, the device is addressed via a combination of protocol ID and + * socket type. */ +#define RTDM_PROTOCOL_DEVICE 0x0020 + +/** Mask selecting the device type. */ +#define RTDM_DEVICE_TYPE_MASK 0x00F0 + +/** Flag indicating a secure variant of RTDM (not supported here) */ +#define RTDM_SECURE_DEVICE 0x80000000 +/** @} Device Flags */ + +/** Maximum number of named devices per driver. */ +#define RTDM_MAX_MINOR 4096 + +/** @} rtdm_device_register */ + +/*! + * @addtogroup rtdm_sync + * @{ + */ + +/*! 
+ * @anchor RTDM_SELECTTYPE_xxx @name RTDM_SELECTTYPE_xxx
+ * Event types select can bind to
+ * @{
+ */
+enum rtdm_selecttype {
+	/** Select input data availability events */
+	RTDM_SELECTTYPE_READ = XNSELECT_READ,
+
+	/** Select output buffer availability events */
+	RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE,
+
+	/** Select exceptional events */
+	RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT
+};
+/** @} RTDM_SELECTTYPE_xxx */
+
+/** @} rtdm_sync */
+
+/**
+ * @brief Device context
+ *
+ * A device context structure is associated with every open device instance.
+ * RTDM takes care of its creation and destruction and passes it to the
+ * operation handlers when they are invoked.
+ *
+ * Drivers can attach arbitrary data immediately after the official
+ * structure. The size of this data is provided via
+ * rtdm_driver.context_size during device registration.
+ */
+struct rtdm_dev_context {
+	struct rtdm_fd fd;
+
+	/** Reference to owning device */
+	struct rtdm_device *device;
+
+	/** Start of the driver-defined context data structure */
+	char dev_private[0];
+};
+
+static inline struct rtdm_dev_context *rtdm_fd_to_context(struct rtdm_fd *fd)
+{
+	return container_of(fd, struct rtdm_dev_context, fd);
+}
+
+/**
+ * Locate the driver private area associated with a device context structure
+ *
+ * @param[in] fd File descriptor structure associated with opened
+ * device instance
+ *
+ * @return The address of the private driver area associated with @a
+ * fd.
+ */
+static inline void *rtdm_fd_to_private(struct rtdm_fd *fd)
+{
+	return &rtdm_fd_to_context(fd)->dev_private[0];
+}
+
+/**
+ * Locate a device file descriptor structure from its driver private area
+ *
+ * @param[in] dev_private Address of a private context area
+ *
+ * @return The address of the file descriptor structure defining @a
+ * dev_private.
+ */
+static inline struct rtdm_fd *rtdm_private_to_fd(void *dev_private)
+{
+	struct rtdm_dev_context *ctx;
+	ctx = container_of(dev_private, struct rtdm_dev_context, dev_private);
+	return &ctx->fd;
+}
+
+/**
+ * Tell whether the passed file descriptor belongs to an application.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return true if the passed file descriptor belongs to an application,
+ * false otherwise.
+ */
+static inline bool rtdm_fd_is_user(struct rtdm_fd *fd)
+{
+	return rtdm_fd_owner(fd) != &cobalt_kernel_ppd;
+}
+
+/**
+ * Locate a device structure from a file descriptor.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return The address of the device structure to which this file
+ * descriptor is attached.
+ */
+static inline struct rtdm_device *rtdm_fd_device(struct rtdm_fd *fd)
+{
+	return rtdm_fd_to_context(fd)->device;
+}
+
+/**
+ * @brief RTDM profile information
+ *
+ * This descriptor details the profile information associated with an
+ * RTDM class of device managed by a driver.
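+ *
+ * As an illustration only (the class, subclass and version numbers
+ * below are made up), a driver could describe its device class this
+ * way:
+ *
+ * @code
+ * static struct rtdm_driver foo_driver = {
+ *	.profile_info = RTDM_PROFILE_INFO(foo, RTDM_CLASS_EXPERIMENTAL,
+ *					  RTDM_SUBCLASS_GENERIC, 1),
+ *	.device_flags = RTDM_NAMED_DEVICE,
+ *	.device_count = 1,
+ *	// ... I/O handlers (.ops), .context_size, etc.
+ * };
+ * @endcode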
+ * + * @anchor rtdm_profile_info + */ +struct rtdm_profile_info { + /** Device class name */ + const char *name; + /** Device class ID, see @ref RTDM_CLASS_xxx */ + int class_id; + /** Device sub-class, see RTDM_SUBCLASS_xxx definition in the + @ref rtdm_profiles "Device Profiles" */ + int subclass_id; + /** Supported device profile version */ + int version; + /** Reserved */ + unsigned int magic; + struct module *owner; + struct class *kdev_class; +}; + +struct rtdm_driver; + +/** + * @brief RTDM state management handlers + */ +struct rtdm_sm_ops { + /** Handler called upon transition to COBALT_STATE_WARMUP */ + int (*start)(struct rtdm_driver *drv); + /** Handler called upon transition to COBALT_STATE_TEARDOWN */ + int (*stop)(struct rtdm_driver *drv); +}; + +/** + * @brief RTDM driver + * + * This descriptor describes a RTDM device driver. The structure holds + * runtime data, therefore it must reside in writable memory. + */ +struct rtdm_driver { + /** + * Class profile information. The RTDM_PROFILE_INFO() macro @b + * must be used for filling up this field. + * @anchor rtdm_driver_profile + */ + struct rtdm_profile_info profile_info; + /** + * Device flags, see @ref dev_flags "Device Flags" for details + * @anchor rtdm_driver_flags + */ + int device_flags; + /** + * Size of the private memory area the core should + * automatically allocate for each open file descriptor, which + * is usable for storing the context data associated to each + * connection. The allocated memory is zero-initialized. The + * start of this area can be retrieved by a call to + * rtdm_fd_to_private(). + */ + size_t context_size; + /** Protocol device identification: protocol family (PF_xxx) */ + int protocol_family; + /** Protocol device identification: socket type (SOCK_xxx) */ + int socket_type; + /** I/O operation handlers */ + struct rtdm_fd_ops ops; + /** State management handlers */ + struct rtdm_sm_ops smops; + /** + * Count of devices this driver manages. This value is used to + * allocate a chrdev region for named devices. + */ + int device_count; + /** Base minor for named devices. */ + int base_minor; + /** Reserved area */ + struct { + union { + struct { + struct cdev cdev; + int major; + } named; + }; + atomic_t refcount; + struct notifier_block nb_statechange; + DECLARE_BITMAP(minor_map, RTDM_MAX_MINOR); + }; +}; + +#define RTDM_CLASS_MAGIC 0x8284636c + +/** + * @brief Initializer for class profile information. + * + * This macro must be used to fill in the @ref rtdm_profile_info + * "class profile information" field from a RTDM driver. + * + * @param __name Class name (unquoted). + * + * @param __id Class major identification number + * (profile_version.class_id). + * + * @param __subid Class minor identification number + * (profile_version.subclass_id). + * + * @param __version Profile version number. + * + * @note See @ref rtdm_profiles "Device Profiles". + */ +#define RTDM_PROFILE_INFO(__name, __id, __subid, __version) \ +{ \ + .name = ( # __name ), \ + .class_id = (__id), \ + .subclass_id = (__subid), \ + .version = (__version), \ + .magic = ~RTDM_CLASS_MAGIC, \ + .owner = THIS_MODULE, \ + .kdev_class = NULL, \ +} + +int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls); + +/** + * @brief RTDM device + * + * This descriptor describes a RTDM device instance. The structure + * holds runtime data, therefore it must reside in writable memory. + */ +struct rtdm_device { + /** Device driver. 
*/
+	struct rtdm_driver *driver;
+	/** Driver definable device data */
+	void *device_data;
+	/**
+	 * Device label template for composing the device name. A
+	 * limited printf-like format string is assumed, with a
+	 * provision for replacing the first %d/%i placeholder found
+	 * in the string by the device minor number. It is up to the
+	 * driver to actually mention this placeholder or not,
+	 * depending on the naming convention for its devices. For
+	 * named devices, the corresponding device node will
+	 * automatically appear in the /dev/rtdm hierarchy with
+	 * hotplug-enabled device filesystems (DEVTMPFS).
+	 */
+	const char *label;
+	/**
+	 * Minor number of the device. If RTDM_FIXED_MINOR is present
+	 * in the driver flags, the value stored in this field is used
+	 * verbatim by rtdm_dev_register(). Otherwise, the RTDM core
+	 * automatically assigns minor numbers to all devices managed
+	 * by the driver referred to by @a driver, in order of
+	 * registration, storing the resulting values into this field.
+	 *
+	 * Device nodes created for named devices in the Linux /dev
+	 * hierarchy are assigned this minor number.
+	 *
+	 * The minor number of the current device handling an I/O
+	 * request can be retrieved by a call to rtdm_fd_minor().
+	 */
+	int minor;
+	/** Reserved area. */
+	struct {
+		unsigned int magic;
+		char *name;
+		union {
+			struct {
+				xnhandle_t handle;
+			} named;
+			struct {
+				struct xnid id;
+			} proto;
+		};
+		dev_t rdev;
+		struct device *kdev;
+		struct class *kdev_class;
+		atomic_t refcount;
+		struct rtdm_fd_ops ops;
+		wait_queue_head_t putwq;
+		struct list_head openfd_list;
+	};
+};
+
+/* --- device registration --- */
+
+int rtdm_dev_register(struct rtdm_device *device);
+
+void rtdm_dev_unregister(struct rtdm_device *device);
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+
+static inline struct device *rtdm_dev_to_kdev(struct rtdm_device *device)
+{
+	return device->kdev;
+}
+
+/* --- clock services --- */
+static inline nanosecs_abs_t rtdm_clock_read(void)
+{
+	return xnclock_read_realtime(&nkclock);
+}
+
+static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
+{
+	return xnclock_read_monotonic(&nkclock);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- timeout sequences --- */
+
+typedef nanosecs_abs_t rtdm_toseq_t;
+
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
+
+/*!
+ * @addtogroup rtdm_sync
+ * @{
+ */
+
+/*!
+ * @defgroup rtdm_sync_biglock Big dual kernel lock
+ * @{
+ */
+
+/**
+ * @brief Enter atomic section (dual kernel only)
+ *
+ * This call opens a fully atomic section, serializing execution with
+ * respect to all interrupt handlers (including for real-time IRQs)
+ * and Xenomai threads running on all CPUs.
+ *
+ * @param __context name of local variable to store the context
+ * in. This variable, updated by the real-time core, will hold the
+ * information required to leave the atomic section properly.
+ *
+ * @note Atomic sections may be nested. The caller is allowed to sleep
+ * on a blocking Xenomai service from primary mode within an atomic
+ * section delimited by cobalt_atomic_enter/cobalt_atomic_leave calls.
+ * On the contrary, sleeping on a regular Linux kernel service while
+ * holding such a lock is NOT valid.
+ *
+ * @note Since the strongest lock is acquired by this service, it can
+ * be used to synchronize real-time and non-real-time contexts.
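+ *
+ * A minimal usage sketch (illustrative only) reads:
+ *
+ * @code
+ * spl_t s;
+ *
+ * cobalt_atomic_enter(s);
+ * // ... query/update state shared between real-time and
+ * // non-real-time contexts ...
+ * cobalt_atomic_leave(s);
+ * @endcode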
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases, mainly for the
+ * purpose of porting existing dual-kernel drivers which still depend
+ * on the obsolete RTDM_EXECUTE_ATOMICALLY() construct.
+ */
+#define cobalt_atomic_enter(__context)				\
+	do {							\
+		xnlock_get_irqsave(&nklock, (__context));	\
+		xnsched_lock();					\
+	} while (0)
+
+/**
+ * @brief Leave atomic section (dual kernel only)
+ *
+ * This call closes an atomic section previously opened by a call to
+ * cobalt_atomic_enter(), restoring the preemption and interrupt state
+ * which prevailed prior to entering the exited section.
+ *
+ * @param __context name of local variable which stored the context.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases.
+ */
+#define cobalt_atomic_leave(__context)				\
+	do {							\
+		xnsched_unlock();				\
+		xnlock_put_irqrestore(&nklock, (__context));	\
+	} while (0)
+
+/**
+ * @brief Execute code block atomically (DEPRECATED)
+ *
+ * Generally, it is illegal to suspend the current task by calling
+ * rtdm_task_sleep(), rtdm_event_wait(), etc. while holding a spinlock. In
+ * contrast, this macro allows several operations, including a potentially
+ * rescheduling call, to be combined in a code block that executes
+ * atomically with respect to other RTDM_EXECUTE_ATOMICALLY() blocks. The
+ * macro is a lightweight alternative for protecting code blocks via
+ * mutexes, and it can even be used to synchronize real-time and
+ * non-real-time contexts.
+ *
+ * @param code_block Commands to be executed atomically
+ *
+ * @note It is not allowed to leave the code block explicitly by using
+ * @c break, @c return, @c goto, etc. Doing so would leave the global lock
+ * held beyond the code block, in an inconsistent state. Moreover, do not
+ * embed complex operations into the code block. Consider that they will be
+ * executed under preemption lock with interrupts switched off. Also note
+ * that invocation of rescheduling calls may break the atomicity until the
+ * task gains the CPU again.
+ *
+ * @coretags{unrestricted}
+ *
+ * @deprecated This construct will be phased out in Xenomai
+ * 3.0. Please use rtdm_waitqueue services instead.
+ *
+ * @see cobalt_atomic_enter().
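+ *
+ * For reference, a typical occurrence in legacy drivers looks like
+ * this sketch (with hypothetical @c ctx and @c ret locals):
+ *
+ * @code
+ * RTDM_EXECUTE_ATOMICALLY(
+ *	if (ctx->busy)
+ *		ret = -EBUSY;
+ *	else {
+ *		ctx->busy = true;
+ *		ret = 0;
+ *	}
+ * );
+ * @endcode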
+ */ +#ifdef DOXYGEN_CPP /* Beautify doxygen output */ +#define RTDM_EXECUTE_ATOMICALLY(code_block) \ +{ \ + <ENTER_ATOMIC_SECTION> \ + code_block; \ + <LEAVE_ATOMIC_SECTION> \ +} +#else /* This is how it really works */ +static inline __attribute__((deprecated)) void +rtdm_execute_atomically(void) { } + +#define RTDM_EXECUTE_ATOMICALLY(code_block) \ +{ \ + spl_t __rtdm_s; \ + \ + rtdm_execute_atomically(); \ + xnlock_get_irqsave(&nklock, __rtdm_s); \ + xnsched_lock(); \ + code_block; \ + xnsched_unlock(); \ + xnlock_put_irqrestore(&nklock, __rtdm_s); \ +} +#endif + +/** @} Big dual kernel lock */ + +/** + * @defgroup rtdm_sync_spinlock Spinlock with preemption deactivation + * @{ + */ + +/** + * Static lock initialisation + */ +#define RTDM_LOCK_UNLOCKED(__name) PIPELINE_SPIN_LOCK_UNLOCKED(__name) + +#define DEFINE_RTDM_LOCK(__name) \ + rtdm_lock_t __name = RTDM_LOCK_UNLOCKED(__name) + +/** Lock variable */ +typedef pipeline_spinlock_t rtdm_lock_t; + +/** Variable to save the context while holding a lock */ +typedef unsigned long rtdm_lockctx_t; + +/** + * Dynamic lock initialisation + * + * @param lock Address of lock variable + * + * @coretags{task-unrestricted} + */ +static inline void rtdm_lock_init(rtdm_lock_t *lock) +{ + raw_spin_lock_init(lock); +} + +/** + * Acquire lock from non-preemptible contexts + * + * @param lock Address of lock variable + * + * @coretags{unrestricted} + */ +static inline void rtdm_lock_get(rtdm_lock_t *lock) +{ + XENO_BUG_ON(COBALT, !spltest()); + raw_spin_lock(lock); + xnsched_lock(); +} + +/** + * Release lock without preemption restoration + * + * @param lock Address of lock variable + * + * @coretags{unrestricted, might-switch} + */ +static inline void rtdm_lock_put(rtdm_lock_t *lock) +{ + raw_spin_unlock(lock); + xnsched_unlock(); +} + +/** + * Acquire lock and disable preemption, by stalling the head domain. 
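+ *
+ * A typical critical section based on this lock type (illustrative
+ * sketch, assuming a hypothetical per-device @c ctx holding an
+ * rtdm_lock_t) reads:
+ *
+ * @code
+ * rtdm_lockctx_t c;
+ *
+ * rtdm_lock_get_irqsave(&ctx->lock, c);
+ * // ... update data shared with the interrupt handler ...
+ * rtdm_lock_put_irqrestore(&ctx->lock, c);
+ * @endcode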
+ * + * @param __lock Address of lock variable + * @param __context name of local variable to store the context in + * + * @coretags{unrestricted} + */ +#define rtdm_lock_get_irqsave(__lock, __context) \ + ((__context) = __rtdm_lock_get_irqsave(__lock)) + +static inline rtdm_lockctx_t __rtdm_lock_get_irqsave(rtdm_lock_t *lock) +{ + rtdm_lockctx_t context; + + splhigh(context); + raw_spin_lock(lock); + xnsched_lock(); + + return context; +} + +/** + * Release lock and restore preemption state + * + * @param lock Address of lock variable + * @param context name of local variable which stored the context + * + * @coretags{unrestricted} + */ +static inline +void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t context) +{ + raw_spin_unlock(lock); + xnsched_unlock(); + splexit(context); +} + +/** + * Disable preemption locally + * + * @param __context name of local variable to store the context in + * + * @coretags{unrestricted} + */ +#define rtdm_lock_irqsave(__context) \ + splhigh(__context) + +/** + * Restore preemption state + * + * @param __context name of local variable which stored the context + * + * @coretags{unrestricted} + */ +#define rtdm_lock_irqrestore(__context) \ + splexit(__context) + +/** @} Spinlock with Preemption Deactivation */ + +#ifndef DOXYGEN_CPP + +struct rtdm_waitqueue { + struct xnsynch wait; +}; +typedef struct rtdm_waitqueue rtdm_waitqueue_t; + +#define RTDM_WAITQUEUE_INITIALIZER(__name) { \ + .wait = XNSYNCH_WAITQUEUE_INITIALIZER((__name).wait), \ + } + +#define DEFINE_RTDM_WAITQUEUE(__name) \ + struct rtdm_waitqueue __name = RTDM_WAITQUEUE_INITIALIZER(__name) + +#define DEFINE_RTDM_WAITQUEUE_ONSTACK(__name) \ + DEFINE_RTDM_WAITQUEUE(__name) + +static inline void rtdm_waitqueue_init(struct rtdm_waitqueue *wq) +{ + *wq = (struct rtdm_waitqueue)RTDM_WAITQUEUE_INITIALIZER(*wq); +} + +static inline void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq) +{ + xnsynch_destroy(&wq->wait); +} + +static inline int __rtdm_dowait(struct rtdm_waitqueue *wq, + nanosecs_rel_t timeout, xntmode_t timeout_mode) +{ + int ret; + + ret = xnsynch_sleep_on(&wq->wait, timeout, timeout_mode); + if (ret & XNBREAK) + return -EINTR; + if (ret & XNTIMEO) + return -ETIMEDOUT; + if (ret & XNRMID) + return -EIDRM; + return 0; +} + +static inline int __rtdm_timedwait(struct rtdm_waitqueue *wq, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq) +{ + if (toseq && timeout > 0) + return __rtdm_dowait(wq, *toseq, XN_ABSOLUTE); + + return __rtdm_dowait(wq, timeout, XN_RELATIVE); +} + +#define rtdm_timedwait_condition_locked(__wq, __cond, __timeout, __toseq) \ + ({ \ + int __ret = 0; \ + while (__ret == 0 && !(__cond)) \ + __ret = __rtdm_timedwait(__wq, __timeout, __toseq); \ + __ret; \ + }) + +#define rtdm_wait_condition_locked(__wq, __cond) \ + ({ \ + int __ret = 0; \ + while (__ret == 0 && !(__cond)) \ + __ret = __rtdm_dowait(__wq, \ + XN_INFINITE, XN_RELATIVE); \ + __ret; \ + }) + +#define rtdm_timedwait_condition(__wq, __cond, __timeout, __toseq) \ + ({ \ + spl_t __s; \ + int __ret; \ + xnlock_get_irqsave(&nklock, __s); \ + __ret = rtdm_timedwait_condition_locked(__wq, __cond, \ + __timeout, __toseq); \ + xnlock_put_irqrestore(&nklock, __s); \ + __ret; \ + }) + +#define rtdm_timedwait(__wq, __timeout, __toseq) \ + __rtdm_timedwait(__wq, __timeout, __toseq) + +#define rtdm_timedwait_locked(__wq, __timeout, __toseq) \ + rtdm_timedwait(__wq, __timeout, __toseq) + +#define rtdm_wait_condition(__wq, __cond) \ + ({ \ + spl_t __s; \ + int __ret; \ + xnlock_get_irqsave(&nklock, __s); \ + __ret = 
rtdm_wait_condition_locked(__wq, __cond);	\
+		xnlock_put_irqrestore(&nklock, __s);		\
+		__ret;						\
+	})
+
+#define rtdm_wait(__wq)						\
+	__rtdm_dowait(__wq, XN_INFINITE, XN_RELATIVE)
+
+#define rtdm_wait_locked(__wq)  rtdm_wait(__wq)
+
+#define rtdm_waitqueue_lock(__wq, __context)	cobalt_atomic_enter(__context)
+
+#define rtdm_waitqueue_unlock(__wq, __context)	cobalt_atomic_leave(__context)
+
+#define rtdm_waitqueue_signal(__wq)					\
+	({								\
+		struct xnthread *__waiter;				\
+		__waiter = xnsynch_wakeup_one_sleeper(&(__wq)->wait);	\
+		xnsched_run();						\
+		__waiter != NULL;					\
+	})
+
+#define __rtdm_waitqueue_flush(__wq, __reason)				\
+	({								\
+		int __ret;						\
+		__ret = xnsynch_flush(&(__wq)->wait, __reason);		\
+		xnsched_run();						\
+		__ret == XNSYNCH_RESCHED;				\
+	})
+
+#define rtdm_waitqueue_broadcast(__wq)		\
+	__rtdm_waitqueue_flush(__wq, 0)
+
+#define rtdm_waitqueue_flush(__wq)		\
+	__rtdm_waitqueue_flush(__wq, XNBREAK)
+
+#define rtdm_waitqueue_wakeup(__wq, __waiter)				\
+	do {								\
+		xnsynch_wakeup_this_sleeper(&(__wq)->wait, __waiter);	\
+		xnsched_run();						\
+	} while (0)
+
+#define rtdm_for_each_waiter(__pos, __wq)		\
+	xnsynch_for_each_sleeper(__pos, &(__wq)->wait)
+
+#define rtdm_for_each_waiter_safe(__pos, __tmp, __wq)		\
+	xnsynch_for_each_sleeper_safe(__pos, __tmp, &(__wq)->wait)
+
+#endif /* !DOXYGEN_CPP */
+
+/** @} rtdm_sync */
+
+/* --- Interrupt management services --- */
+/*!
+ * @addtogroup rtdm_irq
+ * @{
+ */
+
+typedef struct xnintr rtdm_irq_t;
+
+/*!
+ * @anchor RTDM_IRQTYPE_xxx @name RTDM_IRQTYPE_xxx
+ * Interrupt registration flags
+ * @{
+ */
+/** Enable IRQ-sharing with other real-time drivers */
+#define RTDM_IRQTYPE_SHARED		XN_IRQTYPE_SHARED
+/** Mark IRQ as edge-triggered, relevant for correct handling of shared
+ *  edge-triggered IRQs */
+#define RTDM_IRQTYPE_EDGE		XN_IRQTYPE_EDGE
+/** @} RTDM_IRQTYPE_xxx */
+
+/**
+ * Interrupt handler
+ *
+ * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 or a combination of @ref RTDM_IRQ_xxx flags
+ */
+typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);
+
+/*!
+ * @anchor RTDM_IRQ_xxx @name RTDM_IRQ_xxx
+ * Return flags of interrupt handlers
+ * @{
+ */
+/** Unhandled interrupt */
+#define RTDM_IRQ_NONE			XN_IRQ_NONE
+/** Denotes a handled interrupt */
+#define RTDM_IRQ_HANDLED		XN_IRQ_HANDLED
+/** Request interrupt disabling on exit */
+#define RTDM_IRQ_DISABLE		XN_IRQ_DISABLE
+/** @} RTDM_IRQ_xxx */
+
+/**
+ * Retrieve IRQ handler argument
+ *
+ * @param irq_handle IRQ handle
+ * @param type Type of the pointer to return
+ *
+ * @return The argument pointer registered on rtdm_irq_request() is returned,
+ * type-cast to the specified @a type.
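+ *
+ * For instance, a handler registered with a pointer to a hypothetical
+ * struct foo_context as the rtdm_irq_request() argument may retrieve
+ * it as follows (illustrative sketch only):
+ *
+ * @code
+ * static int foo_interrupt(rtdm_irq_t *irq_handle)
+ * {
+ *	struct foo_context *ctx =
+ *		rtdm_irq_get_arg(irq_handle, struct foo_context);
+ *
+ *	// ... acknowledge the interrupt on the device ...
+ *
+ *	return RTDM_IRQ_HANDLED;
+ * }
+ * @endcode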
+ * + * @coretags{unrestricted} + */ +#define rtdm_irq_get_arg(irq_handle, type) ((type *)irq_handle->cookie) +/** @} rtdm_irq */ + +int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no, + rtdm_irq_handler_t handler, unsigned long flags, + const char *device_name, void *arg); + +int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no, + rtdm_irq_handler_t handler, unsigned long flags, + const char *device_name, void *arg, + const cpumask_t *cpumask); + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +static inline int rtdm_irq_free(rtdm_irq_t *irq_handle) +{ + if (!XENO_ASSERT(COBALT, xnsched_root_p())) + return -EPERM; + xnintr_destroy(irq_handle); + return 0; +} + +static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle) +{ + xnintr_enable(irq_handle); + return 0; +} + +static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle) +{ + xnintr_disable(irq_handle); + return 0; +} + +static inline int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle, + const cpumask_t *cpumask) +{ + return xnintr_affinity(irq_handle, cpumask); +} +#endif /* !DOXYGEN_CPP */ + +/* --- non-real-time signalling services --- */ + +/*! + * @addtogroup rtdm_nrtsignal + * @{ + */ + +typedef struct rtdm_nrtsig rtdm_nrtsig_t; +/** + * Non-real-time signal handler + * + * @param[in] nrt_sig Signal handle pointer as passed to rtdm_nrtsig_init() + * @param[in] arg Argument as passed to rtdm_nrtsig_init() + * + * @note The signal handler will run in soft-IRQ context of the non-real-time + * subsystem. Note the implications of this context, e.g. no invocation of + * blocking operations. + */ +typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t *nrt_sig, void *arg); + +struct rtdm_nrtsig { + struct pipeline_inband_work inband_work; /* Must be first */ + rtdm_nrtsig_handler_t handler; + void *arg; +}; + +void rtdm_schedule_nrt_work(struct work_struct *lostage_work); +/** @} rtdm_nrtsignal */ + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work); + +static inline void rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig, + rtdm_nrtsig_handler_t handler, void *arg) +{ + nrt_sig->inband_work = (struct pipeline_inband_work) + PIPELINE_INBAND_WORK_INITIALIZER(*nrt_sig, + __rtdm_nrtsig_execute); + nrt_sig->handler = handler; + nrt_sig->arg = arg; +} + +static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig) +{ + nrt_sig->handler = NULL; + nrt_sig->arg = NULL; +} + +void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig); +#endif /* !DOXYGEN_CPP */ + +/* --- timer services --- */ + +/*! + * @addtogroup rtdm_timer + * @{ + */ + +typedef struct xntimer rtdm_timer_t; + +/** + * Timer handler + * + * @param[in] timer Timer handle as returned by rtdm_timer_init() + */ +typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer); + +/*! 
+ * @anchor RTDM_TIMERMODE_xxx @name RTDM_TIMERMODE_xxx + * Timer operation modes + * @{ + */ +enum rtdm_timer_mode { + /** Monotonic timer with relative timeout */ + RTDM_TIMERMODE_RELATIVE = XN_RELATIVE, + + /** Monotonic timer with absolute timeout */ + RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE, + + /** Adjustable timer with absolute timeout */ + RTDM_TIMERMODE_REALTIME = XN_REALTIME +}; +/** @} RTDM_TIMERMODE_xxx */ + +/** @} rtdm_timer */ + +int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler, + const char *name); + +void rtdm_timer_destroy(rtdm_timer_t *timer); + +int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry, + nanosecs_rel_t interval, enum rtdm_timer_mode mode); + +void rtdm_timer_stop(rtdm_timer_t *timer); + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer, + nanosecs_abs_t expiry, + nanosecs_rel_t interval, + enum rtdm_timer_mode mode) +{ + return xntimer_start(timer, expiry, interval, (xntmode_t)mode); +} + +static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer) +{ + xntimer_stop(timer); +} +#endif /* !DOXYGEN_CPP */ + +/* --- task services --- */ +/*! + * @addtogroup rtdm_task + * @{ + */ + +typedef struct xnthread rtdm_task_t; + +/** + * Real-time task procedure + * + * @param[in,out] arg argument as passed to rtdm_task_init() + */ +typedef void (*rtdm_task_proc_t)(void *arg); + +/** + * @anchor rtdmtaskprio @name Task Priority Range + * Maximum and minimum task priorities + * @{ */ +#define RTDM_TASK_LOWEST_PRIORITY 0 +#define RTDM_TASK_HIGHEST_PRIORITY 99 +/** @} Task Priority Range */ + +/** + * @anchor rtdmchangetaskprio @name Task Priority Modification + * Raise or lower task priorities by one level + * @{ */ +#define RTDM_TASK_RAISE_PRIORITY (+1) +#define RTDM_TASK_LOWER_PRIORITY (-1) +/** @} Task Priority Modification */ + +/** @} rtdm_task */ + +int rtdm_task_init(rtdm_task_t *task, const char *name, + rtdm_task_proc_t task_proc, void *arg, + int priority, nanosecs_rel_t period); +int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode); +void rtdm_task_busy_sleep(nanosecs_rel_t delay); + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +static inline void rtdm_task_destroy(rtdm_task_t *task) +{ + xnthread_cancel(task); + xnthread_join(task, true); +} + +static inline int rtdm_task_should_stop(void) +{ + return xnthread_test_info(xnthread_current(), XNCANCELD); +} + +void rtdm_task_join(rtdm_task_t *task); + +static inline void __deprecated rtdm_task_join_nrt(rtdm_task_t *task, + unsigned int poll_delay) +{ + rtdm_task_join(task); +} + +static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority) +{ + union xnsched_policy_param param = { .rt = { .prio = priority } }; + spl_t s; + + splhigh(s); + xnthread_set_schedparam(task, &xnsched_class_rt, ¶m); + xnsched_run(); + splexit(s); +} + +static inline int rtdm_task_set_period(rtdm_task_t *task, + nanosecs_abs_t start_date, + nanosecs_rel_t period) +{ + if (period < 0) + period = 0; + if (start_date == 0) + start_date = XN_INFINITE; + + return xnthread_set_periodic(task, start_date, XN_ABSOLUTE, period); +} + +static inline int rtdm_task_unblock(rtdm_task_t *task) +{ + spl_t s; + int res; + + splhigh(s); + res = xnthread_unblock(task); + xnsched_run(); + splexit(s); + + return res; +} + +static inline rtdm_task_t *rtdm_task_current(void) +{ + return xnthread_current(); +} + +static inline int rtdm_task_wait_period(unsigned long *overruns_r) +{ + if 
(!XENO_ASSERT(COBALT, !xnsched_unblockable_p())) + return -EPERM; + return xnthread_wait_period(overruns_r); +} + +static inline int rtdm_task_sleep(nanosecs_rel_t delay) +{ + return __rtdm_task_sleep(delay, XN_RELATIVE); +} + +static inline int +rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode) +{ + /* For the sake of a consistent API usage... */ + if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME) + return -EINVAL; + return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode); +} + +/* rtdm_task_sleep_abs shall be used instead */ +static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time) +{ + return __rtdm_task_sleep(wakeup_time, XN_REALTIME); +} + +#define rtdm_task_busy_wait(__condition, __spin_ns, __sleep_ns) \ + ({ \ + __label__ done; \ + nanosecs_abs_t __end; \ + int __ret = 0; \ + for (;;) { \ + __end = rtdm_clock_read_monotonic() + __spin_ns; \ + for (;;) { \ + if (__condition) \ + goto done; \ + if (rtdm_clock_read_monotonic() >= __end) \ + break; \ + } \ + __ret = rtdm_task_sleep(__sleep_ns); \ + if (__ret) \ + break; \ + } \ + done: \ + __ret; \ + }) + +#define rtdm_wait_context xnthread_wait_context + +static inline +void rtdm_wait_complete(struct rtdm_wait_context *wc) +{ + xnthread_complete_wait(wc); +} + +static inline +int rtdm_wait_is_completed(struct rtdm_wait_context *wc) +{ + return xnthread_wait_complete_p(wc); +} + +static inline void rtdm_wait_prepare(struct rtdm_wait_context *wc) +{ + xnthread_prepare_wait(wc); +} + +static inline +struct rtdm_wait_context *rtdm_wait_get_context(rtdm_task_t *task) +{ + return xnthread_get_wait_context(task); +} + +#endif /* !DOXYGEN_CPP */ + +/* --- event services --- */ + +typedef struct rtdm_event { + struct xnsynch synch_base; + DECLARE_XNSELECT(select_block); +} rtdm_event_t; + +#define RTDM_EVENT_PENDING XNSYNCH_SPARE1 + +void rtdm_event_init(rtdm_event_t *event, unsigned long pending); +int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned fd_index); +int rtdm_event_wait(rtdm_event_t *event); +int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq); +void rtdm_event_signal(rtdm_event_t *event); + +void rtdm_event_clear(rtdm_event_t *event); + +void rtdm_event_pulse(rtdm_event_t *event); + +void rtdm_event_destroy(rtdm_event_t *event); + +/* --- semaphore services --- */ + +typedef struct rtdm_sem { + unsigned long value; + struct xnsynch synch_base; + DECLARE_XNSELECT(select_block); +} rtdm_sem_t; + +void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value); +int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned fd_index); +int rtdm_sem_down(rtdm_sem_t *sem); +int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq); +void rtdm_sem_up(rtdm_sem_t *sem); + +void rtdm_sem_destroy(rtdm_sem_t *sem); + +/* --- mutex services --- */ + +typedef struct rtdm_mutex { + struct xnsynch synch_base; + atomic_t fastlock; +} rtdm_mutex_t; + +void rtdm_mutex_init(rtdm_mutex_t *mutex); +int rtdm_mutex_lock(rtdm_mutex_t *mutex); +int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq); +void rtdm_mutex_unlock(rtdm_mutex_t *mutex); +void rtdm_mutex_destroy(rtdm_mutex_t *mutex); + +/* --- utility functions --- */ + +#define rtdm_printk(format, ...) printk(format, ##__VA_ARGS__) + +#define rtdm_printk_ratelimited(fmt, ...) 
do { \ + if (xnclock_ratelimit()) \ + printk(fmt, ##__VA_ARGS__); \ +} while (0) + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +static inline void *rtdm_malloc(size_t size) +{ + return xnmalloc(size); +} + +static inline void rtdm_free(void *ptr) +{ + xnfree(ptr); +} + +int rtdm_mmap_to_user(struct rtdm_fd *fd, + void *src_addr, size_t len, + int prot, void **pptr, + struct vm_operations_struct *vm_ops, + void *vm_private_data); + +int rtdm_iomap_to_user(struct rtdm_fd *fd, + phys_addr_t src_addr, size_t len, + int prot, void **pptr, + struct vm_operations_struct *vm_ops, + void *vm_private_data); + +int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va); + +int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va); + +int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa); + +int rtdm_munmap(void *ptr, size_t len); + +static inline int rtdm_read_user_ok(struct rtdm_fd *fd, + const void __user *ptr, size_t size) +{ + return access_rok(ptr, size); +} + +static inline int rtdm_rw_user_ok(struct rtdm_fd *fd, + const void __user *ptr, size_t size) +{ + return access_wok(ptr, size); +} + +static inline int rtdm_copy_from_user(struct rtdm_fd *fd, + void *dst, const void __user *src, + size_t size) +{ + return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0; +} + +static inline int rtdm_safe_copy_from_user(struct rtdm_fd *fd, + void *dst, const void __user *src, + size_t size) +{ + return cobalt_copy_from_user(dst, src, size); +} + +static inline int rtdm_copy_to_user(struct rtdm_fd *fd, + void __user *dst, const void *src, + size_t size) +{ + return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0; +} + +static inline int rtdm_safe_copy_to_user(struct rtdm_fd *fd, + void __user *dst, const void *src, + size_t size) +{ + return cobalt_copy_to_user(dst, src, size); +} + +static inline int rtdm_strncpy_from_user(struct rtdm_fd *fd, + char *dst, + const char __user *src, size_t count) +{ + return cobalt_strncpy_from_user(dst, src, count); +} + +static inline bool rtdm_available(void) +{ + return realtime_core_enabled(); +} + +static inline int rtdm_rt_capable(struct rtdm_fd *fd) +{ + if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p())) + return 0; + + if (!rtdm_fd_is_user(fd)) + return !xnsched_root_p(); + + return xnthread_current() != NULL; +} + +static inline int rtdm_in_rt_context(void) +{ + return is_primary_domain(); +} + +#define RTDM_IOV_FASTMAX 16 + +int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iov, + const struct user_msghdr *msg, + struct iovec *iov_fast); + +int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov, + const struct user_msghdr *msg, + struct iovec *iov_fast); + +static inline +void rtdm_drop_iovec(struct iovec *iov, struct iovec *iov_fast) +{ + if (iov != iov_fast) + xnfree(iov); +} + +ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen); + +#endif /* !DOXYGEN_CPP */ + +#endif /* _COBALT_RTDM_DRIVER_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h new file mode 100644 index 0000000..176c67e --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h @@ -0,0 +1,431 @@ +/* + * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de> + * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net> + * Copyright (C) 2008,2013,2014 Gilles Chanteperdrix <gch@xenomai.org>. 
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_FD_H
+#define _COBALT_KERNEL_FD_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/file.h>
+#include <cobalt/kernel/tree.h>
+#include <asm-generic/xenomai/syscall.h>
+
+struct vm_area_struct;
+struct rtdm_fd;
+struct _rtdm_mmap_request;
+struct _rtdm_setsockaddr_args;
+struct _rtdm_setsockopt_args;
+struct xnselector;
+struct cobalt_ppd;
+struct rtdm_device;
+
+/**
+ * @file
+ * @anchor File operation handlers
+ * @addtogroup rtdm_device_register
+ * @{
+ */
+
+/**
+ * Open handler for named devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] oflags Open flags as passed by the user
+ *
+ * The file descriptor carries the device minor number, which can be
+ * retrieved by a call to rtdm_fd_minor(fd). The minor number can be
+ * used for distinguishing devices managed by a driver.
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c open() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_open_handler(struct rtdm_fd *fd, int oflags);
+
+/**
+ * Socket creation handler for protocol devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] protocol Protocol number as passed by the user
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c socket() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_socket_handler(struct rtdm_fd *fd, int protocol);
+
+/**
+ * Close handler
+ *
+ * @param[in] fd File descriptor associated with opened
+ * device instance.
+ *
+ * @see @c close() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+void rtdm_close_handler(struct rtdm_fd *fd);
+
+/**
+ * IOCTL handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] request Request number as passed by the user
+ * @param[in,out] arg Request argument as passed by the user
+ *
+ * @return A positive value or 0 on success. On failure return either
+ * -ENOSYS, to request that the function be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c ioctl() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_ioctl_handler(struct rtdm_fd *fd, unsigned int request, void __user *arg);
+
+/**
+ * Read handler
+ *
+ * @param[in] fd File descriptor
+ * @param[out] buf Input buffer as passed by the user
+ * @param[in] size Number of bytes the user requests to read
+ *
+ * @return On success, the number of bytes read. On failure return either
+ * -ENOSYS, to request that this handler be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
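+ *
+ * A skeletal implementation (illustrative sketch only, assuming a
+ * hypothetical driver context exposing a @c data buffer) could read:
+ *
+ * @code
+ * static ssize_t foo_read_rt(struct rtdm_fd *fd,
+ *			      void __user *buf, size_t size)
+ * {
+ *	struct foo_context *ctx = rtdm_fd_to_private(fd);
+ *	size_t len = min_t(size_t, size, sizeof(ctx->data));
+ *	int ret;
+ *
+ *	ret = rtdm_safe_copy_to_user(fd, buf, ctx->data, len);
+ *	if (ret)
+ *		return ret;
+ *
+ *	return len;
+ * }
+ * @endcode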
+ * + * @see @c read() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +ssize_t rtdm_read_handler(struct rtdm_fd *fd, void __user *buf, size_t size); + +/** + * Write handler + * + * @param[in] fd File descriptor + * @param[in] buf Output buffer as passed by the user + * @param[in] size Number of bytes the user requests to write + * + * @return On success, the number of bytes written. On failure return + * either -ENOSYS, to request that this handler be called again from the + * opposite realtime/non-realtime context, or another negative error code. + * + * @see @c write() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +ssize_t rtdm_write_handler(struct rtdm_fd *fd, const void __user *buf, size_t size); + +/** + * Receive message handler + * + * @param[in] fd File descriptor + * @param[in,out] msg Message descriptor as passed by the user, automatically + * mirrored to safe kernel memory in case of user mode call + * @param[in] flags Message flags as passed by the user + * + * @return On success, the number of bytes received. On failure return + * either -ENOSYS, to request that this handler be called again from the + * opposite realtime/non-realtime context, or another negative error code. + * + * @see @c recvmsg() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +ssize_t rtdm_recvmsg_handler(struct rtdm_fd *fd, struct user_msghdr *msg, int flags); + +/** + * Transmit message handler + * + * @param[in] fd File descriptor + * @param[in] msg Message descriptor as passed by the user, automatically + * mirrored to safe kernel memory in case of user mode call + * @param[in] flags Message flags as passed by the user + * + * @return On success, the number of bytes transmitted. On failure return + * either -ENOSYS, to request that this handler be called again from the + * opposite realtime/non-realtime context, or another negative error code. + * + * @see @c sendmsg() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +ssize_t rtdm_sendmsg_handler(struct rtdm_fd *fd, const struct user_msghdr *msg, int flags); + +/** + * Select handler + * + * @param[in] fd File descriptor + * @param selector Pointer to the selector structure + * @param type Type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a + * XNSELECT_EXCEPT) + * @param index Index of the file descriptor + * + * @return 0 on success. On failure, a negative error code is + * returned. + * + * @see @c select() in POSIX.1-2001, + * http://pubs.opengroup.org/onlinepubs/007908799/xsh/select.html + */ +int rtdm_select_handler(struct rtdm_fd *fd, struct xnselector *selector, + unsigned int type, unsigned int index); + +/** + * Memory mapping handler + * + * @param[in] fd File descriptor + * @param[in] vma Virtual memory area descriptor + * + * @return 0 on success. On failure, a negative error code is + * returned. + * + * @see @c mmap() in POSIX.1-2001, + * http://pubs.opengroup.org/onlinepubs/7908799/xsh/mmap.html + * + * @note The address hint passed to the mmap() request is deliberately + * ignored by RTDM. + */ +int rtdm_mmap_handler(struct rtdm_fd *fd, struct vm_area_struct *vma); + +/** + * Allocate mapping region in address space + * + * When present, this optional handler should return the start address + * of a free region in the process's address space, large enough to + * cover the ongoing mmap() operation. If unspecified, the default + * architecture-defined handler is invoked. 
+ * + * Most drivers can omit this handler, except on MMU-less platforms + * (see second note). + * + * @param[in] fd File descriptor + * @param[in] len Length of the requested region + * @param[in] pgoff Page frame number to map to (see second note). + * @param[in] flags Requested mapping flags + * + * @return The start address of the mapping region on success. On + * failure, a negative error code should be returned, with -ENOSYS + * meaning that the driver does not want to provide such information, + * in which case the ongoing mmap() operation will fail. + * + * @note The address hint passed to the mmap() request is deliberately + * ignored by RTDM, and therefore not passed to this handler. + * + * @note On MMU-less platforms, this handler is required because RTDM + * issues mapping requests over a shareable character device + * internally. In such context, the RTDM core may pass a null @a pgoff + * argument to the handler, for probing for the logical start address + * of the memory region to map to. Otherwise, when @a pgoff is + * non-zero, pgoff << PAGE_SHIFT is usually returned. + */ +unsigned long +rtdm_get_unmapped_area_handler(struct rtdm_fd *fd, + unsigned long len, unsigned long pgoff, + unsigned long flags); +/** + * @anchor rtdm_fd_ops + * @brief RTDM file operation descriptor. + * + * This structure describes the operations available with a RTDM + * device, defining handlers for submitting I/O requests. Those + * handlers are implemented by RTDM device drivers. + */ +struct rtdm_fd_ops { + /** See rtdm_open_handler(). */ + int (*open)(struct rtdm_fd *fd, int oflags); + /** See rtdm_socket_handler(). */ + int (*socket)(struct rtdm_fd *fd, int protocol); + /** See rtdm_close_handler(). */ + void (*close)(struct rtdm_fd *fd); + /** See rtdm_ioctl_handler(). */ + int (*ioctl_rt)(struct rtdm_fd *fd, + unsigned int request, void __user *arg); + /** See rtdm_ioctl_handler(). */ + int (*ioctl_nrt)(struct rtdm_fd *fd, + unsigned int request, void __user *arg); + /** See rtdm_read_handler(). */ + ssize_t (*read_rt)(struct rtdm_fd *fd, + void __user *buf, size_t size); + /** See rtdm_read_handler(). */ + ssize_t (*read_nrt)(struct rtdm_fd *fd, + void __user *buf, size_t size); + /** See rtdm_write_handler(). */ + ssize_t (*write_rt)(struct rtdm_fd *fd, + const void __user *buf, size_t size); + /** See rtdm_write_handler(). */ + ssize_t (*write_nrt)(struct rtdm_fd *fd, + const void __user *buf, size_t size); + /** See rtdm_recvmsg_handler(). */ + ssize_t (*recvmsg_rt)(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags); + /** See rtdm_recvmsg_handler(). */ + ssize_t (*recvmsg_nrt)(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags); + /** See rtdm_sendmsg_handler(). */ + ssize_t (*sendmsg_rt)(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags); + /** See rtdm_sendmsg_handler(). */ + ssize_t (*sendmsg_nrt)(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags); + /** See rtdm_select_handler(). */ + int (*select)(struct rtdm_fd *fd, + struct xnselector *selector, + unsigned int type, unsigned int index); + /** See rtdm_mmap_handler(). */ + int (*mmap)(struct rtdm_fd *fd, + struct vm_area_struct *vma); + /** See rtdm_get_unmapped_area_handler(). 
*/ + unsigned long (*get_unmapped_area)(struct rtdm_fd *fd, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +}; + +/** @} File operation handlers */ + +struct rtdm_fd { + unsigned int magic; + struct rtdm_fd_ops *ops; + struct cobalt_ppd *owner; + unsigned int refs; + int ufd; + int minor; + int oflags; +#ifdef CONFIG_XENO_ARCH_SYS3264 + int compat; +#endif + bool stale; + struct list_head cleanup; + struct list_head next; /* in dev->openfd_list */ +}; + +#define RTDM_FD_MAGIC 0x52544446 + +#define RTDM_FD_COMPAT __COBALT_COMPAT_BIT +#define RTDM_FD_COMPATX __COBALT_COMPATX_BIT + +int __rtdm_anon_getfd(const char *name, int flags); + +void __rtdm_anon_putfd(int ufd); + +static inline struct cobalt_ppd *rtdm_fd_owner(const struct rtdm_fd *fd) +{ + return fd->owner; +} + +static inline int rtdm_fd_ufd(const struct rtdm_fd *fd) +{ + return fd->ufd; +} + +static inline int rtdm_fd_minor(const struct rtdm_fd *fd) +{ + return fd->minor; +} + +static inline int rtdm_fd_flags(const struct rtdm_fd *fd) +{ + return fd->oflags; +} + +#ifdef CONFIG_XENO_ARCH_SYS3264 +static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd) +{ + return fd->compat; +} +#else +static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd) +{ + return 0; +} +#endif + +int rtdm_fd_enter(struct rtdm_fd *rtdm_fd, int ufd, + unsigned int magic, struct rtdm_fd_ops *ops); + +int rtdm_fd_register(struct rtdm_fd *fd, int ufd); + +struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic); + +int rtdm_fd_lock(struct rtdm_fd *fd); + +void rtdm_fd_put(struct rtdm_fd *fd); + +void rtdm_fd_unlock(struct rtdm_fd *fd); + +int rtdm_fd_fcntl(int ufd, int cmd, ...); + +int rtdm_fd_ioctl(int ufd, unsigned int request, ...); + +ssize_t rtdm_fd_read(int ufd, void __user *buf, size_t size); + +ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size); + +int rtdm_fd_close(int ufd, unsigned int magic); + +ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags); + +int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, void __user *u_timeout, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg), + int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts)); + +int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, void __user *u_timeout, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg)); + +ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg, + int flags); + +int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg)); + +int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma, + void **u_addrp); + +int rtdm_fd_valid_p(int ufd); + +int rtdm_fd_select(int ufd, struct xnselector *selector, + unsigned int type); + +int rtdm_fd_get_setsockaddr_args(struct rtdm_fd *fd, + struct _rtdm_setsockaddr_args *dst, + const void *src); + +int rtdm_fd_get_setsockopt_args(struct rtdm_fd *fd, + struct _rtdm_setsockopt_args *dst, + const void *src); + +int rtdm_fd_get_iovec(struct rtdm_fd *fd, struct iovec *iov, + const struct user_msghdr *msg, bool rw); + +int rtdm_fd_put_iovec(struct rtdm_fd *fd, const struct iovec *iov, + const struct user_msghdr *msg); + +int rtdm_device_new_fd(struct 
rtdm_fd *fd, int ufd, + struct rtdm_device *dev); + +void rtdm_device_flush_fds(struct rtdm_device *dev); + +void rtdm_fd_cleanup(struct cobalt_ppd *p); + +void rtdm_fd_init(void); + +#endif /* _COBALT_KERNEL_FD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h new file mode 100644 index 0000000..b621a71 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h @@ -0,0 +1,82 @@ +/** + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_GPIO_H +#define _COBALT_RTDM_GPIO_H + +#include <linux/list.h> +#include <rtdm/driver.h> +#include <rtdm/uapi/gpio.h> + +struct class; +struct device_node; +struct gpio_desc; + +struct rtdm_gpio_pin { + struct rtdm_device dev; + struct list_head next; + rtdm_irq_t irqh; + rtdm_event_t event; + char *name; + struct gpio_desc *desc; + nanosecs_abs_t timestamp; + bool monotonic_timestamp; +}; + +struct rtdm_gpio_chip { + struct gpio_chip *gc; + struct rtdm_driver driver; + struct class *devclass; + struct list_head next; + rtdm_lock_t lock; + struct rtdm_gpio_pin pins[0]; +}; + +int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc, + struct gpio_chip *gc, + int gpio_subclass); + +struct rtdm_gpio_chip * +rtdm_gpiochip_alloc(struct gpio_chip *gc, + int gpio_subclass); + +void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc); + +int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc, + const char *label, int gpio_subclass); + +int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc, + unsigned int offset); + +int rtdm_gpiochip_find(struct device_node *from, const char *label, int type); + +int rtdm_gpiochip_array_find(struct device_node *from, const char *label[], + int nentries, int type); + +#ifdef CONFIG_OF + +int rtdm_gpiochip_scan_of(struct device_node *from, + const char *compat, int type); + +int rtdm_gpiochip_scan_array_of(struct device_node *from, + const char *compat[], + int nentries, int type); +#endif + +void rtdm_gpiochip_remove_by_type(int type); + +#endif /* !_COBALT_RTDM_GPIO_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h new file mode 100644 index 0000000..e38d241 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_PWM_H +#define _COBALT_RTDM_PWM_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/gpiopwm.h> + +#endif /* !_COBALT_RTDM_PWM_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h new file mode 100644 index 0000000..5eefccd --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h @@ -0,0 +1,30 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _COBALT_RTDM_IPC_H +#define _COBALT_RTDM_IPC_H + +#include <linux/net.h> +#include <linux/socket.h> +#include <linux/if.h> +#include <rtdm/rtdm.h> +#include <rtdm/uapi/ipc.h> + +#endif /* !_COBALT_RTDM_IPC_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h new file mode 100644 index 0000000..07198f8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h @@ -0,0 +1,45 @@ +/* + * RTnet - real-time networking subsystem + * Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef _COBALT_RTDM_NET_H +#define _COBALT_RTDM_NET_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/net.h> +#include <rtdm/driver.h> + +struct rtnet_callback { + void (*func)(struct rtdm_fd *, void *); + void *arg; +}; + +#define RTNET_RTIOC_CALLBACK _IOW(RTIOC_TYPE_NETWORK, 0x12, \ + struct rtnet_callback) + +/* utility functions */ + +/* provided by rt_ipv4 */ +unsigned long rt_inet_aton(const char *ip); + +/* provided by rt_packet */ +int rt_eth_aton(unsigned char *addr_buf, const char *mac); + +#define RTNET_RTDM_VER 914 + +#endif /* _COBALT_RTDM_NET_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h new file mode 100644 index 0000000..b937df2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de> + * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_RTDM_H +#define _COBALT_RTDM_RTDM_H + +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/ioctl.h> +#include <linux/sched.h> +#include <linux/socket.h> +#include <cobalt/kernel/ppd.h> +#include <rtdm/fd.h> + +typedef __u32 socklen_t; + +#include <rtdm/uapi/rtdm.h> + +int __rtdm_dev_open(const char *path, int oflag); + +int __rtdm_dev_socket(int protocol_family, + int socket_type, int protocol); + +static inline int rtdm_open(const char *path, int oflag, ...) +{ + return __rtdm_dev_open(path, oflag); +} + +static inline int rtdm_socket(int protocol_family, + int socket_type, int protocol) +{ + return __rtdm_dev_socket(protocol_family, socket_type, protocol); +} + +static inline int rtdm_close(int fd) +{ + return rtdm_fd_close(fd, RTDM_FD_MAGIC); +} + +#define rtdm_fcntl(__fd, __cmd, __args...) \ + rtdm_fd_fcntl(__fd, __cmd, ##__args) + +#define rtdm_ioctl(__fd, __request, __args...) \ + rtdm_fd_ioctl(__fd, __request, ##__args) + +static inline ssize_t rtdm_read(int fd, void *buf, size_t count) +{ + return rtdm_fd_read(fd, buf, count); +} + +static inline ssize_t rtdm_write(int fd, const void *buf, size_t count) +{ + return rtdm_fd_write(fd, buf, count); +} + +static inline ssize_t rtdm_recvmsg(int s, struct user_msghdr *msg, int flags) +{ + return rtdm_fd_recvmsg(s, msg, flags); +} + +static inline ssize_t rtdm_sendmsg(int s, const struct user_msghdr *msg, int flags) +{ + return rtdm_fd_sendmsg(s, msg, flags); +} + +static inline +ssize_t rtdm_recvfrom(int s, void *buf, size_t len, int flags, + struct sockaddr *from, + socklen_t *fromlen) +{ + struct user_msghdr msg; + struct iovec iov; + ssize_t ret; + + iov.iov_base = buf; + iov.iov_len = len; + msg.msg_name = from; + msg.msg_namelen = from ? 
*fromlen : 0; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + + ret = rtdm_recvmsg(s, &msg, flags); + if (ret < 0) + return ret; + + if (from) + *fromlen = msg.msg_namelen; + + return ret; +} + +static inline ssize_t rtdm_recv(int s, void *buf, size_t len, int flags) +{ + return rtdm_recvfrom(s, buf, len, flags, NULL, NULL); +} + +static inline ssize_t rtdm_sendto(int s, const void *buf, size_t len, + int flags, const struct sockaddr *to, + socklen_t tolen) +{ + struct user_msghdr msg; + struct iovec iov; + + iov.iov_base = (void *)buf; + iov.iov_len = len; + msg.msg_name = (struct sockaddr *)to; + msg.msg_namelen = tolen; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + + return rtdm_sendmsg(s, &msg, flags); +} + +static inline ssize_t rtdm_send(int s, const void *buf, size_t len, int flags) +{ + return rtdm_sendto(s, buf, len, flags, NULL, 0); +} + +static inline int rtdm_getsockopt(int s, int level, int optname, + void *optval, socklen_t *optlen) +{ + struct _rtdm_getsockopt_args args = { + level, optname, optval, optlen + }; + + return rtdm_ioctl(s, _RTIOC_GETSOCKOPT, &args); +} + +static inline int rtdm_setsockopt(int s, int level, int optname, + const void *optval, socklen_t optlen) +{ + struct _rtdm_setsockopt_args args = { + level, optname, (void *)optval, optlen + }; + + return rtdm_ioctl(s, _RTIOC_SETSOCKOPT, &args); +} + +static inline int rtdm_bind(int s, const struct sockaddr *my_addr, + socklen_t addrlen) +{ + struct _rtdm_setsockaddr_args args = { + my_addr, addrlen + }; + + return rtdm_ioctl(s, _RTIOC_BIND, &args); +} + +static inline int rtdm_connect(int s, const struct sockaddr *serv_addr, + socklen_t addrlen) +{ + struct _rtdm_setsockaddr_args args = { + serv_addr, addrlen + }; + + return rtdm_ioctl(s, _RTIOC_CONNECT, &args); +} + +static inline int rtdm_listen(int s, int backlog) +{ + return rtdm_ioctl(s, _RTIOC_LISTEN, backlog); +} + +static inline int rtdm_accept(int s, struct sockaddr *addr, + socklen_t *addrlen) +{ + struct _rtdm_getsockaddr_args args = { + addr, addrlen + }; + + return rtdm_ioctl(s, _RTIOC_ACCEPT, &args); +} + +static inline int rtdm_getsockname(int s, struct sockaddr *name, + socklen_t *namelen) +{ + struct _rtdm_getsockaddr_args args = { + name, namelen + }; + + return rtdm_ioctl(s, _RTIOC_GETSOCKNAME, &args); +} + +static inline int rtdm_getpeername(int s, struct sockaddr *name, + socklen_t *namelen) +{ + struct _rtdm_getsockaddr_args args = { + name, namelen + }; + + return rtdm_ioctl(s, _RTIOC_GETPEERNAME, &args); +} + +static inline int rtdm_shutdown(int s, int how) +{ + return rtdm_ioctl(s, _RTIOC_SHUTDOWN, how); +} + +#endif /* _COBALT_RTDM_RTDM_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h new file mode 100644 index 0000000..0b557b4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_SERIAL_H +#define _COBALT_RTDM_SERIAL_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/serial.h> + +#endif /* !_COBALT_RTDM_SERIAL_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h new file mode 100644 index 0000000..d2669e1 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_TESTING_H +#define _COBALT_RTDM_TESTING_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/testing.h> + +#ifdef CONFIG_XENO_ARCH_SYS3264 + +#include <rtdm/compat.h> + +struct compat_rttst_overall_bench_res { + struct rttst_bench_res result; + compat_uptr_t histogram_avg; + compat_uptr_t histogram_min; + compat_uptr_t histogram_max; +}; + +struct compat_rttst_heap_stathdr { + int nrstats; + compat_uptr_t buf; +}; + +#define RTTST_RTIOC_TMBENCH_STOP_COMPAT \ + _IOWR(RTIOC_TYPE_TESTING, 0x11, struct compat_rttst_overall_bench_res) + +#endif /* CONFIG_XENO_ARCH_SYS3264 */ + +#endif /* !_COBALT_RTDM_TESTING_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h new file mode 100644 index 0000000..bc2a68d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h @@ -0,0 +1,340 @@ +/** + * @file + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_UDD_H +#define _COBALT_RTDM_UDD_H + +#include <linux/list.h> +#include <rtdm/driver.h> +#include <rtdm/uapi/udd.h> + +/** + * @ingroup rtdm_profiles + * @defgroup rtdm_udd User-space driver core + * + * This profile includes all mini-drivers sitting on top of the + * User-space Device Driver framework (UDD). 
The generic UDD core + * driver enables interrupt control and I/O memory access interfaces + * to user-space device drivers, as defined by the mini-drivers when + * registering. + * + * A mini-driver supplements the UDD core with ancillary functions for + * dealing with @ref udd_memory_region "memory mappings" and @ref + * udd_irq_handler "interrupt control" for a particular I/O + * card/device. + * + * UDD-compliant mini-drivers only have to provide the basic support + * for dealing with the interrupt sources present in the device, so + * that most of the device request handling can be done from a Xenomai + * application running in user-space. Typically, a mini-driver would + * handle the interrupt top-half, and the user-space application would + * handle the bottom-half. + * + * This profile is reminiscent of the UIO framework available with the + * Linux kernel, adapted to the dual-kernel Cobalt environment. + * + * @{ + */ + +/** + * @anchor udd_irq_special + * Special IRQ values for udd_device.irq + * + * @{ + */ +/** + * No IRQ managed. Passing this code implicitly disables all + * interrupt-related services, including control (disable/enable) and + * notification. + */ +#define UDD_IRQ_NONE 0 +/** + * IRQ directly managed from the mini-driver on top of the UDD + * core. The mini-driver is in charge of attaching the handler(s) to + * the IRQ(s) it manages, notifying the Cobalt threads waiting for IRQ + * events by calling the udd_notify_event() service. + */ +#define UDD_IRQ_CUSTOM (-1) +/** @} */ + +/** + * @anchor udd_memory_types @name Memory types for mapping + * Types of memory for mapping + * + * The UDD core implements a default ->mmap() handler which first + * attempts to hand over the request to the corresponding handler + * defined by the mini-driver. If not present, the UDD core + * establishes the mapping automatically, depending on the memory + * type defined for the region. + * + * @{ + */ +/** + * No memory region. Use this type code to disable an entry in the + * array of memory mappings, i.e. udd_device.mem_regions[]. + */ +#define UDD_MEM_NONE 0 +/** + * Physical I/O memory region. By default, the UDD core maps such + * memory to a virtual user range by calling the rtdm_mmap_iomem() + * service. + */ +#define UDD_MEM_PHYS 1 +/** + * Kernel logical memory region (e.g. kmalloc()). By default, the UDD + * core maps such memory to a virtual user range by calling the + * rtdm_mmap_kmem() service. */ +#define UDD_MEM_LOGICAL 2 +/** + * Virtual memory region with no direct physical mapping + * (e.g. vmalloc()). By default, the UDD core maps such memory to a + * virtual user range by calling the rtdm_mmap_vmem() service. + */ +#define UDD_MEM_VIRTUAL 3 +/** @} */ + +#define UDD_NR_MAPS 5 + +/** + * @anchor udd_memory_region + * UDD memory region descriptor. + * + * This descriptor defines the characteristics of a memory region + * declared to the UDD core by the mini-driver. All valid regions + * should be declared in the udd_device.mem_regions[] array; + * invalid/unassigned ones should bear the UDD_MEM_NONE type. + * + * The UDD core exposes each region via the mmap(2) interface to the + * application. To this end, a companion mapper device is created + * automatically when registering the mini-driver. + * + * The mapper device creates special files in the RTDM namespace for + * reaching the individual regions, which the application can open, + * then map to its address space via the mmap(2) system call.
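+ *
+ * On the interrupt side, a mini-driver which delegates IRQ
+ * management to the UDD core only has to provide the ->interrupt()
+ * handler; once that handler has returned, the core notifies the
+ * Cobalt threads waiting for IRQ events (udd_notify_event() plays
+ * the same role for UDD_IRQ_CUSTOM setups). A minimal sketch
+ * follows; FOOCARD_IRQ, foocard_iobase and the register layout are
+ * hypothetical placeholders, not part of the UDD API:
+ *
+ * @code
+ * static void __iomem *foocard_iobase;
+ *
+ * static int foocard_interrupt(struct udd_device *udd)
+ * {
+ *         u32 status = readl(foocard_iobase + FOOCARD_STATUS);
+ *
+ *         if (!(status & FOOCARD_IRQ_PENDING))
+ *                 return RTDM_IRQ_NONE;
+ *
+ *         writel(status, foocard_iobase + FOOCARD_ACK);
+ *
+ *         return RTDM_IRQ_HANDLED;
+ * }
+ *
+ * static struct udd_device udd = {
+ *         .device_name = "foocard",
+ *         .irq = FOOCARD_IRQ,
+ *         .ops = {
+ *                 .interrupt = foocard_interrupt,
+ *         },
+ * };
+ * @endcode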
+ * + * For instance, declaring a region of physical memory at index #2 + * of the memory region array could be done as follows: + * + * @code + * static struct udd_device udd; + * + * static int foocard_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) + * { + * udd.device_name = "foocard"; + * ... + * udd.mem_regions[2].name = "ADC"; + * udd.mem_regions[2].addr = pci_resource_start(dev, 1); + * udd.mem_regions[2].len = pci_resource_len(dev, 1); + * udd.mem_regions[2].type = UDD_MEM_PHYS; + * ... + * return udd_register_device(&udd); + * } + * @endcode + * + * This will make such a region accessible via the mapper device, + * relying on the default ->mmap() handler from the UDD core, using + * the following sequence of code (see note): + * + * @code + * int fd, fdm; + * void *p; + * + * fd = open("/dev/rtdm/foocard", O_RDWR); + * fdm = open("/dev/rtdm/foocard,mapper2", O_RDWR); + * p = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fdm, 0); + * @endcode + * + * If no valid region has been declared in the + * udd_device.mem_regions[] array, no mapper device is created. + * + * @note The example code assumes that @ref cobalt_api POSIX symbol + * wrapping is in effect, so that RTDM performs the memory mapping + * operation (not the regular kernel). + */ +struct udd_memregion { + /** Name of the region (informational but required) */ + const char *name; + /** + * Start address of the region. This may be a physical or + * virtual address, depending on the @ref udd_memory_types + * "memory type". + */ + unsigned long addr; + /** + * Length (in bytes) of the region. This value must be + * PAGE_SIZE aligned. + */ + size_t len; + /** + * Type of the region. See the discussion about @ref + * udd_memory_types "UDD memory types" for possible values. + */ + int type; +}; + +/** + * @anchor udd_device + * UDD device descriptor. + * + * This descriptor defines the characteristics of a UDD-based + * mini-driver when registering via a call to udd_register_device(). + */ +struct udd_device { + /** + * Name of the device managed by the mini-driver, appears + * automatically in the /dev/rtdm namespace upon creation. + */ + const char *device_name; + /** + * Additional device flags (e.g. RTDM_EXCLUSIVE; + * RTDM_NAMED_DEVICE may be omitted). + */ + int device_flags; + /** + * Subclass code of the device managed by the mini-driver (see + * RTDM_SUBCLASS_xxx definition in the @ref rtdm_profiles + * "Device Profiles"). The main class code is pre-set to + * RTDM_CLASS_UDD. + */ + int device_subclass; + struct { + /** + * Ancillary open() handler, optional. See + * rtdm_open_handler(). + * + * @note This handler is called from secondary mode + * only. + */ + int (*open)(struct rtdm_fd *fd, int oflags); + /** + * Ancillary close() handler, optional. See + * rtdm_close_handler(). + * + * @note This handler is called from secondary mode + * only. + */ + void (*close)(struct rtdm_fd *fd); + /** + * Ancillary ioctl() handler, optional. See + * rtdm_ioctl_handler(). + * + * If this routine returns -ENOSYS, the default action + * implemented by the UDD core for the corresponding + * request will be applied, as if no ioctl handler had + * been defined. + * + * @note This handler is called from primary mode + * only. + */ + int (*ioctl)(struct rtdm_fd *fd, + unsigned int request, void *arg); + /** + * Ancillary mmap() handler for the mapper device, + * optional. See rtdm_mmap_handler(). The mapper + * device operates on a valid region defined in the @a + * mem_regions[] array.
A pointer to the region + * can be obtained by a call to udd_get_region(). + * + * If this handler is NULL, the UDD core establishes + * the mapping automatically, depending on the memory + * type defined for the region. + * + * @note This handler is called from secondary mode + * only. + */ + int (*mmap)(struct rtdm_fd *fd, + struct vm_area_struct *vma); + /** + * @anchor udd_irq_handler + * + * Ancillary handler for receiving interrupts. This + * handler must be provided if the mini-driver hands + * over IRQ handling to the UDD core, by setting the + * @a irq field to a valid value, different from + * UDD_IRQ_CUSTOM and UDD_IRQ_NONE. + * + * The ->interrupt() handler shall return one of the + * following status codes: + * + * - RTDM_IRQ_HANDLED, if the mini-driver successfully + * handled the IRQ. This flag can be combined with + * RTDM_IRQ_DISABLE to prevent the Cobalt kernel from + * re-enabling the interrupt line upon return, + * otherwise it is re-enabled automatically. + * + * - RTDM_IRQ_NONE, if the interrupt does not match + * any IRQ the mini-driver can handle. + * + * Once the ->interrupt() handler has returned, the + * UDD core notifies user-space Cobalt threads waiting + * for IRQ events (if any). + * + * @note This handler is called from primary mode + * only. + */ + int (*interrupt)(struct udd_device *udd); + } ops; + /** + * IRQ number. If valid, the UDD core manages the + * corresponding interrupt line, installing a base handler. + * Otherwise, a special value can be passed for declaring + * @ref udd_irq_special "unmanaged IRQs". + */ + int irq; + /** + * Array of memory regions defined by the device. The array + * can be sparse, with some entries bearing the UDD_MEM_NONE + * type interleaved with valid ones. See the discussion about + * @ref udd_memory_region "UDD memory regions". + */ + struct udd_memregion mem_regions[UDD_NR_MAPS]; + /** Reserved to the UDD core. */ + struct udd_reserved { + rtdm_irq_t irqh; + u32 event_count; + struct udd_signotify signfy; + struct rtdm_event pulse; + struct rtdm_driver driver; + struct rtdm_device device; + struct rtdm_driver mapper_driver; + struct udd_mapper { + struct udd_device *udd; + struct rtdm_device dev; + } mapdev[UDD_NR_MAPS]; + char *mapper_name; + int nr_maps; + } __reserved; +}; + +int udd_register_device(struct udd_device *udd); + +int udd_unregister_device(struct udd_device *udd); + +struct udd_device *udd_get_device(struct rtdm_fd *fd); + +void udd_notify_event(struct udd_device *udd); + +void udd_enable_irq(struct udd_device *udd, + rtdm_event_t *done); + +void udd_disable_irq(struct udd_device *udd, + rtdm_event_t *done); + +/** @} */ + +#endif /* !_COBALT_RTDM_UDD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h new file mode 100644 index 0000000..75efdec --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_IDLE_H +#define _COBALT_KERNEL_SCHED_IDLE_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-idle.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +/* Idle priority level - actually never used for indexing. */ +#define XNSCHED_IDLE_PRIO -1 + +extern struct xnsched_class xnsched_class_idle; + +static inline bool __xnsched_idle_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + xnthread_clear_state(thread, XNWEAK); + return xnsched_set_effective_priority(thread, p->idle.prio); +} + +static inline void __xnsched_idle_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + p->idle.prio = thread->cprio; +} + +static inline void __xnsched_idle_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (p) + /* Inheriting a priority-less class makes no sense. */ + XENO_WARN_ON_ONCE(COBALT, 1); + else + thread->cprio = XNSCHED_IDLE_PRIO; +} + +static inline void __xnsched_idle_protectprio(struct xnthread *thread, int prio) +{ + XENO_WARN_ON_ONCE(COBALT, 1); +} + +static inline int xnsched_idle_init_thread(struct xnthread *thread) +{ + return 0; +} + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_IDLE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h new file mode 100644 index 0000000..57a46a9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_KERNEL_SCHED_QUOTA_H +#define _COBALT_KERNEL_SCHED_QUOTA_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-quota.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + +#define XNSCHED_QUOTA_MIN_PRIO 1 +#define XNSCHED_QUOTA_MAX_PRIO 255 +#define XNSCHED_QUOTA_NR_PRIO \ + (XNSCHED_QUOTA_MAX_PRIO - XNSCHED_QUOTA_MIN_PRIO + 1) + +extern struct xnsched_class xnsched_class_quota; + +struct xnsched_quota_group { + struct xnsched *sched; + xnticks_t quota_ns; + xnticks_t quota_peak_ns; + xnticks_t run_start_ns; + xnticks_t run_budget_ns; + xnticks_t run_credit_ns; + struct list_head members; + struct list_head expired; + struct list_head next; + int nr_active; + int nr_threads; + int tgid; + int quota_percent; + int quota_peak_percent; +}; + +struct xnsched_quota { + xnticks_t period_ns; + struct xntimer refill_timer; + struct xntimer limit_timer; + struct list_head groups; +}; + +static inline int xnsched_quota_init_thread(struct xnthread *thread) +{ + thread->quota = NULL; + INIT_LIST_HEAD(&thread->quota_expired); + + return 0; +} + +int xnsched_quota_create_group(struct xnsched_quota_group *tg, + struct xnsched *sched, + int *quota_sum_r); + +int xnsched_quota_destroy_group(struct xnsched_quota_group *tg, + int force, + int *quota_sum_r); + +void xnsched_quota_set_limit(struct xnsched_quota_group *tg, + int quota_percent, int quota_peak_percent, + int *quota_sum_r); + +struct xnsched_quota_group * +xnsched_quota_find_group(struct xnsched *sched, int tgid); + +int xnsched_quota_sum_all(struct xnsched *sched); + +#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_QUOTA_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h new file mode 100644 index 0000000..992a5ba --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h @@ -0,0 +1,150 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_RT_H +#define _COBALT_KERNEL_SCHED_RT_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-rt.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +/* + * Global priority scale for Xenomai's core scheduling class, + * available to SCHED_COBALT members. + */ +#define XNSCHED_CORE_MIN_PRIO 0 +#define XNSCHED_CORE_MAX_PRIO 259 +#define XNSCHED_CORE_NR_PRIO \ + (XNSCHED_CORE_MAX_PRIO - XNSCHED_CORE_MIN_PRIO + 1) + +/* + * Priority range for SCHED_FIFO, and all other classes Cobalt + * implements except SCHED_COBALT. 
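+ *
+ * Each class maps its own range into the single core scale through
+ * its weight: xnsched_calc_wprio() in sched.h yields
+ * wprio = prio + class->weight, with weights expressed in
+ * XNSCHED_CLASS_WEIGHT_FACTOR units (see schedqueue.h). For
+ * instance, a class assigned XNSCHED_CLASS_WEIGHT(1) maps priority
+ * p to weighted priority p + 1024 (illustrative arithmetic only).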
+ */ +#define XNSCHED_FIFO_MIN_PRIO 1 +#define XNSCHED_FIFO_MAX_PRIO 256 + +#if XNSCHED_CORE_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR || \ + (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) && \ + XNSCHED_CORE_NR_PRIO > XNSCHED_MLQ_LEVELS) +#error "XNSCHED_MLQ_LEVELS is too low" +#endif + +extern struct xnsched_class xnsched_class_rt; + +static inline void __xnsched_rt_requeue(struct xnthread *thread) +{ + xnsched_addq(&thread->sched->rt.runnable, thread); +} + +static inline void __xnsched_rt_enqueue(struct xnthread *thread) +{ + xnsched_addq_tail(&thread->sched->rt.runnable, thread); +} + +static inline void __xnsched_rt_dequeue(struct xnthread *thread) +{ + xnsched_delq(&thread->sched->rt.runnable, thread); +} + +static inline void __xnsched_rt_track_weakness(struct xnthread *thread) +{ + /* + * We have to track threads exiting weak scheduling, i.e. any + * thread leaving the WEAK class code if compiled in, or + * assigned a zero priority if weak threads are hosted by the + * RT class. + * + * CAUTION: since we need to check the effective priority + * level for determining the weakness state, this can only + * apply to non-boosted threads. + */ + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK) || thread->cprio) + xnthread_clear_state(thread, XNWEAK); + else + xnthread_set_state(thread, XNWEAK); +} + +static inline bool __xnsched_rt_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + bool ret = xnsched_set_effective_priority(thread, p->rt.prio); + + if (!xnthread_test_state(thread, XNBOOST)) + __xnsched_rt_track_weakness(thread); + + return ret; +} + +static inline void __xnsched_rt_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + p->rt.prio = thread->cprio; +} + +static inline void __xnsched_rt_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (p) + thread->cprio = p->rt.prio; /* Force update. */ + else { + thread->cprio = thread->bprio; + /* Leaving PI/PP, so non-boosted by definition. */ + __xnsched_rt_track_weakness(thread); + } +} + +static inline void __xnsched_rt_protectprio(struct xnthread *thread, int prio) +{ + /* + * The RT class supports the widest priority range from + * XNSCHED_CORE_MIN_PRIO to XNSCHED_CORE_MAX_PRIO inclusive, + * no need to cap the input value which is guaranteed to be in + * the range [1..XNSCHED_CORE_MAX_PRIO]. + */ + thread->cprio = prio; +} + +static inline void __xnsched_rt_forget(struct xnthread *thread) +{ +} + +static inline int xnsched_rt_init_thread(struct xnthread *thread) +{ + return 0; +} + +#ifdef CONFIG_XENO_OPT_SCHED_CLASSES +struct xnthread *xnsched_rt_pick(struct xnsched *sched); +#else +static inline struct xnthread *xnsched_rt_pick(struct xnsched *sched) +{ + return xnsched_getq(&sched->rt.runnable); +} +#endif + +void xnsched_rt_tick(struct xnsched *sched); + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_RT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h new file mode 100644 index 0000000..50ca406 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_SPORADIC_H +#define _COBALT_KERNEL_SCHED_SPORADIC_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-sporadic.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + +#define XNSCHED_SPORADIC_MIN_PRIO 1 +#define XNSCHED_SPORADIC_MAX_PRIO 255 +#define XNSCHED_SPORADIC_NR_PRIO \ + (XNSCHED_SPORADIC_MAX_PRIO - XNSCHED_SPORADIC_MIN_PRIO + 1) + +extern struct xnsched_class xnsched_class_sporadic; + +struct xnsched_sporadic_repl { + xnticks_t date; + xnticks_t amount; +}; + +struct xnsched_sporadic_data { + xnticks_t resume_date; + xnticks_t budget; + int repl_in; + int repl_out; + int repl_pending; + struct xntimer repl_timer; + struct xntimer drop_timer; + struct xnsched_sporadic_repl repl_data[CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL]; + struct xnsched_sporadic_param param; + struct xnthread *thread; +}; + +struct xnsched_sporadic { +#ifdef CONFIG_XENO_OPT_DEBUG_COBALT + unsigned long drop_retries; +#endif +}; + +static inline int xnsched_sporadic_init_thread(struct xnthread *thread) +{ + thread->pss = NULL; + + return 0; +} + +#endif /* !CONFIG_XENO_OPT_SCHED_SPORADIC */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_SPORADIC_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h new file mode 100644 index 0000000..6ae5ff8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_TP_H +#define _COBALT_KERNEL_SCHED_TP_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-tp.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#ifdef CONFIG_XENO_OPT_SCHED_TP + +#define XNSCHED_TP_MIN_PRIO 1 +#define XNSCHED_TP_MAX_PRIO 255 +#define XNSCHED_TP_NR_PRIO \ + (XNSCHED_TP_MAX_PRIO - XNSCHED_TP_MIN_PRIO + 1) + +extern struct xnsched_class xnsched_class_tp; + +struct xnsched_tp_window { + xnticks_t w_offset; + int w_part; +}; + +struct xnsched_tp_schedule { + int pwin_nr; + xnticks_t tf_duration; + atomic_t refcount; + struct xnsched_tp_window pwins[0]; +}; + +struct xnsched_tp { + struct xnsched_tpslot { + /** Per-partition runqueue. 
*/ + xnsched_queue_t runnable; + } partitions[CONFIG_XENO_OPT_SCHED_TP_NRPART]; + /** Idle slot for passive windows. */ + struct xnsched_tpslot idle; + /** Active partition slot */ + struct xnsched_tpslot *tps; + /** Time frame timer */ + struct xntimer tf_timer; + /** Global partition schedule */ + struct xnsched_tp_schedule *gps; + /** Window index of next partition */ + int wnext; + /** Start of next time frame */ + xnticks_t tf_start; + /** Assigned thread queue */ + struct list_head threads; +}; + +static inline int xnsched_tp_init_thread(struct xnthread *thread) +{ + thread->tps = NULL; + + return 0; +} + +struct xnsched_tp_schedule * +xnsched_tp_set_schedule(struct xnsched *sched, + struct xnsched_tp_schedule *gps); + +void xnsched_tp_start_schedule(struct xnsched *sched); + +void xnsched_tp_stop_schedule(struct xnsched *sched); + +int xnsched_tp_get_partition(struct xnsched *sched); + +struct xnsched_tp_schedule * +xnsched_tp_get_schedule(struct xnsched *sched); + +void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps); + +#endif /* CONFIG_XENO_OPT_SCHED_TP */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_TP_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h new file mode 100644 index 0000000..400aa73 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_WEAK_H +#define _COBALT_KERNEL_SCHED_WEAK_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-weak.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + +#define XNSCHED_WEAK_MIN_PRIO 0 +#define XNSCHED_WEAK_MAX_PRIO 99 +#define XNSCHED_WEAK_NR_PRIO \ + (XNSCHED_WEAK_MAX_PRIO - XNSCHED_WEAK_MIN_PRIO + 1) + +#if XNSCHED_WEAK_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR || \ + (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) && \ + XNSCHED_WEAK_NR_PRIO > XNSCHED_MLQ_LEVELS) +#error "WEAK class has too many priority levels" +#endif + +extern struct xnsched_class xnsched_class_weak; + +struct xnsched_weak { + xnsched_queue_t runnable; /*!< Runnable thread queue. */ +}; + +static inline int xnsched_weak_init_thread(struct xnthread *thread) +{ + return 0; +} + +#endif /* CONFIG_XENO_OPT_SCHED_WEAK */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_WEAK_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h new file mode 100644 index 0000000..aa24d54 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h @@ -0,0 +1,674 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_H +#define _COBALT_KERNEL_SCHED_H + +#include <linux/percpu.h> +#include <cobalt/kernel/lock.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/kernel/schedqueue.h> +#include <cobalt/kernel/sched-tp.h> +#include <cobalt/kernel/sched-weak.h> +#include <cobalt/kernel/sched-sporadic.h> +#include <cobalt/kernel/sched-quota.h> +#include <cobalt/kernel/vfile.h> +#include <cobalt/kernel/assert.h> +#include <asm/xenomai/machine.h> +#include <pipeline/sched.h> + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +/* Sched status flags */ +#define XNRESCHED 0x10000000 /* Needs rescheduling */ +#define XNINSW 0x20000000 /* In context switch */ +#define XNINTCK 0x40000000 /* In master tick handler context */ + +/* Sched local flags */ +#define XNIDLE 0x00010000 /* Idle (no outstanding timer) */ +#define XNHTICK 0x00008000 /* Host tick pending */ +#define XNINIRQ 0x00004000 /* In IRQ handling context */ +#define XNHDEFER 0x00002000 /* Host tick deferred */ + +/* + * Hardware timer is stopped. + */ +#define XNTSTOP 0x00000800 + +struct xnsched_rt { + xnsched_queue_t runnable; /*!< Runnable thread queue. */ +}; + +/*! + * \brief Scheduling information structure. + */ + +struct xnsched { + /*!< Scheduler specific status bitmask. */ + unsigned long status; + /*!< Scheduler specific local flags bitmask. */ + unsigned long lflags; + /*!< Current thread. */ + struct xnthread *curr; +#ifdef CONFIG_SMP + /*!< Owner CPU id. */ + int cpu; + /*!< Mask of CPUs needing rescheduling. */ + cpumask_t resched; +#endif + /*!< Context of built-in real-time class. */ + struct xnsched_rt rt; +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + /*!< Context of weak scheduling class. */ + struct xnsched_weak weak; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_TP + /*!< Context of TP class. */ + struct xnsched_tp tp; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + /*!< Context of sporadic scheduling class. */ + struct xnsched_sporadic pss; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + /*!< Context of runtime quota scheduling. */ + struct xnsched_quota quota; +#endif + /*!< Interrupt nesting level. */ + volatile unsigned inesting; + /*!< Host timer. */ + struct xntimer htimer; + /*!< Round-robin timer. */ + struct xntimer rrbtimer; + /*!< Root thread control block. */ + struct xnthread rootcb; +#ifdef CONFIG_XENO_ARCH_FPU + /*!< Thread owning the current FPU context. */ + struct xnthread *fpuholder; +#endif +#ifdef CONFIG_XENO_OPT_WATCHDOG + /*!< Watchdog timer object. */ + struct xntimer wdtimer; +#endif +#ifdef CONFIG_XENO_OPT_STATS + /*!< Last account switch date (ticks). 
*/ + xnticks_t last_account_switch; + /*!< Currently active account */ + xnstat_exectime_t *current_account; +#endif +}; + +DECLARE_PER_CPU(struct xnsched, nksched); + +extern cpumask_t cobalt_cpu_affinity; + +extern struct list_head nkthreadq; + +extern int cobalt_nrthreads; + +#ifdef CONFIG_XENO_OPT_VFILE +extern struct xnvfile_rev_tag nkthreadlist_tag; +#endif + +union xnsched_policy_param; + +struct xnsched_class { + void (*sched_init)(struct xnsched *sched); + void (*sched_enqueue)(struct xnthread *thread); + void (*sched_dequeue)(struct xnthread *thread); + void (*sched_requeue)(struct xnthread *thread); + struct xnthread *(*sched_pick)(struct xnsched *sched); + void (*sched_tick)(struct xnsched *sched); + void (*sched_rotate)(struct xnsched *sched, + const union xnsched_policy_param *p); + void (*sched_migrate)(struct xnthread *thread, + struct xnsched *sched); + int (*sched_chkparam)(struct xnthread *thread, + const union xnsched_policy_param *p); + /** + * Set base scheduling parameters. This routine is indirectly + * called upon a change of base scheduling settings through + * __xnthread_set_schedparam() -> xnsched_set_policy(), + * exclusively. + * + * The scheduling class implementation should do the necessary + * housekeeping to comply with the new settings. + * thread->base_class is up to date before the call is made, + * and should be considered for the new weighted priority + * calculation. On the contrary, thread->sched_class should + * NOT be referred to by this handler. + * + * sched_setparam() is NEVER involved in PI or PP + * management. However it must deny a priority update if it + * contradicts an ongoing boost for @a thread. This is + * typically what the xnsched_set_effective_priority() helper + * does for such handler. + * + * @param thread Affected thread. + * @param p New base policy settings. + * + * @return True if the effective priority was updated + * (thread->cprio). 
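+ *
+ * A minimal conforming handler for a class reusing the RT
+ * parameter block could boil down to the sketch below; real
+ * implementations such as __xnsched_rt_setparam() add their own
+ * housekeeping (e.g. weakness tracking). example_setparam is a
+ * hypothetical name, not part of the API:
+ *
+ * @code
+ * static bool example_setparam(struct xnthread *thread,
+ *                              const union xnsched_policy_param *p)
+ * {
+ *         return xnsched_set_effective_priority(thread, p->rt.prio);
+ * }
+ * @endcode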
+ */ + bool (*sched_setparam)(struct xnthread *thread, + const union xnsched_policy_param *p); + void (*sched_getparam)(struct xnthread *thread, + union xnsched_policy_param *p); + void (*sched_trackprio)(struct xnthread *thread, + const union xnsched_policy_param *p); + void (*sched_protectprio)(struct xnthread *thread, int prio); + int (*sched_declare)(struct xnthread *thread, + const union xnsched_policy_param *p); + void (*sched_forget)(struct xnthread *thread); + void (*sched_kick)(struct xnthread *thread); +#ifdef CONFIG_XENO_OPT_VFILE + int (*sched_init_vfile)(struct xnsched_class *schedclass, + struct xnvfile_directory *vfroot); + void (*sched_cleanup_vfile)(struct xnsched_class *schedclass); +#endif + int nthreads; + struct xnsched_class *next; + int weight; + int policy; + const char *name; +}; + +#define XNSCHED_CLASS_WEIGHT(n) (n * XNSCHED_CLASS_WEIGHT_FACTOR) + +/* Placeholder for current thread priority */ +#define XNSCHED_RUNPRIO 0x80000000 + +#define xnsched_for_each_thread(__thread) \ + list_for_each_entry(__thread, &nkthreadq, glink) + +#ifdef CONFIG_SMP +static inline int xnsched_cpu(struct xnsched *sched) +{ + return sched->cpu; +} +#else /* !CONFIG_SMP */ +static inline int xnsched_cpu(struct xnsched *sched) +{ + return 0; +} +#endif /* CONFIG_SMP */ + +static inline struct xnsched *xnsched_struct(int cpu) +{ + return &per_cpu(nksched, cpu); +} + +static inline struct xnsched *xnsched_current(void) +{ + /* IRQs off */ + return raw_cpu_ptr(&nksched); +} + +static inline struct xnthread *xnsched_current_thread(void) +{ + return xnsched_current()->curr; +} + +/* Test resched flag of given sched. */ +static inline int xnsched_resched_p(struct xnsched *sched) +{ + return sched->status & XNRESCHED; +} + +/* Set self resched flag for the current scheduler. */ +static inline void xnsched_set_self_resched(struct xnsched *sched) +{ + sched->status |= XNRESCHED; +} + +/* Set resched flag for the given scheduler. */ +#ifdef CONFIG_SMP + +static inline void xnsched_set_resched(struct xnsched *sched) +{ + struct xnsched *current_sched = xnsched_current(); + + if (current_sched == sched) + current_sched->status |= XNRESCHED; + else if (!xnsched_resched_p(sched)) { + cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched); + sched->status |= XNRESCHED; + current_sched->status |= XNRESCHED; + } +} + +#define xnsched_realtime_cpus cobalt_pipeline.supported_cpus + +static inline int xnsched_supported_cpu(int cpu) +{ + return cpumask_test_cpu(cpu, &xnsched_realtime_cpus); +} + +static inline int xnsched_threading_cpu(int cpu) +{ + return cpumask_test_cpu(cpu, &cobalt_cpu_affinity); +} + +#else /* !CONFIG_SMP */ + +static inline void xnsched_set_resched(struct xnsched *sched) +{ + xnsched_set_self_resched(sched); +} + +#define xnsched_realtime_cpus CPU_MASK_ALL + +static inline int xnsched_supported_cpu(int cpu) +{ + return 1; +} + +static inline int xnsched_threading_cpu(int cpu) +{ + return 1; +} + +#endif /* !CONFIG_SMP */ + +#define for_each_realtime_cpu(cpu) \ + for_each_online_cpu(cpu) \ + if (xnsched_supported_cpu(cpu)) \ + +int ___xnsched_run(struct xnsched *sched); + +void __xnsched_run_handler(void); + +static inline int __xnsched_run(struct xnsched *sched) +{ + /* + * Reschedule if XNRESCHED is pending, but never over an IRQ + * handler or in the middle of an unlocked context switch.
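+ *
+ * The combined (status|lflags) word is tested below because
+ * XNINSW lives in status while XNINIRQ lives in lflags; the
+ * masked value equals XNRESCHED exactly when rescheduling is
+ * pending and neither of those bits is set, so a single
+ * comparison filters out all three conditions at once.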
+ */ + if (((sched->status|sched->lflags) & + (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED) + return 0; + + return pipeline_schedule(sched); +} + +static inline int xnsched_run(void) +{ + struct xnsched *sched = xnsched_current(); + /* + * sched->curr is shared locklessly with ___xnsched_run(). + * READ_ONCE() makes sure the compiler never uses load tearing + * for reading this pointer piecemeal, so that multiple stores + * occurring concurrently on remote CPUs never yield a + * spurious merged value on the local one. + */ + struct xnthread *curr = READ_ONCE(sched->curr); + + /* + * If running over the root thread, hard irqs must be off + * (asserted out of line in ___xnsched_run()). + */ + return curr->lock_count > 0 ? 0 : __xnsched_run(sched); +} + +void xnsched_lock(void); + +void xnsched_unlock(void); + +static inline int xnsched_interrupt_p(void) +{ + return xnsched_current()->lflags & XNINIRQ; +} + +static inline int xnsched_root_p(void) +{ + return xnthread_test_state(xnsched_current_thread(), XNROOT); +} + +static inline int xnsched_unblockable_p(void) +{ + return xnsched_interrupt_p() || xnsched_root_p(); +} + +static inline int xnsched_primary_p(void) +{ + return !xnsched_unblockable_p(); +} + +bool xnsched_set_effective_priority(struct xnthread *thread, + int prio); + +#include <cobalt/kernel/sched-idle.h> +#include <cobalt/kernel/sched-rt.h> + +int xnsched_init_proc(void); + +void xnsched_cleanup_proc(void); + +void xnsched_register_classes(void); + +void xnsched_init_all(void); + +void xnsched_destroy_all(void); + +struct xnthread *xnsched_pick_next(struct xnsched *sched); + +void xnsched_putback(struct xnthread *thread); + +int xnsched_set_policy(struct xnthread *thread, + struct xnsched_class *sched_class, + const union xnsched_policy_param *p); + +void xnsched_track_policy(struct xnthread *thread, + struct xnthread *target); + +void xnsched_protect_priority(struct xnthread *thread, + int prio); + +void xnsched_migrate(struct xnthread *thread, + struct xnsched *sched); + +void xnsched_migrate_passive(struct xnthread *thread, + struct xnsched *sched); + +/** + * @fn void xnsched_rotate(struct xnsched *sched, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param) + * @brief Rotate a scheduler runqueue. + * + * The specified scheduling class is requested to rotate its runqueue + * for the given scheduler. Rotation is performed according to the + * scheduling parameter specified by @a sched_param. + * + * @note The nucleus supports round-robin scheduling for the members + * of the RT class. + * + * @param sched The per-CPU scheduler hosting the target scheduling + * class. + * + * @param sched_class The scheduling class which should rotate its + * runqueue. + * + * @param sched_param The scheduling parameter providing rotation + * information to the specified scheduling class. 
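+ *
+ * For instance, a round-robin tick could rotate the RT class
+ * runqueue at the priority level of the current thread
+ * (illustrative sketch, not a fixed calling convention):
+ *
+ * @code
+ * union xnsched_policy_param param;
+ *
+ * param.rt.prio = sched->curr->cprio;
+ * xnsched_rotate(sched, &xnsched_class_rt, &param);
+ * @endcode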
+ * + * @coretags{unrestricted, atomic-entry} + */ +static inline void xnsched_rotate(struct xnsched *sched, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param) +{ + sched_class->sched_rotate(sched, sched_param); +} + +static inline int xnsched_init_thread(struct xnthread *thread) +{ + int ret = 0; + + xnsched_idle_init_thread(thread); + xnsched_rt_init_thread(thread); + +#ifdef CONFIG_XENO_OPT_SCHED_TP + ret = xnsched_tp_init_thread(thread); + if (ret) + return ret; +#endif /* CONFIG_XENO_OPT_SCHED_TP */ +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + ret = xnsched_sporadic_init_thread(thread); + if (ret) + return ret; +#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */ +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + ret = xnsched_quota_init_thread(thread); + if (ret) + return ret; +#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */ + + return ret; +} + +static inline int xnsched_root_priority(struct xnsched *sched) +{ + return sched->rootcb.cprio; +} + +static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched) +{ + return sched->rootcb.sched_class; +} + +static inline void xnsched_tick(struct xnsched *sched) +{ + struct xnthread *curr = sched->curr; + struct xnsched_class *sched_class = curr->sched_class; + /* + * A thread that undergoes round-robin scheduling only + * consumes its time slice when it runs within its own + * scheduling class, which excludes temporary PI boosts, and + * does not hold the scheduler lock. + */ + if (sched_class == curr->base_class && + sched_class->sched_tick && + xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB && + curr->lock_count == 0) + sched_class->sched_tick(sched); +} + +static inline int xnsched_chkparam(struct xnsched_class *sched_class, + struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (sched_class->sched_chkparam) + return sched_class->sched_chkparam(thread, p); + + return 0; +} + +static inline int xnsched_declare(struct xnsched_class *sched_class, + struct xnthread *thread, + const union xnsched_policy_param *p) +{ + int ret; + + if (sched_class->sched_declare) { + ret = sched_class->sched_declare(thread, p); + if (ret) + return ret; + } + if (sched_class != thread->base_class) + sched_class->nthreads++; + + return 0; +} + +static inline int xnsched_calc_wprio(struct xnsched_class *sched_class, + int prio) +{ + return prio + sched_class->weight; +} + +#ifdef CONFIG_XENO_OPT_SCHED_CLASSES + +static inline void xnsched_enqueue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + sched_class->sched_enqueue(thread); +} + +static inline void xnsched_dequeue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + sched_class->sched_dequeue(thread); +} + +static inline void xnsched_requeue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + sched_class->sched_requeue(thread); +} + +static inline +bool xnsched_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + return thread->base_class->sched_setparam(thread, p); +} + +static inline void xnsched_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + thread->sched_class->sched_getparam(thread, p); +} + +static inline void xnsched_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + thread->sched_class->sched_trackprio(thread, p); + thread->wprio 
= xnsched_calc_wprio(thread->sched_class, thread->cprio); +} + +static inline void xnsched_protectprio(struct xnthread *thread, int prio) +{ + thread->sched_class->sched_protectprio(thread, prio); + thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio); +} + +static inline void xnsched_forget(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->base_class; + + --sched_class->nthreads; + + if (sched_class->sched_forget) + sched_class->sched_forget(thread); +} + +static inline void xnsched_kick(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->base_class; + + xnthread_set_info(thread, XNKICKED); + + if (sched_class->sched_kick) + sched_class->sched_kick(thread); + + xnsched_set_resched(thread->sched); +} + +#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */ + +/* + * If only the RT and IDLE scheduling classes are compiled in, we can + * fully inline common helpers for dealing with those. + */ + +static inline void xnsched_enqueue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + __xnsched_rt_enqueue(thread); +} + +static inline void xnsched_dequeue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + __xnsched_rt_dequeue(thread); +} + +static inline void xnsched_requeue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + __xnsched_rt_requeue(thread); +} + +static inline bool xnsched_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + struct xnsched_class *sched_class = thread->base_class; + + if (sched_class == &xnsched_class_idle) + return __xnsched_idle_setparam(thread, p); + + return __xnsched_rt_setparam(thread, p); +} + +static inline void xnsched_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class == &xnsched_class_idle) + __xnsched_idle_getparam(thread, p); + else + __xnsched_rt_getparam(thread, p); +} + +static inline void xnsched_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class == &xnsched_class_idle) + __xnsched_idle_trackprio(thread, p); + else + __xnsched_rt_trackprio(thread, p); + + thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio); +} + +static inline void xnsched_protectprio(struct xnthread *thread, int prio) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class == &xnsched_class_idle) + __xnsched_idle_protectprio(thread, prio); + else + __xnsched_rt_protectprio(thread, prio); + + thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio); +} + +static inline void xnsched_forget(struct xnthread *thread) +{ + --thread->base_class->nthreads; + __xnsched_rt_forget(thread); +} + +static inline void xnsched_kick(struct xnthread *thread) +{ + xnthread_set_info(thread, XNKICKED); + xnsched_set_resched(thread->sched); +} + +#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h new file mode 100644 index 0000000..9da95aa --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2008 Philippe Gerum 
<rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHEDPARAM_H +#define _COBALT_KERNEL_SCHEDPARAM_H + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +struct xnsched_idle_param { + int prio; +}; + +struct xnsched_weak_param { + int prio; +}; + +struct xnsched_rt_param { + int prio; +}; + +struct xnsched_tp_param { + int prio; + int ptid; /* partition id. */ +}; + +struct xnsched_sporadic_param { + xnticks_t init_budget; + xnticks_t repl_period; + int max_repl; + int low_prio; + int normal_prio; + int current_prio; +}; + +struct xnsched_quota_param { + int prio; + int tgid; /* thread group id. */ +}; + +union xnsched_policy_param { + struct xnsched_idle_param idle; + struct xnsched_rt_param rt; +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + struct xnsched_weak_param weak; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_TP + struct xnsched_tp_param tp; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + struct xnsched_sporadic_param pss; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + struct xnsched_quota_param quota; +#endif +}; + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHEDPARAM_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h new file mode 100644 index 0000000..f7e87a3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHEDQUEUE_H +#define _COBALT_KERNEL_SCHEDQUEUE_H + +#include <cobalt/kernel/list.h> + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#define XNSCHED_CLASS_WEIGHT_FACTOR 1024 + +#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED + +#include <linux/bitmap.h> + +/* + * Multi-level priority queue, suitable for handling the runnable + * thread queue of the core scheduling class with O(1) property. We + * only manage a descending queuing order, i.e. highest numbered + * priorities come first. + */ +#define XNSCHED_MLQ_LEVELS 260 /* i.e. 
XNSCHED_CORE_NR_PRIO */ + +struct xnsched_mlq { + int elems; + DECLARE_BITMAP(prio_map, XNSCHED_MLQ_LEVELS); + struct list_head heads[XNSCHED_MLQ_LEVELS]; +}; + +struct xnthread; + +void xnsched_initq(struct xnsched_mlq *q); + +void xnsched_addq(struct xnsched_mlq *q, + struct xnthread *thread); + +void xnsched_addq_tail(struct xnsched_mlq *q, + struct xnthread *thread); + +void xnsched_delq(struct xnsched_mlq *q, + struct xnthread *thread); + +struct xnthread *xnsched_getq(struct xnsched_mlq *q); + +static inline int xnsched_emptyq_p(struct xnsched_mlq *q) +{ + return q->elems == 0; +} + +static inline int xnsched_weightq(struct xnsched_mlq *q) +{ + return find_first_bit(q->prio_map, XNSCHED_MLQ_LEVELS); +} + +typedef struct xnsched_mlq xnsched_queue_t; + +#else /* ! CONFIG_XENO_OPT_SCALABLE_SCHED */ + +typedef struct list_head xnsched_queue_t; + +#define xnsched_initq(__q) INIT_LIST_HEAD(__q) +#define xnsched_emptyq_p(__q) list_empty(__q) +#define xnsched_addq(__q, __t) list_add_prilf(__t, __q, cprio, rlink) +#define xnsched_addq_tail(__q, __t) list_add_priff(__t, __q, cprio, rlink) +#define xnsched_delq(__q, __t) (void)(__q), list_del(&(__t)->rlink) +#define xnsched_getq(__q) \ + ({ \ + struct xnthread *__t = NULL; \ + if (!list_empty(__q)) \ + __t = list_get_entry(__q, struct xnthread, rlink); \ + __t; \ + }) +#define xnsched_weightq(__q) \ + ({ \ + struct xnthread *__t; \ + __t = list_first_entry(__q, struct xnthread, rlink); \ + __t->cprio; \ + }) + + +#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */ + +struct xnthread *xnsched_findq(xnsched_queue_t *q, int prio); + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHEDQUEUE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h new file mode 100644 index 0000000..1bac45a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2008 Efixo <gilles.chanteperdrix@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_KERNEL_SELECT_H +#define _COBALT_KERNEL_SELECT_H + +#include <cobalt/kernel/list.h> +#include <cobalt/kernel/thread.h> + +/** + * @addtogroup cobalt_core_select + * @{ + */ + +#define XNSELECT_READ 0 +#define XNSELECT_WRITE 1 +#define XNSELECT_EXCEPT 2 +#define XNSELECT_MAX_TYPES 3 + +struct xnselector { + struct xnsynch synchbase; + struct fds { + fd_set expected; + fd_set pending; + } fds [XNSELECT_MAX_TYPES]; + struct list_head destroy_link; + struct list_head bindings; /* only used by xnselector_destroy */ +}; + +#define __NFDBITS__ (8 * sizeof(unsigned long)) +#define __FDSET_LONGS__ (__FD_SETSIZE/__NFDBITS__) +#define __FDELT__(d) ((d) / __NFDBITS__) +#define __FDMASK__(d) (1UL << ((d) % __NFDBITS__)) + +static inline void __FD_SET__(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS__; + unsigned long __rem = __fd % __NFDBITS__; + __fdsetp->fds_bits[__tmp] |= (1UL<<__rem); +} + +static inline void __FD_CLR__(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS__; + unsigned long __rem = __fd % __NFDBITS__; + __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem); +} + +static inline int __FD_ISSET__(unsigned long __fd, const __kernel_fd_set *__p) +{ + unsigned long __tmp = __fd / __NFDBITS__; + unsigned long __rem = __fd % __NFDBITS__; + return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0; +} + +static inline void __FD_ZERO__(__kernel_fd_set *__p) +{ + unsigned long *__tmp = __p->fds_bits; + int __i; + + __i = __FDSET_LONGS__; + while (__i) { + __i--; + *__tmp = 0; + __tmp++; + } +} + +struct xnselect { + struct list_head bindings; +}; + +#define DECLARE_XNSELECT(name) struct xnselect name + +struct xnselect_binding { + struct xnselector *selector; + struct xnselect *fd; + unsigned int type; + unsigned int bit_index; + struct list_head link; /* link in selected fds list. */ + struct list_head slink; /* link in selector list */ +}; + +void xnselect_init(struct xnselect *select_block); + +int xnselect_bind(struct xnselect *select_block, + struct xnselect_binding *binding, + struct xnselector *selector, + unsigned int type, + unsigned int bit_index, + unsigned int state); + +int __xnselect_signal(struct xnselect *select_block, unsigned int state); + +/** + * Signal a file descriptor state change. + * + * @param select_block pointer to an @a xnselect structure representing the file + * descriptor whose state changed; + * @param state new value of the state. + * + * @retval 1 if rescheduling is needed; + * @retval 0 otherwise. + */ +static inline int +xnselect_signal(struct xnselect *select_block, unsigned int state) +{ + if (!list_empty(&select_block->bindings)) + return __xnselect_signal(select_block, state); + + return 0; +} + +void xnselect_destroy(struct xnselect *select_block); + +int xnselector_init(struct xnselector *selector); + +int xnselect(struct xnselector *selector, + fd_set *out_fds[XNSELECT_MAX_TYPES], + fd_set *in_fds[XNSELECT_MAX_TYPES], + int nfds, + xnticks_t timeout, xntmode_t timeout_mode); + +void xnselector_destroy(struct xnselector *selector); + +int xnselect_mount(void); + +int xnselect_umount(void); + +/** @} */ + +#endif /* _COBALT_KERNEL_SELECT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h new file mode 100644 index 0000000..3c059a5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>. 
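A driver-side view of the select core just declared: a device embeds one struct xnselect per wait direction, xnselect_bind() attaches a polling selector to it, and the producer path calls xnselect_signal() whenever readiness flips. A hedged sketch of the signaling side, assuming a hypothetical device with a read-readiness block:

```c
#include <cobalt/kernel/select.h>

struct demo_device {
	struct xnselect rd_select;	/* readiness for XNSELECT_READ */
	int data_ready;
};

/* Called once at device setup time. */
static void demo_device_init(struct demo_device *dev)
{
	xnselect_init(&dev->rd_select);
	dev->data_ready = 0;
}

/*
 * Producer side: publish the new state to any bound selector. A
 * non-zero return value asks the caller to reschedule.
 */
static int demo_post_input(struct demo_device *dev)
{
	dev->data_ready = 1;
	return xnselect_signal(&dev->rd_select, 1);
}
```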
+ * Copyright (C) 2006 Dmitry Adamushko <dmitry.adamushko@gmail.com>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_STAT_H +#define _COBALT_KERNEL_STAT_H + +#include <cobalt/kernel/clock.h> + +/** + * @ingroup cobalt_core_thread + * @defgroup cobalt_core_stat Thread runtime statistics + * @{ + */ +#ifdef CONFIG_XENO_OPT_STATS + +typedef struct xnstat_exectime { + + xnticks_t start; /* Start of execution time accumulation */ + + xnticks_t total; /* Accumulated execution time */ + +} xnstat_exectime_t; + +/* Return current date which can be passed to other xnstat services for + immediate or lazy accounting. */ +#define xnstat_exectime_now() xnclock_core_read_raw() + +/* Accumulate exectime of the current account until the given date. */ +#define xnstat_exectime_update(sched, date) \ +do { \ + xnticks_t __date = date; \ + (sched)->current_account->total += \ + __date - (sched)->last_account_switch; \ + (sched)->last_account_switch = __date; \ + /* All changes must be committed before changing the current_account \ + reference in sched (required for xnintr_sync_stat_references) */ \ + smp_wmb(); \ +} while (0) + +/* Update the current account reference, returning the previous one. */ +#define xnstat_exectime_set_current(sched, new_account) \ +({ \ + xnstat_exectime_t *__prev; \ + __prev = (xnstat_exectime_t *) \ + atomic_long_xchg((atomic_long_t *)&(sched)->current_account, \ + (long)(new_account)); \ + __prev; \ +}) + +/* Return the currently active accounting entity. */ +#define xnstat_exectime_get_current(sched) ((sched)->current_account) + +/* Finalize an account (no need to accumulate the exectime, just mark the + switch date and set the new account). */ +#define xnstat_exectime_finalize(sched, new_account) \ +do { \ + (sched)->last_account_switch = xnclock_core_read_raw(); \ + (sched)->current_account = (new_account); \ +} while (0) + +/* Obtain content of xnstat_exectime_t */ +#define xnstat_exectime_get_start(account) ((account)->start) +#define xnstat_exectime_get_total(account) ((account)->total) + +/* Obtain last account switch date of considered sched */ +#define xnstat_exectime_get_last_switch(sched) ((sched)->last_account_switch) + +/* Reset statistics from inside the accounted entity (e.g. after CPU + migration). 
*/ +#define xnstat_exectime_reset_stats(stat) \ +do { \ + (stat)->total = 0; \ + (stat)->start = xnclock_core_read_raw(); \ +} while (0) + + +typedef struct xnstat_counter { + unsigned long counter; +} xnstat_counter_t; + +static inline unsigned long xnstat_counter_inc(xnstat_counter_t *c) +{ + return c->counter++; +} + +static inline unsigned long xnstat_counter_get(xnstat_counter_t *c) +{ + return c->counter; +} + +static inline void xnstat_counter_set(xnstat_counter_t *c, unsigned long value) +{ + c->counter = value; +} + +#else /* !CONFIG_XENO_OPT_STATS */ +typedef struct xnstat_exectime { +} xnstat_exectime_t; + +#define xnstat_exectime_now() ({ 0; }) +#define xnstat_exectime_update(sched, date) do { } while (0) +#define xnstat_exectime_set_current(sched, new_account) ({ (void)sched; NULL; }) +#define xnstat_exectime_get_current(sched) ({ (void)sched; NULL; }) +#define xnstat_exectime_finalize(sched, new_account) do { } while (0) +#define xnstat_exectime_get_start(account) ({ 0; }) +#define xnstat_exectime_get_total(account) ({ 0; }) +#define xnstat_exectime_get_last_switch(sched) ({ 0; }) +#define xnstat_exectime_reset_stats(account) do { } while (0) + +typedef struct xnstat_counter { +} xnstat_counter_t; + +#define xnstat_counter_inc(c) ({ do { } while(0); 0; }) +#define xnstat_counter_get(c) ({ 0; }) +#define xnstat_counter_set(c, value) do { } while (0) +#endif /* CONFIG_XENO_OPT_STATS */ + +/* Account the exectime of the current account until now, switch to + new_account, and return the previous one. */ +#define xnstat_exectime_switch(sched, new_account) \ +({ \ + xnstat_exectime_update(sched, xnstat_exectime_now()); \ + xnstat_exectime_set_current(sched, new_account); \ +}) + +/* Account the exectime of the current account until given start time, switch + to new_account, and return the previous one. */ +#define xnstat_exectime_lazy_switch(sched, new_account, date) \ +({ \ + xnstat_exectime_update(sched, date); \ + xnstat_exectime_set_current(sched, new_account); \ +}) + +/** @} */ + +#endif /* !_COBALT_KERNEL_STAT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h new file mode 100644 index 0000000..a2bf80d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h @@ -0,0 +1,179 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
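Taken together, these macros give per-entity CPU accounting across context switches: xnstat_exectime_switch() charges the raw clock ticks elapsed since the last switch to the outgoing account, then installs the incoming one and hands back the previous pointer. A minimal sketch of a switch path, with hypothetical names and assuming struct xnsched comes from cobalt/kernel/sched.h:

```c
#include <cobalt/kernel/sched.h>
#include <cobalt/kernel/stat.h>

/*
 * Hypothetical switch path: account the outgoing entity up to "now",
 * then start accumulating on behalf of the incoming one.
 */
static void demo_account_switch(struct xnsched *sched,
				xnstat_exectime_t *next_account)
{
	xnstat_exectime_t *prev = xnstat_exectime_switch(sched, next_account);

	(void)prev;	/* could feed a tracepoint */
}
```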
+ */ +#ifndef _COBALT_KERNEL_SYNCH_H +#define _COBALT_KERNEL_SYNCH_H + +#include <cobalt/kernel/list.h> +#include <cobalt/kernel/assert.h> +#include <cobalt/kernel/timer.h> +#include <cobalt/uapi/kernel/synch.h> +#include <cobalt/uapi/kernel/thread.h> + +/** + * @addtogroup cobalt_core_synch + * @{ + */ +#define XNSYNCH_CLAIMED 0x100 /* Claimed by other thread(s) (PI) */ +#define XNSYNCH_CEILING 0x200 /* Actively boosting (PP) */ + +/* Spare flags usable by upper interfaces */ +#define XNSYNCH_SPARE0 0x01000000 +#define XNSYNCH_SPARE1 0x02000000 +#define XNSYNCH_SPARE2 0x04000000 +#define XNSYNCH_SPARE3 0x08000000 +#define XNSYNCH_SPARE4 0x10000000 +#define XNSYNCH_SPARE5 0x20000000 +#define XNSYNCH_SPARE6 0x40000000 +#define XNSYNCH_SPARE7 0x80000000 + +/* Statuses */ +#define XNSYNCH_DONE 0 /* Resource available / operation complete */ +#define XNSYNCH_WAIT 1 /* Calling thread blocked -- start rescheduling */ +#define XNSYNCH_RESCHED 2 /* Force rescheduling */ + +struct xnthread; +struct xnsynch; + +struct xnsynch { + /** wait (weighted) prio in thread->boosters */ + int wprio; + /** thread->boosters */ + struct list_head next; + /** + * &variable holding the current priority ceiling value + * (xnsched_class_rt-based, [1..255], XNSYNCH_PP). + */ + u32 *ceiling_ref; + /** Status word */ + unsigned long status; + /** Pending threads */ + struct list_head pendq; + /** Thread which owns the resource */ + struct xnthread *owner; + /** Pointer to fast lock word */ + atomic_t *fastlock; + /* Cleanup handler */ + void (*cleanup)(struct xnsynch *synch); +}; + +#define XNSYNCH_WAITQUEUE_INITIALIZER(__name) { \ + .status = XNSYNCH_PRIO, \ + .wprio = -1, \ + .pendq = LIST_HEAD_INIT((__name).pendq), \ + .owner = NULL, \ + .cleanup = NULL, \ + .fastlock = NULL, \ + } + +#define DEFINE_XNWAITQ(__name) \ + struct xnsynch __name = XNSYNCH_WAITQUEUE_INITIALIZER(__name) + +static inline void xnsynch_set_status(struct xnsynch *synch, int bits) +{ + synch->status |= bits; +} + +static inline void xnsynch_clear_status(struct xnsynch *synch, int bits) +{ + synch->status &= ~bits; +} + +#define xnsynch_for_each_sleeper(__pos, __synch) \ + list_for_each_entry(__pos, &(__synch)->pendq, plink) + +#define xnsynch_for_each_sleeper_safe(__pos, __tmp, __synch) \ + list_for_each_entry_safe(__pos, __tmp, &(__synch)->pendq, plink) + +static inline int xnsynch_pended_p(struct xnsynch *synch) +{ + return !list_empty(&synch->pendq); +} + +static inline struct xnthread *xnsynch_owner(struct xnsynch *synch) +{ + return synch->owner; +} + +#define xnsynch_fastlock(synch) ((synch)->fastlock) +#define xnsynch_fastlock_p(synch) ((synch)->fastlock != NULL) +#define xnsynch_owner_check(synch, thread) \ + xnsynch_fast_owner_check((synch)->fastlock, thread->handle) + +#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED + +void xnsynch_detect_relaxed_owner(struct xnsynch *synch, + struct xnthread *sleeper); + +void xnsynch_detect_boosted_relax(struct xnthread *owner); + +#else /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */ + +static inline void xnsynch_detect_relaxed_owner(struct xnsynch *synch, + struct xnthread *sleeper) { } + +static inline void xnsynch_detect_boosted_relax(struct xnthread *owner) { } + +#endif /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */ + +void xnsynch_init(struct xnsynch *synch, int flags, + atomic_t *fastlock); + +void xnsynch_init_protect(struct xnsynch *synch, int flags, + atomic_t *fastlock, u32 *ceiling_ref); + +int xnsynch_destroy(struct xnsynch *synch); + +void xnsynch_commit_ceiling(struct xnthread *curr); + +static 
inline void xnsynch_register_cleanup(struct xnsynch *synch, + void (*handler)(struct xnsynch *)) +{ + synch->cleanup = handler; +} + +int __must_check xnsynch_sleep_on(struct xnsynch *synch, + xnticks_t timeout, + xntmode_t timeout_mode); + +struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch); + +int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr); + +void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, + struct xnthread *sleeper); + +int __must_check xnsynch_acquire(struct xnsynch *synch, + xnticks_t timeout, + xntmode_t timeout_mode); + +int __must_check xnsynch_try_acquire(struct xnsynch *synch); + +bool xnsynch_release(struct xnsynch *synch, struct xnthread *thread); + +struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch); + +int xnsynch_flush(struct xnsynch *synch, int reason); + +void xnsynch_requeue_sleeper(struct xnthread *thread); + +void xnsynch_forget_sleeper(struct xnthread *thread); + +/** @} */ + +#endif /* !_COBALT_KERNEL_SYNCH_H_ */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h new file mode 100644 index 0000000..b79cb84 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h @@ -0,0 +1,581 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_THREAD_H +#define _COBALT_KERNEL_THREAD_H + +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/sched/rt.h> +#include <pipeline/thread.h> +#include <pipeline/inband_work.h> +#include <cobalt/kernel/list.h> +#include <cobalt/kernel/stat.h> +#include <cobalt/kernel/timer.h> +#include <cobalt/kernel/registry.h> +#include <cobalt/kernel/schedparam.h> +#include <cobalt/kernel/trace.h> +#include <cobalt/kernel/synch.h> +#include <cobalt/uapi/kernel/thread.h> +#include <cobalt/uapi/signal.h> +#include <asm/xenomai/machine.h> +#include <asm/xenomai/thread.h> + +/** + * @addtogroup cobalt_core_thread + * @{ + */ +#define XNTHREAD_BLOCK_BITS (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP) +#define XNTHREAD_MODE_BITS (XNRRB|XNWARN|XNTRAPLB) + +#define XNTHREAD_SIGDEBUG 0 +#define XNTHREAD_SIGSHADOW_HARDEN 1 +#define XNTHREAD_SIGSHADOW_BACKTRACE 2 +#define XNTHREAD_SIGSHADOW_HOME 3 +#define XNTHREAD_SIGTERM 4 +#define XNTHREAD_MAX_SIGNALS 5 + +struct xnthread; +struct xnsched; +struct xnselector; +struct xnsched_class; +struct xnsched_tpslot; +struct xnthread_personality; +struct completion; + +struct lostage_signal { + struct pipeline_inband_work inband_work; /* Must be first. 
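Stepping back to the synchronization API just declared: xnsynch_init() plus xnsynch_acquire()/xnsynch_release() implement the ownership protocol behind Cobalt mutexes, with priority inheritance propagated through the owner's boosters list. A minimal kernel-side sketch, assuming the XNSYNCH_PI and XNSYNCH_OWNER init flags from cobalt/uapi/kernel/synch.h and xnsched_run() from the scheduler header:

```c
#include <cobalt/kernel/sched.h>
#include <cobalt/kernel/synch.h>

static struct xnsynch demo_mutex;
static atomic_t demo_fastlock;	/* fast lock word, normally shared with userland */

static void demo_mutex_setup(void)
{
	xnsynch_init(&demo_mutex, XNSYNCH_PI | XNSYNCH_OWNER, &demo_fastlock);
}

static int demo_mutex_lock(void)
{
	/* Sleeps with PI boost applied to the current owner if contended. */
	return xnsynch_acquire(&demo_mutex, XN_INFINITE, XN_RELATIVE);
}

static void demo_mutex_unlock(void)
{
	/* A true return value means a waiter was readied: reschedule. */
	if (xnsynch_release(&demo_mutex, xnthread_current()))
		xnsched_run();
}
```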
*/ + struct task_struct *task; + int signo, sigval; + struct lostage_signal *self; /* Revisit: I-pipe requirement */ +}; + +struct xnthread_init_attr { + struct xnthread_personality *personality; + cpumask_t affinity; + int flags; + const char *name; +}; + +struct xnthread_start_attr { + int mode; + void (*entry)(void *cookie); + void *cookie; +}; + +struct xnthread_wait_context { + int posted; +}; + +struct xnthread_personality { + const char *name; + unsigned int magic; + int xid; + atomic_t refcnt; + struct { + void *(*attach_process)(void); + void (*detach_process)(void *arg); + void (*map_thread)(struct xnthread *thread); + struct xnthread_personality *(*relax_thread)(struct xnthread *thread); + struct xnthread_personality *(*harden_thread)(struct xnthread *thread); + struct xnthread_personality *(*move_thread)(struct xnthread *thread, + int dest_cpu); + struct xnthread_personality *(*exit_thread)(struct xnthread *thread); + struct xnthread_personality *(*finalize_thread)(struct xnthread *thread); + } ops; + struct module *module; +}; + +struct xnthread { + struct xnarchtcb tcb; /* Architecture-dependent block */ + + __u32 state; /* Thread state flags */ + __u32 info; /* Thread information flags */ + __u32 local_info; /* Local thread information flags */ + + struct xnsched *sched; /* Thread scheduler */ + struct xnsched_class *sched_class; /* Current scheduling class */ + struct xnsched_class *base_class; /* Base scheduling class */ + +#ifdef CONFIG_XENO_OPT_SCHED_TP + struct xnsched_tpslot *tps; /* Current partition slot for TP scheduling */ + struct list_head tp_link; /* Link in per-sched TP thread queue */ +#endif +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */ +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + struct xnsched_quota_group *quota; /* Quota scheduling group. */ + struct list_head quota_expired; + struct list_head quota_next; +#endif + cpumask_t affinity; /* Processor affinity. */ + + /** Base priority (before PI/PP boost) */ + int bprio; + + /** Current (effective) priority */ + int cprio; + + /** + * Weighted priority (cprio + scheduling class weight). + */ + int wprio; + + int lock_count; /** Scheduler lock count. */ + + /** + * Thread holder in xnsched run queue. Ordered by + * thread->cprio. + */ + struct list_head rlink; + + /** + * Thread holder in xnsynch pendq. Prioritized by + * thread->cprio + scheduling class weight. + */ + struct list_head plink; + + /** Thread holder in global queue. */ + struct list_head glink; + + /** + * List of xnsynch owned by this thread which cause a priority + * boost due to one of the following reasons: + * + * - they are currently claimed by other thread(s) when + * enforcing the priority inheritance protocol (XNSYNCH_PI). + * + * - they require immediate priority ceiling (XNSYNCH_PP). + * + * This list is ordered by decreasing (weighted) thread + * priorities. + */ + struct list_head boosters; + + struct xnsynch *wchan; /* Resource the thread pends on */ + + struct xnsynch *wwake; /* Wait channel the thread was resumed from */ + + int res_count; /* Held resources count */ + + struct xntimer rtimer; /* Resource timer */ + + struct xntimer ptimer; /* Periodic timer */ + + xnticks_t rrperiod; /* Allotted round-robin period (ns) */ + + struct xnthread_wait_context *wcontext; /* Active wait context. 
*/ + + struct { + xnstat_counter_t ssw; /* Primary -> secondary mode switch count */ + xnstat_counter_t csw; /* Context switches (includes secondary -> primary switches) */ + xnstat_counter_t xsc; /* Xenomai syscalls */ + xnstat_counter_t pf; /* Number of page faults */ + xnstat_exectime_t account; /* Execution time accounting entity */ + xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */ + } stat; + + struct xnselector *selector; /* For select. */ + + xnhandle_t handle; /* Handle in registry */ + + char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */ + + void (*entry)(void *cookie); /* Thread entry routine */ + void *cookie; /* Cookie to pass to the entry routine */ + + /** + * Thread data visible from userland through a window on the + * global heap. + */ + struct xnthread_user_window *u_window; + + struct xnthread_personality *personality; + + struct completion exited; + +#ifdef CONFIG_XENO_OPT_DEBUG + const char *exe_path; /* Executable path */ + u32 proghash; /* Hash value for exe_path */ +#endif + struct lostage_signal sigarray[XNTHREAD_MAX_SIGNALS]; +}; + +static inline int xnthread_get_state(const struct xnthread *thread) +{ + return thread->state; +} + +static inline int xnthread_test_state(struct xnthread *thread, int bits) +{ + return thread->state & bits; +} + +static inline void xnthread_set_state(struct xnthread *thread, int bits) +{ + thread->state |= bits; +} + +static inline void xnthread_clear_state(struct xnthread *thread, int bits) +{ + thread->state &= ~bits; +} + +static inline int xnthread_test_info(struct xnthread *thread, int bits) +{ + return thread->info & bits; +} + +static inline void xnthread_set_info(struct xnthread *thread, int bits) +{ + thread->info |= bits; +} + +static inline void xnthread_clear_info(struct xnthread *thread, int bits) +{ + thread->info &= ~bits; +} + +static inline int xnthread_test_localinfo(struct xnthread *curr, int bits) +{ + return curr->local_info & bits; +} + +static inline void xnthread_set_localinfo(struct xnthread *curr, int bits) +{ + curr->local_info |= bits; +} + +static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits) +{ + curr->local_info &= ~bits; +} + +static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread) +{ + return &thread->tcb; +} + +static inline int xnthread_base_priority(const struct xnthread *thread) +{ + return thread->bprio; +} + +static inline int xnthread_current_priority(const struct xnthread *thread) +{ + return thread->cprio; +} + +static inline struct task_struct *xnthread_host_task(struct xnthread *thread) +{ + return xnarch_host_task(xnthread_archtcb(thread)); +} + +#define xnthread_for_each_booster(__pos, __thread) \ + list_for_each_entry(__pos, &(__thread)->boosters, next) + +#define xnthread_for_each_booster_safe(__pos, __tmp, __thread) \ + list_for_each_entry_safe(__pos, __tmp, &(__thread)->boosters, next) + +#define xnthread_run_handler(__t, __h, __a...) \ + do { \ + struct xnthread_personality *__p__ = (__t)->personality; \ + if ((__p__)->ops.__h) \ + (__p__)->ops.__h(__t, ##__a); \ + } while (0) + +#define xnthread_run_handler_stack(__t, __h, __a...) 
\ + do { \ + struct xnthread_personality *__p__ = (__t)->personality; \ + do { \ + if ((__p__)->ops.__h == NULL) \ + break; \ + __p__ = (__p__)->ops.__h(__t, ##__a); \ + } while (__p__); \ + } while (0) + +static inline +struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread) +{ + return thread->wcontext; +} + +static inline +int xnthread_register(struct xnthread *thread, const char *name) +{ + return xnregistry_enter(name, thread, &thread->handle, NULL); +} + +static inline +struct xnthread *xnthread_lookup(xnhandle_t threadh) +{ + struct xnthread *thread = xnregistry_lookup(threadh, NULL); + return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL; +} + +static inline void xnthread_sync_window(struct xnthread *thread) +{ + if (thread->u_window) { + thread->u_window->state = thread->state; + thread->u_window->info = thread->info; + } +} + +static inline +void xnthread_clear_sync_window(struct xnthread *thread, int state_bits) +{ + if (thread->u_window) { + thread->u_window->state = thread->state & ~state_bits; + thread->u_window->info = thread->info; + } +} + +static inline +void xnthread_set_sync_window(struct xnthread *thread, int state_bits) +{ + if (thread->u_window) { + thread->u_window->state = thread->state | state_bits; + thread->u_window->info = thread->info; + } +} + +static inline int normalize_priority(int prio) +{ + return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1; +} + +int __xnthread_init(struct xnthread *thread, + const struct xnthread_init_attr *attr, + struct xnsched *sched, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param); + +void __xnthread_test_cancel(struct xnthread *curr); + +void __xnthread_cleanup(struct xnthread *curr); + +void __xnthread_discard(struct xnthread *thread); + +/** + * @fn struct xnthread *xnthread_current(void) + * @brief Retrieve the current Cobalt core TCB. + * + * Returns the address of the current Cobalt core thread descriptor, + * or NULL if running over a regular Linux task. This call is not + * affected by the current runtime mode of the core thread. + * + * @note The returned value may differ from xnsched_current_thread() + * called from the same context, since the latter returns the root + * thread descriptor for the current CPU if the caller is running in + * secondary mode. + * + * @coretags{unrestricted} + */ +static inline struct xnthread *xnthread_current(void) +{ + return pipeline_current()->thread; +} + +/** + * @fn struct xnthread *xnthread_from_task(struct task_struct *p) + * @brief Retrieve the Cobalt core TCB attached to a Linux task. + * + * Returns the address of the Cobalt core thread descriptor attached + * to the Linux task @a p, or NULL if @a p is a regular Linux + * task. This call is not affected by the current runtime mode of the + * core thread. + * + * @coretags{unrestricted} + */ +static inline struct xnthread *xnthread_from_task(struct task_struct *p) +{ + return pipeline_thread_from_task(p); +} + +/** + * @fn void xnthread_test_cancel(void) + * @brief Introduce a thread cancellation point. + * + * Terminates the current thread if a cancellation request is pending + * for it, i.e. if xnthread_cancel() was called. 
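xnthread_run_handler_stack() is the personality chaining primitive: each hook may return the next personality to consult, or NULL to end the walk, which is how events such as relax_thread traverse stacked interfaces. A sketch of a hypothetical personality that only taps this event:

```c
#include <cobalt/kernel/thread.h>

/*
 * Hypothetical relax hook: perform per-interface bookkeeping, then
 * return NULL to stop the walk (returning another personality would
 * forward the event to it instead).
 */
static struct xnthread_personality *
demo_relax_thread(struct xnthread *thread)
{
	/* e.g. release interface-private state tied to primary mode */
	return NULL;
}

static struct xnthread_personality demo_personality = {
	.name = "demo",
	.magic = 0x64656d6f,	/* arbitrary identification tag */
	.ops = {
		.relax_thread = demo_relax_thread,
	},
};
```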
+ * + * @coretags{mode-unrestricted} + */ +static inline void xnthread_test_cancel(void) +{ + struct xnthread *curr = xnthread_current(); + + if (curr && xnthread_test_info(curr, XNCANCELD)) + __xnthread_test_cancel(curr); +} + +static inline +void xnthread_complete_wait(struct xnthread_wait_context *wc) +{ + wc->posted = 1; +} + +static inline +int xnthread_wait_complete_p(struct xnthread_wait_context *wc) +{ + return wc->posted; +} + +#ifdef CONFIG_XENO_ARCH_FPU +void xnthread_switch_fpu(struct xnsched *sched); +#else +static inline void xnthread_switch_fpu(struct xnsched *sched) { } +#endif /* CONFIG_XENO_ARCH_FPU */ + +void xnthread_deregister(struct xnthread *thread); + +char *xnthread_format_status(unsigned long status, + char *buf, int size); + +pid_t xnthread_host_pid(struct xnthread *thread); + +int xnthread_set_clock(struct xnthread *thread, + struct xnclock *newclock); + +xnticks_t xnthread_get_timeout(struct xnthread *thread, + xnticks_t ns); + +xnticks_t xnthread_get_period(struct xnthread *thread); + +void xnthread_prepare_wait(struct xnthread_wait_context *wc); + +int xnthread_init(struct xnthread *thread, + const struct xnthread_init_attr *attr, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param); + +int xnthread_start(struct xnthread *thread, + const struct xnthread_start_attr *attr); + +int xnthread_set_mode(int clrmask, + int setmask); + +void xnthread_suspend(struct xnthread *thread, + int mask, + xnticks_t timeout, + xntmode_t timeout_mode, + struct xnsynch *wchan); + +void xnthread_resume(struct xnthread *thread, + int mask); + +int xnthread_unblock(struct xnthread *thread); + +int xnthread_set_periodic(struct xnthread *thread, + xnticks_t idate, + xntmode_t timeout_mode, + xnticks_t period); + +int xnthread_wait_period(unsigned long *overruns_r); + +int xnthread_set_slice(struct xnthread *thread, + xnticks_t quantum); + +void xnthread_cancel(struct xnthread *thread); + +int xnthread_join(struct xnthread *thread, bool uninterruptible); + +int xnthread_harden(void); + +void xnthread_relax(int notify, int reason); + +void __xnthread_kick(struct xnthread *thread); + +void xnthread_kick(struct xnthread *thread); + +void __xnthread_demote(struct xnthread *thread); + +void xnthread_demote(struct xnthread *thread); + +void __xnthread_signal(struct xnthread *thread, int sig, int arg); + +void xnthread_signal(struct xnthread *thread, int sig, int arg); + +void xnthread_pin_initial(struct xnthread *thread); + +void xnthread_call_mayday(struct xnthread *thread, int reason); + +static inline void xnthread_get_resource(struct xnthread *curr) +{ + if (xnthread_test_state(curr, XNWEAK|XNDEBUG)) + curr->res_count++; +} + +static inline int xnthread_put_resource(struct xnthread *curr) +{ + if (xnthread_test_state(curr, XNWEAK) || + IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) { + if (unlikely(curr->res_count == 0)) { + if (xnthread_test_state(curr, XNWARN)) + xnthread_signal(curr, SIGDEBUG, + SIGDEBUG_RESCNT_IMBALANCE); + return -EPERM; + } + curr->res_count--; + } + + return 0; +} + +static inline void xnthread_commit_ceiling(struct xnthread *curr) +{ + if (curr->u_window->pp_pending) + xnsynch_commit_ceiling(curr); +} + +#ifdef CONFIG_SMP + +void xnthread_migrate_passive(struct xnthread *thread, + struct xnsched *sched); +#else + +static inline void xnthread_migrate_passive(struct xnthread *thread, + struct xnsched *sched) +{ } + +#endif + +int __xnthread_set_schedparam(struct xnthread *thread, + struct xnsched_class *sched_class, + const union 
xnsched_policy_param *sched_param); + +int xnthread_set_schedparam(struct xnthread *thread, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param); + +int xnthread_killall(int grace, int mask); + +void __xnthread_propagate_schedparam(struct xnthread *curr); + +static inline void xnthread_propagate_schedparam(struct xnthread *curr) +{ + if (xnthread_test_info(curr, XNSCHEDP)) + __xnthread_propagate_schedparam(curr); +} + +extern struct xnthread_personality xenomai_personality; + +/** @} */ + +#endif /* !_COBALT_KERNEL_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h new file mode 100644 index 0000000..e48022f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _COBALT_KERNEL_TIME_H +#define _COBALT_KERNEL_TIME_H + +#include <linux/time.h> +#include <linux/time64.h> + +/** + * Read struct __kernel_timespec from userspace and convert to + * struct timespec64 + * + * @param ts The destination, will be filled + * @param uts The source, provided by an application + * @return 0 on success, -EFAULT otherwise + */ +int cobalt_get_timespec64(struct timespec64 *ts, + const struct __kernel_timespec __user *uts); + +/** + * Convert struct timespec64 to struct __kernel_timespec + * and copy to userspace + * + * @param ts The source, provided by kernel + * @param uts The destination, will be filled + * @return 0 on success, -EFAULT otherwise + */ +int cobalt_put_timespec64(const struct timespec64 *ts, + struct __kernel_timespec __user *uts); + +#endif //_COBALT_KERNEL_TIME_H diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h new file mode 100644 index 0000000..703a135 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h @@ -0,0 +1,551 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA.
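The two time.h helpers above form the bridge between 64-bit kernel time and the struct __kernel_timespec layout exchanged with applications: syscall handlers fetch user input with cobalt_get_timespec64(), work on timespec64, and push results back with cobalt_put_timespec64(). A sketch of a hypothetical handler using both directions:

```c
#include <cobalt/kernel/time.h>

/*
 * Hypothetical syscall body: fetch a user-supplied timeout, round it
 * up to a full second, and hand it back. Both helpers return -EFAULT
 * on bad user memory.
 */
static int demo_roundup_timeout(struct __kernel_timespec __user *u_ts)
{
	struct timespec64 ts;
	int ret;

	ret = cobalt_get_timespec64(&ts, u_ts);
	if (ret)
		return ret;

	if (ts.tv_nsec > 0) {
		ts.tv_sec++;
		ts.tv_nsec = 0;
	}

	return cobalt_put_timespec64(&ts, u_ts);
}
```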
+ */ + +#ifndef _COBALT_KERNEL_TIMER_H +#define _COBALT_KERNEL_TIMER_H + +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/stat.h> +#include <cobalt/kernel/list.h> +#include <cobalt/kernel/assert.h> +#include <cobalt/kernel/ancillaries.h> +#include <asm/xenomai/wrappers.h> + +/** + * @addtogroup cobalt_core_timer + * @{ + */ +#define XN_INFINITE ((xnticks_t)0) +#define XN_NONBLOCK ((xnticks_t)-1) + +/* Timer modes */ +typedef enum xntmode { + XN_RELATIVE, + XN_ABSOLUTE, + XN_REALTIME +} xntmode_t; + +/* Timer status */ +#define XNTIMER_DEQUEUED 0x00000001 +#define XNTIMER_KILLED 0x00000002 +#define XNTIMER_PERIODIC 0x00000004 +#define XNTIMER_REALTIME 0x00000008 +#define XNTIMER_FIRED 0x00000010 +#define XNTIMER_RUNNING 0x00000020 +#define XNTIMER_KGRAVITY 0x00000040 +#define XNTIMER_UGRAVITY 0x00000080 +#define XNTIMER_IGRAVITY 0 /* most conservative */ + +#define XNTIMER_GRAVITY_MASK (XNTIMER_KGRAVITY|XNTIMER_UGRAVITY) +#define XNTIMER_INIT_MASK XNTIMER_GRAVITY_MASK + +/* These flags are available to the real-time interfaces */ +#define XNTIMER_SPARE0 0x01000000 +#define XNTIMER_SPARE1 0x02000000 +#define XNTIMER_SPARE2 0x04000000 +#define XNTIMER_SPARE3 0x08000000 +#define XNTIMER_SPARE4 0x10000000 +#define XNTIMER_SPARE5 0x20000000 +#define XNTIMER_SPARE6 0x40000000 +#define XNTIMER_SPARE7 0x80000000 + +/* Timer priorities */ +#define XNTIMER_LOPRIO (-999999999) +#define XNTIMER_STDPRIO 0 +#define XNTIMER_HIPRIO 999999999 + +struct xntlholder { + struct list_head link; + xnticks_t key; + int prio; +}; + +#define xntlholder_date(h) ((h)->key) +#define xntlholder_prio(h) ((h)->prio) +#define xntlist_init(q) INIT_LIST_HEAD(q) +#define xntlist_empty(q) list_empty(q) + +static inline struct xntlholder *xntlist_head(struct list_head *q) +{ + if (list_empty(q)) + return NULL; + + return list_first_entry(q, struct xntlholder, link); +} + +static inline struct xntlholder *xntlist_next(struct list_head *q, + struct xntlholder *h) +{ + if (list_is_last(&h->link, q)) + return NULL; + + return list_entry(h->link.next, struct xntlholder, link); +} + +static inline struct xntlholder *xntlist_second(struct list_head *q, + struct xntlholder *h) +{ + return xntlist_next(q, h); +} + +static inline void xntlist_insert(struct list_head *q, struct xntlholder *holder) +{ + struct xntlholder *p; + + if (list_empty(q)) { + list_add(&holder->link, q); + return; + } + + /* + * Insert the new timer at the proper place in the single + * queue. O(N) here, but this is the price for the increased + * flexibility... 
+ */ + list_for_each_entry_reverse(p, q, link) { + if ((xnsticks_t) (holder->key - p->key) > 0 || + (holder->key == p->key && holder->prio <= p->prio)) + break; + } + + list_add(&holder->link, &p->link); +} + +#define xntlist_remove(q, h) \ + do { \ + (void)(q); \ + list_del(&(h)->link); \ + } while (0) + +#if defined(CONFIG_XENO_OPT_TIMER_RBTREE) + +#include <linux/rbtree.h> + +typedef struct { + unsigned long long date; + unsigned prio; + struct rb_node link; +} xntimerh_t; + +#define xntimerh_date(h) ((h)->date) +#define xntimerh_prio(h) ((h)->prio) +#define xntimerh_init(h) do { } while (0) + +typedef struct { + struct rb_root root; + xntimerh_t *head; +} xntimerq_t; + +#define xntimerq_init(q) \ + ({ \ + xntimerq_t *_q = (q); \ + _q->root = RB_ROOT; \ + _q->head = NULL; \ + }) + +#define xntimerq_destroy(q) do { } while (0) +#define xntimerq_empty(q) ((q)->head == NULL) + +#define xntimerq_head(q) ((q)->head) + +#define xntimerq_next(q, h) \ + ({ \ + struct rb_node *_node = rb_next(&(h)->link); \ + _node ? (container_of(_node, xntimerh_t, link)) : NULL; \ + }) + +#define xntimerq_second(q, h) xntimerq_next(q, h) + +void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder); + +static inline void xntimerq_remove(xntimerq_t *q, xntimerh_t *holder) +{ + if (holder == q->head) + q->head = xntimerq_second(q, holder); + + rb_erase(&holder->link, &q->root); +} + +typedef struct { } xntimerq_it_t; + +#define xntimerq_it_begin(q,i) ((void) (i), xntimerq_head(q)) +#define xntimerq_it_next(q,i,h) ((void) (i), xntimerq_next((q),(h))) + +#else /* CONFIG_XENO_OPT_TIMER_LIST */ + +typedef struct xntlholder xntimerh_t; + +#define xntimerh_date(h) xntlholder_date(h) +#define xntimerh_prio(h) xntlholder_prio(h) +#define xntimerh_init(h) do { } while (0) + +typedef struct list_head xntimerq_t; + +#define xntimerq_init(q) xntlist_init(q) +#define xntimerq_destroy(q) do { } while (0) +#define xntimerq_empty(q) xntlist_empty(q) +#define xntimerq_head(q) xntlist_head(q) +#define xntimerq_second(q, h) xntlist_second((q),(h)) +#define xntimerq_insert(q, h) xntlist_insert((q),(h)) +#define xntimerq_remove(q, h) xntlist_remove((q),(h)) + +typedef struct { } xntimerq_it_t; + +#define xntimerq_it_begin(q,i) ((void) (i), xntlist_head(q)) +#define xntimerq_it_next(q,i,h) ((void) (i), xntlist_next((q),(h))) + +#endif /* CONFIG_XENO_OPT_TIMER_LIST */ + +struct xnsched; + +struct xntimerdata { + xntimerq_t q; +}; + +static inline struct xntimerdata * +xnclock_percpu_timerdata(struct xnclock *clock, int cpu) +{ + return per_cpu_ptr(clock->timerdata, cpu); +} + +static inline struct xntimerdata * +xnclock_this_timerdata(struct xnclock *clock) +{ + return raw_cpu_ptr(clock->timerdata); +} + +struct xntimer { +#ifdef CONFIG_XENO_OPT_EXTCLOCK + struct xnclock *clock; +#endif + /** Link in timers list. */ + xntimerh_t aplink; + struct list_head adjlink; + /** Timer status. */ + unsigned long status; + /** Periodic interval (clock ticks, 0 == one shot). */ + xnticks_t interval; + /** Periodic interval (nanoseconds, 0 == one shot). */ + xnticks_t interval_ns; + /** Count of timer ticks in periodic mode. */ + xnticks_t periodic_ticks; + /** First tick date in periodic mode. */ + xnticks_t start_date; + /** Date of next periodic release point (timer ticks). */ + xnticks_t pexpect_ticks; + /** Sched structure to which the timer is attached. */ + struct xnsched *sched; + /** Timeout handler. 
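Whichever backend is configured, the queue contract is identical: holders are ordered by expiry date first, then by timer priority for equal dates, so xntimerq_head() always yields the next timer to elapse. A small sketch against the generic holder API, with hypothetical tick values:

```c
#include <cobalt/kernel/timer.h>

/* Hypothetical: queue two holders, then peek at the earliest one. */
static void demo_timerq(void)
{
	xntimerq_t q;
	xntimerh_t a, b;

	xntimerq_init(&q);
	xntimerh_init(&a);
	xntimerh_init(&b);

	xntimerh_date(&a) = 2000;	/* later expiry */
	xntimerh_prio(&a) = XNTIMER_STDPRIO;
	xntimerh_date(&b) = 1000;	/* earlier expiry */
	xntimerh_prio(&b) = XNTIMER_STDPRIO;

	xntimerq_insert(&q, &a);
	xntimerq_insert(&q, &b);

	/* b elapses first, so it sits at the head of the queue. */
	XENO_BUG_ON(COBALT, xntimerq_head(&q) != &b);
}
```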
*/ + void (*handler)(struct xntimer *timer); +#ifdef CONFIG_XENO_OPT_STATS +#ifdef CONFIG_XENO_OPT_EXTCLOCK + struct xnclock *tracker; +#endif + /** Timer name to be displayed. */ + char name[XNOBJECT_NAME_LEN]; + /** Timer holder in timebase. */ + struct list_head next_stat; + /** Number of timer schedules. */ + xnstat_counter_t scheduled; + /** Number of timer events. */ + xnstat_counter_t fired; +#endif /* CONFIG_XENO_OPT_STATS */ +}; + +#ifdef CONFIG_XENO_OPT_EXTCLOCK + +static inline struct xnclock *xntimer_clock(struct xntimer *timer) +{ + return timer->clock; +} + +void xntimer_set_clock(struct xntimer *timer, + struct xnclock *newclock); + +#else /* !CONFIG_XENO_OPT_EXTCLOCK */ + +static inline struct xnclock *xntimer_clock(struct xntimer *timer) +{ + return &nkclock; +} + +static inline void xntimer_set_clock(struct xntimer *timer, + struct xnclock *newclock) +{ + XENO_BUG_ON(COBALT, newclock != &nkclock); +} + +#endif /* !CONFIG_XENO_OPT_EXTCLOCK */ + +#ifdef CONFIG_SMP +static inline struct xnsched *xntimer_sched(struct xntimer *timer) +{ + return timer->sched; +} +#else /* !CONFIG_SMP */ +#define xntimer_sched(t) xnsched_current() +#endif /* !CONFIG_SMP */ + +#define xntimer_percpu_queue(__timer) \ + ({ \ + struct xntimerdata *tmd; \ + int cpu = xnsched_cpu((__timer)->sched); \ + tmd = xnclock_percpu_timerdata(xntimer_clock(__timer), cpu); \ + &tmd->q; \ + }) + +static inline unsigned long xntimer_gravity(struct xntimer *timer) +{ + struct xnclock *clock = xntimer_clock(timer); + + if (timer->status & XNTIMER_KGRAVITY) + return clock->gravity.kernel; + + if (timer->status & XNTIMER_UGRAVITY) + return clock->gravity.user; + + return clock->gravity.irq; +} + +static inline void xntimer_update_date(struct xntimer *timer) +{ + xntimerh_date(&timer->aplink) = timer->start_date + + xnclock_ns_to_ticks(xntimer_clock(timer), + timer->periodic_ticks * timer->interval_ns) + - xntimer_gravity(timer); +} + +static inline xnticks_t xntimer_pexpect(struct xntimer *timer) +{ + return timer->start_date + + xnclock_ns_to_ticks(xntimer_clock(timer), + timer->pexpect_ticks * timer->interval_ns); +} + +static inline void xntimer_set_priority(struct xntimer *timer, + int prio) +{ + xntimerh_prio(&timer->aplink) = prio; +} + +static inline int xntimer_active_p(struct xntimer *timer) +{ + return timer->sched != NULL; +} + +static inline int xntimer_running_p(struct xntimer *timer) +{ + return (timer->status & XNTIMER_RUNNING) != 0; +} + +static inline int xntimer_fired_p(struct xntimer *timer) +{ + return (timer->status & XNTIMER_FIRED) != 0; +} + +static inline int xntimer_periodic_p(struct xntimer *timer) +{ + return (timer->status & XNTIMER_PERIODIC) != 0; +} + +void __xntimer_init(struct xntimer *timer, + struct xnclock *clock, + void (*handler)(struct xntimer *timer), + struct xnsched *sched, + int flags); + +void xntimer_set_gravity(struct xntimer *timer, + int gravity); + +#ifdef CONFIG_XENO_OPT_STATS + +#define xntimer_init(__timer, __clock, __handler, __sched, __flags) \ +do { \ + __xntimer_init(__timer, __clock, __handler, __sched, __flags); \ + xntimer_set_name(__timer, #__handler); \ +} while (0) + +static inline void xntimer_reset_stats(struct xntimer *timer) +{ + xnstat_counter_set(&timer->scheduled, 0); + xnstat_counter_set(&timer->fired, 0); +} + +static inline void xntimer_account_scheduled(struct xntimer *timer) +{ + xnstat_counter_inc(&timer->scheduled); +} + +static inline void xntimer_account_fired(struct xntimer *timer) +{ + xnstat_counter_inc(&timer->fired); +} + +static inline 
void xntimer_set_name(struct xntimer *timer, const char *name) +{ + knamecpy(timer->name, name); +} + +#else /* !CONFIG_XENO_OPT_STATS */ + +#define xntimer_init __xntimer_init + +static inline void xntimer_reset_stats(struct xntimer *timer) { } + +static inline void xntimer_account_scheduled(struct xntimer *timer) { } + +static inline void xntimer_account_fired(struct xntimer *timer) { } + +static inline void xntimer_set_name(struct xntimer *timer, const char *name) { } + +#endif /* !CONFIG_XENO_OPT_STATS */ + +#if defined(CONFIG_XENO_OPT_EXTCLOCK) && defined(CONFIG_XENO_OPT_STATS) +void xntimer_switch_tracking(struct xntimer *timer, + struct xnclock *newclock); +#else +static inline +void xntimer_switch_tracking(struct xntimer *timer, + struct xnclock *newclock) { } +#endif + +void xntimer_destroy(struct xntimer *timer); + +/** + * @fn xnticks_t xntimer_interval(struct xntimer *timer) + * + * @brief Return the timer interval value. + * + * Return the timer interval value in nanoseconds. + * + * @param timer The address of a valid timer descriptor. + * + * @return The duration of a period in nanoseconds. The special value + * XN_INFINITE is returned if @a timer is currently disabled or + * one shot. + * + * @coretags{unrestricted, atomic-entry} + */ +static inline xnticks_t xntimer_interval(struct xntimer *timer) +{ + return timer->interval_ns; +} + +static inline xnticks_t xntimer_expiry(struct xntimer *timer) +{ + /* Real expiry date in ticks without anticipation (no gravity) */ + return xntimerh_date(&timer->aplink) + xntimer_gravity(timer); +} + +int xntimer_start(struct xntimer *timer, + xnticks_t value, + xnticks_t interval, + xntmode_t mode); + +void __xntimer_stop(struct xntimer *timer); + +xnticks_t xntimer_get_date(struct xntimer *timer); + +xnticks_t __xntimer_get_timeout(struct xntimer *timer); + +xnticks_t xntimer_get_interval(struct xntimer *timer); + +int xntimer_heading_p(struct xntimer *timer); + +static inline void xntimer_stop(struct xntimer *timer) +{ + if (timer->status & XNTIMER_RUNNING) + __xntimer_stop(timer); +} + +static inline xnticks_t xntimer_get_timeout(struct xntimer *timer) +{ + if (!xntimer_running_p(timer)) + return XN_INFINITE; + + return __xntimer_get_timeout(timer); +} + +static inline xnticks_t xntimer_get_timeout_stopped(struct xntimer *timer) +{ + return __xntimer_get_timeout(timer); +} + +static inline void xntimer_enqueue(struct xntimer *timer, + xntimerq_t *q) +{ + xntimerq_insert(q, &timer->aplink); + timer->status &= ~XNTIMER_DEQUEUED; + xntimer_account_scheduled(timer); +} + +static inline void xntimer_dequeue(struct xntimer *timer, + xntimerq_t *q) +{ + xntimerq_remove(q, &timer->aplink); + timer->status |= XNTIMER_DEQUEUED; +} + +unsigned long long xntimer_get_overruns(struct xntimer *timer, + struct xnthread *waiter, + xnticks_t now); + +#ifdef CONFIG_SMP + +void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched); + +static inline +void xntimer_migrate(struct xntimer *timer, struct xnsched *sched) +{ /* nklocked, IRQs off */ + if (timer->sched != sched) + __xntimer_migrate(timer, sched); +} + +void __xntimer_set_affinity(struct xntimer *timer, + struct xnsched *sched); + +static inline void xntimer_set_affinity(struct xntimer *timer, + struct xnsched *sched) +{ + if (sched != xntimer_sched(timer)) + __xntimer_set_affinity(timer, sched); +} + +#else /* ! 
CONFIG_SMP */ + +static inline void xntimer_migrate(struct xntimer *timer, + struct xnsched *sched) +{ + timer->sched = sched; +} + +static inline void xntimer_set_affinity(struct xntimer *timer, + struct xnsched *sched) +{ + xntimer_migrate(timer, sched); +} + +#endif /* CONFIG_SMP */ + +char *xntimer_format_time(xnticks_t ns, + char *buf, size_t bufsz); + +/** @} */ + +#endif /* !_COBALT_KERNEL_TIMER_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h new file mode 100644 index 0000000..e46dd4e --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_TRACE_H +#define _COBALT_KERNEL_TRACE_H + +#include <pipeline/trace.h> + +#endif /* !_COBALT_KERNEL_TRACE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h new file mode 100644 index 0000000..c52ee32 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_TREE_H +#define _COBALT_KERNEL_TREE_H + +#include <linux/errno.h> +#include <linux/rbtree.h> +#include <cobalt/kernel/assert.h> + +typedef unsigned long long xnkey_t; + +static inline xnkey_t PTR_KEY(void *p) +{ + return (xnkey_t)(long)p; +} + +struct xnid { + xnkey_t key; + struct rb_node link; +}; + +#define xnid_entry(ptr, type, member) \ + ({ \ + typeof(ptr) _ptr = (ptr); \ + (_ptr ? 
container_of(_ptr, type, member.link) : NULL); \ + }) + +#define xnid_next_entry(ptr, member) \ + xnid_entry(rb_next(&ptr->member.link), typeof(*ptr), member) + +static inline void xntree_init(struct rb_root *t) +{ + *t = RB_ROOT; +} + +#define xntree_for_each_entry(pos, root, member) \ + for (pos = xnid_entry(rb_first(root), typeof(*pos), member); \ + pos; pos = xnid_next_entry(pos, member)) + +void xntree_cleanup(struct rb_root *t, void *cookie, + void (*destroy)(void *cookie, struct xnid *id)); + +int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key); + +static inline xnkey_t xnid_key(struct xnid *i) +{ + return i->key; +} + +static inline +struct xnid *xnid_fetch(struct rb_root *t, xnkey_t key) +{ + struct rb_node *node = t->rb_node; + + while (node) { + struct xnid *i = container_of(node, struct xnid, link); + + if (key < i->key) + node = node->rb_left; + else if (key > i->key) + node = node->rb_right; + else + return i; + } + + return NULL; +} + +static inline int xnid_remove(struct rb_root *t, struct xnid *xnid) +{ +#ifdef CONFIG_XENO_OPT_DEBUG_COBALT + if (xnid_fetch(t, xnid->key) != xnid) + return -ENOENT; +#endif + rb_erase(&xnid->link, t); + return 0; +} + +#endif /* _COBALT_KERNEL_TREE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h new file mode 100644 index 0000000..7da88a7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_VDSO_H +#define _COBALT_KERNEL_VDSO_H + +#include <linux/time.h> +#include <asm/barrier.h> +#include <asm/atomic.h> +#include <asm/processor.h> +#include <cobalt/uapi/kernel/vdso.h> + +extern struct xnvdso *nkvdso; + +/* + * Define the available feature set here. We have a single feature + * defined for now, only in the I-pipe case. + */ +#ifdef CONFIG_IPIPE_HAVE_HOSTRT + +#define XNVDSO_FEATURES XNVDSO_FEAT_HOST_REALTIME + +static inline struct xnvdso_hostrt_data *get_hostrt_data(void) +{ + return &nkvdso->hostrt_data; +} + +#else + +#define XNVDSO_FEATURES 0 + +#endif + +#endif /* _COBALT_KERNEL_VDSO_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h new file mode 100644 index 0000000..a53c237 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h @@ -0,0 +1,667 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
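The xnid wrapper above gives Cobalt a small keyed lookup service on top of the kernel's red-black trees; callers commonly key entries by object address through PTR_KEY(). A sketch, assuming a hypothetical registry of demo objects:

```c
#include <cobalt/kernel/tree.h>

struct demo_obj {
	struct xnid id;
	int payload;
};

static struct rb_root demo_tree = RB_ROOT;

/* Index an object by its address; fails if the key is already in use. */
static int demo_index(struct demo_obj *obj)
{
	return xnid_enter(&demo_tree, &obj->id, PTR_KEY(obj));
}

/* O(log n) lookup by the same key. */
static struct demo_obj *demo_find(void *addr)
{
	struct xnid *id = xnid_fetch(&demo_tree, PTR_KEY(addr));

	return id ? container_of(id, struct demo_obj, id) : NULL;
}
```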
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _COBALT_KERNEL_VFILE_H +#define _COBALT_KERNEL_VFILE_H + +#if defined(CONFIG_XENO_OPT_VFILE) || defined(DOXYGEN_CPP) + +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <cobalt/kernel/lock.h> + +/** + * @addtogroup cobalt_core_vfile + * @{ + */ + +struct xnvfile_directory; +struct xnvfile_regular_iterator; +struct xnvfile_snapshot_iterator; +struct xnvfile_lock_ops; + +struct xnvfile { + struct proc_dir_entry *pde; + struct file *file; + struct xnvfile_lock_ops *lockops; + int refcnt; + void *private; +}; + +/** + * @brief Vfile locking operations + * @anchor vfile_lockops + * + * This structure describes the operations to be provided for + * implementing locking support on vfiles. They apply to both + * snapshot-driven and regular vfiles. + */ +struct xnvfile_lock_ops { + /** + * @anchor lockops_get + * This handler should grab the desired lock. + * + * @param vfile A pointer to the virtual file which needs + * locking. + * + * @return zero should be returned if the call + * succeeds. Otherwise, a negative error code can be returned; + * upon error, the current vfile operation is aborted, and the + * user-space caller is passed back the error value. + */ + int (*get)(struct xnvfile *vfile); + /** + * @anchor lockops_put This handler should release the lock + * previously grabbed by the @ref lockops_get "get() handler". + * + * @param vfile A pointer to the virtual file which currently + * holds the lock to release. + */ + void (*put)(struct xnvfile *vfile); +}; + +struct xnvfile_hostlock_class { + struct xnvfile_lock_ops ops; + struct mutex mutex; +}; + +struct xnvfile_nklock_class { + struct xnvfile_lock_ops ops; + spl_t s; +}; + +struct xnvfile_input { + const char __user *u_buf; + size_t size; + struct xnvfile *vfile; +}; + +/** + * @brief Regular vfile operation descriptor + * @anchor regular_ops + * + * This structure describes the operations available with a regular + * vfile. It defines handlers for sending back formatted kernel data + * upon a user-space read request, and for obtaining user data upon a + * user-space write request. + */ +struct xnvfile_regular_ops { + /** + * @anchor regular_rewind This handler is called only once, + * when the virtual file is opened, before the @ref + * regular_begin "begin() handler" is invoked. + * + * @param it A pointer to the vfile iterator which will be + * used to read the file contents. + * + * @return Zero should be returned upon success. Otherwise, a + * negative error code aborts the operation, and is passed + * back to the reader. + * + * @note This handler is optional. It should not be used to + * allocate resources but rather to perform consistency + * checks, since no closure call is issued in case the open + * sequence eventually fails. + */ + int (*rewind)(struct xnvfile_regular_iterator *it); + /** + * @anchor regular_begin + * This handler should prepare for iterating over the records + * upon a read request, starting from the specified position. + * + * @param it A pointer to the current vfile iterator. 
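To make the locking contract of struct xnvfile_lock_ops concrete: get() grabs the subsystem lock before a vfile operation runs and may veto it with a negative return, put() releases the lock afterwards. A sketch of custom lock operations guarding a hypothetical driver list with its own mutex, in the spirit of the stock xnvfile_hostlock_class:

```c
#include <linux/mutex.h>
#include <cobalt/kernel/vfile.h>

static DEFINE_MUTEX(demo_list_lock);

/* A negative return value here aborts the whole vfile operation. */
static int demo_vfile_get(struct xnvfile *vfile)
{
	return mutex_lock_interruptible(&demo_list_lock);
}

static void demo_vfile_put(struct xnvfile *vfile)
{
	mutex_unlock(&demo_list_lock);
}

static struct xnvfile_lock_ops demo_lockops = {
	.get = demo_vfile_get,
	.put = demo_vfile_put,
};
```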
On + * entry, it->pos is set to the (0-based) position of the + * first record to output. This handler may be called multiple + * times with different position requests. + * + * @return A pointer to the first record to format and output, + * to be passed to the @ref regular_show "show() handler" as + * its @a data parameter, if the call succeeds. Otherwise: + * + * - NULL in case no record is available, in which case the + * read operation will terminate immediately with no output. + * + * - VFILE_SEQ_START, a special value indicating that @ref + * regular_show "the show() handler" should receive a NULL + * data pointer first, in order to output a header. + * + * - ERR_PTR(errno), where errno is a negative error code; + * upon error, the current operation will be aborted + * immediately. + * + * @note This handler is optional; if none is given in the + * operation descriptor (i.e. NULL value), the @ref + * regular_show "show() handler()" will be called only once + * for a read operation, with a NULL @a data parameter. This + * particular setting is convenient for simple regular vfiles + * having a single, fixed record to output. + */ + void *(*begin)(struct xnvfile_regular_iterator *it); + /** + * @anchor regular_next + * This handler should return the address of the next record + * to format and output by the @ref regular_show "show() + * handler". + * + * @param it A pointer to the current vfile iterator. On + * entry, it->pos is set to the (0-based) position of the + * next record to output. + * + * @return A pointer to the next record to format and output, + * to be passed to the @ref regular_show "show() handler" as + * its @a data parameter, if the call succeeds. Otherwise: + * + * - NULL in case no record is available, in which case the + * read operation will terminate immediately with no output. + * + * - ERR_PTR(errno), where errno is a negative error code; + * upon error, the current operation will be aborted + * immediately. + * + * @note This handler is optional; if none is given in the + * operation descriptor (i.e. NULL value), the read operation + * will stop after the first invocation of the @ref regular_show + * "show() handler". + */ + void *(*next)(struct xnvfile_regular_iterator *it); + /** + * @anchor regular_end + * This handler is called after all records have been output. + * + * @param it A pointer to the current vfile iterator. + * + * @note This handler is optional and the pointer may be NULL. + */ + void (*end)(struct xnvfile_regular_iterator *it); + /** + * @anchor regular_show + * This handler should format and output a record. + * + * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and + * xnvfile_putc() are available to format and/or emit the + * output. All routines take the iterator argument @a it as + * their first parameter. + * + * @param it A pointer to the current vfile iterator. + * + * @param data A pointer to the record to format then + * output. The first call to the handler may receive a NULL @a + * data pointer, depending on the presence and/or return of a + * @ref regular_begin "handler"; the show handler should test + * this special value to output any header that fits, prior to + * receiving more calls with actual records. + * + * @return zero if the call succeeds, also indicating that the + * handler should be called for the next record if + * any. Otherwise: + * + * - A negative error code. This will abort the output phase, + * and return this status to the reader.
+ *
+ * - VFILE_SEQ_SKIP, a special value indicating that the
+ * current record should be skipped and will not be output.
+ */
+ int (*show)(struct xnvfile_regular_iterator *it, void *data);
+ /**
+ * @anchor regular_store
+ * This handler receives data written to the vfile, likely for
+ * updating some kernel setting, or triggering any other
+ * action which fits. This is the only handler which deals
+ * with the write-side of a vfile. It is called when writing
+ * to the /proc entry of the vfile from a user-space process.
+ *
+ * The input data is described by a descriptor passed to the
+ * handler, which may be subsequently passed to parsing helper
+ * routines. For instance, xnvfile_get_string() will accept
+ * the input descriptor for returning the written data as a
+ * null-terminated character string. On the other hand,
+ * xnvfile_get_integer() will attempt to return a long integer
+ * from the input data.
+ *
+ * @param input A pointer to an input descriptor. It refers to
+ * opaque data from the handler's standpoint.
+ *
+ * @return the number of bytes read from the input descriptor
+ * if the call succeeds. Otherwise, a negative error code.
+ * Return values from parsing helper routines are commonly
+ * passed back to the caller by the @ref regular_store
+ * "store() handler".
+ *
+ * @note This handler is optional, and may be omitted for
+ * read-only vfiles.
+ */
+ ssize_t (*store)(struct xnvfile_input *input);
+};
+
+struct xnvfile_regular {
+ struct xnvfile entry;
+ size_t privsz;
+ struct xnvfile_regular_ops *ops;
+};
+
+struct xnvfile_regular_template {
+ size_t privsz;
+ struct xnvfile_regular_ops *ops;
+ struct xnvfile_lock_ops *lockops;
+};
+
+/**
+ * @brief Regular vfile iterator
+ * @anchor regular_iterator
+ *
+ * This structure defines an iterator over a regular vfile.
+ */
+struct xnvfile_regular_iterator {
+ /** Current record position while iterating. */
+ loff_t pos;
+ /** Backlink to the host sequential file supporting the vfile. */
+ struct seq_file *seq;
+ /** Backlink to the vfile being read. */
+ struct xnvfile_regular *vfile;
+ /**
+ * Start of private area. Use xnvfile_iterator_priv() to
+ * address it.
+ */
+ char private[0];
+};
+
+/**
+ * @brief Snapshot vfile operation descriptor
+ * @anchor snapshot_ops
+ *
+ * This structure describes the operations available with a
+ * snapshot-driven vfile. It defines handlers for returning a
+ * printable snapshot of some Xenomai object contents upon a
+ * user-space read request, and for updating this object upon a
+ * user-space write request.
+ */
+struct xnvfile_snapshot_ops {
+ /**
+ * @anchor snapshot_rewind
+ * This handler (re-)initializes the data collection, moving
+ * the seek pointer to the first record. When the file
+ * revision tag is touched while collecting data, the current
+ * reading is aborted, all collected data dropped, and the
+ * vfile is eventually rewound.
+ *
+ * @param it A pointer to the current snapshot iterator. Two
+ * useful pieces of information can be retrieved from this
+ * iterator in this context:
+ *
+ * - it->vfile is a pointer to the descriptor of the virtual
+ * file being rewound.
+ *
+ * - xnvfile_iterator_priv(it) returns a pointer to the
+ * private data area, available from the descriptor, whose
+ * size is vfile->privsz. If the latter size is zero, the
+ * returned pointer is meaningless and should not be used.
+ *
+ * @return A negative error code aborts the data collection,
+ * and is passed back to the reader.
Otherwise:
+ *
+ * - a strictly positive value is interpreted as the total
+ * number of records which will be returned by the @ref
+ * snapshot_next "next() handler" during the data collection
+ * phase. If no @ref snapshot_begin "begin() handler" is
+ * provided in the @ref snapshot_ops "operation descriptor",
+ * this value is used to allocate the snapshot buffer
+ * internally. The size of this buffer would then be
+ * vfile->datasz * value.
+ *
+ * - zero leaves the allocation to the @ref snapshot_begin
+ * "begin() handler" if present, or indicates that no record
+ * is to be output in case such handler is not given.
+ *
+ * @note This handler is optional; a NULL value indicates that
+ * nothing needs to be done for rewinding the vfile. It is
+ * called with the vfile lock held.
+ */
+ int (*rewind)(struct xnvfile_snapshot_iterator *it);
+ /**
+ * @anchor snapshot_begin
+ * This handler should allocate the snapshot buffer to hold
+ * records during the data collection phase. When specified,
+ * all records collected via the @ref snapshot_next "next()
+ * handler" will be written to a cell from the memory area
+ * returned by begin().
+ *
+ * @param it A pointer to the current snapshot iterator.
+ *
+ * @return A pointer to the record buffer, if the call
+ * succeeds. Otherwise:
+ *
+ * - NULL in case of allocation error. This will abort the data
+ * collection, and return -ENOMEM to the reader.
+ *
+ * - VFILE_SEQ_EMPTY, a special value indicating that no
+ * record will be output. In such a case, the @ref
+ * snapshot_next "next() handler" will not be called, and the
+ * data collection will stop immediately. However, the @ref
+ * snapshot_show "show() handler" will still be called once,
+ * with a NULL data pointer (i.e. header display request).
+ *
+ * @note This handler is optional; if none is given, the
+ * buffer is allocated internally, sized according to the
+ * value returned by the @ref snapshot_rewind "rewind() handler".
+ */
+ void *(*begin)(struct xnvfile_snapshot_iterator *it);
+ /**
+ * @anchor snapshot_end
+ * This handler releases the memory buffer previously obtained
+ * from begin(). It is usually called after the snapshot data
+ * has been output by show(), but it may also be called before
+ * rewinding the vfile after a revision change, to release the
+ * dropped buffer.
+ *
+ * @param it A pointer to the current snapshot iterator.
+ *
+ * @param buf A pointer to the buffer to release.
+ *
+ * @note This routine is optional and the pointer may be
+ * NULL. It is not needed upon internal buffer allocation;
+ * see the description of the @ref snapshot_rewind "rewind()
+ * handler".
+ */
+ void (*end)(struct xnvfile_snapshot_iterator *it, void *buf);
+ /**
+ * @anchor snapshot_next
+ * This handler fetches the next record, as part of the
+ * snapshot data to be sent back to the reader via the
+ * show() handler.
+ *
+ * @param it A pointer to the current snapshot iterator.
+ *
+ * @param data A pointer to the record to fill in.
+ *
+ * @return a strictly positive value, if the call succeeds and
+ * leaves a valid record in @a data, which should be passed
+ * to the @ref snapshot_show "show() handler" during the
+ * formatting and output phase. Otherwise:
+ *
+ * - A negative error code. This will abort the data
+ * collection, and return this status to the reader.
+ *
+ * - VFILE_SEQ_SKIP, a special value indicating that the
+ * current record should be skipped.
In such a case, the @a
+ * data pointer is not advanced to the next position before
+ * the @ref snapshot_next "next() handler" is called anew.
+ *
+ * @note This handler is called with the vfile lock
+ * held. Before each invocation of this handler, the vfile
+ * core checks whether the revision tag has been touched, in
+ * which case the data collection is restarted from scratch. A
+ * data collection phase succeeds whenever all records can be
+ * fetched via the @ref snapshot_next "next() handler", while
+ * the revision tag remains unchanged, which indicates that a
+ * consistent snapshot of the object state was taken.
+ */
+ int (*next)(struct xnvfile_snapshot_iterator *it, void *data);
+ /**
+ * @anchor snapshot_show
+ * This handler should format and output a record from the
+ * collected data.
+ *
+ * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
+ * xnvfile_putc() are available to format and/or emit the
+ * output. All routines take the iterator argument @a it as
+ * their first parameter.
+ *
+ * @param it A pointer to the current snapshot iterator.
+ *
+ * @param data A pointer to the record to format then
+ * output. The first call to the handler is always passed a
+ * NULL @a data pointer; the show handler should test this
+ * special value to output any header that fits, prior to
+ * receiving more calls with actual records.
+ *
+ * @return zero if the call succeeds, also indicating that the
+ * handler should be called for the next record if
+ * any. Otherwise:
+ *
+ * - A negative error code. This will abort the output phase,
+ * and return this status to the reader.
+ *
+ * - VFILE_SEQ_SKIP, a special value indicating that the
+ * current record should be skipped and will not be output.
+ */
+ int (*show)(struct xnvfile_snapshot_iterator *it, void *data);
+ /**
+ * @anchor snapshot_store
+ * This handler receives data written to the vfile, likely for
+ * updating the associated Xenomai object's state, or
+ * triggering any other action which fits. This is the only
+ * handler which deals with the write-side of a vfile. It is
+ * called when writing to the /proc entry of the vfile
+ * from a user-space process.
+ *
+ * The input data is described by a descriptor passed to the
+ * handler, which may be subsequently passed to parsing helper
+ * routines. For instance, xnvfile_get_string() will accept
+ * the input descriptor for returning the written data as a
+ * null-terminated character string. On the other hand,
+ * xnvfile_get_integer() will attempt to return a long integer
+ * from the input data.
+ *
+ * @param input A pointer to an input descriptor. It refers to
+ * opaque data from the handler's standpoint.
+ *
+ * @return the number of bytes read from the input descriptor
+ * if the call succeeds. Otherwise, a negative error code.
+ * Return values from parsing helper routines are commonly
+ * passed back to the caller by the @ref snapshot_store
+ * "store() handler".
+ *
+ * @note This handler is optional, and may be omitted for
+ * read-only vfiles.
+ */
+ ssize_t (*store)(struct xnvfile_input *input);
+};
+
+/**
+ * @brief Snapshot revision tag
+ * @anchor revision_tag
+ *
+ * This structure defines a revision tag to be used with @ref
+ * snapshot_vfile "snapshot-driven vfiles".
+ */
+struct xnvfile_rev_tag {
+ /** Current revision number.
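Incremented by xnvfile_touch_tag() whenever the underlying data changes, invalidating any snapshot collection in progress.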
*/ + int rev; +}; + +struct xnvfile_snapshot_template { + size_t privsz; + size_t datasz; + struct xnvfile_rev_tag *tag; + struct xnvfile_snapshot_ops *ops; + struct xnvfile_lock_ops *lockops; +}; + +/** + * @brief Snapshot vfile descriptor + * @anchor snapshot_vfile + * + * This structure describes a snapshot-driven vfile. Reading from + * such a vfile involves a preliminary data collection phase under + * lock protection, and a subsequent formatting and output phase of + * the collected data records. Locking is done in a way that does not + * increase worst-case latency, regardless of the number of records to + * be collected for output. + */ +struct xnvfile_snapshot { + struct xnvfile entry; + size_t privsz; + size_t datasz; + struct xnvfile_rev_tag *tag; + struct xnvfile_snapshot_ops *ops; +}; + +/** + * @brief Snapshot-driven vfile iterator + * @anchor snapshot_iterator + * + * This structure defines an iterator over a snapshot-driven vfile. + */ +struct xnvfile_snapshot_iterator { + /** Number of collected records. */ + int nrdata; + /** Address of record buffer. */ + caddr_t databuf; + /** Backlink to the host sequential file supporting the vfile. */ + struct seq_file *seq; + /** Backlink to the vfile being read. */ + struct xnvfile_snapshot *vfile; + /** Buffer release handler. */ + void (*endfn)(struct xnvfile_snapshot_iterator *it, void *buf); + /** + * Start of private area. Use xnvfile_iterator_priv() to + * address it. + */ + char private[0]; +}; + +struct xnvfile_directory { + struct xnvfile entry; +}; + +struct xnvfile_link { + struct xnvfile entry; +}; + +/* vfile.begin()=> */ +#define VFILE_SEQ_EMPTY ((void *)-1) +/* =>vfile.show() */ +#define VFILE_SEQ_START SEQ_START_TOKEN +/* vfile.next/show()=> */ +#define VFILE_SEQ_SKIP 2 + +#define xnvfile_printf(it, args...) 
seq_printf((it)->seq, ##args) +#define xnvfile_write(it, data, len) seq_write((it)->seq, (data),(len)) +#define xnvfile_puts(it, s) seq_puts((it)->seq, (s)) +#define xnvfile_putc(it, c) seq_putc((it)->seq, (c)) + +static inline void xnvfile_touch_tag(struct xnvfile_rev_tag *tag) +{ + tag->rev++; +} + +static inline void xnvfile_touch(struct xnvfile_snapshot *vfile) +{ + xnvfile_touch_tag(vfile->tag); +} + +#define xnvfile_noentry \ + { \ + .pde = NULL, \ + .private = NULL, \ + .file = NULL, \ + .refcnt = 0, \ + } + +#define xnvfile_nodir { .entry = xnvfile_noentry } +#define xnvfile_nolink { .entry = xnvfile_noentry } +#define xnvfile_nofile { .entry = xnvfile_noentry } + +#define xnvfile_priv(e) ((e)->entry.private) +#define xnvfile_nref(e) ((e)->entry.refcnt) +#define xnvfile_file(e) ((e)->entry.file) +#define xnvfile_iterator_priv(it) ((void *)(&(it)->private)) + +extern struct xnvfile_nklock_class xnvfile_nucleus_lock; + +extern struct xnvfile_directory cobalt_vfroot; + +int xnvfile_init_root(void); + +void xnvfile_destroy_root(void); + +int xnvfile_init_snapshot(const char *name, + struct xnvfile_snapshot *vfile, + struct xnvfile_directory *parent); + +int xnvfile_init_regular(const char *name, + struct xnvfile_regular *vfile, + struct xnvfile_directory *parent); + +int xnvfile_init_dir(const char *name, + struct xnvfile_directory *vdir, + struct xnvfile_directory *parent); + +int xnvfile_init_link(const char *from, + const char *to, + struct xnvfile_link *vlink, + struct xnvfile_directory *parent); + +void xnvfile_destroy(struct xnvfile *vfile); + +ssize_t xnvfile_get_blob(struct xnvfile_input *input, + void *data, size_t size); + +ssize_t xnvfile_get_string(struct xnvfile_input *input, + char *s, size_t maxlen); + +ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp); + +int __vfile_hostlock_get(struct xnvfile *vfile); + +void __vfile_hostlock_put(struct xnvfile *vfile); + +static inline +void xnvfile_destroy_snapshot(struct xnvfile_snapshot *vfile) +{ + xnvfile_destroy(&vfile->entry); +} + +static inline +void xnvfile_destroy_regular(struct xnvfile_regular *vfile) +{ + xnvfile_destroy(&vfile->entry); +} + +static inline +void xnvfile_destroy_dir(struct xnvfile_directory *vdir) +{ + xnvfile_destroy(&vdir->entry); +} + +static inline +void xnvfile_destroy_link(struct xnvfile_link *vlink) +{ + xnvfile_destroy(&vlink->entry); +} + +#define DEFINE_VFILE_HOSTLOCK(name) \ + struct xnvfile_hostlock_class name = { \ + .ops = { \ + .get = __vfile_hostlock_get, \ + .put = __vfile_hostlock_put, \ + }, \ + .mutex = __MUTEX_INITIALIZER(name.mutex), \ + } + +#else /* !CONFIG_XENO_OPT_VFILE */ + +#define xnvfile_touch_tag(tag) do { } while (0) + +#define xnvfile_touch(vfile) do { } while (0) + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_VFILE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/mqueue.h b/kernel/xenomai-v3.2.4/include/cobalt/mqueue.h new file mode 100644 index 0000000..496632d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/mqueue.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <mqueue.h> + +#ifndef _COBALT_MQUEUE_H +#define _COBALT_MQUEUE_H + +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(mqd_t, mq_open(const char *name, + int oflags, + ...)); + +COBALT_DECL(int, mq_close(mqd_t qd)); + +COBALT_DECL(int, mq_unlink(const char *name)); + +COBALT_DECL(int, mq_getattr(mqd_t qd, + struct mq_attr *attr)); + +COBALT_DECL(int, mq_setattr(mqd_t qd, + const struct mq_attr *__restrict__ attr, + struct mq_attr *__restrict__ oattr)); + +COBALT_DECL(int, mq_send(mqd_t qd, + const char *buffer, + size_t len, + unsigned prio)); + +COBALT_DECL(int, mq_timedsend(mqd_t q, + const char * buffer, + size_t len, + unsigned prio, + const struct timespec *timeout)); + +COBALT_DECL(ssize_t, mq_receive(mqd_t q, + char *buffer, + size_t len, + unsigned *prio)); + +COBALT_DECL(ssize_t, mq_timedreceive(mqd_t q, + char *__restrict__ buffer, + size_t len, + unsigned *__restrict__ prio, + const struct timespec *__restrict__ timeout)); + +COBALT_DECL(int, mq_notify(mqd_t q, + const struct sigevent *evp)); + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_MQUEUE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/pthread.h b/kernel/xenomai-v3.2.4/include/cobalt/pthread.h new file mode 100644 index 0000000..3e9bd47 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/pthread.h @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#pragma GCC system_header +#include_next <pthread.h> + +#ifndef _COBALT_PTHREAD_H +#define _COBALT_PTHREAD_H + +#include <boilerplate/libc.h> +#include <cobalt/wrappers.h> +#include <cobalt/uapi/thread.h> + +typedef struct pthread_attr_ex { + pthread_attr_t std; + struct { + int personality; + int sched_policy; + struct sched_param_ex sched_param; + } nonstd; +} pthread_attr_ex_t; + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(int, pthread_attr_init(pthread_attr_t *attr)); + +COBALT_DECL(int, pthread_create(pthread_t *ptid_r, + const pthread_attr_t *attr, + void *(*start) (void *), + void *arg)); + +COBALT_DECL(int, pthread_getschedparam(pthread_t thread, + int *policy, + struct sched_param *param)); + +COBALT_DECL(int, pthread_setschedparam(pthread_t thread, + int policy, + const struct sched_param *param)); + +COBALT_DECL(int, pthread_setschedprio(pthread_t thread, int prio)); + +COBALT_DECL(int, pthread_mutex_init(pthread_mutex_t *mutex, + const pthread_mutexattr_t *attr)); + +COBALT_DECL(int, pthread_mutex_destroy(pthread_mutex_t *mutex)); + +COBALT_DECL(int, pthread_mutex_lock(pthread_mutex_t *mutex)); + +COBALT_DECL(int, pthread_mutex_timedlock(pthread_mutex_t *mutex, + const struct timespec *to)); + +COBALT_DECL(int, pthread_mutex_trylock(pthread_mutex_t *mutex)); + +COBALT_DECL(int, pthread_mutex_unlock(pthread_mutex_t *mutex)); + +COBALT_DECL(int, pthread_mutex_setprioceiling(pthread_mutex_t *__restrict mutex, + int prioceiling, + int *__restrict old_ceiling)); + +COBALT_DECL(int, pthread_mutex_getprioceiling(pthread_mutex_t *__restrict mutex, + int *__restrict old_ceiling)); + +COBALT_DECL(int, pthread_cond_init (pthread_cond_t *cond, + const pthread_condattr_t *attr)); + +COBALT_DECL(int, pthread_cond_destroy(pthread_cond_t *cond)); + +COBALT_DECL(int, pthread_cond_wait(pthread_cond_t *cond, + pthread_mutex_t *mutex)); + +COBALT_DECL(int, pthread_cond_timedwait(pthread_cond_t *cond, + pthread_mutex_t *mutex, + const struct timespec *abstime)); + +COBALT_DECL(int, pthread_cond_signal(pthread_cond_t *cond)); + +COBALT_DECL(int, pthread_cond_broadcast(pthread_cond_t *cond)); + +COBALT_DECL(int, pthread_kill(pthread_t ptid, int sig)); + +COBALT_DECL(int, pthread_join(pthread_t ptid, void **retval)); + +#ifndef pthread_yield +/* + * linuxthreads wraps pthread_yield() to sched_yield() via a + * preprocessor macro, which confuses the compiler with + * COBALT_DECL(). Since Cobalt also routes pthread_yield() to its own + * sched_yield() implementation internally, we can live with this + * wrapping. 
+ */
+COBALT_DECL(int, pthread_yield(void));
+#endif
+
+int pthread_setmode_np(int clrmask, int setmask,
+ int *mask_r);
+
+COBALT_DECL(int, pthread_setname_np(pthread_t thread, const char *name));
+
+int pthread_create_ex(pthread_t *ptid_r,
+ const pthread_attr_ex_t *attr_ex,
+ void *(*start)(void *),
+ void *arg);
+
+int pthread_getschedparam_ex(pthread_t ptid,
+ int *pol,
+ struct sched_param_ex *par);
+
+int pthread_setschedparam_ex(pthread_t ptid,
+ int pol,
+ const struct sched_param_ex *par);
+
+int pthread_attr_init_ex(pthread_attr_ex_t *attr_ex);
+
+int pthread_attr_destroy_ex(pthread_attr_ex_t *attr_ex);
+
+int pthread_attr_setschedpolicy_ex(pthread_attr_ex_t *attr_ex,
+ int policy);
+
+int pthread_attr_getschedpolicy_ex(const pthread_attr_ex_t *attr_ex,
+ int *policy);
+
+int pthread_attr_setschedparam_ex(pthread_attr_ex_t *attr_ex,
+ const struct sched_param_ex *param_ex);
+
+int pthread_attr_getschedparam_ex(const pthread_attr_ex_t *attr_ex,
+ struct sched_param_ex *param_ex);
+
+int pthread_attr_getinheritsched_ex(const pthread_attr_ex_t *attr_ex,
+ int *inheritsched);
+
+int pthread_attr_setinheritsched_ex(pthread_attr_ex_t *attr_ex,
+ int inheritsched);
+
+int pthread_attr_getdetachstate_ex(const pthread_attr_ex_t *attr_ex,
+ int *detachstate);
+
+int pthread_attr_setdetachstate_ex(pthread_attr_ex_t *attr_ex,
+ int detachstate);
+
+int pthread_attr_getstacksize_ex(const pthread_attr_ex_t *attr_ex,
+ size_t *stacksize);
+
+int pthread_attr_setstacksize_ex(pthread_attr_ex_t *attr_ex,
+ size_t stacksize);
+
+int pthread_attr_getscope_ex(const pthread_attr_ex_t *attr_ex,
+ int *scope);
+
+int pthread_attr_setscope_ex(pthread_attr_ex_t *attr_ex,
+ int scope);
+
+int pthread_attr_getpersonality_ex(const pthread_attr_ex_t *attr_ex,
+ int *personality);
+
+int pthread_attr_setpersonality_ex(pthread_attr_ex_t *attr_ex,
+ int personality);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_PTHREAD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/sched.h
new file mode 100644
index 0000000..7c5b26e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sched.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */ +#pragma GCC system_header +#include_next <sched.h> + +#ifndef _COBALT_SCHED_H +#define _COBALT_SCHED_H + +#include <sys/types.h> +#include <cobalt/wrappers.h> +#include <cobalt/uapi/sched.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(int, sched_yield(void)); + +COBALT_DECL(int, sched_get_priority_min(int policy)); + +COBALT_DECL(int, sched_get_priority_max(int policy)); + +COBALT_DECL(int, sched_setscheduler(pid_t pid, int policy, + const struct sched_param *param)); + +COBALT_DECL(int, sched_getscheduler(pid_t pid)); + +int sched_get_priority_min_ex(int policy); + +int sched_get_priority_max_ex(int policy); + +int sched_setscheduler_ex(pid_t pid, int policy, + const struct sched_param_ex *param_ex); + +int sched_getscheduler_ex(pid_t pid, int *policy_r, + struct sched_param_ex *param_ex); + +int sched_setconfig_np(int cpu, int policy, + const union sched_config *config, size_t len); + +ssize_t sched_getconfig_np(int cpu, int policy, + union sched_config *config, size_t *len_r); + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_SCHED_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/semaphore.h b/kernel/xenomai-v3.2.4/include/cobalt/semaphore.h new file mode 100644 index 0000000..a7714fd --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/semaphore.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <semaphore.h> + +#ifndef _COBALT_SEMAPHORE_H +#define _COBALT_SEMAPHORE_H + +#include <boilerplate/atomic.h> +#include <cobalt/uapi/sem.h> +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(int, sem_init(sem_t *sem, + int pshared, + unsigned int value)); + +COBALT_DECL(int, sem_destroy(sem_t *sem)); + +COBALT_DECL(int, sem_post(sem_t *sem)); + +COBALT_DECL(int, sem_wait(sem_t *sem)); + +COBALT_DECL(int, sem_timedwait(sem_t *sem, + const struct timespec *abs_timeout)); + +COBALT_DECL(int, sem_trywait(sem_t *sem)); + +COBALT_DECL(int, sem_getvalue(sem_t *sem, int *value)); + +COBALT_DECL(sem_t *, sem_open(const char *name, int oflags, ...)); + +COBALT_DECL(int, sem_close(sem_t *sem)); + +COBALT_DECL(int, sem_unlink(const char *name)); + +int sem_init_np(sem_t *sem, + int flags, + unsigned int value); + +int sem_broadcast_np(sem_t *sem); + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_SEMAPHORE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/signal.h b/kernel/xenomai-v3.2.4/include/cobalt/signal.h new file mode 100644 index 0000000..62694f9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/signal.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <signal.h> + +#ifndef _COBALT_SIGNAL_H +#define _COBALT_SIGNAL_H + +/* Re-read in case we came from selective __need* block. */ +#include_next <signal.h> +#include <cobalt/wrappers.h> +#include <cobalt/uapi/signal.h> + +#ifndef sigev_notify_thread_id +#define sigev_notify_thread_id _sigev_un._tid +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +int cobalt_sigshadow_handler(int sig, siginfo_t *si, + void *ctxt); + +void cobalt_sigdebug_handler(int sig, siginfo_t *si, + void *context); + +COBALT_DECL(int, sigpending(sigset_t *set)); + +COBALT_DECL(int, sigwait(const sigset_t *set, int *sig)); + +COBALT_DECL(int, sigwaitinfo(const sigset_t *set, siginfo_t *si)); + +COBALT_DECL(int, sigtimedwait(const sigset_t *set, siginfo_t *si, + const struct timespec *timeout)); + +COBALT_DECL(int, kill(pid_t pid, int sig)); + +COBALT_DECL(int, sigqueue(pid_t pid, int sig, + const union sigval value)); + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_SIGNAL_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/stdio.h b/kernel/xenomai-v3.2.4/include/cobalt/stdio.h new file mode 100644 index 0000000..5b9df80 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/stdio.h @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * Copyright (C) 2007 Jan Kiszka <jan.kiszka@web.de>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#pragma GCC system_header +#include_next <stdio.h> + +#ifndef _COBALT_STDIO_H +#define _COBALT_STDIO_H + +#include <stddef.h> +#include <stdarg.h> +#include <xeno_config.h> +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +COBALT_DECL(int, vfprintf(FILE *stream, const char *fmt, va_list args)); + +#ifdef CONFIG_XENO_FORTIFY + +COBALT_DECL(int, __vfprintf_chk(FILE *stream, int level, + const char *fmt, va_list ap)); + +COBALT_DECL(int, __vprintf_chk(int flag, + const char *fmt, va_list ap)); + +COBALT_DECL(int, __printf_chk(int flag, const char *fmt, ...)); + +COBALT_DECL(int, __fprintf_chk(FILE *fp, int flag, const char *fmt, ...)); + +int __rt_vfprintf_chk(FILE *stream, int level, + const char *fmt, va_list args); + +void __rt_vsyslog_chk(int priority, int level, + const char *fmt, va_list args); + +#endif /* CONFIG_XENO_FORTIFY */ + +COBALT_DECL(int, vprintf(const char *fmt, va_list args)); + +COBALT_DECL(int, fprintf(FILE *stream, const char *fmt, ...)); + +COBALT_DECL(int, printf(const char *fmt, ...)); + +COBALT_DECL(int, puts(const char *s)); + +COBALT_DECL(int, fputs(const char *s, FILE *stream)); + +#ifndef putchar +COBALT_DECL(int, putchar(int c)); +#else +static inline int __real_putchar(int c) +{ + return putchar(c); +} +int __wrap_putchar(int c); +int __cobalt_putchar(int c); +#undef putchar +#define putchar putchar +#endif + +#ifndef fputc +COBALT_DECL(int, fputc(int c, FILE *stream)); +#else +static inline int __real_fputc(int c, FILE *stream) +{ + return fputc(c, stream); +} +int __wrap_fputc(int c, FILE *stream); +int __cobalt_fputc(int c, FILE *stream); +#undef fputc +#define fputc fputc +#endif + +COBALT_DECL(size_t, + fwrite(const void *ptr, size_t sz, size_t nmemb, FILE *stream)); + +COBALT_DECL(int, fclose(FILE *stream)); + +int rt_vfprintf(FILE *stream, const char *format, va_list args); + +int rt_vprintf(const char *format, va_list args); + +int rt_fprintf(FILE *stream, const char *format, ...); + +int rt_printf(const char *format, ...); + +int rt_puts(const char *s); + +int rt_fputs(const char *s, FILE *stream); + +int rt_fputc(int c, FILE *stream); + +int rt_putchar(int c); + +size_t rt_fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream); + +void rt_syslog(int priority, const char *format, ...); + +void rt_vsyslog(int priority, const char *format, va_list args); + +int rt_print_init(size_t buffer_size, const char *name); + +const char *rt_print_buffer_name(void); + +void rt_print_flush_buffers(void); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* !_COBALT_STDIO_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/stdlib.h b/kernel/xenomai-v3.2.4/include/cobalt/stdlib.h new file mode 100644 index 0000000..6b664f5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/stdlib.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <stdlib.h> + +#ifndef _COBALT_STDLIB_H +#define _COBALT_STDLIB_H + +/* Re-read in case we came from selective __need* block. */ +#include_next <stdlib.h> +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +COBALT_DECL(void, free(void *ptr)); + +COBALT_DECL(void *, malloc(size_t size)); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* !_COBALT_STDLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/sys/Makefile.am new file mode 100644 index 0000000..099cc27 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/Makefile.am @@ -0,0 +1,10 @@ +includesubdir = $(includedir)/cobalt/sys + +includesub_HEADERS = \ + cobalt.h \ + ioctl.h \ + mman.h \ + select.h \ + socket.h \ + time.h \ + timerfd.h diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/cobalt.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/cobalt.h new file mode 100644 index 0000000..46096e8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/cobalt.h @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_SYS_COBALT_H +#define _COBALT_SYS_COBALT_H + +#include <sys/types.h> +#include <signal.h> +#include <pthread.h> +#include <sched.h> +#include <semaphore.h> +#include <errno.h> +#include <stdio.h> +#include <time.h> +#include <boilerplate/atomic.h> +#include <boilerplate/list.h> +#include <cobalt/uapi/kernel/synch.h> +#include <cobalt/uapi/kernel/vdso.h> +#include <cobalt/uapi/corectl.h> +#include <cobalt/uapi/mutex.h> +#include <cobalt/uapi/event.h> +#include <cobalt/uapi/monitor.h> +#include <cobalt/uapi/thread.h> +#include <cobalt/uapi/cond.h> +#include <cobalt/uapi/sem.h> +#include <cobalt/ticks.h> + +#define cobalt_commit_memory(p) __cobalt_commit_memory(p, sizeof(*p)) + +struct cobalt_tsd_hook { + void (*create_tsd)(void); + void (*delete_tsd)(void); + struct pvholder next; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +int cobalt_extend(unsigned int magic); + +int cobalt_corectl(int request, void *buf, size_t bufsz); + +int cobalt_thread_stat(pid_t pid, + struct cobalt_threadstat *stat); + +int cobalt_serial_debug(const char *fmt, ...); + +void __cobalt_commit_memory(void *p, size_t len); + +void cobalt_thread_harden(void); + +void cobalt_thread_relax(void); + +int cobalt_thread_join(pthread_t thread); + +pid_t cobalt_thread_pid(pthread_t thread); + +int cobalt_thread_mode(void); + +int cobalt_monitor_init(cobalt_monitor_t *mon, + clockid_t clk_id, int flags); + +int cobalt_monitor_destroy(cobalt_monitor_t *mon); + +int cobalt_monitor_enter(cobalt_monitor_t *mon); + +int cobalt_monitor_exit(cobalt_monitor_t *mon); + +int cobalt_monitor_wait(cobalt_monitor_t *mon, int event, + const struct timespec *ts); + +void cobalt_monitor_grant(cobalt_monitor_t *mon, + struct xnthread_user_window *u_window); + +int cobalt_monitor_grant_sync(cobalt_monitor_t *mon, + struct xnthread_user_window *u_window); + +void cobalt_monitor_grant_all(cobalt_monitor_t *mon); + +int cobalt_monitor_grant_all_sync(cobalt_monitor_t *mon); + +void cobalt_monitor_drain(cobalt_monitor_t *mon); + +int cobalt_monitor_drain_sync(cobalt_monitor_t *mon); + +void cobalt_monitor_drain_all(cobalt_monitor_t *mon); + +int cobalt_monitor_drain_all_sync(cobalt_monitor_t *mon); + +int cobalt_event_init(cobalt_event_t *event, + unsigned int value, + int flags); + +int cobalt_event_post(cobalt_event_t *event, + unsigned int bits); + +int cobalt_event_wait(cobalt_event_t *event, + unsigned int bits, + unsigned int *bits_r, + int mode, + const struct timespec *timeout); + +unsigned long cobalt_event_clear(cobalt_event_t *event, + unsigned int bits); + +int cobalt_event_inquire(cobalt_event_t *event, + struct cobalt_event_info *info, + pid_t *waitlist, size_t waitsz); + +int cobalt_event_destroy(cobalt_event_t *event); + +int cobalt_sem_inquire(sem_t *sem, struct cobalt_sem_info *info, + pid_t *waitlist, size_t waitsz); + +int cobalt_sched_weighted_prio(int policy, + const struct sched_param_ex *param_ex); + +void cobalt_register_tsd_hook(struct cobalt_tsd_hook *th); + +void cobalt_assert_nrt(void); + +unsigned long long cobalt_read_tsc(void); + +extern int __cobalt_control_bind; + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_SYS_COBALT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/ioctl.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/ioctl.h new file mode 100644 index 0000000..553aa56 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/ioctl.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <sys/ioctl.h> + +#ifndef _COBALT_SYS_IOCTL_H +#define _COBALT_SYS_IOCTL_H + +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(int, ioctl(int fildes, unsigned int request, ...)); + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_SYS_IOCTL_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/mman.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/mman.h new file mode 100644 index 0000000..75a00da --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/mman.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <sys/mman.h> + +#ifndef _COBALT_SYS_MMAN_H +#define _COBALT_SYS_MMAN_H + +#include <sys/types.h> +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(void *, mmap(void *addr, size_t length, int prot, int flags, + int fd, off_t offset)); + +#if defined(_LARGEFILE64_SOURCE) || defined(_GNU_SOURCE) +COBALT_DECL(void *, mmap64(void *addr, size_t length, int prot, int flags, + int fd, off64_t offset)); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_SYS_MMAN_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/select.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/select.h new file mode 100644 index 0000000..76e8476 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/select.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <sys/select.h> + +#ifndef _COBALT_SYS_SELECT_H +#define _COBALT_SYS_SELECT_H + +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(int, select(int __nfds, fd_set *__restrict __readfds, + fd_set *__restrict __writefds, + fd_set *__restrict __exceptfds, + struct timeval *__restrict __timeout)); +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_SYS_SELECT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/socket.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/socket.h new file mode 100644 index 0000000..156b493 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/socket.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <sys/socket.h> + +#ifndef _COBALT_SYS_SOCKET_H +#define _COBALT_SYS_SOCKET_H + +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(int, socket(int protocol_family, + int socket_type, int protocol)); + +COBALT_DECL(ssize_t, recvmsg(int fd, + struct msghdr *msg, int flags)); + +COBALT_DECL(int, recvmmsg(int fd, + struct mmsghdr *msgvec, unsigned int vlen, + unsigned int flags, struct timespec *timeout)); + +COBALT_DECL(ssize_t, sendmsg(int fd, + const struct msghdr *msg, int flags)); + +COBALT_DECL(int, sendmmsg(int fd, + struct mmsghdr *msgvec, unsigned int vlen, + unsigned int flags)); + +COBALT_DECL(ssize_t, recvfrom(int fd, void *buf, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen)); + +COBALT_DECL(ssize_t, sendto(int fd, const void *buf, size_t len, int flags, + const struct sockaddr *to, socklen_t tolen)); + +COBALT_DECL(ssize_t, recv(int fd, void *buf, + size_t len, int flags)); + +COBALT_DECL(ssize_t, send(int fd, const void *buf, + size_t len, int flags)); + +COBALT_DECL(int, getsockopt(int fd, int level, int optname, + void *optval, socklen_t *optlen)); + +COBALT_DECL(int, setsockopt(int fd, int level, int optname, + const void *optval, socklen_t optlen)); + +COBALT_DECL(int, bind(int fd, const struct sockaddr *my_addr, + socklen_t addrlen)); + +COBALT_DECL(int, connect(int fd, const struct sockaddr *serv_addr, + socklen_t addrlen)); + +COBALT_DECL(int, listen(int fd, int backlog)); + +COBALT_DECL(int, accept(int fd, struct sockaddr *addr, + socklen_t *addrlen)); + +COBALT_DECL(int, getsockname(int fd, struct sockaddr *name, + socklen_t *namelen)); + +COBALT_DECL(int, getpeername(int fd, struct sockaddr *name, + socklen_t *namelen)); + +COBALT_DECL(int, shutdown(int fd, int how)); + 
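+/*
+ * A minimal usage sketch: since the declarations above are plain
+ * COBALT_DECL() wrappers around the standard prototypes, ordinary
+ * POSIX socket code compiles unchanged against libcobalt. The
+ * protocol, port and buffer below are illustrative only:
+ *
+ *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *	struct sockaddr_in name = { 0 };
+ *	char buf[128];
+ *
+ *	name.sin_family = AF_INET;
+ *	name.sin_port = htons(5555);
+ *	name.sin_addr.s_addr = htonl(INADDR_ANY);
+ *	bind(fd, (struct sockaddr *)&name, sizeof(name));
+ *	recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
+ */
+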
+#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_SYS_SOCKET_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/time.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/time.h new file mode 100644 index 0000000..38f5a34 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/time.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <sys/time.h> + +#ifndef _COBALT_SYS_TIME_H +#define _COBALT_SYS_TIME_H + +#include <cobalt/wrappers.h> + +struct timezone; + +#ifdef __cplusplus +extern "C" { +#endif + +COBALT_DECL(int, gettimeofday(struct timeval *tv, + struct timezone *tz)); + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_SYS_TIME_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/timerfd.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/timerfd.h new file mode 100644 index 0000000..a7df836 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/timerfd.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_SYS_TIMERFD_H +#define _COBALT_SYS_TIMERFD_H + +#pragma GCC system_header +#include_next <sys/timerfd.h> +#include <cobalt/wrappers.h> +#include <cobalt/uapi/time.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +COBALT_DECL(int, timerfd_create(int clockid, int flags)); + +COBALT_DECL(int, timerfd_settime(int fd, int flags, + const struct itimerspec *new_value, + struct itimerspec *old_value)); + +COBALT_DECL(int, timerfd_gettime(int fd, struct itimerspec *curr_value)); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _COBALT_SYS_TIMERFD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/syslog.h b/kernel/xenomai-v3.2.4/include/cobalt/syslog.h new file mode 100644 index 0000000..236c8a2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/syslog.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <syslog.h> + +#ifndef _COBALT_SYSLOG_H +#define _COBALT_SYSLOG_H + +#include <stdarg.h> +#include <xeno_config.h> +#include <cobalt/wrappers.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +COBALT_DECL(void, syslog(int priority, const char *fmt, ...)); + +COBALT_DECL(void, vsyslog(int priority, + const char *fmt, va_list ap)); + +#ifdef CONFIG_XENO_FORTIFY + +COBALT_DECL(void, __vsyslog_chk(int priority, int level, + const char *fmt, va_list ap)); + +COBALT_DECL(void, __syslog_chk(int pri, int flag, + const char *fmt, ...)); + +#endif /* CONFIG_XENO_FORTIFY */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* !_COBALT_SYSLOG_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/ticks.h b/kernel/xenomai-v3.2.4/include/cobalt/ticks.h new file mode 100644 index 0000000..e59d86d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/ticks.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_TICKS_H +#define _COBALT_TICKS_H + +#include <stdbool.h> +#include <cobalt/uapi/kernel/types.h> + +/* + * Depending on the underlying pipeline support, we may represent time + * stamps as count of nanoseconds (Dovetail), or as values of the + * hardware tick counter (aka TSC) available with the platform + * (I-pipe). In the latter - legacy - case, we need to convert from + * TSC values to nanoseconds and conversely via scaled maths. This + * indirection will go away once support for the I-pipe is removed. 
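+ *
+ * For instance, a duration sampled with cobalt_read_tsc() (declared
+ * in <sys/cobalt.h>) can be reported in nanoseconds under either
+ * representation; do_work() here stands for any measured section:
+ *
+ *	unsigned long long t0 = cobalt_read_tsc();
+ *	do_work();
+ *	xnsticks_t ns = cobalt_ticks_to_ns(cobalt_read_tsc() - t0);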
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern unsigned long long __cobalt_tsc_clockfreq; + +static inline bool cobalt_use_legacy_tsc(void) +{ + return !!__cobalt_tsc_clockfreq; +} + +xnsticks_t __cobalt_tsc_to_ns(xnsticks_t ticks); + +xnsticks_t __cobalt_tsc_to_ns_rounded(xnsticks_t ticks); + +xnsticks_t __cobalt_ns_to_tsc(xnsticks_t ns); + +static inline +xnsticks_t cobalt_ns_to_ticks(xnsticks_t ns) +{ + if (cobalt_use_legacy_tsc()) + return __cobalt_ns_to_tsc(ns); + + return ns; +} + +static inline +xnsticks_t cobalt_ticks_to_ns(xnsticks_t ticks) +{ + if (cobalt_use_legacy_tsc()) + return __cobalt_tsc_to_ns(ticks); + + return ticks; +} + +static inline +xnsticks_t cobalt_ticks_to_ns_rounded(xnsticks_t ticks) +{ + if (cobalt_use_legacy_tsc()) + return __cobalt_tsc_to_ns_rounded(ticks); + + return ticks; +} + +unsigned long long cobalt_divrem_billion(unsigned long long value, + unsigned long *rem); +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_TICKS_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/time.h b/kernel/xenomai-v3.2.4/include/cobalt/time.h new file mode 100644 index 0000000..e3f355c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/time.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <time.h> + +#ifndef _COBALT_TIME_H +#define _COBALT_TIME_H + +/* Re-read in case we came from selective __need* block. 
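With a __need_* macro in effect, glibc exposes only the requested subset, so a second full pass is needed before wrapping the declarations below.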
*/ +#include_next <time.h> +#include <cobalt/wrappers.h> +#include <cobalt/uapi/time.h> + +#ifdef __cplusplus +extern "C" { +#endif + +struct timex; + +COBALT_DECL(int, clock_getres(clockid_t clock_id, + struct timespec *tp)); + +COBALT_DECL(int, clock_gettime(clockid_t clock_id, + struct timespec *tp)); + +COBALT_DECL(int, clock_settime(clockid_t clock_id, + const struct timespec *tp)); + +COBALT_DECL(int, clock_adjtime(clockid_t clock_id, + struct timex *tx)); + +COBALT_DECL(int, clock_nanosleep(clockid_t clock_id, + int flags, + const struct timespec *rqtp, + struct timespec *rmtp)); + +COBALT_DECL(time_t, time(time_t *t)); + +COBALT_DECL(int, nanosleep(const struct timespec *rqtp, + struct timespec *rmtp)); + +COBALT_DECL(int, timer_create(clockid_t clockid, + const struct sigevent *__restrict__ evp, + timer_t * __restrict__ timerid)); + +COBALT_DECL(int, timer_delete(timer_t timerid)); + +COBALT_DECL(int, timer_settime(timer_t timerid, + int flags, + const struct itimerspec *value, + struct itimerspec *ovalue)); + +COBALT_DECL(int, timer_gettime(timer_t timerid, + struct itimerspec *value)); + +COBALT_DECL(int, timer_getoverrun(timer_t timerid)); + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_TIME_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/trace.h new file mode 100644 index 0000000..b2f9d95 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/trace.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_TRACE_H +#define _COBALT_TRACE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include <stdarg.h> + +int xntrace_max_begin(unsigned long v); + +int xntrace_max_end(unsigned long v); + +int xntrace_max_reset(void); + +int xntrace_user_start(void); + +int xntrace_user_stop(unsigned long v); + +int xntrace_user_freeze(unsigned long v, int once); + +int xntrace_special(unsigned char id, unsigned long v); + +int xntrace_special_u64(unsigned char id, unsigned long long v); + +void xntrace_latpeak_freeze(int delay); + +int xnftrace_vprintf(const char *format, va_list args); +int xnftrace_printf(const char *format, ...); + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_TRACE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/tunables.h b/kernel/xenomai-v3.2.4/include/cobalt/tunables.h new file mode 100644 index 0000000..67ac77a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/tunables.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_TUNABLES_H +#define _COBALT_TUNABLES_H + +#include <boilerplate/tunables.h> +#include <sys/cobalt.h> + +#ifdef __cplusplus +extern "C" { +#endif + +extern int __cobalt_main_prio; + +extern int __cobalt_print_bufsz; + +extern int __cobalt_print_bufcount; + +extern int __cobalt_print_syncdelay; + +static inline define_config_tunable(main_prio, int, prio) +{ + __cobalt_main_prio = prio; +} + +static inline read_config_tunable(main_prio, int) +{ + return __cobalt_main_prio; +} + +static inline define_config_tunable(print_buffer_size, int, size) +{ + __cobalt_print_bufsz = size; +} + +static inline read_config_tunable(print_buffer_size, int) +{ + return __cobalt_print_bufsz; +} + +static inline define_config_tunable(print_buffer_count, int, count) +{ + __cobalt_print_bufcount = count; +} + +static inline read_config_tunable(print_buffer_count, int) +{ + return __cobalt_print_bufcount; +} + +static inline define_config_tunable(print_sync_delay, int, delay_ms) +{ + __cobalt_print_syncdelay = delay_ms; +} + +static inline read_config_tunable(print_sync_delay, int) +{ + return __cobalt_print_syncdelay; +} + +#ifdef __cplusplus +} +#endif + +#endif /* !_COBALT_TUNABLES_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/uapi/Makefile.am new file mode 100644 index 0000000..d887213 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/Makefile.am @@ -0,0 +1,18 @@ +includesubdir = $(includedir)/cobalt/uapi + +includesub_HEADERS = \ + cond.h \ + corectl.h \ + event.h \ + monitor.h \ + mutex.h \ + sched.h \ + sem.h \ + signal.h \ + thread.h \ + time.h + +noinst_HEADERS = \ + syscall.h + +SUBDIRS = asm-generic kernel diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/Makefile.am new file mode 100644 index 0000000..9e7b0d4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/Makefile.am @@ -0,0 +1,5 @@ + +noinst_HEADERS = \ + arith.h \ + features.h \ + syscall.h diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h new file mode 100644 index 0000000..d01d01e --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h @@ -0,0 +1,365 @@ +/** + * Generic arithmetic/conversion routines. + * Copyright © 2005 Stelian Pop. + * Copyright © 2005 Gilles Chanteperdrix. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_ASM_GENERIC_ARITH_H +#define _COBALT_UAPI_ASM_GENERIC_ARITH_H + +#ifndef xnarch_u64tou32 +#define xnarch_u64tou32(ull, h, l) ({ \ + union { \ + unsigned long long _ull; \ + struct endianstruct _s; \ + } _u; \ + _u._ull = (ull); \ + (h) = _u._s._h; \ + (l) = _u._s._l; \ +}) +#endif /* !xnarch_u64tou32 */ + +#ifndef xnarch_u64fromu32 +#define xnarch_u64fromu32(h, l) ({ \ + union { \ + unsigned long long _ull; \ + struct endianstruct _s; \ + } _u; \ + _u._s._h = (h); \ + _u._s._l = (l); \ + _u._ull; \ +}) +#endif /* !xnarch_u64fromu32 */ + +#ifndef xnarch_ullmul +static inline __attribute__((__const__)) unsigned long long +xnarch_generic_ullmul(const unsigned m0, const unsigned m1) +{ + return (unsigned long long) m0 * m1; +} +#define xnarch_ullmul(m0,m1) xnarch_generic_ullmul((m0),(m1)) +#endif /* !xnarch_ullmul */ + +#ifndef xnarch_ulldiv +static inline unsigned long long xnarch_generic_ulldiv (unsigned long long ull, + const unsigned uld, + unsigned long *const rp) +{ + const unsigned r = do_div(ull, uld); + + if (rp) + *rp = r; + + return ull; +} +#define xnarch_ulldiv(ull,uld,rp) xnarch_generic_ulldiv((ull),(uld),(rp)) +#endif /* !xnarch_ulldiv */ + +#ifndef xnarch_uldivrem +#define xnarch_uldivrem(ull,ul,rp) ((unsigned) xnarch_ulldiv((ull),(ul),(rp))) +#endif /* !xnarch_uldivrem */ + +#ifndef xnarch_divmod64 +static inline unsigned long long +xnarch_generic_divmod64(unsigned long long a, + unsigned long long b, + unsigned long long *rem) +{ + unsigned long long q; +#if defined(__KERNEL__) && BITS_PER_LONG < 64 + unsigned long long + xnarch_generic_full_divmod64(unsigned long long a, + unsigned long long b, + unsigned long long *rem); + if (b <= 0xffffffffULL) { + unsigned long r; + q = xnarch_ulldiv(a, b, &r); + if (rem) + *rem = r; + } else { + if (a < b) { + if (rem) + *rem = a; + return 0; + } + + return xnarch_generic_full_divmod64(a, b, rem); + } +#else /* !(__KERNEL__ && BITS_PER_LONG < 64) */ + q = a / b; + if (rem) + *rem = a % b; +#endif /* !(__KERNEL__ && BITS_PER_LONG < 64) */ + return q; +} +#define xnarch_divmod64(a,b,rp) xnarch_generic_divmod64((a),(b),(rp)) +#endif /* !xnarch_divmod64 */ + +#ifndef xnarch_imuldiv +static inline __attribute__((__const__)) int xnarch_generic_imuldiv(int i, + int mult, + int div) +{ + /* (int)i = (unsigned long long)i*(unsigned)(mult)/(unsigned)div. */ + const unsigned long long ull = xnarch_ullmul(i, mult); + return xnarch_uldivrem(ull, div, NULL); +} +#define xnarch_imuldiv(i,m,d) xnarch_generic_imuldiv((i),(m),(d)) +#endif /* !xnarch_imuldiv */ + +#ifndef xnarch_imuldiv_ceil +static inline __attribute__((__const__)) int xnarch_generic_imuldiv_ceil(int i, + int mult, + int div) +{ + /* Same as xnarch_generic_imuldiv, rounding up. */ + const unsigned long long ull = xnarch_ullmul(i, mult); + return xnarch_uldivrem(ull + (unsigned)div - 1, div, NULL); +} +#define xnarch_imuldiv_ceil(i,m,d) xnarch_generic_imuldiv_ceil((i),(m),(d)) +#endif /* !xnarch_imuldiv_ceil */ + +/* Division of an unsigned 96 bits ((h << 32) + l) by an unsigned 32 bits. + Building block for llimd. Without const qualifiers, gcc reload registers + after each call to uldivrem. 
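+   I.e. this returns ((h << 32) + l) / d, storing the remainder through
+   rp when rp is non-NULL; the caller must guarantee that h / d fits in
+   32 bits, so that the full quotient fits in 64 bits.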
*/ +static inline unsigned long long +xnarch_generic_div96by32(const unsigned long long h, + const unsigned l, + const unsigned d, + unsigned long *const rp) +{ + unsigned long rh; + const unsigned qh = xnarch_uldivrem(h, d, &rh); + const unsigned long long t = xnarch_u64fromu32(rh, l); + const unsigned ql = xnarch_uldivrem(t, d, rp); + + return xnarch_u64fromu32(qh, ql); +} + +#ifndef xnarch_llimd +static inline __attribute__((__const__)) +unsigned long long xnarch_generic_ullimd(const unsigned long long op, + const unsigned m, + const unsigned d) +{ + unsigned int oph, opl, tlh, tll; + unsigned long long th, tl; + + xnarch_u64tou32(op, oph, opl); + tl = xnarch_ullmul(opl, m); + xnarch_u64tou32(tl, tlh, tll); + th = xnarch_ullmul(oph, m); + th += tlh; + + return xnarch_generic_div96by32(th, tll, d, NULL); +} + +static inline __attribute__((__const__)) long long +xnarch_generic_llimd (long long op, unsigned m, unsigned d) +{ + long long ret; + int sign = 0; + + if (op < 0LL) { + sign = 1; + op = -op; + } + ret = xnarch_generic_ullimd(op, m, d); + + return sign ? -ret : ret; +} +#define xnarch_llimd(ll,m,d) xnarch_generic_llimd((ll),(m),(d)) +#endif /* !xnarch_llimd */ + +#ifndef _xnarch_u96shift +#define xnarch_u96shift(h, m, l, s) ({ \ + unsigned int _l = (l); \ + unsigned int _m = (m); \ + unsigned int _s = (s); \ + _l >>= _s; \ + _l |= (_m << (32 - _s)); \ + _m >>= _s; \ + _m |= ((h) << (32 - _s)); \ + xnarch_u64fromu32(_m, _l); \ +}) +#endif /* !xnarch_u96shift */ + +static inline long long xnarch_llmi(int i, int j) +{ + /* Fast 32x32->64 signed multiplication */ + return (long long) i * j; +} + +#ifndef xnarch_llmulshft +/* Fast scaled-math-based replacement for long long multiply-divide */ +static inline long long +xnarch_generic_llmulshft(const long long op, + const unsigned m, + const unsigned s) +{ + unsigned int oph, opl, tlh, tll, thh, thl; + unsigned long long th, tl; + + xnarch_u64tou32(op, oph, opl); + tl = xnarch_ullmul(opl, m); + xnarch_u64tou32(tl, tlh, tll); + th = xnarch_llmi(oph, m); + th += tlh; + xnarch_u64tou32(th, thh, thl); + + return xnarch_u96shift(thh, thl, tll, s); +} +#define xnarch_llmulshft(ll, m, s) xnarch_generic_llmulshft((ll), (m), (s)) +#endif /* !xnarch_llmulshft */ + +#ifdef XNARCH_HAVE_NODIV_LLIMD + +/* Representation of a 32 bits fraction. */ +struct xnarch_u32frac { + unsigned long long frac; + unsigned integ; +}; + +static inline void xnarch_init_u32frac(struct xnarch_u32frac *const f, + const unsigned m, + const unsigned d) +{ + /* + * Avoid clever compiler optimizations to occur when d is + * known at compile-time. The performance of this function is + * not critical since it is only called at init time. 
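+	 * The result encodes m/d with a 64-bit binary fraction:
+	 * integ = m / d and frac = ((m % d) << 64) / d, so that
+	 * m/d == integ + frac * 2^-64 as consumed by the nodiv_*
+	 * helpers below.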
+ */ + volatile unsigned vol_d = d; + f->integ = m / d; + f->frac = xnarch_generic_div96by32 + (xnarch_u64fromu32(m % d, 0), 0, vol_d, NULL); +} + +#ifndef xnarch_nodiv_imuldiv +static inline __attribute__((__const__)) unsigned +xnarch_generic_nodiv_imuldiv(unsigned op, const struct xnarch_u32frac f) +{ + return (xnarch_ullmul(op, f.frac >> 32) >> 32) + f.integ * op; +} +#define xnarch_nodiv_imuldiv(op, f) xnarch_generic_nodiv_imuldiv((op),(f)) +#endif /* xnarch_nodiv_imuldiv */ + +#ifndef xnarch_nodiv_imuldiv_ceil +static inline __attribute__((__const__)) unsigned +xnarch_generic_nodiv_imuldiv_ceil(unsigned op, const struct xnarch_u32frac f) +{ + unsigned long long full = xnarch_ullmul(op, f.frac >> 32) + ~0U; + return (full >> 32) + f.integ * op; +} +#define xnarch_nodiv_imuldiv_ceil(op, f) \ + xnarch_generic_nodiv_imuldiv_ceil((op),(f)) +#endif /* xnarch_nodiv_imuldiv_ceil */ + +#ifndef xnarch_nodiv_ullimd + +#ifndef xnarch_add96and64 +#error "xnarch_add96and64 must be implemented." +#endif + +static inline __attribute__((__const__)) unsigned long long +xnarch_mul64by64_high(const unsigned long long op, const unsigned long long m) +{ + /* Compute high 64 bits of multiplication 64 bits x 64 bits. */ + register unsigned long long t0, t1, t2, t3; + register unsigned int oph, opl, mh, ml, t0h, t0l, t1h, t1l, t2h, t2l, t3h, t3l; + + xnarch_u64tou32(op, oph, opl); + xnarch_u64tou32(m, mh, ml); + t0 = xnarch_ullmul(opl, ml); + xnarch_u64tou32(t0, t0h, t0l); + t3 = xnarch_ullmul(oph, mh); + xnarch_u64tou32(t3, t3h, t3l); + xnarch_add96and64(t3h, t3l, t0h, 0, t0l >> 31); + t1 = xnarch_ullmul(oph, ml); + xnarch_u64tou32(t1, t1h, t1l); + xnarch_add96and64(t3h, t3l, t0h, t1h, t1l); + t2 = xnarch_ullmul(opl, mh); + xnarch_u64tou32(t2, t2h, t2l); + xnarch_add96and64(t3h, t3l, t0h, t2h, t2l); + + return xnarch_u64fromu32(t3h, t3l); +} + +static inline unsigned long long +xnarch_generic_nodiv_ullimd(const unsigned long long op, + const unsigned long long frac, + unsigned int integ) +{ + return xnarch_mul64by64_high(op, frac) + integ * op; +} +#define xnarch_nodiv_ullimd(op, f, i) xnarch_generic_nodiv_ullimd((op),(f), (i)) +#endif /* !xnarch_nodiv_ullimd */ + +#ifndef xnarch_nodiv_llimd +static inline __attribute__((__const__)) long long +xnarch_generic_nodiv_llimd(long long op, unsigned long long frac, + unsigned int integ) +{ + long long ret; + int sign = 0; + + if (op < 0LL) { + sign = 1; + op = -op; + } + ret = xnarch_nodiv_ullimd(op, frac, integ); + + return sign ? -ret : ret; +} +#define xnarch_nodiv_llimd(ll,frac,integ) xnarch_generic_nodiv_llimd((ll),(frac),(integ)) +#endif /* !xnarch_nodiv_llimd */ + +#endif /* XNARCH_HAVE_NODIV_LLIMD */ + +static inline void xnarch_init_llmulshft(const unsigned m_in, + const unsigned d_in, + unsigned *m_out, + unsigned *s_out) +{ + /* + * Avoid clever compiler optimizations to occur when d is + * known at compile-time. The performance of this function is + * not critical since it is only called at init time. 
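+	 * On return, *m_out == floor(m_in * 2^(*s_out) / d_in) with
+	 * *m_out <= 0x7FFFFFFF, so that xnarch_llmulshft(ll, *m_out,
+	 * *s_out) approximates ll * m_in / d_in without issuing a
+	 * division on the hot path.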
+ */ + volatile unsigned int vol_d = d_in; + unsigned long long mult; + + *s_out = 31; + while (1) { + mult = ((unsigned long long)m_in) << *s_out; + do_div(mult, vol_d); + if (mult <= 0x7FFFFFFF) + break; + (*s_out)--; + } + *m_out = (unsigned int)mult; +} + +#define xnarch_ullmod(ull,uld,rem) ({ xnarch_ulldiv(ull,uld,rem); (*rem); }) +#define xnarch_uldiv(ull, d) xnarch_uldivrem(ull, d, NULL) +#define xnarch_ulmod(ull, d) ({ unsigned long _rem; \ + xnarch_uldivrem(ull,d,&_rem); _rem; }) + +#define xnarch_div64(a,b) xnarch_divmod64((a),(b),NULL) +#define xnarch_mod64(a,b) ({ unsigned long long _rem; \ + xnarch_divmod64((a),(b),&_rem); _rem; }) + +#endif /* _COBALT_UAPI_ASM_GENERIC_ARITH_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h new file mode 100644 index 0000000..8a4927c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_ASM_GENERIC_FEATURES_H +#define _COBALT_UAPI_ASM_GENERIC_FEATURES_H + +#include <linux/types.h> + +#define XNFEAT_STRING_LEN 64 + +struct cobalt_featinfo { + /** Real-time clock frequency */ + __u64 clock_freq; + /** Offset of nkvdso in the sem heap. */ + __u32 vdso_offset; + /** ABI revision level. */ + __u32 feat_abirev; + /** Available feature set. */ + __u32 feat_all; + /** Mandatory features (when requested). */ + __u32 feat_man; + /** Requested feature set. */ + __u32 feat_req; + /** Missing features. */ + __u32 feat_mis; + char feat_all_s[XNFEAT_STRING_LEN]; + char feat_man_s[XNFEAT_STRING_LEN]; + char feat_req_s[XNFEAT_STRING_LEN]; + char feat_mis_s[XNFEAT_STRING_LEN]; + /* Architecture-specific features. */ + struct cobalt_featinfo_archdep feat_arch; +}; + +#define __xn_feat_smp 0x80000000 +#define __xn_feat_nosmp 0x40000000 +#define __xn_feat_fastsynch 0x20000000 +#define __xn_feat_nofastsynch 0x10000000 +#define __xn_feat_control 0x08000000 +#define __xn_feat_prioceiling 0x04000000 + +#ifdef CONFIG_SMP +#define __xn_feat_smp_mask __xn_feat_smp +#else +#define __xn_feat_smp_mask __xn_feat_nosmp +#endif + +/* + * Revisit: all archs currently support fast locking, and there is no + * reason for any future port not to provide this. This will be + * written in stone at the next ABI update, when fastsynch support is + * dropped from the optional feature set. 
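+ * In this context, "fastsynch" refers to the user-space fast path for
+ * uncontended mutex operations, built on atomic updates of a shared
+ * lock word (see xnsynch_fast_acquire() in cobalt/uapi/kernel/synch.h).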
+ */ +#define __xn_feat_fastsynch_mask __xn_feat_fastsynch + +/* List of generic features kernel or userland may support */ +#define __xn_feat_generic_mask \ + (__xn_feat_smp_mask | \ + __xn_feat_fastsynch_mask | \ + __xn_feat_prioceiling) + +/* + * List of features both sides have to agree on: If userland supports + * it, the kernel has to provide it, too. This means backward + * compatibility between older userland and newer kernel may be + * supported for those features, but forward compatibility between + * newer userland and older kernel cannot. + */ +#define __xn_feat_generic_man_mask \ + (__xn_feat_fastsynch | \ + __xn_feat_nofastsynch | \ + __xn_feat_nosmp | \ + __xn_feat_prioceiling) + +static inline +const char *get_generic_feature_label(unsigned int feature) +{ + switch (feature) { + case __xn_feat_smp: + return "smp"; + case __xn_feat_nosmp: + return "nosmp"; + case __xn_feat_fastsynch: + return "fastsynch"; + case __xn_feat_nofastsynch: + return "nofastsynch"; + case __xn_feat_control: + return "control"; + case __xn_feat_prioceiling: + return "prioceiling"; + default: + return 0; + } +} + +static inline int check_abi_revision(unsigned long abirev) +{ + return abirev == XENOMAI_ABI_REV; +} + +#endif /* !_COBALT_UAPI_ASM_GENERIC_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h new file mode 100644 index 0000000..b38b241 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_ASM_GENERIC_SYSCALL_H +#define _COBALT_UAPI_ASM_GENERIC_SYSCALL_H + +#include <linux/types.h> +#include <asm/xenomai/uapi/features.h> +#include <asm/xenomai/uapi/syscall.h> + +#define __COBALT_SYSCALL_BIT 0x10000000 + +struct cobalt_bindreq { + /** Features userland requires. */ + __u32 feat_req; + /** ABI revision userland uses. */ + __u32 abi_rev; + /** Features the Cobalt core provides. */ + struct cobalt_featinfo feat_ret; +}; + +#define COBALT_SECONDARY 0 +#define COBALT_PRIMARY 1 + +#endif /* !_COBALT_UAPI_ASM_GENERIC_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h new file mode 100644 index 0000000..b1106c7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h @@ -0,0 +1,39 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_COND_H +#define _COBALT_UAPI_COND_H + +#include <cobalt/uapi/mutex.h> + +#define COBALT_COND_MAGIC 0x86860505 + +struct cobalt_cond_state { + __u32 pending_signals; + __u32 mutex_state_offset; +}; + +union cobalt_cond_union { + pthread_cond_t native_cond; + struct cobalt_cond_shadow { + __u32 magic; + __u32 state_offset; + xnhandle_t handle; + } shadow_cond; +}; + +#endif /* !_COBALT_UAPI_COND_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h new file mode 100644 index 0000000..98d989d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_UAPI_CORECTL_H +#define _COBALT_UAPI_CORECTL_H + +#define _CC_COBALT_GET_VERSION 0 +#define _CC_COBALT_GET_NR_PIPES 1 +#define _CC_COBALT_GET_NR_TIMERS 2 + +#define _CC_COBALT_GET_DEBUG 3 +# define _CC_COBALT_DEBUG_ASSERT 1 +# define _CC_COBALT_DEBUG_CONTEXT 2 +# define _CC_COBALT_DEBUG_LOCKING 4 +# define _CC_COBALT_DEBUG_USER 8 +# define _CC_COBALT_DEBUG_MUTEX_RELAXED 16 +# define _CC_COBALT_DEBUG_MUTEX_SLEEP 32 +/* bit 6 (64) formerly used for DEBUG_POSIX_SYNCHRO */ +# define _CC_COBALT_DEBUG_LEGACY 128 +# define _CC_COBALT_DEBUG_TRACE_RELAX 256 +# define _CC_COBALT_DEBUG_NET 512 + +#define _CC_COBALT_GET_POLICIES 4 +# define _CC_COBALT_SCHED_FIFO 1 +# define _CC_COBALT_SCHED_RR 2 +# define _CC_COBALT_SCHED_WEAK 4 +# define _CC_COBALT_SCHED_SPORADIC 8 +# define _CC_COBALT_SCHED_QUOTA 16 +# define _CC_COBALT_SCHED_TP 32 + +#define _CC_COBALT_GET_WATCHDOG 5 +#define _CC_COBALT_GET_CORE_STATUS 6 +#define _CC_COBALT_START_CORE 7 +#define _CC_COBALT_STOP_CORE 8 + +#define _CC_COBALT_GET_NET_CONFIG 9 +# define _CC_COBALT_NET 0x00000001 +# define _CC_COBALT_NET_ETH_P_ALL 0x00000002 +# define _CC_COBALT_NET_IPV4 0x00000004 +# define _CC_COBALT_NET_ICMP 0x00000008 +# define _CC_COBALT_NET_NETROUTING 0x00000010 +# define _CC_COBALT_NET_ROUTER 0x00000020 +# define _CC_COBALT_NET_UDP 0x00000040 +# define _CC_COBALT_NET_AF_PACKET 0x00000080 +# define _CC_COBALT_NET_TDMA 0x00000100 +# define _CC_COBALT_NET_NOMAC 0x00000200 +# define _CC_COBALT_NET_CFG 0x00000400 +# define _CC_COBALT_NET_CAP 0x00000800 +# define _CC_COBALT_NET_PROXY 0x00001000 + + +enum cobalt_run_states { + COBALT_STATE_DISABLED, + COBALT_STATE_RUNNING, + COBALT_STATE_STOPPED, + COBALT_STATE_TEARDOWN, + COBALT_STATE_WARMUP, +}; + +#endif /* !_COBALT_UAPI_CORECTL_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h new file mode 100644 index 0000000..8710e8e --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_EVENT_H +#define _COBALT_UAPI_EVENT_H + +#include <cobalt/uapi/kernel/types.h> + +struct cobalt_event_state { + __u32 value; + __u32 flags; +#define COBALT_EVENT_PENDED 0x1 + __u32 nwaiters; +}; + +struct cobalt_event; + +/* Creation flags. */ +#define COBALT_EVENT_FIFO 0x0 +#define COBALT_EVENT_PRIO 0x1 +#define COBALT_EVENT_SHARED 0x2 + +/* Wait mode. 
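+ * COBALT_EVENT_ALL means the wait is satisfied only once all bits of
+ * the submitted mask are set in the event value, while
+ * COBALT_EVENT_ANY is satisfied as soon as at least one of them is.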
*/ +#define COBALT_EVENT_ALL 0x0 +#define COBALT_EVENT_ANY 0x1 + +struct cobalt_event_shadow { + __u32 state_offset; + __u32 flags; + xnhandle_t handle; +}; + +struct cobalt_event_info { + unsigned int value; + int flags; + int nrwait; +}; + +typedef struct cobalt_event_shadow cobalt_event_t; + +#endif /* !_COBALT_UAPI_EVENT_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/Makefile.am new file mode 100644 index 0000000..12e1b37 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/Makefile.am @@ -0,0 +1,12 @@ +includesubdir = $(includedir)/cobalt/uapi/kernel + +includesub_HEADERS = \ + heap.h \ + limits.h \ + pipe.h \ + synch.h \ + thread.h \ + trace.h \ + types.h \ + urw.h \ + vdso.h diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h new file mode 100644 index 0000000..75e7289 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_HEAP_H +#define _COBALT_UAPI_KERNEL_HEAP_H + +#include <linux/types.h> + +#define COBALT_MEMDEV_PRIVATE "memdev-private" +#define COBALT_MEMDEV_SHARED "memdev-shared" +#define COBALT_MEMDEV_SYS "memdev-sys" + +struct cobalt_memdev_stat { + __u32 size; + __u32 free; +}; + +#define MEMDEV_RTIOC_STAT _IOR(RTDM_CLASS_MEMORY, 0, struct cobalt_memdev_stat) + +#endif /* !_COBALT_UAPI_KERNEL_HEAP_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h new file mode 100644 index 0000000..22017c5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_UAPI_KERNEL_LIMITS_H +#define _COBALT_UAPI_KERNEL_LIMITS_H + +#define XNOBJECT_NAME_LEN 32 + +#endif /* !_COBALT_UAPI_KERNEL_LIMITS_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h new file mode 100644 index 0000000..688ee0c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_PIPE_H +#define _COBALT_UAPI_KERNEL_PIPE_H + +#define XNPIPE_IOCTL_BASE 'p' + +#define XNPIPEIOC_GET_NRDEV _IOW(XNPIPE_IOCTL_BASE, 0, int) +#define XNPIPEIOC_IFLUSH _IO(XNPIPE_IOCTL_BASE, 1) +#define XNPIPEIOC_OFLUSH _IO(XNPIPE_IOCTL_BASE, 2) +#define XNPIPEIOC_FLUSH XNPIPEIOC_OFLUSH +#define XNPIPEIOC_SETSIG _IO(XNPIPE_IOCTL_BASE, 3) + +#define XNPIPE_NORMAL 0x0 +#define XNPIPE_URGENT 0x1 + +#define XNPIPE_IFLUSH 0x1 +#define XNPIPE_OFLUSH 0x2 + +#define XNPIPE_MINOR_AUTO (-1) + +#endif /* !_COBALT_UAPI_KERNEL_PIPE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h new file mode 100644 index 0000000..a7cb9fb --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2008, 2009 Jan Kiszka <jan.kiszka@siemens.com>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_UAPI_KERNEL_SYNCH_H +#define _COBALT_UAPI_KERNEL_SYNCH_H + +#include <cobalt/uapi/kernel/types.h> + +/* Creation flags */ +#define XNSYNCH_FIFO 0x0 +#define XNSYNCH_PRIO 0x1 +#define XNSYNCH_PI 0x2 +#define XNSYNCH_DREORD 0x4 +#define XNSYNCH_OWNER 0x8 +#define XNSYNCH_PP 0x10 + +/* Fast lock API */ +static inline int xnsynch_fast_is_claimed(xnhandle_t handle) +{ + return (handle & XNSYNCH_FLCLAIM) != 0; +} + +static inline xnhandle_t xnsynch_fast_claimed(xnhandle_t handle) +{ + return handle | XNSYNCH_FLCLAIM; +} + +static inline xnhandle_t xnsynch_fast_ceiling(xnhandle_t handle) +{ + return handle | XNSYNCH_FLCEIL; +} + +static inline int +xnsynch_fast_owner_check(atomic_t *fastlock, xnhandle_t ownerh) +{ + return (xnhandle_get_id((xnhandle_t)atomic_read(fastlock)) == ownerh) ? + 0 : -EPERM; +} + +static inline +int xnsynch_fast_acquire(atomic_t *fastlock, xnhandle_t new_ownerh) +{ + xnhandle_t h; + + h = (xnhandle_t)atomic_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh); + if (h != XN_NO_HANDLE) { + if (xnhandle_get_id(h) == new_ownerh) + return -EBUSY; + + return -EAGAIN; + } + + return 0; +} + +static inline +int xnsynch_fast_release(atomic_t *fastlock, xnhandle_t cur_ownerh) +{ + return (xnhandle_t)atomic_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE) + == cur_ownerh; +} + +/* Local/shared property */ +static inline int xnsynch_is_shared(xnhandle_t handle) +{ + return (handle & XNSYNCH_PSHARED) != 0; +} + +#endif /* !_COBALT_UAPI_KERNEL_SYNCH_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h new file mode 100644 index 0000000..664def0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_THREAD_H +#define _COBALT_UAPI_KERNEL_THREAD_H + +#include <cobalt/uapi/kernel/types.h> + +/** + * @ingroup cobalt_core_thread + * @defgroup cobalt_core_thread_states Thread state flags + * @brief Bits reporting permanent or transient states of threads + * @{ + */ + +/* State flags (shared) */ + +#define XNSUSP 0x00000001 /**< Suspended. */ +#define XNPEND 0x00000002 /**< Sleep-wait for a resource. */ +#define XNDELAY 0x00000004 /**< Delayed */ +#define XNREADY 0x00000008 /**< Linked to the ready queue. */ +#define XNDORMANT 0x00000010 /**< Not started yet */ +#define XNZOMBIE 0x00000020 /**< Zombie thread in deletion process */ +#define XNMAPPED 0x00000040 /**< Thread is mapped to a linux task */ +#define XNRELAX 0x00000080 /**< Relaxed shadow thread (blocking bit) */ +#define XNHELD 0x00000200 /**< Thread is held to process emergency. 
*/ +#define XNBOOST 0x00000400 /**< PI/PP boost undergoing */ +#define XNSSTEP 0x00000800 /**< Single-stepped by debugger */ +#define XNLOCK 0x00001000 /**< Scheduler lock control (pseudo-bit, not in ->state) */ +#define XNRRB 0x00002000 /**< Undergoes a round-robin scheduling */ +#define XNWARN 0x00004000 /**< Issue SIGDEBUG on error detection */ +#define XNFPU 0x00008000 /**< Thread uses FPU */ +#define XNROOT 0x00010000 /**< Root thread (that is, Linux/IDLE) */ +#define XNWEAK 0x00020000 /**< Non real-time shadow (from the WEAK class) */ +#define XNUSER 0x00040000 /**< Shadow thread running in userland */ +#define XNJOINED 0x00080000 /**< Another thread waits for joining this thread */ +#define XNTRAPLB 0x00100000 /**< Trap lock break (i.e. may not sleep with sched lock) */ +#define XNDEBUG 0x00200000 /**< User-level debugging enabled */ +#define XNDBGSTOP 0x00400000 /**< Stopped for synchronous debugging */ + +/** @} */ + +/** + * @ingroup cobalt_core_thread + * @defgroup cobalt_core_thread_info Thread information flags + * @brief Bits reporting events notified to threads + * @{ + */ + +/* Information flags (shared) */ + +#define XNTIMEO 0x00000001 /**< Woken up due to a timeout condition */ +#define XNRMID 0x00000002 /**< Pending on a removed resource */ +#define XNBREAK 0x00000004 /**< Forcibly awaken from a wait state */ +#define XNKICKED 0x00000008 /**< Forced out of primary mode */ +#define XNWAKEN 0x00000010 /**< Thread waken up upon resource availability */ +#define XNROBBED 0x00000020 /**< Robbed from resource ownership */ +#define XNCANCELD 0x00000040 /**< Cancellation request is pending */ +#define XNPIALERT 0x00000080 /**< Priority inversion alert (SIGDEBUG sent) */ +#define XNSCHEDP 0x00000100 /**< schedparam propagation is pending */ +#define XNCONTHI 0x00000200 /**< Continue in primary mode after debugging */ + +/* Local information flags (private to current thread) */ + +#define XNMOVED 0x00000001 /**< CPU migration in primary mode occurred */ +#define XNLBALERT 0x00000002 /**< Scheduler lock break alert (SIGDEBUG sent) */ +#define XNDESCENT 0x00000004 /**< Adaptive transitioning to secondary mode */ +#define XNSYSRST 0x00000008 /**< Thread awaiting syscall restart after signal */ +#define XNHICCUP 0x00000010 /**< Just left from ptracing */ + +/** @} */ + +/* + * Must follow strictly the declaration order of the state flags + * defined above. Status symbols are defined as follows: + * + * 'S' -> Forcibly suspended. + * 'w'/'W' -> Waiting for a resource, with or without timeout. + * 'D' -> Delayed (without any other wait condition). + * 'R' -> Runnable. + * 'U' -> Unstarted or dormant. + * 'X' -> Relaxed shadow. + * 'H' -> Held in emergency. + * 'b' -> Priority boost undergoing. + * 'T' -> Ptraced and stopped. + * 'l' -> Locks scheduler. + * 'r' -> Undergoes round-robin. + * 't' -> Runtime mode errors notified. + * 'L' -> Lock breaks trapped. + * 's' -> Ptraced, stopped synchronously. + */ +#define XNTHREAD_STATE_LABELS "SWDRU..X.HbTlrt.....L.s" + +struct xnthread_user_window { + __u32 state; + __u32 info; + __u32 grant_value; + __u32 pp_pending; +}; + +#endif /* !_COBALT_UAPI_KERNEL_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h new file mode 100644 index 0000000..a1add30 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_TRACE_H +#define _COBALT_UAPI_KERNEL_TRACE_H + +#define __xntrace_op_max_begin 0 +#define __xntrace_op_max_end 1 +#define __xntrace_op_max_reset 2 +#define __xntrace_op_user_start 3 +#define __xntrace_op_user_stop 4 +#define __xntrace_op_user_freeze 5 +#define __xntrace_op_special 6 +#define __xntrace_op_special_u64 7 +#define __xntrace_op_latpeak_freeze 8 + +#endif /* !_COBALT_UAPI_KERNEL_TRACE_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h new file mode 100644 index 0000000..2c931c2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_TYPES_H +#define _COBALT_UAPI_KERNEL_TYPES_H + +#include <linux/types.h> +#include <cobalt/uapi/kernel/limits.h> + +typedef __u64 xnticks_t; + +typedef __s64 xnsticks_t; + +typedef __u32 xnhandle_t; + +#define XN_NO_HANDLE ((xnhandle_t)0) +#define XN_HANDLE_INDEX_MASK ((xnhandle_t)0xf0000000) + +/* Fixed bits (part of the identifier) */ +#define XNSYNCH_PSHARED ((xnhandle_t)0x40000000) + +/* Transient bits (expressing a status) */ +#define XNSYNCH_FLCLAIM ((xnhandle_t)0x80000000) /* Contended. */ +#define XNSYNCH_FLCEIL ((xnhandle_t)0x20000000) /* Ceiling active. */ + +#define XN_HANDLE_TRANSIENT_MASK (XNSYNCH_FLCLAIM|XNSYNCH_FLCEIL) + +/* + * Strip all special bits from the handle, only retaining the object + * index value in the registry. + */ +static inline xnhandle_t xnhandle_get_index(xnhandle_t handle) +{ + return handle & ~XN_HANDLE_INDEX_MASK; +} + +/* + * Strip the transient bits from the handle, only retaining the fixed + * part making the identifier. 
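+ * For instance, xnhandle_get_id(h | XNSYNCH_FLCLAIM) == xnhandle_get_id(h),
+ * which is what xnsynch_fast_owner_check() relies on when matching the
+ * lock word against a thread handle.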
+ */ +static inline xnhandle_t xnhandle_get_id(xnhandle_t handle) +{ + return handle & ~XN_HANDLE_TRANSIENT_MASK; +} + +/* + * Our representation of time specs at the kernel<->user interface + * boundary at the moment, until we have fully transitioned to a + * y2038-safe implementation in libcobalt. Once done, those legacy + * types will be removed. + */ +struct __user_old_timespec { + long tv_sec; + long tv_nsec; +}; + +struct __user_old_itimerspec { + struct __user_old_timespec it_interval; + struct __user_old_timespec it_value; +}; + +struct __user_old_timeval { + long tv_sec; + long tv_usec; +}; + +/* Lifted from include/uapi/linux/timex.h. */ +struct __user_old_timex { + unsigned int modes; /* mode selector */ + __kernel_long_t offset; /* time offset (usec) */ + __kernel_long_t freq; /* frequency offset (scaled ppm) */ + __kernel_long_t maxerror;/* maximum error (usec) */ + __kernel_long_t esterror;/* estimated error (usec) */ + int status; /* clock command/status */ + __kernel_long_t constant;/* pll time constant */ + __kernel_long_t precision;/* clock precision (usec) (read only) */ + __kernel_long_t tolerance;/* clock frequency tolerance (ppm) + * (read only) + */ + struct __user_old_timeval time; /* (read only, except for ADJ_SETOFFSET) */ + __kernel_long_t tick; /* (modified) usecs between clock ticks */ + + __kernel_long_t ppsfreq;/* pps frequency (scaled ppm) (ro) */ + __kernel_long_t jitter; /* pps jitter (us) (ro) */ + int shift; /* interval duration (s) (shift) (ro) */ + __kernel_long_t stabil; /* pps stability (scaled ppm) (ro) */ + __kernel_long_t jitcnt; /* jitter limit exceeded (ro) */ + __kernel_long_t calcnt; /* calibration intervals (ro) */ + __kernel_long_t errcnt; /* calibration errors (ro) */ + __kernel_long_t stbcnt; /* stability limit exceeded (ro) */ + + int tai; /* TAI offset (ro) */ + + int :32; int :32; int :32; int :32; + int :32; int :32; int :32; int :32; + int :32; int :32; int :32; +}; + +#endif /* !_COBALT_UAPI_KERNEL_TYPES_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h new file mode 100644 index 0000000..fcfde21 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_URW_H +#define _COBALT_UAPI_KERNEL_URW_H + +#include <linux/types.h> + +/* + * A restricted version of the kernel seqlocks with a slightly + * different interface, allowing for unsynced reads with concurrent + * write detection, without serializing writers. Caller should + * provide for proper locking to deal with concurrent updates. 
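+ * Typical usage: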
+ * + * urw_t lock = URW_INITIALIZER; + * urwstate_t tmp; + * + * unsynced_read_block(&tmp, &lock) { + * (will redo until clean read)... + * } + * + * unsynced_write_block(&tmp, &lock) { + * ... + * } + * + * This code was inspired by Wolfgang Mauerer's linux/seqlock.h + * adaptation for Xenomai 2.6 to support the VDSO feature. + */ + +typedef struct { + __u32 sequence; +} urw_t; + +typedef struct { + __u32 token; + __u32 dirty; +} urwstate_t; + +#define URW_INITIALIZER { 0 } +#define DEFINE_URW(__name) urw_t __name = URW_INITIALIZER + +#ifndef READ_ONCE +#define READ_ONCE ACCESS_ONCE +#endif + +static inline void __try_read_start(const urw_t *urw, urwstate_t *tmp) +{ + __u32 token; +repeat: + token = READ_ONCE(urw->sequence); + smp_rmb(); + if (token & 1) { + cpu_relax(); + goto repeat; + } + + tmp->token = token; + tmp->dirty = 1; +} + +static inline void __try_read_end(const urw_t *urw, urwstate_t *tmp) +{ + smp_rmb(); + if (urw->sequence != tmp->token) { + __try_read_start(urw, tmp); + return; + } + + tmp->dirty = 0; +} + +static inline void __do_write_start(urw_t *urw, urwstate_t *tmp) +{ + urw->sequence++; + tmp->dirty = 1; + smp_wmb(); +} + +static inline void __do_write_end(urw_t *urw, urwstate_t *tmp) +{ + smp_wmb(); + tmp->dirty = 0; + urw->sequence++; +} + +static inline void unsynced_rw_init(urw_t *urw) +{ + urw->sequence = 0; +} + +#define unsynced_read_block(__tmp, __urw) \ + for (__try_read_start(__urw, __tmp); \ + (__tmp)->dirty; __try_read_end(__urw, __tmp)) + +#define unsynced_write_block(__tmp, __urw) \ + for (__do_write_start(__urw, __tmp); \ + (__tmp)->dirty; __do_write_end(__urw, __tmp)) + +#endif /* !_COBALT_UAPI_KERNEL_URW_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h new file mode 100644 index 0000000..5b9b1b6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_VDSO_H +#define _COBALT_UAPI_KERNEL_VDSO_H + +#include <cobalt/uapi/kernel/urw.h> + +/* + * I-pipe only. Dovetail enables the common vDSO for getting + * CLOCK_REALTIME timestamps from the out-of-band stage + * (XNVDSO_FEAT_HOST_REALTIME is cleared in this case). + */ +struct xnvdso_hostrt_data { + __u64 wall_sec; + __u64 wtom_sec; + __u64 cycle_last; + __u64 mask; + __u32 wall_nsec; + __u32 wtom_nsec; + __u32 mult; + __u32 shift; + __u32 live; + urw_t lock; +}; + +/* + * Data shared between the Cobalt kernel and applications, which lives + * in the shared memory heap (COBALT_MEMDEV_SHARED). + * xnvdso_hostrt_data.features tells which data is present. Notice + * that struct xnvdso may only grow, but never shrink. 
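+ * Userland is expected to check the matching feature bit with
+ * xnvdso_test_feature() before reading the field it guards, e.g.
+ * XNVDSO_FEAT_WALLCLOCK_OFFSET before wallclock_offset.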
+ */ +struct xnvdso { + __u64 features; + /* XNVDSO_FEAT_HOST_REALTIME */ + struct xnvdso_hostrt_data hostrt_data; + /* XNVDSO_FEAT_WALLCLOCK_OFFSET */ + __u64 wallclock_offset; +}; + +/* For each shared feature, add a flag below. */ + +#define XNVDSO_FEAT_HOST_REALTIME 0x0000000000000001ULL +#define XNVDSO_FEAT_WALLCLOCK_OFFSET 0x0000000000000002ULL + +static inline int xnvdso_test_feature(struct xnvdso *vdso, + __u64 feature) +{ + return (vdso->features & feature) != 0; +} + +#endif /* !_COBALT_UAPI_KERNEL_VDSO_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h new file mode 100644 index 0000000..6e54daf --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_MONITOR_H +#define _COBALT_UAPI_MONITOR_H + +#include <cobalt/uapi/kernel/types.h> + +struct cobalt_monitor_state { + atomic_t owner; + __u32 flags; +#define COBALT_MONITOR_GRANTED 0x01 +#define COBALT_MONITOR_DRAINED 0x02 +#define COBALT_MONITOR_SIGNALED 0x03 /* i.e. GRANTED or DRAINED */ +#define COBALT_MONITOR_BROADCAST 0x04 +#define COBALT_MONITOR_PENDED 0x08 +}; + +struct cobalt_monitor; + +struct cobalt_monitor_shadow { + __u32 state_offset; + __u32 flags; + xnhandle_t handle; +#define COBALT_MONITOR_SHARED 0x1 +#define COBALT_MONITOR_WAITGRANT 0x0 +#define COBALT_MONITOR_WAITDRAIN 0x1 +}; + +typedef struct cobalt_monitor_shadow cobalt_monitor_t; + +#endif /* !_COBALT_UAPI_MONITOR_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h new file mode 100644 index 0000000..75e34f9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h @@ -0,0 +1,44 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_UAPI_MUTEX_H +#define _COBALT_UAPI_MUTEX_H + +#include <cobalt/uapi/kernel/types.h> + +#define COBALT_MUTEX_MAGIC 0x86860303 + +struct cobalt_mutex_state { + atomic_t owner; + __u32 flags; +#define COBALT_MUTEX_COND_SIGNAL 0x00000001 +#define COBALT_MUTEX_ERRORCHECK 0x00000002 + __u32 ceiling; +}; + +union cobalt_mutex_union { + pthread_mutex_t native_mutex; + struct cobalt_mutex_shadow { + __u32 magic; + __u32 lockcnt; + __u32 state_offset; + xnhandle_t handle; + struct cobalt_mutexattr attr; + } shadow_mutex; +}; + +#endif /* !_COBALT_UAPI_MUTEX_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h new file mode 100644 index 0000000..1409587 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_SCHED_H +#define _COBALT_UAPI_SCHED_H + +#include <cobalt/uapi/kernel/types.h> + +#define SCHED_COBALT 42 +#define SCHED_WEAK 43 + +#ifndef SCHED_SPORADIC +#define SCHED_SPORADIC 10 +#define sched_ss_low_priority sched_u.ss.__sched_low_priority +#define sched_ss_repl_period sched_u.ss.__sched_repl_period +#define sched_ss_init_budget sched_u.ss.__sched_init_budget +#define sched_ss_max_repl sched_u.ss.__sched_max_repl +#endif /* !SCHED_SPORADIC */ + +struct __sched_ss_param { + int __sched_low_priority; + struct __user_old_timespec __sched_repl_period; + struct __user_old_timespec __sched_init_budget; + int __sched_max_repl; +}; + +#define sched_rr_quantum sched_u.rr.__sched_rr_quantum + +struct __sched_rr_param { + struct __user_old_timespec __sched_rr_quantum; +}; + +#ifndef SCHED_TP +#define SCHED_TP 11 +#define sched_tp_partition sched_u.tp.__sched_partition +#endif /* !SCHED_TP */ + +struct __sched_tp_param { + int __sched_partition; +}; + +struct sched_tp_window { + struct __user_old_timespec offset; + struct __user_old_timespec duration; + int ptid; +}; + +enum { + sched_tp_install, + sched_tp_uninstall, + sched_tp_start, + sched_tp_stop, +}; + +struct __sched_config_tp { + int op; + int nr_windows; + struct sched_tp_window windows[0]; +}; + +#define sched_tp_confsz(nr_win) \ + (sizeof(struct __sched_config_tp) + nr_win * sizeof(struct sched_tp_window)) + +#ifndef SCHED_QUOTA +#define SCHED_QUOTA 12 +#define sched_quota_group sched_u.quota.__sched_group +#endif /* !SCHED_QUOTA */ + +struct __sched_quota_param { + int __sched_group; +}; + +enum { + sched_quota_add, + sched_quota_remove, + sched_quota_force_remove, + sched_quota_set, + sched_quota_get, +}; + +struct __sched_config_quota { + int op; + union { + struct { + int pshared; + } add; + struct { + int tgid; + } remove; + struct { + int tgid; + int quota; + int quota_peak; + } set; + struct { + 
int tgid; + } get; + }; + struct __sched_quota_info { + int tgid; + int quota; + int quota_peak; + int quota_sum; + } info; +}; + +#define sched_quota_confsz() sizeof(struct __sched_config_quota) + +struct sched_param_ex { + int sched_priority; + union { + struct __sched_ss_param ss; + struct __sched_rr_param rr; + struct __sched_tp_param tp; + struct __sched_quota_param quota; + } sched_u; +}; + +union sched_config { + struct __sched_config_tp tp; + struct __sched_config_quota quota; +}; + +#endif /* !_COBALT_UAPI_SCHED_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h new file mode 100644 index 0000000..01a9b55 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h @@ -0,0 +1,56 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_SEM_H +#define _COBALT_UAPI_SEM_H + +#include <cobalt/uapi/kernel/types.h> + +#define COBALT_SEM_MAGIC (0x86860707) +#define COBALT_NAMED_SEM_MAGIC (0x86860D0D) + +struct cobalt_sem; + +struct cobalt_sem_state { + atomic_t value; + __u32 flags; +}; + +union cobalt_sem_union { + sem_t native_sem; + struct cobalt_sem_shadow { + __u32 magic; + __s32 state_offset; + xnhandle_t handle; + } shadow_sem; +}; + +struct cobalt_sem_info { + unsigned int value; + int flags; + int nrwait; +}; + +#define SEM_FIFO 0x1 +#define SEM_PULSE 0x2 +#define SEM_PSHARED 0x4 +#define SEM_REPORT 0x8 +#define SEM_WARNDEL 0x10 +#define SEM_RAWCLOCK 0x20 +#define SEM_NOBUSYDEL 0x40 + +#endif /* !_COBALT_UAPI_SEM_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h new file mode 100644 index 0000000..8a7ea15 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_UAPI_SIGNAL_H +#define _COBALT_UAPI_SIGNAL_H + +/* + * Those are pseudo-signals only available with pthread_kill() to + * suspend/resume/unblock threads synchronously, force them out of + * primary mode or even demote them to the SCHED_OTHER class via the + * low-level nucleus interface. Can't block those signals, queue them, + * or even set them in a sigset. Those are nasty, strictly anti-POSIX + * things; we do provide them nevertheless only because we are mean + * people doing harmful code for no valid reason. Can't go against + * your nature, right? Nah... (this said, don't blame us for POSIX, + * we are not _that_ mean). + */ +#define SIGSUSP (SIGRTMAX + 1) +#define SIGRESM (SIGRTMAX + 2) +#define SIGRELS (SIGRTMAX + 3) +#define SIGKICK (SIGRTMAX + 4) +#define SIGDEMT (SIGRTMAX + 5) + +/* + * Regular POSIX signals with specific handling by Xenomai. + */ +#define SIGSHADOW SIGWINCH +#define sigshadow_action(code) ((code) & 0xff) +#define sigshadow_arg(code) (((code) >> 8) & 0xff) +#define sigshadow_int(action, arg) ((action) | ((arg) << 8)) + +/* SIGSHADOW action codes. */ +#define SIGSHADOW_ACTION_HARDEN 1 +#define SIGSHADOW_ACTION_BACKTRACE 2 +#define SIGSHADOW_ACTION_HOME 3 +#define SIGSHADOW_BACKTRACE_DEPTH 16 + +#define SIGDEBUG SIGXCPU +#define sigdebug_code(si) ((si)->si_value.sival_int) +#define sigdebug_reason(si) (sigdebug_code(si) & 0xff) +#define sigdebug_marker 0xfccf0000 +#define sigdebug_marked(si) \ + ((sigdebug_code(si) & 0xffff0000) == sigdebug_marker) + +/* Possible values of sigdebug_reason() */ +#define SIGDEBUG_UNDEFINED 0 +#define SIGDEBUG_MIGRATE_SIGNAL 1 +#define SIGDEBUG_MIGRATE_SYSCALL 2 +#define SIGDEBUG_MIGRATE_FAULT 3 +#define SIGDEBUG_MIGRATE_PRIOINV 4 +#define SIGDEBUG_NOMLOCK 5 +#define SIGDEBUG_WATCHDOG 6 +#define SIGDEBUG_RESCNT_IMBALANCE 7 +#define SIGDEBUG_LOCK_BREAK 8 +#define SIGDEBUG_MUTEX_SLEEP 9 + +#define COBALT_DELAYMAX 2147483647U + +/* + * Internal accessors to extra siginfo/sigevent fields, extending some + * existing base field. The extra data should be grouped in a + * dedicated struct type. The extra space is taken from the padding + * area available from the original structure definitions. + * + * e.g. getting the address of the following extension to + * _sifields._rt from siginfo_t, + * + * struct bar { + * int foo; + * }; + * + * would be noted as: + * + * siginfo_t si; + * struct bar *p = __cobalt_si_extra(&si, _rt, struct bar); + * + * This code is shared between kernel and user space. Proper + * definitions of siginfo_t and sigevent_t should have been read prior + * to including this file. + * + * CAUTION: this macro does not handle alignment issues for the extra + * data. The extra type definition should take care of this. + */ +#ifdef __OPTIMIZE__ +extern void *__siginfo_overflow(void); +static inline +const void *__check_si_overflow(size_t fldsz, size_t extrasz, const void *p) +{ + siginfo_t *si __attribute__((unused)); + + if (fldsz + extrasz <= sizeof(si->_sifields)) + return p; + + return __siginfo_overflow(); +} +#define __cobalt_si_extra(__si, __basefield, __type) \ + ((__type *)__check_si_overflow(sizeof(__si->_sifields.__basefield), \ + sizeof(__type), &(__si->_sifields.__basefield) + 1)) +#else +#define __cobalt_si_extra(__si, __basefield, __type) \ + ((__type *)((&__si->_sifields.__basefield) + 1)) +#endif + +/* Same approach, this time for extending sigevent_t. 
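+ *
+ * A matching use would read (hypothetical sketch; _tid stands for a
+ * glibc-style member of sigevent_t._sigev_un, and struct bar is the
+ * sample extension type above):
+ *
+ *   sigevent_t sev;
+ *   struct bar *q = __cobalt_sev_extra(&sev, _tid, struct bar);
+ *
+ * The same alignment caveat applies here.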
*/ + +#ifdef __OPTIMIZE__ +extern void *__sigevent_overflow(void); +static inline +const void *__check_sev_overflow(size_t fldsz, size_t extrasz, const void *p) +{ + sigevent_t *sev __attribute__((unused)); + + if (fldsz + extrasz <= sizeof(sev->_sigev_un)) + return p; + + return __sigevent_overflow(); +} +#define __cobalt_sev_extra(__sev, __basefield, __type) \ + ((__type *)__check_sev_overflow(sizeof(__sev->_sigev_un.__basefield), \ + sizeof(__type), &(__sev->_sigev_un.__basefield) + 1)) +#else +#define __cobalt_sev_extra(__sev, __basefield, __type) \ + ((__type *)((&__sev->_sigev_un.__basefield) + 1)) +#endif + +#endif /* !_COBALT_UAPI_SIGNAL_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h new file mode 100644 index 0000000..1523ddd --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_SYSCALL_H +#define _COBALT_UAPI_SYSCALL_H + +#include <cobalt/uapi/asm-generic/syscall.h> + +#define sc_cobalt_bind 0 +#define sc_cobalt_thread_create 1 +#define sc_cobalt_thread_getpid 2 +#define sc_cobalt_thread_setmode 3 +#define sc_cobalt_thread_setname 4 +#define sc_cobalt_thread_join 5 +#define sc_cobalt_thread_kill 6 +#define sc_cobalt_thread_setschedparam_ex 7 +#define sc_cobalt_thread_getschedparam_ex 8 +#define sc_cobalt_thread_getstat 9 +#define sc_cobalt_sem_init 10 +#define sc_cobalt_sem_destroy 11 +#define sc_cobalt_sem_post 12 +#define sc_cobalt_sem_wait 13 +#define sc_cobalt_sem_trywait 14 +#define sc_cobalt_sem_getvalue 15 +#define sc_cobalt_sem_open 16 +#define sc_cobalt_sem_close 17 +#define sc_cobalt_sem_unlink 18 +#define sc_cobalt_sem_timedwait 19 +#define sc_cobalt_sem_inquire 20 +#define sc_cobalt_sem_broadcast_np 21 +#define sc_cobalt_clock_getres 22 +#define sc_cobalt_clock_gettime 23 +#define sc_cobalt_clock_settime 24 +#define sc_cobalt_clock_nanosleep 25 +#define sc_cobalt_mutex_init 26 +#define sc_cobalt_mutex_check_init 27 +#define sc_cobalt_mutex_destroy 28 +#define sc_cobalt_mutex_lock 29 +#define sc_cobalt_mutex_timedlock 30 +#define sc_cobalt_mutex_trylock 31 +#define sc_cobalt_mutex_unlock 32 +#define sc_cobalt_cond_init 33 +#define sc_cobalt_cond_destroy 34 +#define sc_cobalt_cond_wait_prologue 35 +#define sc_cobalt_cond_wait_epilogue 36 +#define sc_cobalt_mq_open 37 +#define sc_cobalt_mq_close 38 +#define sc_cobalt_mq_unlink 39 +#define sc_cobalt_mq_getattr 40 +#define sc_cobalt_mq_timedsend 41 +#define sc_cobalt_mq_timedreceive 42 +#define sc_cobalt_mq_notify 43 +#define sc_cobalt_sched_minprio 44 +#define sc_cobalt_sched_maxprio 45 +#define sc_cobalt_sched_weightprio 46 +#define sc_cobalt_sched_yield 47 +#define 
sc_cobalt_sched_setscheduler_ex 48 +#define sc_cobalt_sched_getscheduler_ex 49 +#define sc_cobalt_sched_setconfig_np 50 +#define sc_cobalt_sched_getconfig_np 51 +#define sc_cobalt_timer_create 52 +#define sc_cobalt_timer_delete 53 +#define sc_cobalt_timer_settime 54 +#define sc_cobalt_timer_gettime 55 +#define sc_cobalt_timer_getoverrun 56 +#define sc_cobalt_timerfd_create 57 +#define sc_cobalt_timerfd_settime 58 +#define sc_cobalt_timerfd_gettime 59 +#define sc_cobalt_sigwait 60 +#define sc_cobalt_sigwaitinfo 61 +#define sc_cobalt_sigtimedwait 62 +#define sc_cobalt_sigpending 63 +#define sc_cobalt_kill 64 +#define sc_cobalt_sigqueue 65 +#define sc_cobalt_monitor_init 66 +#define sc_cobalt_monitor_destroy 67 +#define sc_cobalt_monitor_enter 68 +#define sc_cobalt_monitor_wait 69 +#define sc_cobalt_monitor_sync 70 +#define sc_cobalt_monitor_exit 71 +#define sc_cobalt_event_init 72 +#define sc_cobalt_event_wait 73 +#define sc_cobalt_event_sync 74 +#define sc_cobalt_event_destroy 75 +#define sc_cobalt_event_inquire 76 +#define sc_cobalt_open 77 +#define sc_cobalt_socket 78 +#define sc_cobalt_close 79 +#define sc_cobalt_ioctl 80 +#define sc_cobalt_read 81 +#define sc_cobalt_write 82 +#define sc_cobalt_recvmsg 83 +#define sc_cobalt_sendmsg 84 +#define sc_cobalt_mmap 85 +#define sc_cobalt_select 86 +#define sc_cobalt_fcntl 87 +#define sc_cobalt_migrate 88 +#define sc_cobalt_archcall 89 +#define sc_cobalt_trace 90 +#define sc_cobalt_corectl 91 +#define sc_cobalt_get_current 92 +/* 93: formerly mayday */ +#define sc_cobalt_backtrace 94 +#define sc_cobalt_serialdbg 95 +#define sc_cobalt_extend 96 +#define sc_cobalt_ftrace_puts 97 +#define sc_cobalt_recvmmsg 98 +#define sc_cobalt_sendmmsg 99 +#define sc_cobalt_clock_adjtime 100 +#define sc_cobalt_thread_setschedprio 101 +#define sc_cobalt_sem_timedwait64 102 +#define sc_cobalt_clock_gettime64 103 +#define sc_cobalt_clock_settime64 104 +#define sc_cobalt_clock_nanosleep64 105 +#define sc_cobalt_clock_getres64 106 +#define sc_cobalt_clock_adjtime64 107 +#define sc_cobalt_mutex_timedlock64 108 +#define sc_cobalt_mq_timedsend64 109 +#define sc_cobalt_mq_timedreceive64 110 +#define sc_cobalt_sigtimedwait64 111 +#define sc_cobalt_monitor_wait64 112 +#define sc_cobalt_event_wait64 113 +#define sc_cobalt_recvmmsg64 114 + +#define __NR_COBALT_SYSCALLS 128 /* Power of 2 */ + +#endif /* !_COBALT_UAPI_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h new file mode 100644 index 0000000..07602db --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_UAPI_THREAD_H +#define _COBALT_UAPI_THREAD_H + +#include <cobalt/uapi/kernel/thread.h> + +#define PTHREAD_WARNSW XNWARN +#define PTHREAD_LOCK_SCHED XNLOCK +#define PTHREAD_DISABLE_LOCKBREAK XNTRAPLB +#define PTHREAD_CONFORMING 0 + +struct cobalt_mutexattr { + int type : 3; + int protocol : 3; + int pshared : 1; + int __pad : 1; + int ceiling : 8; /* prio-1, (XN)SCHED_FIFO range. */ +}; + +struct cobalt_condattr { + int clock : 7; + int pshared : 1; +}; + +struct cobalt_threadstat { + __u64 xtime; + __u64 timeout; + __u64 msw; + __u64 csw; + __u64 xsc; + __u32 status; + __u32 pf; + int cpu; + int cprio; + char name[XNOBJECT_NAME_LEN]; + char personality[XNOBJECT_NAME_LEN]; +}; + +#endif /* !_COBALT_UAPI_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h new file mode 100644 index 0000000..411baf5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_TIME_H +#define _COBALT_UAPI_TIME_H + +#ifndef CLOCK_MONOTONIC_RAW +#define CLOCK_MONOTONIC_RAW 4 +#endif + +/* + * Additional clock ids we manage are supposed not to collide with any + * of the POSIX and Linux kernel definitions so that no ambiguities + * arise when porting applications in both directions. + * + * 0 .. 31 regular POSIX/linux clock ids. + * 32 .. 63 statically reserved Cobalt clocks + * 64 .. 127 dynamically registered Cobalt clocks (external) + * + * CAUTION: clock ids must fit within a 7bit value, see + * include/cobalt/uapi/thread.h (e.g. cobalt_condattr). + */ +#define __COBALT_CLOCK_STATIC(nr) ((clockid_t)(nr + 32)) + +#define CLOCK_HOST_REALTIME __COBALT_CLOCK_STATIC(0) + +#define COBALT_MAX_EXTCLOCKS 64 + +#define __COBALT_CLOCK_EXT(nr) ((clockid_t)(nr) | (1 << 6)) +#define __COBALT_CLOCK_EXT_P(id) ((int)(id) >= 64 && (int)(id) < 128) +#define __COBALT_CLOCK_EXT_INDEX(id) ((int)(id) & ~(1 << 6)) + +/* + * Additional timerfd defines + * + * when passing TFD_WAKEUP to timer_settime, any timer expiration + * unblocks the thread having issued timer_settime. + */ +#define TFD_WAKEUP (1 << 2) + +#endif /* !_COBALT_UAPI_TIME_H */ diff --git a/kernel/xenomai-v3.2.4/include/cobalt/unistd.h b/kernel/xenomai-v3.2.4/include/cobalt/unistd.h new file mode 100644 index 0000000..fe3992a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/cobalt/unistd.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+#pragma GCC system_header
+#include_next <unistd.h>
+
+#ifndef _COBALT_UNISTD_H
+#define _COBALT_UNISTD_H
+
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(ssize_t, read(int fd, void *buf, size_t nbyte));
+
+COBALT_DECL(ssize_t, write(int fd, const void *buf, size_t nbyte));
+
+COBALT_DECL(int, close(int fildes));
+
+COBALT_DECL(unsigned int, sleep(unsigned int seconds));
+
+COBALT_DECL(int, usleep(useconds_t usec));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_UNISTD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/wrappers.h b/kernel/xenomai-v3.2.4/include/cobalt/wrappers.h
new file mode 100644
index 0000000..7e061ca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/wrappers.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+#ifndef _COBALT_WRAPPERS_H
+#define _COBALT_WRAPPERS_H
+
+#include <boilerplate/compiler.h>
+
+#define __stringify_1(x...)	#x
+#define __stringify(x...)	__stringify_1(x)
+
+#define __WRAP(call)		__wrap_ ## call
+#define __STD(call)		__real_ ## call
+#define __COBALT(call)		__cobalt_ ## call
+#define __RT(call)		__COBALT(call)
+#define COBALT_DECL(T, P)	\
+	__typeof__(T) __RT(P);	\
+	__typeof__(T) __STD(P);	\
+	__typeof__(T) __WRAP(P)
+
+/*
+ *
+ * Each "foo" Cobalt routine shadowing a POSIX service may be
+ * overridden by an external library (see --with-cobalt-override
+ * option), in which case we generate the following symbols:
+ *
+ * __real_foo() => Original POSIX implementation.
+ * __cobalt_foo() => Cobalt implementation.
+ * __wrap_foo() => Weak alias to __cobalt_foo(), may be
+ * overridden.
+ *
+ * In the latter case, the external library shall provide its own
+ * implementation of __wrap_foo(), overriding Cobalt's foo()
+ * version. The original Cobalt implementation can still be
+ * referenced as __COBALT(foo).
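+ *
+ * As an illustrative sketch (not part of the upstream comment), a
+ * Cobalt service shadowing sched_yield() would be introduced as:
+ *
+ *   COBALT_IMPL(int, sched_yield, (void))
+ *   {
+ *           ...
+ *   }
+ *
+ * which defines __cobalt_sched_yield(), with __wrap_sched_yield()
+ * emitted as a weak alias to it that an external library may override.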
+ */
+#define COBALT_IMPL(T, I, A)						\
+__typeof__(T) __wrap_ ## I A __attribute__((alias("__cobalt_" __stringify(I)), weak)); \
+__typeof__(T) __cobalt_ ## I A
+
+#endif /* !_COBALT_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/Makefile.am b/kernel/xenomai-v3.2.4/include/copperplate/Makefile.am
new file mode 100644
index 0000000..5baa09b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/Makefile.am
@@ -0,0 +1,19 @@
+includesubdir = $(includedir)/copperplate
+
+includesub_HEADERS =	\
+	clockobj.h	\
+	cluster.h	\
+	debug.h		\
+	eventobj.h	\
+	heapobj.h	\
+	reference.h	\
+	registry.h	\
+	semobj.h	\
+	syncobj.h	\
+	threadobj.h	\
+	timerobj.h	\
+	traceobj.h	\
+	tunables.h
+
+noinst_HEADERS =	\
+	registry-obstack.h
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/clockobj.h b/kernel/xenomai-v3.2.4/include/copperplate/clockobj.h
new file mode 100644
index 0000000..dde18bd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/clockobj.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _COPPERPLATE_CLOCKOBJ_H
+#define _COPPERPLATE_CLOCKOBJ_H
+
+#include <pthread.h>
+#include <xeno_config.h>
+#include <boilerplate/time.h>
+#include <boilerplate/list.h>
+#include <boilerplate/lock.h>
+#include <boilerplate/limits.h>
+
+/*
+ * The Copperplate clock shall be monotonic unless the threading
+ * library is too limited to support this over Mercury.
+ *
+ * In the normal case, this means that ongoing delays and timeouts
+ * won't be affected when the host system date is changed. In the
+ * restricted case by contrast, ongoing delays and timeouts may be
+ * impacted by changes to the host system date.
+ *
+ * The implementation maintains a per-clock epoch value, so that
+ * different emulators can have different (virtual) system dates.
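+ *
+ * For instance (an illustrative sketch; the variable names are made
+ * up), two emulator instances may install distinct virtual dates on
+ * their own clock through clockobj_set_date() declared below, without
+ * affecting each other or the host clock:
+ *
+ *   clockobj_set_date(&vxworks_clock, date1);
+ *   clockobj_set_date(&psos_clock, date2);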
+ */
+#ifdef CONFIG_XENO_COPPERPLATE_CLOCK_RESTRICTED
+#define CLOCK_COPPERPLATE  CLOCK_REALTIME
+#else
+#define CLOCK_COPPERPLATE  CLOCK_MONOTONIC
+#endif
+
+struct clockobj {
+	pthread_mutex_t lock;
+	struct timespec epoch;
+	struct timespec offset;
+#ifndef CONFIG_XENO_LORES_CLOCK_DISABLED
+	unsigned int resolution;
+	unsigned int frequency;
+#endif
+};
+
+#define zero_time	((struct timespec){ .tv_sec = 0, .tv_nsec = 0 })
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void clockobj_set_date(struct clockobj *clkobj, ticks_t ticks);
+
+void clockobj_get_date(struct clockobj *clkobj, ticks_t *pticks);
+
+ticks_t clockobj_get_time(struct clockobj *clkobj);
+
+void clockobj_get_distance(struct clockobj *clkobj,
+			   const struct itimerspec *itm,
+			   struct timespec *delta);
+
+void clockobj_caltime_to_timeout(struct clockobj *clkobj, const struct tm *tm,
+				 unsigned long rticks, struct timespec *ts);
+
+void clockobj_caltime_to_ticks(struct clockobj *clkobj, const struct tm *tm,
+			       unsigned long rticks, ticks_t *pticks);
+
+void clockobj_ticks_to_caltime(struct clockobj *clkobj,
+			       ticks_t ticks,
+			       struct tm *tm,
+			       unsigned long *rticks);
+
+void clockobj_convert_clocks(struct clockobj *clkobj,
+			     const struct timespec *in,
+			     clockid_t clk_id,
+			     struct timespec *out);
+
+int clockobj_set_resolution(struct clockobj *clkobj,
+			    unsigned int resolution_ns);
+
+int clockobj_init(struct clockobj *clkobj,
+		  unsigned int resolution_ns);
+
+int clockobj_destroy(struct clockobj *clkobj);
+
+#ifndef CONFIG_XENO_LORES_CLOCK_DISABLED
+
+void __clockobj_ticks_to_timeout(struct clockobj *clkobj, clockid_t clk_id,
+				 ticks_t ticks, struct timespec *ts);
+
+void __clockobj_ticks_to_timespec(struct clockobj *clkobj,
+				  ticks_t ticks, struct timespec *ts);
+#endif /* !CONFIG_XENO_LORES_CLOCK_DISABLED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <cobalt/ticks.h>
+#include <cobalt/sys/cobalt.h>
+
+/*
+ * The Cobalt core exclusively deals with aperiodic timings, so a
+ * Cobalt _tick_ is actually a _TSC_ unit. In contrast, Copperplate
+ * deals with _TSC_ units and periodic _ticks_ whose duration depends
+ * on the clock resolution. Therefore, Cobalt ticks are strictly
+ * equivalent to Copperplate TSC units, and Copperplate ticks are
+ * periods of the reference clockobj which Cobalt does not know about.
+ */
+
+static inline ticks_t clockobj_get_tsc(void)
+{
+	/* Guaranteed to be the source of CLOCK_COPPERPLATE.
*/ + return cobalt_read_tsc(); +} + +static inline sticks_t clockobj_ns_to_tsc(sticks_t ns) +{ + return cobalt_ns_to_ticks(ns); +} + +static inline sticks_t clockobj_tsc_to_ns(sticks_t tsc) +{ + return cobalt_ticks_to_ns(tsc); +} + +static inline +void clockobj_ns_to_timespec(ticks_t ns, struct timespec *ts) +{ + unsigned long rem; + + ts->tv_sec = (time_t)cobalt_divrem_billion(ns, &rem); + ts->tv_nsec = (long)rem; +} + +#else /* CONFIG_XENO_MERCURY */ + +ticks_t clockobj_get_tsc(void); + +static inline sticks_t clockobj_ns_to_tsc(sticks_t ns) +{ + return ns; +} + +static inline sticks_t clockobj_tsc_to_ns(sticks_t tsc) +{ + return tsc; +} + +static inline +void clockobj_ns_to_timespec(ticks_t ns, struct timespec *ts) +{ + ts->tv_sec = ns / 1000000000ULL; + ts->tv_nsec = ns - (ts->tv_sec * 1000000000ULL); +} + +#endif /* CONFIG_XENO_MERCURY */ + +#ifdef CONFIG_XENO_LORES_CLOCK_DISABLED + +static inline +void __clockobj_ticks_to_timeout(struct clockobj *clkobj, + clockid_t clk_id, + ticks_t ticks, struct timespec *ts) +{ + struct timespec now, delta; + + __RT(clock_gettime(clk_id, &now)); + clockobj_ns_to_timespec(ticks, &delta); + timespec_add(ts, &now, &delta); +} + +static inline +void __clockobj_ticks_to_timespec(struct clockobj *clkobj, + ticks_t ticks, struct timespec *ts) +{ + clockobj_ns_to_timespec(ticks, ts); +} + +static inline +void clockobj_ticks_to_timespec(struct clockobj *clkobj, + ticks_t ticks, struct timespec *ts) +{ + __clockobj_ticks_to_timespec(clkobj, ticks, ts); +} + +static inline +unsigned int clockobj_get_resolution(struct clockobj *clkobj) +{ + return 1; +} + +static inline +unsigned int clockobj_get_frequency(struct clockobj *clkobj) +{ + return 1000000000; +} + +static inline sticks_t clockobj_ns_to_ticks(struct clockobj *clkobj, + sticks_t ns) +{ + return ns; +} + +static inline sticks_t clockobj_ticks_to_ns(struct clockobj *clkobj, + sticks_t ticks) +{ + return ticks; +} + +#else /* !CONFIG_XENO_LORES_CLOCK_DISABLED */ + +static inline +void clockobj_ticks_to_timespec(struct clockobj *clkobj, + ticks_t ticks, struct timespec *ts) +{ + __clockobj_ticks_to_timespec(clkobj, ticks, ts); +} + +static inline +unsigned int clockobj_get_resolution(struct clockobj *clkobj) +{ + return clkobj->resolution; +} + +static inline +unsigned int clockobj_get_frequency(struct clockobj *clkobj) +{ + return clkobj->frequency; +} + +sticks_t clockobj_ns_to_ticks(struct clockobj *clkobj, + sticks_t ns); + +static inline sticks_t clockobj_ticks_to_ns(struct clockobj *clkobj, + sticks_t ticks) +{ + return ticks * clkobj->resolution; +} + +#endif /* !CONFIG_XENO_LORES_CLOCK_DISABLED */ + +static inline +void clockobj_ticks_to_timeout(struct clockobj *clkobj, + ticks_t ticks, struct timespec *ts) +{ + __clockobj_ticks_to_timeout(clkobj, CLOCK_COPPERPLATE, ticks, ts); +} + +#endif /* _COPPERPLATE_CLOCKOBJ_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/cluster.h b/kernel/xenomai-v3.2.4/include/copperplate/cluster.h new file mode 100644 index 0000000..2ca07c6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/cluster.h @@ -0,0 +1,308 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _COPPERPLATE_CLUSTER_H +#define _COPPERPLATE_CLUSTER_H + +#include <boilerplate/hash.h> +#include <copperplate/syncobj.h> + +#ifdef CONFIG_XENO_PSHARED + +struct clusterobj { + pid_t cnode; + struct hashobj hobj; +}; + +struct dictionary { + struct hash_table table; + struct hashobj hobj; +}; + +struct cluster { + struct dictionary *d; +}; + +struct syndictionary { + struct hash_table table; + struct syncobj sobj; + struct hashobj hobj; +}; + +struct syncluster { + struct syndictionary *d; +}; + +struct pvclusterobj { + struct pvhashobj hobj; +}; + +struct pvcluster { + struct pvhash_table table; +}; + +struct pvsyncluster { + struct pvcluster c; + struct syncobj sobj; +}; + +static inline +const void *clusterobj_key(const struct clusterobj *cobj) +{ + return __memptr(__main_heap, cobj->hobj.key); +} + +static inline +size_t clusterobj_keylen(const struct clusterobj *cobj) +{ + return cobj->hobj.len; +} + +static inline +pid_t clusterobj_cnode(const struct clusterobj *cobj) +{ + return cobj->cnode; +} + +static inline +const void *pvclusterobj_key(const struct pvclusterobj *cobj) +{ + return cobj->hobj.key; +} + +static inline +size_t pvclusterobj_keylen(const struct pvclusterobj *cobj) +{ + return cobj->hobj.len; +} + +static inline +pid_t pvclusterobj_cnode(const struct pvclusterobj *cobj) +{ + return -1; +} + +#else /* !CONFIG_XENO_PSHARED */ + +struct clusterobj { + struct pvhashobj hobj; +}; + +struct cluster { + struct pvhash_table table; +}; + +struct syncluster { + struct cluster c; + struct syncobj sobj; +}; + +#define pvclusterobj clusterobj +#define pvcluster cluster +#define pvsyncluster syncluster + +static inline +const void *clusterobj_key(const struct pvclusterobj *cobj) +{ + return cobj->hobj.key; +} + +static inline +size_t clusterobj_keylen(const struct pvclusterobj *cobj) +{ + return cobj->hobj.len; +} + +static inline +pid_t clusterobj_cnode(const struct pvclusterobj *cobj) +{ + return -1; +} + +static inline +const void *pvclusterobj_key(const struct pvclusterobj *cobj) +{ + return clusterobj_key(cobj); +} + +static inline +size_t pvclusterobj_keylen(const struct pvclusterobj *cobj) +{ + return clusterobj_keylen(cobj); +} + +static inline +pid_t pvclusterobj_cnode(const struct pvclusterobj *cobj) +{ + return clusterobj_cnode(cobj); +} + +#endif /* !CONFIG_XENO_PSHARED */ + +struct syncluster_wait_struct { + union { + dref_type(char *) name_ref; + const char *name; + }; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +int pvcluster_init(struct pvcluster *c, const char *name); + +void pvcluster_destroy(struct pvcluster *c); + +int pvcluster_addobj(struct pvcluster *c, const char *name, + struct pvclusterobj *cobj); + +int pvcluster_addobj_dup(struct pvcluster *c, const char *name, + struct pvclusterobj *cobj); + +int pvcluster_delobj(struct pvcluster *c, + struct pvclusterobj *cobj); + +struct pvclusterobj *pvcluster_findobj(struct pvcluster *c, + const char *name); + +int pvcluster_walk(struct pvcluster *c, + int (*walk)(struct pvcluster *c, + struct pvclusterobj *cobj)); + +int 
pvsyncluster_init(struct pvsyncluster *sc, const char *name); + +void pvsyncluster_destroy(struct pvsyncluster *sc); + +int pvsyncluster_addobj(struct pvsyncluster *sc, const char *name, + struct pvclusterobj *cobj); + +int pvsyncluster_delobj(struct pvsyncluster *sc, + struct pvclusterobj *cobj); + +int pvsyncluster_findobj(struct pvsyncluster *sc, + const char *name, + const struct timespec *timeout, + struct pvclusterobj **cobjp) __must_check; + +#ifdef CONFIG_XENO_PSHARED + +int cluster_init(struct cluster *c, const char *name); + +int cluster_addobj(struct cluster *c, const char *name, + struct clusterobj *cobj); + +int cluster_addobj_dup(struct cluster *c, const char *name, + struct clusterobj *cobj); + +int cluster_delobj(struct cluster *c, + struct clusterobj *cobj); + +struct clusterobj *cluster_findobj(struct cluster *c, + const char *name); + +int cluster_walk(struct cluster *c, + int (*walk)(struct cluster *c, + struct clusterobj *cobj)); + +int syncluster_init(struct syncluster *sc, const char *name); + +int syncluster_addobj(struct syncluster *sc, const char *name, + struct clusterobj *cobj); + +int syncluster_delobj(struct syncluster *sc, + struct clusterobj *cobj); + +int syncluster_findobj(struct syncluster *sc, + const char *name, + const struct timespec *timeout, + struct clusterobj **cobjp) __must_check; + +#else /* !CONFIG_XENO_PSHARED */ + +static inline int cluster_init(struct cluster *c, const char *name) +{ + return pvcluster_init(c, name); +} + +static inline int cluster_addobj(struct cluster *c, const char *name, + struct clusterobj *cobj) +{ + return pvcluster_addobj(c, name, cobj); +} + +static inline int cluster_addobj_dup(struct cluster *c, const char *name, + struct clusterobj *cobj) +{ + return pvcluster_addobj_dup(c, name, cobj); +} + +static inline int cluster_delobj(struct cluster *c, + struct clusterobj *cobj) +{ + return pvcluster_delobj(c, cobj); +} + +static inline struct clusterobj *cluster_findobj(struct cluster *c, + const char *name) +{ + return pvcluster_findobj(c, name); +} + +static inline int cluster_walk(struct cluster *c, + int (*walk)(struct cluster *c, + struct clusterobj *cobj)) +{ + return pvcluster_walk(c, walk); +} + +static inline int syncluster_init(struct syncluster *sc, + const char *name) +{ + return pvsyncluster_init(sc, name); +} + +static inline int syncluster_addobj(struct syncluster *sc, + const char *name, + struct clusterobj *cobj) +{ + return pvsyncluster_addobj(sc, name, cobj); +} + +static inline int syncluster_delobj(struct syncluster *sc, + struct clusterobj *cobj) +{ + return pvsyncluster_delobj(sc, cobj); +} + +static inline __must_check +int syncluster_findobj(struct syncluster *sc, + const char *name, + const struct timespec *timeout, + struct clusterobj **cobjp) +{ + return pvsyncluster_findobj(sc, name, timeout, cobjp); +} + +#endif /* !CONFIG_XENO_PSHARED */ + +#ifdef __cplusplus +} +#endif + +#endif /* _COPPERPLATE_CLUSTER_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/debug.h b/kernel/xenomai-v3.2.4/include/copperplate/debug.h new file mode 100644 index 0000000..027f12c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/debug.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _COPPERPLATE_DEBUG_H +#define _COPPERPLATE_DEBUG_H + +#include <boilerplate/debug.h> + +#ifdef CONFIG_XENO_DEBUG + +struct threadobj; + +#define debug(__fmt, __args...) \ + do { \ + struct threadobj *__thobj = threadobj_current(); \ + if (__thobj == NULL || \ + (__thobj->status & __THREAD_S_DEBUG) != 0) \ + __debug(__thobj ? __thobj->name : NULL, __fmt, ##__args); \ + } while (0) + +#else /* !CONFIG_XENO_DEBUG */ + +#define debug(fmt, args...) do { } while (0) + +#endif /* !CONFIG_XENO_DEBUG */ + +#endif /* _COPPERPLATE_DEBUG_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/eventobj.h b/kernel/xenomai-v3.2.4/include/copperplate/eventobj.h new file mode 100644 index 0000000..3fc9416 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/eventobj.h @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ + +#ifndef _COPPERPLATE_EVENTOBJ_H +#define _COPPERPLATE_EVENTOBJ_H + +#include <boilerplate/compiler.h> +#include <copperplate/reference.h> + +struct eventobj_waitentry { + pid_t pid; + char name[32]; +}; + +#ifdef CONFIG_XENO_COBALT + +#include <cobalt/uapi/event.h> + +struct eventobj_corespec { + cobalt_event_t event; +}; + +struct eventobj_wait_struct { +}; + +#define EVOBJ_FIFO COBALT_EVENT_FIFO +#define EVOBJ_PRIO COBALT_EVENT_PRIO + +#define EVOBJ_ALL COBALT_EVENT_ALL +#define EVOBJ_ANY COBALT_EVENT_ANY + +#else /* CONFIG_XENO_MERCURY */ + +#include <copperplate/syncobj.h> + +struct eventobj_corespec { + struct syncobj sobj; + unsigned int value; + int flags; +}; + +struct eventobj_wait_struct { + unsigned int value; + int mode; +}; + +#define EVOBJ_FIFO 0x0 +#define EVOBJ_PRIO 0x1 + +#define EVOBJ_ALL 0x0 +#define EVOBJ_ANY 0x1 + +#endif /* CONFIG_XENO_MERCURY */ + +struct eventobj { + struct eventobj_corespec core; + fnref_type(void (*)(struct eventobj *evobj)) finalizer; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +int eventobj_init(struct eventobj *evobj, + unsigned int value, int flags, + fnref_type(void (*)(struct eventobj *evobj)) finalizer) __must_check; + +int eventobj_destroy(struct eventobj *evobj); + +void eventobj_uninit(struct eventobj *evobj); + +int eventobj_post(struct eventobj *evobj, + unsigned int bits); + +int eventobj_wait(struct eventobj *evobj, + unsigned int bits, + unsigned int *bits_r, + int mode, + const struct timespec *timeout) __must_check; + +int eventobj_clear(struct eventobj *evobj, + unsigned int bits, + unsigned int *bits_r); + +int eventobj_inquire(struct eventobj *evobj, size_t waitsz, + struct eventobj_waitentry *waitlist, + unsigned int *bits_r); + +#ifdef __cplusplus +} +#endif + +#endif /* _COPPERPLATE_EVENTOBJ_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/heapobj.h b/kernel/xenomai-v3.2.4/include/copperplate/heapobj.h new file mode 100644 index 0000000..f8d14a3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/heapobj.h @@ -0,0 +1,529 @@ +/* + * Copyright (C) 2008-2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */
+
+#ifndef _COPPERPLATE_HEAPOBJ_H
+#define _COPPERPLATE_HEAPOBJ_H
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+#include <pthread.h>
+#include <xeno_config.h>
+#include <boilerplate/wrappers.h>
+#include <boilerplate/list.h>
+#include <copperplate/reference.h>
+#include <boilerplate/lock.h>
+#include <copperplate/debug.h>
+
+struct heapobj {
+	union {
+		dref_type(void *) pool_ref;
+		void *pool;
+	};
+	size_t size;
+	char name[32];
+#ifdef CONFIG_XENO_PSHARED
+	char fsname[256];
+#endif
+};
+
+struct sysgroup {
+	int thread_count;
+	struct listobj thread_list;
+	int heap_count;
+	struct listobj heap_list;
+	pthread_mutex_t lock;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int heapobj_pkg_init_private(void);
+
+int __heapobj_init_private(struct heapobj *hobj, const char *name,
+			   size_t size, void *mem);
+
+int heapobj_init_array_private(struct heapobj *hobj, const char *name,
+			       size_t size, int elems);
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef CONFIG_XENO_TLSF
+
+size_t get_used_size(void *pool);
+void destroy_memory_pool(void *pool);
+size_t add_new_area(void *pool, size_t size, void *mem);
+void *malloc_ex(size_t size, void *pool);
+void free_ex(void *ptr, void *pool);
+void *tlsf_malloc(size_t size);
+void tlsf_free(void *ptr);
+size_t malloc_usable_size_ex(void *ptr, void *pool);
+
+static inline
+void pvheapobj_destroy(struct heapobj *hobj)
+{
+	destroy_memory_pool(hobj->pool);
+}
+
+static inline
+int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
+{
+	hobj->size = add_new_area(hobj->pool, size, mem);
+	if (hobj->size == (size_t)-1)
+		return __bt(-EINVAL);
+
+	return 0;
+}
+
+static inline
+void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
+{
+	return malloc_ex(size, hobj->pool);
+}
+
+static inline
+void pvheapobj_free(struct heapobj *hobj, void *ptr)
+{
+	free_ex(ptr, hobj->pool);
+}
+
+static inline
+size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
+{
+	return malloc_usable_size_ex(ptr, hobj->pool);
+}
+
+static inline
+size_t pvheapobj_inquire(struct heapobj *hobj)
+{
+	return get_used_size(hobj->pool);
+}
+
+static inline void *pvmalloc(size_t size)
+{
+	return tlsf_malloc(size);
+}
+
+static inline void pvfree(void *ptr)
+{
+	tlsf_free(ptr);
+}
+
+static inline char *pvstrdup(const char *ptr)
+{
+	char *str;
+
+	str = (char *)pvmalloc(strlen(ptr) + 1);
+	if (str == NULL)
+		return NULL;
+
+	return strcpy(str, ptr);
+}
+
+#elif defined(CONFIG_XENO_HEAPMEM)
+
+#include <stdlib.h>
+#include <boilerplate/heapmem.h>
+
+extern struct heap_memory heapmem_main;
+
+static inline
+void pvheapobj_destroy(struct heapobj *hobj)
+{
+	heapmem_destroy((struct heap_memory *)hobj->pool);
+	if (hobj->pool != (void *)&heapmem_main)
+		__STD(free(hobj->pool));
+}
+
+static inline
+int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
+{
+	return heapmem_extend((struct heap_memory *)hobj->pool,
+			      mem, size);
+}
+
+static inline
+void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
+{
+	return heapmem_alloc((struct heap_memory *)hobj->pool, size);
+}
+
+static inline
+void pvheapobj_free(struct heapobj *hobj, void *ptr)
+{
+	heapmem_free((struct heap_memory *)hobj->pool, ptr);
+}
+
+static inline
+size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
+{
+	ssize_t size = heapmem_check((struct heap_memory *)hobj->pool, ptr);
+	return size < 0 ?
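+		/* A negative heapmem_check() status denotes an invalid block: report zero usable size. */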
0 : size;
+}
+
+static inline
+size_t pvheapobj_inquire(struct heapobj *hobj)
+{
+	return heapmem_used_size((struct heap_memory *)hobj->pool);
+}
+
+static inline void *pvmalloc(size_t size)
+{
+	return heapmem_alloc(&heapmem_main, size);
+}
+
+static inline void pvfree(void *ptr)
+{
+	heapmem_free(&heapmem_main, ptr);
+}
+
+static inline char *pvstrdup(const char *ptr)
+{
+	char *str;
+
+	str = (char *)pvmalloc(strlen(ptr) + 1);
+	if (str == NULL)
+		return NULL;
+
+	return strcpy(str, ptr);
+}
+
+#else /* !CONFIG_XENO_HEAPMEM, i.e. malloc */
+
+#include <stdlib.h>
+
+static inline void *pvmalloc(size_t size)
+{
+	/*
+	 * NOTE: We don't want debug _nrt assertions to trigger when
+	 * running over Cobalt if the user picked this allocator, so
+	 * we make sure to call the glibc directly, not the Cobalt
+	 * wrappers.
+	 */
+	return __STD(malloc(size));
+}
+
+static inline void pvfree(void *ptr)
+{
+	__STD(free(ptr));
+}
+
+static inline char *pvstrdup(const char *ptr)
+{
+	return strdup(ptr);
+}
+
+void pvheapobj_destroy(struct heapobj *hobj);
+
+int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem);
+
+void *pvheapobj_alloc(struct heapobj *hobj, size_t size);
+
+void pvheapobj_free(struct heapobj *hobj, void *ptr);
+
+size_t pvheapobj_inquire(struct heapobj *hobj);
+
+size_t pvheapobj_validate(struct heapobj *hobj, void *ptr);
+
+#endif /* !CONFIG_XENO_HEAPMEM */
+
+#ifdef CONFIG_XENO_PSHARED
+
+extern void *__main_heap;
+
+extern struct hash_table *__main_catalog;
+#define main_catalog	(*((struct hash_table *)__main_catalog))
+
+extern struct sysgroup *__main_sysgroup;
+
+struct sysgroup_memspec {
+	/** next member in sysgroup list. */
+	struct holder next;
+};
+
+static inline void *mainheap_ptr(memoff_t off)
+{
+	return off ? (void *)__memptr(__main_heap, off) : NULL;
+}
+
+static inline memoff_t mainheap_off(void *addr)
+{
+	return addr ? (memoff_t)__memoff(__main_heap, addr) : 0;
+}
+
+/*
+ * ptr shall point to a block of memory allocated within the main heap
+ * if non-null; such address is always 8-byte aligned. Handles of
+ * shared heap pointers are returned with bit #0 set, which serves as
+ * a special tag detected in mainheap_deref(). A null pointer is
+ * always translated as a null handle.
+ */
+#define mainheap_ref(ptr, type)						\
+	({								\
+		type handle;						\
+		assert(__builtin_types_compatible_p(typeof(type), unsigned long) || \
+		       __builtin_types_compatible_p(typeof(type), uintptr_t)); \
+		assert(ptr == NULL || __memchk(__main_heap, ptr));	\
+		handle = (type)mainheap_off(ptr);			\
+		handle|1;						\
+	})
+/*
+ * Handles of shared heap-based pointers have bit #0 set. Other values
+ * are not translated, and the return value is the original handle
+ * cast to a pointer. A null handle is always returned unchanged.
+ */
+#define mainheap_deref(handle, type)					\
+	({								\
+		type *ptr;						\
+		assert(__builtin_types_compatible_p(typeof(handle), unsigned long) || \
+		       __builtin_types_compatible_p(typeof(handle), uintptr_t)); \
+		ptr = (handle & 1) ?
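+		/* Bit #0 set: shared-heap offset; clear the tag and translate. */ \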
(type *)mainheap_ptr(handle & ~1UL) : (type *)handle; \ + ptr; \ + }) + +static inline void +__sysgroup_add(struct sysgroup_memspec *obj, struct listobj *q, int *countp) +{ + write_lock_nocancel(&__main_sysgroup->lock); + (*countp)++; + list_append(&obj->next, q); + write_unlock(&__main_sysgroup->lock); +} + +#define sysgroup_add(__group, __obj) \ + __sysgroup_add(__obj, &(__main_sysgroup->__group ## _list), \ + &(__main_sysgroup->__group ## _count)) + +static inline void +__sysgroup_remove(struct sysgroup_memspec *obj, int *countp) +{ + write_lock_nocancel(&__main_sysgroup->lock); + (*countp)--; + list_remove(&obj->next); + write_unlock(&__main_sysgroup->lock); +} + +#define sysgroup_remove(__group, __obj) \ + __sysgroup_remove(__obj, &(__main_sysgroup->__group ## _count)) + +static inline void sysgroup_lock(void) +{ + read_lock_nocancel(&__main_sysgroup->lock); +} + +static inline void sysgroup_unlock(void) +{ + read_unlock(&__main_sysgroup->lock); +} + +#define sysgroup_count(__group) \ + (__main_sysgroup->__group ## _count) + +#define for_each_sysgroup(__obj, __tmp, __group) \ + list_for_each_entry_safe(__obj, __tmp, &(__main_sysgroup->__group ## _list), next) + +int heapobj_pkg_init_shared(void); + +int heapobj_init(struct heapobj *hobj, const char *name, + size_t size); + +static inline int __heapobj_init(struct heapobj *hobj, const char *name, + size_t size, void *unused) +{ + /* Can't work on user-defined memory in shared mode. */ + return heapobj_init(hobj, name, size); +} + +int heapobj_init_array(struct heapobj *hobj, const char *name, + size_t size, int elems); + +void heapobj_destroy(struct heapobj *hobj); + +int heapobj_extend(struct heapobj *hobj, + size_t size, void *mem); + +void *heapobj_alloc(struct heapobj *hobj, + size_t size); + +void heapobj_free(struct heapobj *hobj, + void *ptr); + +size_t heapobj_validate(struct heapobj *hobj, + void *ptr); + +size_t heapobj_inquire(struct heapobj *hobj); + +size_t heapobj_get_size(struct heapobj *hobj); + +int heapobj_bind_session(const char *session); + +void heapobj_unbind_session(void); + +int heapobj_unlink_session(const char *session); + +void *xnmalloc(size_t size); + +void xnfree(void *ptr); + +char *xnstrdup(const char *ptr); + +#else /* !CONFIG_XENO_PSHARED */ + +struct sysgroup_memspec { +}; + +/* + * Whether an object is laid in some shared heap. Never if pshared + * mode is disabled. 
+ */ +static inline int pshared_check(void *heap, void *addr) +{ + return 0; +} + +#ifdef __cplusplus +#define __check_ref_width(__dst, __src) \ + ({ \ + assert(sizeof(__dst) >= sizeof(__src)); \ + (typeof(__dst))__src; \ + }) +#else +#define __check_ref_width(__dst, __src) \ + __builtin_choose_expr( \ + sizeof(__dst) >= sizeof(__src), (typeof(__dst))__src, \ + ((void)0)) +#endif + +#define mainheap_ref(ptr, type) \ + ({ \ + type handle; \ + handle = __check_ref_width(handle, ptr); \ + assert(ptr == NULL || __memchk(__main_heap, ptr)); \ + handle; \ + }) +#define mainheap_deref(handle, type) \ + ({ \ + type *ptr; \ + ptr = __check_ref_width(ptr, handle); \ + ptr; \ + }) + +#define sysgroup_add(__group, __obj) do { } while (0) +#define sysgroup_remove(__group, __obj) do { } while (0) + +static inline int heapobj_pkg_init_shared(void) +{ + return 0; +} + +static inline int __heapobj_init(struct heapobj *hobj, const char *name, + size_t size, void *mem) +{ + return __heapobj_init_private(hobj, name, size, mem); +} + +static inline int heapobj_init(struct heapobj *hobj, const char *name, + size_t size) +{ + return __heapobj_init_private(hobj, name, size, NULL); +} + +static inline int heapobj_init_array(struct heapobj *hobj, const char *name, + size_t size, int elems) +{ + return heapobj_init_array_private(hobj, name, size, elems); +} + +static inline void heapobj_destroy(struct heapobj *hobj) +{ + pvheapobj_destroy(hobj); +} + +static inline int heapobj_extend(struct heapobj *hobj, + size_t size, void *mem) +{ + return pvheapobj_extend(hobj, size, mem); +} + +static inline void *heapobj_alloc(struct heapobj *hobj, + size_t size) +{ + return pvheapobj_alloc(hobj, size); +} + +static inline void heapobj_free(struct heapobj *hobj, + void *ptr) +{ + pvheapobj_free(hobj, ptr); +} + +static inline size_t heapobj_validate(struct heapobj *hobj, + void *ptr) +{ + return pvheapobj_validate(hobj, ptr); +} + +static inline size_t heapobj_inquire(struct heapobj *hobj) +{ + return pvheapobj_inquire(hobj); +} + +static inline int heapobj_bind_session(const char *session) +{ + return -ENOSYS; +} + +static inline int heapobj_unlink_session(const char *session) +{ + return 0; +} + +static inline void heapobj_unbind_session(void) { } + +static inline void *xnmalloc(size_t size) +{ + return pvmalloc(size); +} + +static inline void xnfree(void *ptr) +{ + pvfree(ptr); +} + +static inline char *xnstrdup(const char *ptr) +{ + return pvstrdup(ptr); +} + +#endif /* !CONFIG_XENO_PSHARED */ + +static inline const char *heapobj_name(struct heapobj *hobj) +{ + return hobj->name; +} + +static inline size_t heapobj_size(struct heapobj *hobj) +{ + return hobj->size; +} + +#endif /* _COPPERPLATE_HEAPOBJ_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/reference.h b/kernel/xenomai-v3.2.4/include/copperplate/reference.h new file mode 100644 index 0000000..8f6f76c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/reference.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COPPERPLATE_REFERENCE_H +#define _COPPERPLATE_REFERENCE_H + +#include <boilerplate/limits.h> +#include <boilerplate/scope.h> +#include <boilerplate/setup.h> + +#define libcopperplate_tag 0 /* Library tag - unique and constant. */ +#define libcopperplate_cbi 1 /* Callback binary interface level. */ + +#ifdef CONFIG_XENO_PSHARED +/* + * Layout of a function reference handle in shared memory (32-bit + * value): + * + * xxHHHHHHHHHHHHHHHHHHHHLLLLLPPPPP + * + * where: 'P' => function index in the per-library array + * 'L' => library tag + * 'H' => symbol hash value (symname + cbi) + * 'x' => unassigned + * + * NOTE: handle value -1 is kept for representing a NULL function + * pointer; bit #31 should remain unassigned and cleared for this + * purpose. + */ + +struct __fnref { + void (*fn)(void); + unsigned int hash; +}; + +#define __refvar(l, s) __ ## l ## __ref__ ## s +#define __refmangle(l, h, p) (((h & 0xfffff) << 10)|((l & 0x1f) << 5)|(p & 0x1f)) +#define __refhash(r) (((r) >> 10) & 0xfffffU) +#define __reftag(r) (((r) >> 5) & 0x1f) +#define __refpos(r) ((r) & 0x1f) +#define __refchk(v, r) \ + ({ \ + int __tag = __reftag(r), __pos = __refpos(r); \ + typeof(v) __p = (typeof(v))__fnrefs[__tag][__pos].fn; \ + assert(__fnrefs[__tag][__pos].hash == __refhash(r)); \ + assert(__p != NULL); \ + __p; \ + }) +#define fnref_type(t) int +#define fnref_null -1 +static inline int __fnref_nofn(void *fnaddr) +{ + return fnaddr == NULL; +} +#define fnref_put(l, s) (__fnref_nofn((void *)(s)) ? fnref_null : __refvar(l, s)) +#define fnref_get(v, r) ((v) = (r) < 0 ? NULL : __refchk(v, r)) +#define fnref_register(l, s) \ + int __refvar(l, s); \ + static void __early_ctor __ifnref_ ## s(void) \ + { \ + __refvar(l, s) = __fnref_register(#l, l ## _tag, \ + l ## _cbi, \ + #s, (void (*)(void))s); \ + } +#define fnref_declare(l, s) extern int __refvar(l, s) + +#define MAX_FNLIBS 16 /* max=32 */ +#define MAX_FNREFS 16 /* max=32 */ + +extern struct __fnref __fnrefs[MAX_FNLIBS][MAX_FNREFS]; + +int __fnref_register(const char *libname, + int libtag, int cbirev, + const char *symname, void (*fn)(void)); + +#else /* !CONFIG_XENO_PSHARED */ + +#define fnref_type(t) __typeof__(t) +#define fnref_null NULL +#define fnref_put(l, s) (s) +#define fnref_get(v, r) ((v) = (r)) +#define fnref_register(l, s) +#define fnref_declare(l, s) + +#endif /* !CONFIG_XENO_PSHARED */ + +#endif /* _COPPERPLATE_REFERENCE_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/registry-obstack.h b/kernel/xenomai-v3.2.4/include/copperplate/registry-obstack.h new file mode 100644 index 0000000..f3d1a17 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/registry-obstack.h @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COPPERPLATE_REGISTRY_OBSTACK_H +#define _COPPERPLATE_REGISTRY_OBSTACK_H + +#include <copperplate/registry.h> + +#ifdef CONFIG_XENO_REGISTRY + +#include <stdlib.h> +#include <boilerplate/obstack.h> +#include <copperplate/heapobj.h> + +struct threadobj; +struct syncobj; + +/* + * Obstacks are grown from handlers called by the fusefs server + * thread, which has no real-time requirement: malloc/free is fine for + * memory management. + */ +#define obstack_chunk_alloc malloc +#define obstack_chunk_free free + +struct threadobj; + +struct fsobstack { + struct obstack obstack; + void *data; + size_t len; +}; + +struct fsobstack_syncops { + int (*prepare_cache)(struct fsobstack *o, + struct obstack *cache, int item_count); + size_t (*collect_data)(void *p, struct threadobj *thobj); + size_t (*format_data)(struct fsobstack *o, void *p); +}; + +struct syncobj; + +#ifdef __cplusplus +extern "C" { +#endif + +void fsobstack_grow_string(struct fsobstack *o, + const char *s); + +void fsobstack_grow_char(struct fsobstack *o, + char c); + +int fsobstack_grow_format(struct fsobstack *o, + const char *fmt, ...); + +int fsobstack_grow_file(struct fsobstack *o, + const char *path); + +int fsobstack_grow_syncobj_grant(struct fsobstack *o, + struct syncobj *sobj, + struct fsobstack_syncops *ops); + +int fsobstack_grow_syncobj_drain(struct fsobstack *o, + struct syncobj *sobj, + struct fsobstack_syncops *ops); + +ssize_t fsobstack_pull(struct fsobstack *o, + char *buf, size_t size); + +ssize_t fsobj_obstack_read(struct fsobj *fsobj, + char *buf, size_t size, off_t offset, + void *priv); + +int fsobj_obstack_release(struct fsobj *fsobj, void *priv); + +#ifdef __cplusplus +} +#endif + +static inline void fsobstack_init(struct fsobstack *o) +{ + obstack_init(&o->obstack); + o->data = NULL; + o->len = 0; +} + +static inline void fsobstack_destroy(struct fsobstack *o) +{ + obstack_free(&o->obstack, NULL); +} + +static inline void fsobstack_finish(struct fsobstack *o) +{ + o->len = obstack_object_size(&o->obstack); + o->data = obstack_finish(&o->obstack); +} + +static inline +void registry_init_file_obstack(struct fsobj *fsobj, + const struct registry_operations *ops) +{ + registry_init_file(fsobj, ops, sizeof(struct fsobstack)); +} + +#else /* !CONFIG_XENO_REGISTRY */ + +static inline +void registry_init_file_obstack(struct fsobj *fsobj, + const struct registry_operations *ops) +{ } + +#endif /* !CONFIG_XENO_REGISTRY */ + +#endif /* !_COPPERPLATE_REGISTRY_OBSTACK_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/registry.h b/kernel/xenomai-v3.2.4/include/copperplate/registry.h new file mode 100644 index 0000000..c94c902 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/registry.h @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _COPPERPLATE_REGISTRY_H +#define _COPPERPLATE_REGISTRY_H + +#include <sys/types.h> +#include <pthread.h> +#include <fcntl.h> +#include <boilerplate/list.h> +#include <boilerplate/hash.h> +#include <boilerplate/obstack.h> + +struct fsobj; + +#define REGISTRY_SHARED 1 +#define REGISTRY_ANON 2 + +#ifdef CONFIG_XENO_REGISTRY + +struct registry_operations { + int (*open)(struct fsobj *fsobj, void *priv); + int (*release)(struct fsobj *fsobj, void *priv); + ssize_t (*read)(struct fsobj *fsobj, + char *buf, size_t size, off_t offset, + void *priv); + ssize_t (*write)(struct fsobj *fsobj, + const char *buf, size_t size, off_t offset, + void *priv); +}; + +struct regfs_dir; + +struct fsobj { + pthread_mutex_t lock; + char *path; + const char *basename; + int mode; + size_t privsz; + struct regfs_dir *dir; + struct timespec ctime; + struct timespec mtime; + const struct registry_operations *ops; + struct pvholder link; + struct pvhashobj hobj; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +int registry_add_dir(const char *fmt, ...); + +int registry_init_file(struct fsobj *fsobj, + const struct registry_operations *ops, + size_t privsz); + +int registry_add_file(struct fsobj *fsobj, + int mode, + const char *fmt, ...); + +void registry_destroy_file(struct fsobj *fsobj); + +void registry_touch_file(struct fsobj *fsobj); + +int __registry_pkg_init(const char *arg0, + char *mountpt, + int flags); + +int registry_pkg_init(const char *arg0, + int flags); + +void registry_pkg_destroy(void); + +#ifdef __cplusplus +} +#endif + +#else /* !CONFIG_XENO_REGISTRY */ + +struct fsobj { +}; + +struct registry_operations { +}; + +static inline +int registry_add_dir(const char *fmt, ...) +{ + return 0; +} + +static inline +void registry_init_file(struct fsobj *fsobj, + const struct registry_operations *ops, + size_t privsz) +{ +} + +static inline +int registry_add_file(struct fsobj *fsobj, + int mode, + const char *fmt, ...) +{ + return 0; +} + +static inline +void registry_destroy_file(struct fsobj *fsobj) +{ +} + +static inline +void registry_touch_file(struct fsobj *fsobj) +{ +} + +static inline +int __registry_pkg_init(const char *arg0, + char *mountpt, int flags) +{ + return 0; +} + +static inline +int registry_pkg_init(const char *arg0, + int flags) +{ + return 0; +} + +static inline +void registry_pkg_destroy(void) +{ +} + +#endif /* !CONFIG_XENO_REGISTRY */ + +#endif /* !_COPPERPLATE_REGISTRY_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/semobj.h b/kernel/xenomai-v3.2.4/include/copperplate/semobj.h new file mode 100644 index 0000000..4d9e0a5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/semobj.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _COPPERPLATE_SEMOBJ_H +#define _COPPERPLATE_SEMOBJ_H + +#include <boilerplate/compiler.h> +#include <copperplate/reference.h> + +struct semobj_waitentry { + pid_t pid; + char name[32]; +}; + +#ifdef CONFIG_XENO_COBALT + +#include <semaphore.h> + +struct semobj_corespec { + sem_t sem; +}; + +#else /* CONFIG_XENO_MERCURY */ + +#include <copperplate/syncobj.h> + +struct semobj_corespec { + struct syncobj sobj; + int flags; + int value; +}; + +#endif /* CONFIG_XENO_MERCURY */ + +struct semobj { + struct semobj_corespec core; + fnref_type(void (*)(struct semobj *smobj)) finalizer; +}; + +#define SEMOBJ_PRIO 0x1 +#define SEMOBJ_PULSE 0x2 +#define SEMOBJ_WARNDEL 0x4 + +#ifdef __cplusplus +extern "C" { +#endif + +int semobj_init(struct semobj *smobj, + int flags, int value, + fnref_type(void (*)(struct semobj *smobj)) finalizer); + +int semobj_destroy(struct semobj *smobj); + +void semobj_uninit(struct semobj *smobj); + +int semobj_post(struct semobj *smobj); + +int semobj_broadcast(struct semobj *smobj); + +int semobj_wait(struct semobj *smobj, + const struct timespec *timeout) __must_check; + +int semobj_getvalue(struct semobj *smobj, int *sval); + +int semobj_inquire(struct semobj *smobj, size_t waitsz, + struct semobj_waitentry *waitlist, + int *val_r); + +#ifdef __cplusplus +} +#endif + +#endif /* _COPPERPLATE_SEMOBJ_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/syncobj.h b/kernel/xenomai-v3.2.4/include/copperplate/syncobj.h new file mode 100644 index 0000000..66dee02 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/syncobj.h @@ -0,0 +1,233 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
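 + *
 + * A minimal usage sketch of the wait/grant API declared below
 + * (hypothetical caller, error handling elided; passing a NULL timeout
 + * is assumed here to mean an unbounded wait):
 + *
 + *	struct syncstate syns;
 + *	int ret;
 + *
 + *	ret = syncobj_lock(&sobj, &syns);
 + *	if (ret == 0) {
 + *		ret = syncobj_wait_grant(&sobj, NULL, &syns);
 + *		syncobj_unlock(&sobj, &syns);
 + *	}
 + *
 + * The waker side runs syncobj_grant_one() or syncobj_grant_all()
 + * while holding the same syncobj lock.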
+ */ + +#ifndef _COPPERPLATE_SYNCOBJ_H +#define _COPPERPLATE_SYNCOBJ_H + +#include <pthread.h> +#include <time.h> +#include <boilerplate/list.h> +#include <boilerplate/lock.h> +#include <copperplate/reference.h> + +/* syncobj->flags */ +#define SYNCOBJ_FIFO 0x0 +#define SYNCOBJ_PRIO 0x1 +#define SYNCOBJ_LOCKED 0x2 + +/* threadobj->wait_status */ +#define SYNCOBJ_FLUSHED 0x1 +#define SYNCOBJ_SIGNALED 0x2 +#define SYNCOBJ_DRAINWAIT 0x4 + +#define SYNCOBJ_MAGIC 0xf9f99f9f + +struct threadobj; + +struct syncstate { + int state; +}; + +#ifdef CONFIG_XENO_COBALT + +#include <boilerplate/atomic.h> +#include <cobalt/uapi/monitor.h> + +struct syncobj_corespec { + cobalt_monitor_t monitor; +}; + +#else /* CONFIG_XENO_MERCURY */ + +struct syncobj_corespec { + pthread_mutex_t lock; + pthread_cond_t drain_sync; +}; + +#endif /* CONFIG_XENO_MERCURY */ + +struct syncobj { + unsigned int magic; + int flags; + int wait_count; + struct listobj grant_list; + int grant_count; + struct listobj drain_list; + int drain_count; + struct syncobj_corespec core; + fnref_type(void (*)(struct syncobj *sobj)) finalizer; +}; + +#define syncobj_for_each_grant_waiter(sobj, pos) \ + list_for_each_entry(pos, &(sobj)->grant_list, wait_link) + +#define syncobj_for_each_grant_waiter_safe(sobj, pos, tmp) \ + list_for_each_entry_safe(pos, tmp, &(sobj)->grant_list, wait_link) + +#define syncobj_for_each_drain_waiter(sobj, pos) \ + list_for_each_entry(pos, &(sobj)->drain_list, wait_link) + +#define syncobj_for_each_drain_waiter_safe(sobj, pos, tmp) \ + list_for_each_entry_safe(pos, tmp, &(sobj)->drain_list, wait_link) + +void __syncobj_cleanup_wait(struct syncobj *sobj, + struct threadobj *thobj); + +#ifdef CONFIG_XENO_DEBUG + +static inline void __syncobj_tag_locked(struct syncobj *sobj) +{ + sobj->flags |= SYNCOBJ_LOCKED; +} + +static inline void __syncobj_tag_unlocked(struct syncobj *sobj) +{ + assert(sobj->flags & SYNCOBJ_LOCKED); + sobj->flags &= ~SYNCOBJ_LOCKED; +} + +static inline void __syncobj_check_locked(struct syncobj *sobj) +{ + assert(sobj->flags & SYNCOBJ_LOCKED); +} + +#else /* !CONFIG_XENO_DEBUG */ + +static inline void __syncobj_tag_locked(struct syncobj *sobj) +{ +} + +static inline void __syncobj_tag_unlocked(struct syncobj *sobj) +{ +} + +static inline void __syncobj_check_locked(struct syncobj *sobj) +{ +} + +#endif /* !CONFIG_XENO_DEBUG */ + +#ifdef __cplusplus +extern "C" { +#endif + +int __syncobj_broadcast_drain(struct syncobj *sobj, int reason); + +int __syncobj_broadcast_grant(struct syncobj *sobj, int reason); + +int syncobj_init(struct syncobj *sobj, clockid_t clk_id, int flags, + fnref_type(void (*)(struct syncobj *sobj)) finalizer) __must_check; + +int syncobj_wait_grant(struct syncobj *sobj, + const struct timespec *timeout, + struct syncstate *syns) __must_check; + +struct threadobj *syncobj_grant_one(struct syncobj *sobj); + +void syncobj_grant_to(struct syncobj *sobj, + struct threadobj *thobj); + +struct threadobj *syncobj_peek_grant(struct syncobj *sobj); + +struct threadobj *syncobj_peek_drain(struct syncobj *sobj); + +int syncobj_lock(struct syncobj *sobj, + struct syncstate *syns) __must_check; + +void syncobj_unlock(struct syncobj *sobj, + struct syncstate *syns); + +int syncobj_wait_drain(struct syncobj *sobj, + const struct timespec *timeout, + struct syncstate *syns) __must_check; + +int syncobj_destroy(struct syncobj *sobj, + struct syncstate *syns); + +void syncobj_uninit(struct syncobj *sobj); + +static inline int syncobj_grant_wait_p(struct syncobj *sobj) +{ + 
__syncobj_check_locked(sobj); + + return !list_empty(&sobj->grant_list); +} + +static inline int syncobj_count_grant(struct syncobj *sobj) +{ + __syncobj_check_locked(sobj); + + return sobj->grant_count; +} + +static inline int syncobj_count_drain(struct syncobj *sobj) +{ + __syncobj_check_locked(sobj); + + return sobj->drain_count; +} + +static inline int syncobj_drain_wait_p(struct syncobj *sobj) +{ + __syncobj_check_locked(sobj); + + return !list_empty(&sobj->drain_list); +} + +static inline int syncobj_drain(struct syncobj *sobj) +{ + int ret = 0; + + __syncobj_check_locked(sobj); + + if (sobj->drain_count > 0) + ret = __syncobj_broadcast_drain(sobj, SYNCOBJ_SIGNALED); + + return ret; +} + +static inline int syncobj_grant_all(struct syncobj *sobj) +{ + int ret = 0; + + __syncobj_check_locked(sobj); + + if (sobj->grant_count > 0) + ret = __syncobj_broadcast_grant(sobj, SYNCOBJ_SIGNALED); + + return ret; +} + +static inline int syncobj_flush(struct syncobj *sobj) +{ + __syncobj_check_locked(sobj); + + if (sobj->grant_count > 0) + __syncobj_broadcast_grant(sobj, SYNCOBJ_FLUSHED); + + if (sobj->drain_count > 0) + __syncobj_broadcast_drain(sobj, SYNCOBJ_FLUSHED); + + return sobj->wait_count; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _COPPERPLATE_SYNCOBJ_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/threadobj.h b/kernel/xenomai-v3.2.4/include/copperplate/threadobj.h new file mode 100644 index 0000000..c836341 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/threadobj.h @@ -0,0 +1,589 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _COPPERPLATE_THREADOBJ_H +#define _COPPERPLATE_THREADOBJ_H + +#include <time.h> +#include <semaphore.h> +#include <signal.h> +#include <pthread.h> +#include <stdint.h> +#include <boilerplate/list.h> +#include <boilerplate/lock.h> +#include <boilerplate/sched.h> +#include <copperplate/clockobj.h> +#include <copperplate/heapobj.h> + +#ifdef CONFIG_XENO_COBALT + +#include <cobalt/uapi/kernel/types.h> + +struct xnthread_user_window; + +struct threadobj_corespec { + xnhandle_t handle; + union { + __u32 u_winoff; + struct xnthread_user_window *u_window; + }; +}; + +struct threadobj_stat { + /** Execution time in primary mode (ns). */ + ticks_t xtime; + /** Current timeout value (ns). */ + ticks_t timeout; + /** Number of primary->secondary mode switches. */ + uint64_t msw; + /** Number of context switches. */ + uint64_t csw; + /** Number of Xenomai syscalls. */ + uint64_t xsc; + /** Current CPU for thread. */ + int cpu; + /** Scheduler lock nesting count. */ + int schedlock; + /** Cobalt thread status bits. */ + unsigned int status; + /** Number of page faults. 
*/ + uint32_t pf; +}; + +#define SCHED_CORE SCHED_COBALT + +static inline +void threadobj_save_timeout(struct threadobj_corespec *corespec, + const struct timespec *timeout) +{ + /* + * We retrieve this information from the nucleus directly via + * cobalt_thread_stat(). + */ +} + +#ifdef CONFIG_XENO_PSHARED + +static inline struct xnthread_user_window * +threadobj_get_window(struct threadobj_corespec *corespec) +{ + extern void *cobalt_umm_shared; + return (struct xnthread_user_window *) + ((caddr_t)cobalt_umm_shared + corespec->u_winoff); +} + +#else /* !CONFIG_XENO_PSHARED */ + +static inline struct xnthread_user_window * +threadobj_get_window(struct threadobj_corespec *corespec) +{ + return corespec->u_window; +} + +#endif /* !CONFIG_XENO_PSHARED */ + +#else /* CONFIG_XENO_MERCURY */ + +#include <sys/time.h> + +struct threadobj_corespec { + pthread_cond_t grant_sync; + int policy_unlocked; + struct sched_param_ex schedparam_unlocked; + timer_t rr_timer; + /** Timeout reported by sysregd. */ + struct timespec timeout; +#ifdef CONFIG_XENO_WORKAROUND_CONDVAR_PI + int policy_unboosted; + struct sched_param_ex schedparam_unboosted; +#endif +}; + +struct threadobj_stat { + /** Current timeout value (ns). */ + ticks_t timeout; + /** Current CPU for thread. */ + int cpu; + /** Scheduler lock nesting count. */ + int schedlock; + /** Mercury thread status bits. */ + unsigned int status; +}; + +#define SCHED_CORE SCHED_FIFO + +static inline +void threadobj_save_timeout(struct threadobj_corespec *corespec, + const struct timespec *timeout) +{ + if (timeout) + corespec->timeout = *timeout; +} + +#endif /* CONFIG_XENO_MERCURY */ + +/* + * threadobj->status, updated with ->lock held. + */ +#define __THREAD_S_STARTED (1 << 0) /* threadobj_start() called. */ +#define __THREAD_S_WARMUP (1 << 1) /* threadobj_prologue() not called yet. */ +#define __THREAD_S_ABORTED (1 << 2) /* Cancelled before start. */ +#define __THREAD_S_LOCKED (1 << 3) /* threadobj_lock() granted (debug only). */ +#define __THREAD_S_ACTIVE (1 << 4) /* Running user code. */ +#define __THREAD_S_SUSPENDED (1 << 5) /* Suspended via threadobj_suspend(). */ +#define __THREAD_S_SAFE (1 << 6) /* TCB release deferred. */ +#define __THREAD_S_PERIODIC (1 << 7) /* Periodic timer set. */ +#define __THREAD_S_DEBUG (1 << 31) /* Debug mode enabled. */ +/* + * threadobj->run_state, locklessly updated by "current", merged + * with ->status bits by threadobj_get_status(). + */ +#define __THREAD_S_RUNNING 0 +#define __THREAD_S_DORMANT (1 << 16) +#define __THREAD_S_WAIT (1 << 17) +#define __THREAD_S_TIMEDWAIT (1 << 18) +#define __THREAD_S_DELAYED (1 << 19) +#define __THREAD_S_BREAK (__THREAD_S_DELAYED|(1 << 20)) + +/* threadobj mode bits */ +#define __THREAD_M_LOCK (1 << 0) /* Toggle scheduler lock. */ +#define __THREAD_M_WARNSW (1 << 1) /* Toggle switch warning bit. */ +#define __THREAD_M_CONFORMING (1 << 2) /* Switch to conforming mode. */ +#define __THREAD_M_SPARE0 (1 << 16) +#define __THREAD_M_SPARE1 (1 << 17) +#define __THREAD_M_SPARE2 (1 << 18) +#define __THREAD_M_SPARE3 (1 << 19) +#define __THREAD_M_SPARE4 (1 << 20) +#define __THREAD_M_SPARE5 (1 << 21) +#define __THREAD_M_SPARE6 (1 << 22) +#define __THREAD_M_SPARE7 (1 << 23) + +/* + * We need to use a valid address here. The object will never be dereferenced + * when it is identified as IRQ context, so the pthread key itself is fine. 
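 + *
 + * Hence threadobj_current() below filters this marker out, so callers
 + * only ever observe either NULL or a valid TCB pointer, while
 + * threadobj_irq_p() compares against THREADOBJ_IRQCONTEXT directly to
 + * detect interrupt context.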
+ */ +#define THREADOBJ_IRQCONTEXT ((struct threadobj *)&threadobj_tskey) + +struct traceobj; +struct syncobj; + +struct threadobj { + unsigned int magic; /* Must be first. */ + pthread_t ptid; + pthread_mutex_t lock; + + int schedlock_depth; + int cancel_state; + int status; + int run_state; + int policy; + struct sched_param_ex schedparam; + int global_priority; + pid_t cnode; + pid_t pid; + char name[32]; + + void (*finalizer)(struct threadobj *thobj); + int core_offset; + int *errno_pointer; + /* Those members belong exclusively to the syncobj code. */ + struct syncobj *wait_sobj; + struct holder wait_link; + int wait_status; + int wait_prio; + dref_type(void *) wait_union; + size_t wait_size; + timer_t periodic_timer; + + struct threadobj_corespec core; + struct timespec tslice; + pthread_cond_t barrier; + struct traceobj *tracer; + sem_t *cancel_sem; + struct sysgroup_memspec memspec; + struct backtrace_data btd; +}; + +struct threadobj_init_data { + unsigned int magic; + cpu_set_t affinity; + int policy; + struct sched_param_ex param_ex; + void (*finalizer)(struct threadobj *thobj); +}; + +extern int threadobj_high_prio; + +extern int threadobj_irq_prio; + +extern pthread_key_t threadobj_tskey; + +#ifdef HAVE_TLS + +extern __thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL))) +struct threadobj *__threadobj_current; + +static inline void threadobj_set_current(struct threadobj *thobj) +{ + __threadobj_current = thobj; + pthread_setspecific(threadobj_tskey, thobj); +} + +static inline struct threadobj *__threadobj_get_current(void) +{ + return __threadobj_current; +} + +#else /* !HAVE_TLS */ + +static inline void threadobj_set_current(struct threadobj *thobj) +{ + pthread_setspecific(threadobj_tskey, thobj); +} + +static inline struct threadobj *__threadobj_get_current(void) +{ + return (struct threadobj *)pthread_getspecific(threadobj_tskey); +} + +#endif /* !HAVE_TLS */ + +static inline struct threadobj *threadobj_current(void) +{ + struct threadobj *thobj = __threadobj_get_current(); + return thobj == NULL || thobj == THREADOBJ_IRQCONTEXT ? 
NULL : thobj; +} + +#ifdef CONFIG_XENO_DEBUG + +static inline void __threadobj_tag_locked(struct threadobj *thobj) +{ + thobj->status |= __THREAD_S_LOCKED; +} + +static inline void __threadobj_tag_unlocked(struct threadobj *thobj) +{ + assert(thobj->status & __THREAD_S_LOCKED); + thobj->status &= ~__THREAD_S_LOCKED; +} + +static inline void __threadobj_check_locked(struct threadobj *thobj) +{ + assert(thobj->status & __THREAD_S_LOCKED); +} + +#else /* !CONFIG_XENO_DEBUG */ + +static inline void __threadobj_tag_locked(struct threadobj *thobj) +{ +} + +static inline void __threadobj_tag_unlocked(struct threadobj *thobj) +{ +} + +static inline void __threadobj_check_locked(struct threadobj *thobj) +{ +} + +#endif /* !CONFIG_XENO_DEBUG */ + +#ifdef __cplusplus +extern "C" { +#endif + +void *__threadobj_alloc(size_t tcb_struct_size, + size_t wait_union_size, + int thobj_offset); + +static inline void __threadobj_free(void *p) +{ + xnfree(p); +} + +static inline void threadobj_free(struct threadobj *thobj) +{ + __threadobj_free((unsigned char *)thobj - thobj->core_offset); +} + +int threadobj_init(struct threadobj *thobj, + struct threadobj_init_data *idata) __must_check; + +int threadobj_start(struct threadobj *thobj) __must_check; + +int threadobj_shadow(struct threadobj *thobj, + const char *name); + +int threadobj_prologue(struct threadobj *thobj, + const char *name); + +void threadobj_wait_start(void); + +void threadobj_notify_entry(void); + +int threadobj_cancel(struct threadobj *thobj); + +void threadobj_uninit(struct threadobj *thobj); + +int threadobj_suspend(struct threadobj *thobj); + +int threadobj_resume(struct threadobj *thobj); + +int threadobj_unblock(struct threadobj *thobj); + +int __threadobj_lock_sched(struct threadobj *current); + +int threadobj_lock_sched(void); + +int __threadobj_unlock_sched(struct threadobj *current); + +int threadobj_unlock_sched(void); + +int threadobj_set_schedparam(struct threadobj *thobj, int policy, + const struct sched_param_ex *param_ex); + +int threadobj_set_schedprio(struct threadobj *thobj, int priority); + +int threadobj_set_mode(int clrmask, int setmask, int *mode_r); + +int threadobj_set_periodic(struct threadobj *thobj, + const struct timespec *__restrict__ idate, + const struct timespec *__restrict__ period); + +int threadobj_wait_period(unsigned long *overruns_r) __must_check; + +void threadobj_spin(ticks_t ns); + +int threadobj_stat(struct threadobj *thobj, + struct threadobj_stat *stat); + +int threadobj_sleep(const struct timespec *ts); + +void threadobj_set_current_name(const char *name); + +#ifdef CONFIG_XENO_PSHARED + +static inline int threadobj_local_p(struct threadobj *thobj) +{ + extern pid_t __node_id; + return thobj->cnode == __node_id; +} + +#else /* !CONFIG_XENO_PSHARED */ + +static inline int threadobj_local_p(struct threadobj *thobj) +{ + return 1; +} + +#endif /* !CONFIG_XENO_PSHARED */ + +void threadobj_init_key(void); + +int threadobj_pkg_init(int anon_session); + +#ifdef __cplusplus +} +#endif + +#define threadobj_alloc(T, __mptr, W) \ + ({ \ + void *__p; \ + __p = __threadobj_alloc(sizeof(T), sizeof(W), offsetof(T, __mptr)); \ + __p; \ + }) + +static inline int threadobj_get_policy(struct threadobj *thobj) +{ + return thobj->policy; +} + +static inline int threadobj_get_priority(struct threadobj *thobj) +{ + return thobj->schedparam.sched_priority; +} + +static inline void threadobj_copy_schedparam(struct sched_param_ex *param_ex, + const struct threadobj *thobj) +{ + *param_ex = thobj->schedparam; +} + +static 
inline int threadobj_lock(struct threadobj *thobj) +{ + int ret; + + ret = write_lock_safe(&thobj->lock, thobj->cancel_state); + if (ret) + return ret; + + __threadobj_tag_locked(thobj); + + return 0; +} + +static inline int threadobj_trylock(struct threadobj *thobj) +{ + int ret; + + ret = write_trylock_safe(&thobj->lock, thobj->cancel_state); + if (ret) + return ret; + + __threadobj_tag_locked(thobj); + + return 0; +} + +static inline int threadobj_unlock(struct threadobj *thobj) +{ + __threadobj_check_locked(thobj); + __threadobj_tag_unlocked(thobj); + return write_unlock_safe(&thobj->lock, thobj->cancel_state); +} + +static inline int threadobj_irq_p(void) +{ + struct threadobj *current = __threadobj_get_current(); + return current == THREADOBJ_IRQCONTEXT; +} + +static inline int threadobj_current_p(void) +{ + return threadobj_current() != NULL; +} + +static inline int __threadobj_lock_sched_once(struct threadobj *current) +{ + if (current->schedlock_depth == 0) + return __threadobj_lock_sched(current); + + return -EBUSY; +} + +static inline int threadobj_lock_sched_once(void) +{ + struct threadobj *current = threadobj_current(); + + if (current->schedlock_depth == 0) + return threadobj_lock_sched(); + + return -EBUSY; +} + +static inline void threadobj_yield(void) +{ + __RT(sched_yield()); +} + +static inline unsigned int threadobj_get_magic(struct threadobj *thobj) +{ + return thobj->magic; +} + +static inline void threadobj_set_magic(struct threadobj *thobj, + unsigned int magic) +{ + thobj->magic = magic; +} + +static inline int threadobj_get_lockdepth(struct threadobj *thobj) +{ + return thobj->schedlock_depth; +} + +static inline int threadobj_get_status(struct threadobj *thobj) +{ + return thobj->status | thobj->run_state; +} + +static inline int threadobj_get_errno(struct threadobj *thobj) +{ + return *thobj->errno_pointer; +} + +#define threadobj_prepare_wait(T) \ + ({ \ + struct threadobj *__thobj = threadobj_current(); \ + assert(__thobj != NULL); \ + assert(sizeof(typeof(T)) <= __thobj->wait_size); \ + __mptr(__thobj->wait_union); \ + }) + +#define threadobj_finish_wait() do { } while (0) + +static inline void *threadobj_get_wait(struct threadobj *thobj) +{ + return __mptr(thobj->wait_union); +} + +static inline const char *threadobj_get_name(struct threadobj *thobj) +{ + return thobj->name; +} + +static inline pid_t threadobj_get_pid(struct threadobj *thobj) +{ + return thobj->pid; +} + +#ifdef CONFIG_XENO_WORKAROUND_CONDVAR_PI + +int threadobj_cond_timedwait(pthread_cond_t *cond, + pthread_mutex_t *lock, + const struct timespec *timeout); + +int threadobj_cond_wait(pthread_cond_t *cond, + pthread_mutex_t *lock); + +int threadobj_cond_signal(pthread_cond_t *cond); + +int threadobj_cond_broadcast(pthread_cond_t *cond); + +#else + +static inline +int threadobj_cond_timedwait(pthread_cond_t *cond, + pthread_mutex_t *lock, + const struct timespec *timeout) +{ + return __RT(pthread_cond_timedwait(cond, lock, timeout)); +} + +static inline +int threadobj_cond_wait(pthread_cond_t *cond, + pthread_mutex_t *lock) +{ + return __RT(pthread_cond_wait(cond, lock)); +} + +static inline +int threadobj_cond_signal(pthread_cond_t *cond) +{ + return __RT(pthread_cond_signal(cond)); +} + +static inline +int threadobj_cond_broadcast(pthread_cond_t *cond) +{ + return __RT(pthread_cond_broadcast(cond)); +} + +#endif /* !CONFIG_XENO_WORKAROUND_CONDVAR_PI */ + +#endif /* _COPPERPLATE_THREADOBJ_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/timerobj.h 
b/kernel/xenomai-v3.2.4/include/copperplate/timerobj.h new file mode 100644 index 0000000..66e2e5c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/timerobj.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _COPPERPLATE_TIMEROBJ_H +#define _COPPERPLATE_TIMEROBJ_H + +#include <pthread.h> +#include <time.h> +#include <boilerplate/list.h> +#include <boilerplate/lock.h> + +struct timerobj { + struct itimerspec itspec; + void (*handler)(struct timerobj *tmobj); + timer_t timer; + pthread_mutex_t lock; + int cancel_state; + struct pvholder next; +}; + +static inline int timerobj_lock(struct timerobj *tmobj) +{ + return write_lock_safe(&tmobj->lock, tmobj->cancel_state); +} + +static inline int timerobj_unlock(struct timerobj *tmobj) +{ + return write_unlock_safe(&tmobj->lock, tmobj->cancel_state); +} + +static inline int timerobj_enabled(const struct timerobj *tmobj) +{ + return tmobj->handler != NULL; +} + +#ifdef __cplusplus +extern "C" { +#endif + +int timerobj_init(struct timerobj *tmobj); + +void timerobj_destroy(struct timerobj *tmobj); + +int timerobj_start(struct timerobj *tmobj, + void (*handler)(struct timerobj *tmobj), + struct itimerspec *it); + +int timerobj_stop(struct timerobj *tmobj); + +int timerobj_pkg_init(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _COPPERPLATE_TIMEROBJ_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/traceobj.h b/kernel/xenomai-v3.2.4/include/copperplate/traceobj.h new file mode 100644 index 0000000..be660aa --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/traceobj.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
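 + *
 + * A minimal usage sketch of the tracing API declared below
 + * (hypothetical test body; mark values and counts chosen arbitrarily):
 + *
 + *	struct traceobj trobj;
 + *	static int tseq[] = { 1, 2 };
 + *
 + *	traceobj_init(&trobj, "demo", 2);
 + *	... spawn threads running traceobj_enter(), traceobj_mark(),
 + *	traceobj_exit() ...
 + *	traceobj_join(&trobj);
 + *	traceobj_verify(&trobj, tseq, 2);
 + *	traceobj_destroy(&trobj);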
+ */ + +#ifndef _COPPERPLATE_TRACEOBJ_H +#define _COPPERPLATE_TRACEOBJ_H + +#include <pthread.h> + +struct threadobj; + +struct traceobj { + pthread_mutex_t lock; + pthread_cond_t join; + const char *label; + int nr_marks; + int cur_mark; + struct tracemark *marks; + int nr_threads; +}; + +#define traceobj_assert(trobj, cond) \ +do { \ + int __ret = (cond); \ + if (!__ret) \ + __traceobj_assert_failed(trobj, __FILE__, __LINE__, __STRING(cond)); \ +} while(0) + +#define traceobj_check(__trobj, __status, __expected) \ +do { \ + if (__status != __expected) \ + __traceobj_check_abort(__trobj, __FILE__, __LINE__, \ + __status, __expected); \ +} while(0) + +#define traceobj_check_warn(__trobj, __status, __expected) \ +do { \ + if (__status != __expected) \ + __traceobj_check_warn(__trobj, __FILE__, __LINE__, \ + __status, __expected); \ +} while(0) + +#define traceobj_mark(trobj, mark) \ + __traceobj_mark(trobj, __FILE__, __LINE__, mark) + +#ifdef __cplusplus +extern "C" { +#endif + +int traceobj_init(struct traceobj *trobj, + const char *label, int nr_marks); + +void traceobj_verify(struct traceobj *trobj, int tseq[], int nr_seq); + +void traceobj_destroy(struct traceobj *trobj); + +void traceobj_enter(struct traceobj *trobj); + +void traceobj_exit(struct traceobj *trobj); + +void traceobj_unwind(struct traceobj *trobj); + +void traceobj_join(struct traceobj *trobj); + +void __traceobj_assert_failed(struct traceobj *trobj, + const char *file, int line, const char *cond); + +void __traceobj_check_abort(struct traceobj *trobj, + const char *file, int line, + int received, int expected); + +void __traceobj_check_warn(struct traceobj *trobj, + const char *file, int line, + int received, int expected); + +void __traceobj_mark(struct traceobj *trobj, + const char *file, int line, int mark); + +#ifdef __cplusplus +} +#endif + +#endif /* _COPPERPLATE_TRACEOBJ_H */ diff --git a/kernel/xenomai-v3.2.4/include/copperplate/tunables.h b/kernel/xenomai-v3.2.4/include/copperplate/tunables.h new file mode 100644 index 0000000..7b45c51 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/copperplate/tunables.h @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
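 + *
 + * Each tunable below pairs a setter generated by
 + * define_config_tunable() with a getter generated by
 + * read_config_tunable(), both acting on the global
 + * __copperplate_setup_data block; the generator macros themselves
 + * live in boilerplate/tunables.h.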
+ */ +#ifndef _COPPERPLATE_TUNABLES_H +#define _COPPERPLATE_TUNABLES_H + +#include <boilerplate/tunables.h> + +struct copperplate_setup_data { + const char *session_root; + const char *session_label; + const char *registry_root; + int no_registry; + int shared_registry; + size_t mem_pool; + gid_t session_gid; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +extern struct copperplate_setup_data __copperplate_setup_data; + +define_config_tunable(session_label, const char *, label); + +static inline read_config_tunable(session_label, const char *) +{ + return __copperplate_setup_data.session_label; +} + +static inline define_config_tunable(registry_root, const char *, root) +{ + __copperplate_setup_data.registry_root = root; +} + +static inline read_config_tunable(registry_root, const char *) +{ + return __copperplate_setup_data.registry_root; +} + +static inline define_config_tunable(no_registry, int, noreg) +{ + __copperplate_setup_data.no_registry = noreg; +} + +static inline read_config_tunable(no_registry, int) +{ + return __copperplate_setup_data.no_registry; +} + +static inline define_config_tunable(shared_registry, int, shared) +{ + __copperplate_setup_data.shared_registry = shared; +} + +static inline read_config_tunable(shared_registry, int) +{ + return __copperplate_setup_data.shared_registry; +} + +static inline define_config_tunable(mem_pool_size, size_t, size) +{ + __copperplate_setup_data.mem_pool = size; +} + +static inline read_config_tunable(mem_pool_size, size_t) +{ + return __copperplate_setup_data.mem_pool; +} + +static inline define_config_tunable(session_gid, gid_t, gid) +{ + __copperplate_setup_data.session_gid = gid; +} + +static inline read_config_tunable(session_gid, gid_t) +{ + return __copperplate_setup_data.session_gid; +} + +#ifdef __cplusplus +} +#endif + +#endif /* !_COPPERPLATE_TUNABLES_H */ diff --git a/kernel/xenomai-v3.2.4/include/mercury/Makefile.am b/kernel/xenomai-v3.2.4/include/mercury/Makefile.am new file mode 100644 index 0000000..f1af838 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/mercury/Makefile.am @@ -0,0 +1,4 @@ + +SUBDIRS = boilerplate + +noinst_HEADERS = pthread.h diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/Makefile.am b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/Makefile.am new file mode 100644 index 0000000..63cf2f6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/Makefile.am @@ -0,0 +1,8 @@ +includesubdir = $(includedir)/mercury/boilerplate + +includesub_HEADERS = \ + sched.h \ + limits.h \ + signal.h \ + trace.h \ + wrappers.h diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/limits.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/limits.h new file mode 100644 index 0000000..ef62334 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/limits.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _MERCURY_BOILERPLATE_LIMITS_H +#define _MERCURY_BOILERPLATE_LIMITS_H + +#define XNOBJECT_NAME_LEN 32 + +#endif /* _MERCURY_BOILERPLATE_LIMITS_H */ diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/sched.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/sched.h new file mode 100644 index 0000000..30dde60 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/sched.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _MERCURY_BOILERPLATE_SCHED_H +#define _MERCURY_BOILERPLATE_SCHED_H + +#include <time.h> +#include <sched.h> + +struct __sched_rr_param { + struct timespec __sched_rr_quantum; +}; + +struct sched_param_ex { + int sched_priority; + union { + struct __sched_rr_param rr; + } sched_u; +}; + +#define sched_rr_quantum sched_u.rr.__sched_rr_quantum + +#endif /* _MERCURY_BOILERPLATE_SCHED_H */ diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/signal.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/signal.h new file mode 100644 index 0000000..0405481 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/signal.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _MERCURY_BOILERPLATE_SIGNAL_H +#define _MERCURY_BOILERPLATE_SIGNAL_H + +#include <signal.h> + +#ifndef sigev_notify_thread_id +#define sigev_notify_thread_id _sigev_un._tid +#endif + +/* Generates reserved signal numbers for Boilerplate/Copperplate. 
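 * For instance, __SIGRSVD(0) maps to SIGRTMIN + 8, and __SIGPRIV(0)
 * below maps to SIGRTMIN + 16, leaving SIGRTMIN..SIGRTMIN+7 untouched
 * for other uses.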
*/ +#define __SIGRSVD(n) (SIGRTMIN + 8 + (n)) + +#define SIGSUSP __SIGRSVD(0) /* Suspend request */ +#define SIGRESM __SIGRSVD(1) /* Resume request */ +#define SIGRELS __SIGRSVD(2) /* Syscall abort */ +#define SIGRRB __SIGRSVD(3) /* Round-robin event */ +#define SIGAGENT __SIGRSVD(4) /* Request to remote agent */ +#define SIGPERIOD __SIGRSVD(5) /* Periodic signal */ + +/* Generates private signal numbers for clients, up to SIGRTMAX. */ +#define __SIGPRIV(n) __SIGRSVD(8 + (n)) + +#define SIGSAFE_LOCK_ENTRY(__safelock) \ + do { \ + sigset_t __safeset, __oldsafeset; \ + sigemptyset(&__safeset); \ + sigaddset(&__safeset, SIGSUSP); \ + pthread_sigmask(SIG_BLOCK, &__safeset, &__oldsafeset); \ + push_cleanup_lock(__safelock); \ + write_lock(__safelock); + +#define SIGSAFE_LOCK_EXIT(__safelock) \ + write_unlock(__safelock); \ + pop_cleanup_lock(&__safelock); \ + pthread_sigmask(SIG_SETMASK, &__oldsafeset, NULL); \ + } while (0) + +#endif /* _MERCURY_BOILERPLATE_SIGNAL_H */ diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/trace.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/trace.h new file mode 100644 index 0000000..787b088 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/trace.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _MERCURY_BOILERPLATE_TRACE_H +#define _MERCURY_BOILERPLATE_TRACE_H + +#include <errno.h> + +static inline int xntrace_max_begin(unsigned long v) +{ + return -ENOSYS; +} + +static inline int xntrace_max_end(unsigned long v) +{ + return -ENOSYS; +} + +static inline int xntrace_max_reset(void) +{ + return -ENOSYS; +} + +static inline int xntrace_user_start(void) +{ + return -ENOSYS; +} + +static inline int xntrace_user_stop(unsigned long v) +{ + return -ENOSYS; +} + +static inline int xntrace_user_freeze(unsigned long v, int once) +{ + return -ENOSYS; +} + +static inline void xntrace_latpeak_freeze(int delay) +{ +} + +static inline int xntrace_special(unsigned char id, unsigned long v) +{ + return -ENOSYS; +} + +static inline int xntrace_special_u64(unsigned char id, unsigned long long v) +{ + return -ENOSYS; +} + +#endif /* _MERCURY_BOILERPLATE_TRACE_H */ diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/wrappers.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/wrappers.h new file mode 100644 index 0000000..a62ccb7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/wrappers.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _MERCURY_BOILERPLATE_WRAPPERS_H +#define _MERCURY_BOILERPLATE_WRAPPERS_H + +#define __RT(call) call +#define __STD(call) call + +#endif /* _MERCURY_BOILERPLATE_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/include/mercury/pthread.h b/kernel/xenomai-v3.2.4/include/mercury/pthread.h new file mode 100644 index 0000000..dd94305 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/mercury/pthread.h @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#pragma GCC system_header +#include_next <pthread.h> + +#include <boilerplate/libc.h> diff --git a/kernel/xenomai-v3.2.4/include/psos/Makefile.am b/kernel/xenomai-v3.2.4/include/psos/Makefile.am new file mode 100644 index 0000000..ee7c5c8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/psos/Makefile.am @@ -0,0 +1,5 @@ +includesubdir = $(includedir)/psos + +includesub_HEADERS = \ + psos.h \ + tunables.h diff --git a/kernel/xenomai-v3.2.4/include/psos/psos.h b/kernel/xenomai-v3.2.4/include/psos/psos.h new file mode 100644 index 0000000..1554244 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/psos/psos.h @@ -0,0 +1,376 @@ +/* + * Copyright (C) 2001-2010 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a pSOS-like API built upon the copperplate library. + * + * pSOS and pSOS+ are registered trademarks of Wind River Systems, Inc. 
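 + *
 + * A minimal usage sketch of the queue services declared below
 + * (hypothetical object name and flags, error handling elided;
 + * a timeout of 0 with Q_WAIT means an unbounded wait in the pSOS
 + * convention):
 + *
 + *	u_long qid;
 + *	u_long msg[4] = { 1, 2, 3, 4 };
 + *
 + *	q_create("MYQ", 16, Q_FIFO|Q_LIMIT, &qid);
 + *	q_send(qid, msg);
 + *	q_receive(qid, Q_WAIT, 0, msg);
 + *	q_delete(qid);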
+ */ + +#ifndef _XENOMAI_PSOS_PSOS_H +#define _XENOMAI_PSOS_PSOS_H + +#include <sys/types.h> + +#ifndef SUCCESS +#define SUCCESS 0 +#endif + +#define T_NOPREEMPT 0x0001 +#define T_PREEMPT 0x0000 +#define T_TSLICE 0x0002 +#define T_NOTSLICE 0x0000 +#define T_NOASR 0x0004 +#define T_ASR 0x0000 +#define T_SUPV 0x2000 +#define T_USER 0x0000 +#define T_LEVELMASK0 0x0000 +#define T_LEVELMASK1 0x0100 +#define T_LEVELMASK2 0x0200 +#define T_LEVELMASK3 0x0300 +#define T_LEVELMASK4 0x0400 +#define T_LEVELMASK5 0x0500 +#define T_LEVELMASK6 0x0600 +#define T_LEVELMASK7 0x0700 +#define T_NOISR 0x0700 +#define T_ISR 0x0000 +#define T_GLOBAL 0x0001 +#define T_LOCAL 0x0000 +#define T_NOFPU 0x0000 +#define T_FPU 0x0002 + +#define RN_PRIOR 0x0002 +#define RN_FIFO 0x0000 +#define RN_DEL 0x0004 +#define RN_NODEL 0x0000 +#define RN_NOWAIT 0x0001 +#define RN_WAIT 0x0000 + +#define SM_GLOBAL 0x0001 +#define SM_LOCAL 0x0000 +#define SM_PRIOR 0x0002 +#define SM_FIFO 0x0000 +#define SM_NOWAIT 0x0001 +#define SM_WAIT 0x0000 + +#define EV_NOWAIT 0x0001 +#define EV_WAIT 0x0000 +#define EV_ANY 0x0002 +#define EV_ALL 0x0000 + +#define K_GLOBAL 0x0001 +#define K_LOCAL 0x0000 + +#define PT_GLOBAL 0x0001 +#define PT_LOCAL 0x0000 +#define PT_DEL 0x0004 +#define PT_NODEL 0x0000 + +#define Q_GLOBAL 0x0001 +#define Q_LOCAL 0x0000 +#define Q_PRIOR 0x0002 +#define Q_FIFO 0x0000 +#define Q_LIMIT 0x0004 +#define Q_NOLIMIT 0x0000 +#define Q_PRIBUF 0x0008 +#define Q_SYSBUF 0x0000 +#define Q_NOWAIT 0x0001 +#define Q_WAIT 0x0000 + +#define ERR_TIMEOUT 0x01 +#define ERR_SSFN 0x03 +#define ERR_NODENO 0x04 +#define ERR_OBJDEL 0x05 +#define ERR_OBJID 0x06 +#define ERR_OBJTYPE 0x07 +#define ERR_OBJTFULL 0x08 +#define ERR_OBJNF 0x09 + +#define ERR_NOTCB 0x0E +#define ERR_NOSTK 0x0F +#define ERR_TINYSTK 0x10 +#define ERR_PRIOR 0x11 +#define ERR_ACTIVE 0x12 +#define ERR_NACTIVE 0x13 +#define ERR_SUSP 0x14 +#define ERR_NOTSUSP 0x15 +#define ERR_SETPRI 0x16 +#define ERR_REGNUM 0x17 + +#define ERR_RNADDR 0x1B +#define ERR_UNITSIZE 0x1C +#define ERR_TINYUNIT 0x1D +#define ERR_TINYRN 0x1E +#define ERR_SEGINUSE 0x1F +#define ERR_TOOBIG 0x21 +#define ERR_NOSEG 0x22 +#define ERR_NOTINRN 0x23 +#define ERR_SEGADDR 0x24 +#define ERR_SEGFREE 0x25 +#define ERR_RNKILLD 0x26 +#define ERR_TATRNDEL 0x27 + +#define ERR_PTADDR 0x28 +#define ERR_BUFSIZE 0x29 +#define ERR_TINYPT 0x2A +#define ERR_BUFINUSE 0x2B +#define ERR_NOBUF 0x2C +#define ERR_BUFADDR 0x2D +#define ERR_BUFFREE 0x2F + +#define ERR_MSGSIZ 0x31 +#define ERR_BUFSIZ 0x32 +#define ERR_NOQCB 0x33 +#define ERR_NOMGB 0x34 +#define ERR_QFULL 0x35 +#define ERR_QKILLD 0x36 +#define ERR_NOMSG 0x37 +#define ERR_TATQDEL 0x38 +#define ERR_MATQDEL 0x39 +#define ERR_VARQ 0x3A +#define ERR_NOTVARQ 0x3B + +#define ERR_NOEVS 0x3C +#define ERR_NOTINASR 0x3E +#define ERR_NOASR 0x3F + +#define ERR_NOSCB 0x41 +#define ERR_NOSEM 0x42 +#define ERR_SKILLD 0x43 +#define ERR_TATSDEL 0x44 + +#define ERR_NOTIME 0x47 +#define ERR_ILLDATE 0x48 +#define ERR_ILLTIME 0x49 +#define ERR_ILLTICKS 0x4A +#define ERR_NOTIMERS 0x4B +#define ERR_BADTMID 0x4C +#define ERR_TMNOTSET 0x4D +#define ERR_TOOLATE 0x4E + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +u_long ev_receive(u_long events, + u_long flags, + u_long timeout, + u_long *events_r); + +u_long ev_send(u_long tid, + u_long events); + +u_long pt_create(const char *name, + void *paddr, + void *laddr, + u_long psize, + u_long bsize, + u_long flags, + u_long *tid_r, + u_long *nbuf_r); + +u_long pt_delete(u_long tid); + +u_long pt_getbuf(u_long tid, + void 
**bufaddr); + +u_long pt_ident(const char *name, + u_long node, + u_long *ptid_r); + +u_long pt_retbuf(u_long tid, + void *buf); + +u_long q_broadcast(u_long qid, + u_long msgbuf[4], + u_long *count_r); + +u_long q_create(const char *name, + u_long count, + u_long flags, + u_long *qid_r); + +u_long q_delete(u_long qid); + +u_long q_ident(const char *name, + u_long node, + u_long *qid_r); + +u_long q_receive(u_long qid, + u_long flags, + u_long timeout, + u_long msgbuf[4]); + +u_long q_send(u_long qid, + u_long msgbuf[4]); + +u_long q_urgent(u_long qid, + u_long msgbuf[4]); + +u_long q_vcreate(const char *name, + u_long flags, + u_long count, + u_long maxlen, + u_long *qid_r); + +u_long q_vdelete(u_long qid); + +u_long q_vident(const char *name, + u_long node, + u_long *qid_r); + +u_long q_vreceive(u_long qid, + u_long flags, + u_long timeout, + void *msgbuf, + u_long msglen, + u_long *msglen_r); + +u_long q_vsend(u_long qid, + void *msgbuf, + u_long msglen); + +u_long q_vurgent(u_long qid, + void *msgbuf, + u_long msglen); + +u_long q_vbroadcast(u_long qid, + void *msgbuf, + u_long msglen, + u_long *count_r); + +u_long rn_create(const char *name, + void *saddr, + u_long rnsize, + u_long usize, + u_long flags, + u_long *rnid_r, + u_long *asize_r); + +u_long rn_delete(u_long rnid); + +u_long rn_getseg(u_long rnid, + u_long size, + u_long flags, + u_long timeout, + void **segaddr); + +u_long rn_ident(const char *name, + u_long *rnid_r); + +u_long rn_retseg(u_long rnid, + void *segaddr); + +u_long sm_create(const char *name, + u_long count, + u_long flags, + u_long *smid_r); + +u_long sm_delete(u_long smid); + +u_long sm_ident(const char *name, + u_long node, + u_long *smid_r); + +u_long sm_p(u_long smid, + u_long flags, + u_long timeout); + +u_long sm_v(u_long smid); + +u_long t_create(const char *name, + u_long prio, + u_long sstack, + u_long ustack, + u_long flags, + u_long *tid_r); + +u_long t_delete(u_long tid); + +u_long t_getreg(u_long tid, + u_long regnum, + u_long *regvalue_r); + +u_long t_ident(const char *name, + u_long node, + u_long *tid_r); + +u_long t_mode(u_long mask, + u_long newmask, + u_long *oldmode_r); + +u_long t_resume(u_long tid); + +u_long t_setpri(u_long tid, + u_long newprio, + u_long *oldprio_r); + +u_long t_setreg(u_long tid, + u_long regnum, + u_long regvalue); + +u_long t_start(u_long tid, + u_long mode, + void (*entry)(u_long a0, + u_long a1, + u_long a2, + u_long a3), + u_long args[]); + +u_long t_suspend(u_long tid); + +u_long tm_cancel(u_long tmid); + +u_long tm_evafter(u_long ticks, + u_long events, + u_long *tmid_r); + +u_long tm_evevery(u_long ticks, + u_long events, + u_long *tmid_r); + +u_long tm_evwhen(u_long date, + u_long time, + u_long ticks, + u_long events, + u_long *tmid_r); + +u_long tm_get(u_long *date_r, + u_long *time_r, + u_long *ticks_r); + +u_long tm_set(u_long date, + u_long time, + u_long ticks); + +u_long tm_getm(unsigned long long *ns); + +u_long tm_wkafter(u_long ticks); + +u_long tm_wkwhen(u_long date, + u_long time, + u_long ticks); + +int psos_task_normalize_priority(u_long psos_prio); + +u_long psos_task_denormalize_priority(int core_prio); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* !_XENOMAI_PSOS_PSOS_H */ diff --git a/kernel/xenomai-v3.2.4/include/psos/tunables.h b/kernel/xenomai-v3.2.4/include/psos/tunables.h new file mode 100644 index 0000000..2dbd3f0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/psos/tunables.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2001-2010 Philippe Gerum <rpm@xenomai.org>. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a pSOS-like API built upon the copperplate library. + * + * pSOS and pSOS+ are registered trademarks of Wind River Systems, Inc. + */ + +#ifndef _XENOMAI_PSOS_TUNABLES_H +#define _XENOMAI_PSOS_TUNABLES_H + +#include <boilerplate/tunables.h> + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +extern int psos_long_names; + +static inline define_config_tunable(long_names, int, on) +{ + psos_long_names = on; +} + +static inline read_config_tunable(long_names, int) +{ + return psos_long_names; +} + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* !_XENOMAI_PSOS_TUNABLES_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/Makefile.am b/kernel/xenomai-v3.2.4/include/rtdm/Makefile.am new file mode 100644 index 0000000..989c46f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/Makefile.am @@ -0,0 +1,20 @@ +includesubdir = $(includedir)/rtdm + +includesub_HEADERS = rtdm.h + +if XENO_COBALT +includesub_HEADERS += \ + analogy.h \ + autotune.h \ + can.h \ + gpio.h \ + gpiopwm.h \ + ipc.h \ + net.h \ + serial.h \ + spi.h \ + testing.h \ + udd.h +endif + +SUBDIRS = uapi diff --git a/kernel/xenomai-v3.2.4/include/rtdm/analogy.h b/kernel/xenomai-v3.2.4/include/rtdm/analogy.h new file mode 100644 index 0000000..066d05a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/analogy.h @@ -0,0 +1,264 @@ +/** + * @file + * Analogy for Linux, library facilities + * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _RTDM_ANALOGY_H +#define _RTDM_ANALOGY_H + +#include <stdio.h> +#include <sys/types.h> +#include <rtdm/uapi/analogy.h> + +#include "boilerplate/list.h" + +/*! + @addtogroup analogy_lib_descriptor + @{ + */ + +/*! 
+ * @anchor ANALOGY_xxx_DESC @name ANALOGY_xxx_DESC + * @brief Constants used as argument so as to define the description + * depth to recover + * @{ + */ + +/** + * BSC stands for basic descriptor (device data) + */ +#define A4L_BSC_DESC 0x0 + +/** + * CPLX stands for complex descriptor (subdevice + channel + range + * data) + */ +#define A4L_CPLX_DESC 0x1 + + /*! @} ANALOGY_xxx_DESC */ + +/* --- Descriptor structure --- */ + +/*! + * @brief Structure containing device-information useful to users + * @see a4l_get_desc() + */ + +struct a4l_descriptor { + char board_name[A4L_NAMELEN]; + /**< Board name. */ + char driver_name[A4L_NAMELEN]; + /**< Driver name. */ + int nb_subd; + /**< Subdevices count. */ + int idx_read_subd; + /**< Input subdevice index. */ + int idx_write_subd; + /**< Output subdevice index. */ + int fd; + /**< File descriptor. */ + unsigned int magic; + /**< Opaque field. */ + int sbsize; + /**< Data buffer size. */ + void *sbdata; + /**< Data buffer pointer. */ +}; +typedef struct a4l_descriptor a4l_desc_t; + +/*! @} descriptor_sys */ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef DOXYGEN_CPP + +/* --- Level 0 API (not supposed to be used) --- */ + +int a4l_sys_open(const char *fname); + +int a4l_sys_close(int fd); + +int a4l_sys_read(int fd, void *buf, size_t nbyte); + +int a4l_sys_write(int fd, void *buf, size_t nbyte); + +int a4l_sys_attach(int fd, a4l_lnkdesc_t *arg); + +int a4l_sys_detach(int fd); + +int a4l_sys_bufcfg(int fd, unsigned int idx_subd, unsigned long size); + +int a4l_sys_desc(int fd, a4l_desc_t *dsc, int pass); + +int a4l_sys_devinfo(int fd, a4l_dvinfo_t *info); + +int a4l_sys_subdinfo(int fd, a4l_sbinfo_t *info); + +int a4l_sys_nbchaninfo(int fd, unsigned int idx_subd, unsigned int *nb); + +int a4l_sys_chaninfo(int fd, + unsigned int idx_subd, a4l_chinfo_t *info); + +int a4l_sys_nbrnginfo(int fd, + unsigned int idx_subd, + unsigned int idx_chan, unsigned int *nb); + +int a4l_sys_rnginfo(int fd, + unsigned int idx_subd, + unsigned int idx_chan, a4l_rnginfo_t *info); + +/* --- Level 1 API (supposed to be used) --- */ + +int a4l_get_desc(int fd, a4l_desc_t *dsc, int pass); + +int a4l_open(a4l_desc_t *dsc, const char *fname); + +int a4l_close(a4l_desc_t *dsc); + +int a4l_fill_desc(a4l_desc_t *dsc); + +int a4l_get_subdinfo(a4l_desc_t *dsc, + unsigned int subd, a4l_sbinfo_t **info); + +int a4l_get_chinfo(a4l_desc_t *dsc, + unsigned int subd, + unsigned int chan, a4l_chinfo_t **info); + +#define a4l_get_chan_max(x) (1ULL << (x)->nb_bits) + +#define a4l_is_chan_global(x) ((x)->chan_flags & A4L_CHAN_GLOBAL) + +int a4l_get_rnginfo(a4l_desc_t *dsc, + unsigned int subd, + unsigned int chan, + unsigned int rng, a4l_rnginfo_t **info); + +#define a4l_is_rng_global(x) ((x)->flags & A4L_RNG_GLOBAL) + +int a4l_snd_command(a4l_desc_t *dsc, struct a4l_cmd_desc *cmd); + +int a4l_snd_cancel(a4l_desc_t *dsc, unsigned int idx_subd); + +int a4l_set_bufsize(a4l_desc_t *dsc, + unsigned int idx_subd, unsigned long size); + +int a4l_get_bufsize(a4l_desc_t *dsc, + unsigned int idx_subd, unsigned long *size); + +int a4l_set_wakesize(a4l_desc_t *dsc, unsigned long size); + +int a4l_get_wakesize(a4l_desc_t *dsc, unsigned long *size); + +int a4l_mark_bufrw(a4l_desc_t *dsc, + unsigned int idx_subd, + unsigned long cur, unsigned long *newp); + +int a4l_poll(a4l_desc_t *dsc, + unsigned int idx_subd, unsigned long ms_timeout); + +int a4l_mmap(a4l_desc_t *dsc, + unsigned int idx_subd, unsigned long size, void **ptr); + +int a4l_async_read(a4l_desc_t *dsc, + void *buf, size_t nbyte, 
unsigned long ms_timeout); + +int a4l_async_write(a4l_desc_t *dsc, + void *buf, size_t nbyte, unsigned long ms_timeout); + +int a4l_snd_insnlist(a4l_desc_t *dsc, a4l_insnlst_t *arg); + +int a4l_snd_insn(a4l_desc_t *dsc, a4l_insn_t *arg); + +/* --- Level 2 API (supposed to be used) --- */ + +int a4l_sync_write(a4l_desc_t *dsc, + unsigned int idx_subd, + unsigned int chan_desc, + unsigned int delay, void *buf, size_t nbyte); + +int a4l_sync_read(a4l_desc_t *dsc, + unsigned int idx_subd, + unsigned int chan_desc, + unsigned int delay, void *buf, size_t nbyte); + +int a4l_config_subd(a4l_desc_t *dsc, + unsigned int idx_subd, unsigned int type, ...); + +int a4l_sync_dio(a4l_desc_t *dsc, + unsigned int idx_subd, void *mask, void *buf); + +int a4l_sizeof_chan(a4l_chinfo_t *chan); + +int a4l_sizeof_subd(a4l_sbinfo_t *subd); + +int a4l_find_range(a4l_desc_t *dsc, + unsigned int idx_subd, + unsigned int idx_chan, + unsigned long unit, + double min, double max, a4l_rnginfo_t **rng); + +int a4l_rawtoul(a4l_chinfo_t *chan, unsigned long *dst, void *src, int cnt); + +int a4l_rawtof(a4l_chinfo_t *chan, + a4l_rnginfo_t *rng, float *dst, void *src, int cnt); + +int a4l_rawtod(a4l_chinfo_t *chan, + a4l_rnginfo_t *rng, double *dst, void *src, int cnt); + +int a4l_ultoraw(a4l_chinfo_t *chan, void *dst, unsigned long *src, int cnt); + +int a4l_ftoraw(a4l_chinfo_t *chan, + a4l_rnginfo_t *rng, void *dst, float *src, int cnt); + +int a4l_dtoraw(a4l_chinfo_t *chan, + a4l_rnginfo_t *rng, void *dst, double *src, int cnt); + +int a4l_read_calibration_file(char *name, struct a4l_calibration_data *data); + +int a4l_get_softcal_converter(struct a4l_polynomial *converter, + int subd, int chan, int range, + struct a4l_calibration_data *data ); + +int a4l_rawtodcal(a4l_chinfo_t *chan, double *dst, void *src, + int cnt, struct a4l_polynomial *converter); +int a4l_dcaltoraw(a4l_chinfo_t * chan, void *dst, double *src, int cnt, + struct a4l_polynomial *converter); + +int a4l_math_polyfit(unsigned order, double *r,double orig, + const unsigned dim, double *x, double *y); + +void a4l_math_mean(double *pmean, double *val, unsigned nr); + +void a4l_math_stddev(double *pstddev, + double mean, double *val, unsigned nr); + +void a4l_math_stddev_of_mean(double *pstddevm, + double mean, double *val, unsigned nr); + + + + +#endif /* !DOXYGEN_CPP */ + +#ifdef __cplusplus +} +#endif + +#endif /* !_RTDM_ANALOGY_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/autotune.h b/kernel/xenomai-v3.2.4/include/rtdm/autotune.h new file mode 100644 index 0000000..6a73cf7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/autotune.h @@ -0,0 +1,26 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
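 + *
 + * This header only chains to the generic RTDM and autotune UAPI
 + * definitions; the actual request codes live in rtdm/uapi/autotune.h.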
+ */ +#ifndef _RTDM_AUTOTUNE_H +#define _RTDM_AUTOTUNE_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/autotune.h> + +#endif /* !_RTDM_AUTOTUNE_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/can.h b/kernel/xenomai-v3.2.4/include/rtdm/can.h new file mode 100644 index 0000000..837692b --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/can.h @@ -0,0 +1,239 @@ +/** + * @file + * @note Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * @note Copyright (C) 2005, 2006 Sebastian Smolorz + * <Sebastian.Smolorz@stud.uni-hannover.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _RTDM_CAN_H +#define _RTDM_CAN_H + +#include <net/if.h> +#include <rtdm/rtdm.h> +#include <rtdm/uapi/can.h> + +/** + * @ingroup rtdm_profiles + * @defgroup rtdm_can CAN Devices + * + * This is the common interface a RTDM-compliant CAN device has to provide. + * Feel free to report bugs and comments on this profile to the "Socketcan" + * mailing list (Socketcan-core@lists.berlios.de) or directly to the authors + * (wg@grandegger.com or Sebastian.Smolorz@stud.uni-hannover.de). + * + * @b Profile @b Revision: 2 + * @n + * @n + * @par Device Characteristics + * @n + * @ref rtdm_driver_flags "Device Flags": @c RTDM_PROTOCOL_DEVICE @n + * @n + * @ref rtdm_driver.protocol_family "Protocol Family": @c PF_CAN @n + * @n + * @ref rtdm_driver.socket_type "Socket Type": @c SOCK_RAW @n + * @n + * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_CAN @n + * @n + * + * @par Supported Operations + * @n + * @b Socket @n + * @coretags{secondary-only} + * @n + * Specific return values: + * - -EPROTONOSUPPORT (Protocol is not supported by the driver. + * See @ref CAN_PROTO "CAN protocols" + * for possible protocols.) + * . + * @n + * @n + * @b Close @n + * Blocking calls to any of the @ref Send or @ref Recv "Receive" functions + * will be unblocked when the socket is closed and return with an error. @n + * @n + * @coretags{secondary-only} + * @n + * Specific return values: none @n + * @n + * @n + * @b IOCTL @n + * @coretags{task-unrestricted}. see @ref CANIOCTLs "below" + * Specific return values: see @ref CANIOCTLs "below" @n + * @n + * @n + * @anchor Bind + * @b Bind @n + * Binds a socket to one or all CAN devices (see struct sockaddr_can). If + * a filter list has been defined with setsockopt (see @ref Sockopts), + * it will be used upon reception of CAN frames to decide whether the + * bound socket will receive a frame. If no filter has been defined, the + * socket will receive @b all CAN frames on the specified interface(s). @n + * @n + * Binding to special interface index @c 0 will make the socket receive + * CAN frames from all CAN interfaces. 
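 * @n
 * @n
 * For illustration, a minimal create-and-bind sequence might read as
 * follows (a sketch: the interface name "rtcan0" is an assumption and
 * error handling is trimmed, see @ref CANutils for complete programs):
 * @code
 * #include <string.h>
 * #include <unistd.h>
 * #include <sys/ioctl.h>
 * #include <sys/socket.h>
 * #include <rtdm/can.h>
 *
 * int open_rtcan(const char *ifname)	// e.g. "rtcan0"
 * {
 *	struct sockaddr_can addr;
 *	struct can_ifreq ifr;
 *	int s;
 *
 *	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	if (s < 0)
 *		return s;
 *
 *	// Resolve the interface index; binding to index 0 instead
 *	// would mean "receive from all CAN interfaces".
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_ifrn.ifrn_name, ifname, IFNAMSIZ);
 *	ioctl(s, SIOCGIFINDEX, &ifr);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.can_family = AF_CAN;
 *	addr.can_ifindex = ifr.ifr_ifru.ifru_ivalue;
 *
 *	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
 *		close(s);
 *		return -1;
 *	}
 *	return s;
 * }
 * @endcode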
@n + * @n + * Binding to an interface index is also relevant for the @ref Send functions + * because they will transmit a message over the interface the socket is + * bound to when no socket address is given to them. @n + * @n + * @n + * @coretags{secondary-only} + * @n + * Specific return values: + * - -EFAULT (It was not possible to access user space memory area at the + * specified address.) + * - -ENOMEM (Not enough memory to fulfill the operation) + * - -EINVAL (Invalid address family, or invalid length of address structure) + * - -ENODEV (Invalid CAN interface index) + * - -ENOSPC (No enough space for filter list) + * - -EBADF (Socket is about to be closed) + * - -EAGAIN (Too many receivers. Old binding (if any) is still active. + * Close some sockets and try again.) + * . + * @n + * @n + * @anchor Sockopts + * <b>Setsockopt, Getsockopt</b>@n + * These functions allow to set and get various socket options. Currently, + * only CAN raw sockets are supported. @n + * @n + * Supported Levels and Options: + * @n + * - Level @b SOL_CAN_RAW : CAN RAW protocol (see @ref CAN_RAW) + * - Option @ref CAN_RAW_FILTER : CAN filter list + * - Option @ref CAN_RAW_ERR_FILTER : CAN error mask + * - Option @ref CAN_RAW_LOOPBACK : CAN TX loopback to local sockets + * . + * . + * @n + * @coretags{task-unrestricted} + * Specific return values: see links to options above. @n + * @n + * @n + * @anchor Recv + * <b>Recv, Recvfrom, Recvmsg</b> @n + * These functions receive CAN messages from a socket. Only one + * message per call can be received, so only one buffer with the correct length + * must be passed. For @c SOCK_RAW, this is the size of struct can_frame. @n + * @n + * Unlike a call to one of the @ref Send functions, a Recv function will not + * return with an error if an interface is down (due to bus-off or setting + * of stop mode) or in sleep mode. Moreover, in such a case there may still + * be some CAN messages in the socket buffer which could be read out + * successfully. @n + * @n + * It is possible to receive a high precision timestamp with every CAN + * message. The condition is a former instruction to the socket via + * @ref RTCAN_RTIOC_TAKE_TIMESTAMP. The timestamp will be copied to the + * @c msg_control buffer of <TT>struct msghdr</TT> if it points to a valid + * memory location with size of @ref nanosecs_abs_t. If this + * is a NULL pointer the timestamp will be discarded silently. @n + * @n + * @b Note: A @c msg_controllen of @c 0 upon completion of the function call + * indicates that no timestamp is available for that message. + * @n + * @n + * Supported Flags [in]: + * - MSG_DONTWAIT (By setting this flag the operation will only succeed if + * it would not block, i.e. if there is a message in the + * socket buffer. This flag takes precedence over a timeout + * specified by @ref RTCAN_RTIOC_RCV_TIMEOUT.) + * - MSG_PEEK (Receive a message but leave it in the socket buffer. The + * next receive operation will get that message again.) + * . + * @n + * Supported Flags [out]: none @n + * @n + * @coretags{mode-unrestricted} + * @n + * Specific return values: + * - Non-negative value (Indicating the successful reception of a CAN message. + * For @c SOCK_RAW, this is the size of struct can_frame regardless of + * the actual size of the payload.) + * - -EFAULT (It was not possible to access user space memory area at one + * of the specified addresses.) 
+ * - -EINVAL (Unsupported flag detected, or invalid length of socket address + * buffer, or invalid length of message control buffer) + * - -EMSGSIZE (Zero or more than one iovec buffer passed, or buffer too + * small) + * - -EAGAIN (No data available in non-blocking mode) + * - -EBADF (Socket was closed.) + * - -EINTR (Operation was interrupted explicitly or by signal.) + * - -ETIMEDOUT (Timeout) + * . + * @n + * @n + * @anchor Send + * <b>Send, Sendto, Sendmsg</b> @n + * These functions send out CAN messages. Only one message per call can + * be transmitted, so only one buffer with the correct length must be passed. + * For @c SOCK_RAW, this is the size of struct can_frame. @n + * @n + * The following only applies to @c SOCK_RAW: If a socket address of + * struct sockaddr_can is given, only @c can_ifindex is used. It is also + * possible to omit the socket address. Then the interface the socket is + * bound to will be used for sending messages. @n + * @n + * If an interface goes down (due to bus-off or setting of stop mode) all + * senders that were blocked on this interface will be woken up. @n + * @n + * @n + * Supported Flags: + * - MSG_DONTWAIT (By setting this flag the transmit operation will only + * succeed if it would not block. This flag takes precedence + * over a timeout specified by @ref RTCAN_RTIOC_SND_TIMEOUT.) + * . + * @coretags{mode-unrestricted} + * @n + * Specific return values: + * - Non-negative value equal to given buffer size (Indicating the + * successful completion of the function call. See also note.) + * - -EOPNOTSUPP (MSG_OOB flag is not supported.) + * - -EINVAL (Unsupported flag detected @e or: Invalid length of socket + * address @e or: Invalid address family @e or: Data length code + * of CAN frame not between 0 and 15 @e or: CAN standard frame has + * got an ID not between 0 and 2031) + * - -EMSGSIZE (Zero or more than one buffer passed or invalid size of buffer) + * - -EFAULT (It was not possible to access user space memory area at one + * of the specified addresses.) + * - -ENXIO (Invalid CAN interface index - @c 0 is not allowed here - or + * socket not bound or rather bound to all interfaces.) + * - -ENETDOWN (Controller is bus-off or in stopped state.) + * - -ECOMM (Controller is sleeping) + * - -EAGAIN (Cannot transmit without blocking but a non-blocking + * call was requested.) + * - -EINTR (Operation was interrupted explicitly or by signal) + * - -EBADF (Socket was closed.) + * - -ETIMEDOUT (Timeout) + * . + * @b Note: A successful completion of the function call does not implicate a + * successful transmission of the message. + * + * @{ + * + * @anchor CANutils @name CAN example and utility programs + * @{ + * @example rtcanconfig.c + * @example rtcansend.c + * @example rtcanrecv.c + * @example can-rtt.c + * @} + * + * @} + */ +#endif /* !_RTDM_CAN_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/gpio.h b/kernel/xenomai-v3.2.4/include/rtdm/gpio.h new file mode 100644 index 0000000..c61f229 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/gpio.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _RTDM_GPIO_H +#define _RTDM_GPIO_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/gpio.h> + +#endif /* !_RTDM_GPIO_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/gpiopwm.h b/kernel/xenomai-v3.2.4/include/rtdm/gpiopwm.h new file mode 100644 index 0000000..28cdfc5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/gpiopwm.h @@ -0,0 +1,24 @@ +/** + * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _RTDM_PWM_H +#define _RTDM_PWM_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/gpiopwm.h> + +#endif /* !_RTDM_PWM_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/ipc.h b/kernel/xenomai-v3.2.4/include/rtdm/ipc.h new file mode 100644 index 0000000..4f92d47 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/ipc.h @@ -0,0 +1,26 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _RTDM_IPC_H +#define _RTDM_IPC_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/ipc.h> + +#endif /* !_RTDM_IPC_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/net.h b/kernel/xenomai-v3.2.4/include/rtdm/net.h new file mode 100644 index 0000000..1a667bd --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/net.h @@ -0,0 +1,38 @@ +/*** + * + * rtdm/net.h + * + * RTnet - real-time networking subsystem + * Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * As a special exception to the GNU General Public license, the RTnet + * project allows you to use this header file in unmodified form to produce + * application programs executing in user-space which use RTnet services by + * normal system calls. The resulting executable will not be covered by the + * GNU General Public License merely as a result of this header file use. + * Instead, this header file use will be considered normal use of RTnet and + * not a "derived work" in the sense of the GNU General Public License. + * + */ + +#ifndef _RTDM_NET_H +#define _RTDM_NET_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/net.h> + +#endif /* !_RTDM_NET_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/rtdm.h b/kernel/xenomai-v3.2.4/include/rtdm/rtdm.h new file mode 100644 index 0000000..01f07fe --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/rtdm.h @@ -0,0 +1,59 @@ +/** + * @file + * @note Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de> + * @note Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _RTDM_RTDM_H +#define _RTDM_RTDM_H + +#include <linux/types.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <fcntl.h> +#include <stddef.h> +#include <stdint.h> +#include <errno.h> +#include <unistd.h> +#include <boilerplate/wrappers.h> + +/** + * @defgroup rtdm RTDM + * + * The Real-Time Driver Model (RTDM) provides a unified interface to + * both users and developers of real-time device + * drivers. Specifically, it addresses the constraints of mixed + * RT/non-RT systems like Xenomai. 
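 * For illustration, RTDM named devices are reached through standard
 * POSIX calls (a sketch; the device name "/dev/rtdm/foo" is a
 * placeholder, actual names depend on the driver profile):
 * @code
 * #include <fcntl.h>
 * #include <unistd.h>
 *
 * int sample_read(void *buf, size_t len)
 * {
 *	int fd, ret;
 *
 *	fd = open("/dev/rtdm/foo", O_RDWR);
 *	if (fd < 0)
 *		return fd;
 *	ret = read(fd, buf, len);	// served by the driver's handler
 *	close(fd);
 *	return ret;
 * }
 * @endcode
 *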
RTDM conforms to POSIX + * semantics (IEEE Std 1003.1) where available and applicable. + * + * @b API @b Revision: 8 + */ + +/** + * @ingroup rtdm + * @defgroup rtdm_user_api RTDM User API + * + * Application interface to RTDM services + * + * This is the upper interface of RTDM provided to application + * programs both in kernel and user space. Note that certain functions + * may not be implemented by every device. Refer to the @ref + * rtdm_profiles "Device Profiles" for precise information. + */ + +#include <rtdm/uapi/rtdm.h> + +#endif /* !_RTDM_RTDM_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/serial.h b/kernel/xenomai-v3.2.4/include/rtdm/serial.h new file mode 100644 index 0000000..232e96d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/serial.h @@ -0,0 +1,79 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, serial device profile header + * + * @note Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _RTDM_SERIAL_H +#define _RTDM_SERIAL_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/serial.h> + +/** + * @ingroup rtdm_profiles + * @defgroup rtdm_serial Serial Devices + * + * This is the common interface a RTDM-compliant serial device has to + * provide. Feel free to comment on this profile via the Xenomai + * mailing list <xenomai@xenomai.org> or directly to the author + * <jan.kiszka@web.de>. + * + * @b Profile @b Revision: 3 + * @n + * @n + * @par Device Characteristics + * @ref rtdm_driver_flags "Device Flags": @c RTDM_NAMED_DEVICE, @c RTDM_EXCLUSIVE @n + * @n + * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_SERIAL @n + * @n + * Device Name: @c "/dev/rtdm/rtser<N>", N >= 0 @n + * @n + * + * @par Supported Operations + * @b Open @n + * @coretags{secondary-only} + * Specific return values: none @n + * @n + * @b Close @n + * @coretags{secondary-only} + * Specific return values: none @n + * @n + * @b IOCTL @n + * @coretags{task-unrestricted}. See @ref SERIOCTLs "below" @n + * Specific return values: see @ref SERIOCTLs "below" @n + * @n + * @b Read @n + * @coretags{mode-unrestricted} + * Specific return values: + * - -ETIMEDOUT + * - -EINTR (interrupted explicitly or by signal) + * - -EAGAIN (no data available in non-blocking mode) + * - -EBADF (device has been closed while reading) + * - -EIO (hardware error or broken bit stream) + * . 
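 * @n
 * For illustration, a blocking read from a serial device might be coded
 * as follows (a sketch; device name and buffer handling are assumptions):
 * @code
 * #include <fcntl.h>
 * #include <unistd.h>
 *
 * int read_serial(unsigned char *buf, size_t len)
 * {
 *	int fd, ret;
 *
 *	fd = open("/dev/rtdm/rtser0", O_RDWR);
 *	if (fd < 0)
 *		return fd;
 *	// Blocks until data arrives, the configured timeout expires
 *	// (-ETIMEDOUT) or the call is interrupted (-EINTR).
 *	ret = read(fd, buf, len);
 *	close(fd);
 *	return ret;
 * }
 * @endcode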
+ * @n + * @b Write @n + * @coretags{mode-unrestricted} + * Specific return values: + * - -ETIMEDOUT + * - -EINTR (interrupted explicitly or by signal) + * - -EAGAIN (no data written in non-blocking mode) + * - -EBADF (device has been closed while writing) + */ + +#endif /* !_RTDM_SERIAL_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/spi.h b/kernel/xenomai-v3.2.4/include/rtdm/spi.h new file mode 100644 index 0000000..339a862 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/spi.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _RTDM_SPI_H +#define _RTDM_SPI_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/spi.h> + +#endif /* !_RTDM_SPI_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/testing.h b/kernel/xenomai-v3.2.4/include/rtdm/testing.h new file mode 100644 index 0000000..2eb8135 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/testing.h @@ -0,0 +1,59 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, testing device profile header + * + * @note Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _RTDM_TESTING_H +#define _RTDM_TESTING_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/testing.h> + +/** + * @ingroup rtdm_profiles + * @defgroup rtdm_testing Testing Devices + * + * This group of devices is intended to provide in-kernel testing + * results. Feel free to comment on this profile via the Xenomai + * mailing list <xenomai@xenomai.org> or directly to the author + * <jan.kiszka@web.de>. + * + * @b Profile @b Revision: 2 + * @n + * @n + * @par Device Characteristics + * @ref rtdm_driver_flags "Device Flags": @c RTDM_NAMED_DEVICE @n + * @n + * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_TESTING @n + * @n + * + * @par Supported Operations + * @b Open @n + * @coretags{secondary-only} + * Specific return values: none @n + * @n + * @b Close @n + * @coretags{secondary-only} + * Specific return values: none @n + * @n + * @b IOCTL @n + * @coretags{task-unrestricted}. 
See @ref TSTIOCTLs below @n + * Specific return values: see @ref TSTIOCTLs below @n + */ + +#endif /* _RTDM_TESTING_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/Makefile.am b/kernel/xenomai-v3.2.4/include/rtdm/uapi/Makefile.am new file mode 100644 index 0000000..726eb1c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/Makefile.am @@ -0,0 +1,18 @@ +includesubdir = $(includedir)/rtdm/uapi + +includesub_HEADERS = rtdm.h + +if XENO_COBALT +includesub_HEADERS += \ + analogy.h \ + autotune.h \ + can.h \ + gpio.h \ + gpiopwm.h \ + ipc.h \ + net.h \ + serial.h \ + spi.h \ + testing.h \ + udd.h +endif diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h new file mode 100644 index 0000000..2d53168 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h @@ -0,0 +1,743 @@ +/** + * @file + * Analogy for Linux, UAPI bits + * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _RTDM_UAPI_ANALOGY_H +#define _RTDM_UAPI_ANALOGY_H + +/* --- Misc precompilation constant --- */ +#define A4L_NAMELEN 20 + +#define A4L_INFINITE 0 +#define A4L_NONBLOCK (-1) + +/* --- Common Analogy types --- */ + +typedef unsigned short sampl_t; +typedef unsigned long lsampl_t; + +/* MMAP ioctl argument structure */ +struct a4l_mmap_arg { + unsigned int idx_subd; + unsigned long size; + void *ptr; +}; +typedef struct a4l_mmap_arg a4l_mmap_t; + +/* Constants related with buffer size + (might be used with BUFCFG ioctl) */ +#define A4L_BUF_MAXSIZE 0x1000000 +#define A4L_BUF_DEFSIZE 0x10000 +#define A4L_BUF_DEFMAGIC 0xffaaff55 + +/* BUFCFG ioctl argument structure */ +struct a4l_buffer_config { + /* NOTE: with the last buffer implementation, the field + idx_subd became useless; the buffer are now + per-context. So, the buffer size configuration is specific + to an opened device. There is a little exception: we can + define a default buffer size for a device. 
+ So far, a hack is used to implement the configuration of + the default buffer size */ + unsigned int idx_subd; + unsigned long buf_size; +}; +typedef struct a4l_buffer_config a4l_bufcfg_t; + +/* BUFINFO ioctl argument structure */ +struct a4l_buffer_info { + unsigned int idx_subd; + unsigned long buf_size; + unsigned long rw_count; +}; +typedef struct a4l_buffer_info a4l_bufinfo_t; + +/* BUFCFG2 / BUFINFO2 ioctl argument structure */ +struct a4l_buffer_config2 { + unsigned long wake_count; + unsigned long reserved[3]; +}; +typedef struct a4l_buffer_config2 a4l_bufcfg2_t; + +/* POLL ioctl argument structure */ +struct a4l_poll { + unsigned int idx_subd; + unsigned long arg; +}; +typedef struct a4l_poll a4l_poll_t; + +/* DEVCFG ioctl argument structure */ +struct a4l_link_desc { + unsigned char bname_size; + char *bname; + unsigned int opts_size; + void *opts; +}; +typedef struct a4l_link_desc a4l_lnkdesc_t; + +/* DEVINFO ioctl argument structure */ +struct a4l_dev_info { + char board_name[A4L_NAMELEN]; + char driver_name[A4L_NAMELEN]; + int nb_subd; + int idx_read_subd; + int idx_write_subd; +}; +typedef struct a4l_dev_info a4l_dvinfo_t; + +#define CIO 'd' +#define A4L_DEVCFG _IOW(CIO,0,a4l_lnkdesc_t) +#define A4L_DEVINFO _IOR(CIO,1,a4l_dvinfo_t) +#define A4L_SUBDINFO _IOR(CIO,2,a4l_sbinfo_t) +#define A4L_CHANINFO _IOR(CIO,3,a4l_chinfo_arg_t) +#define A4L_RNGINFO _IOR(CIO,4,a4l_rnginfo_arg_t) +#define A4L_CMD _IOWR(CIO,5,a4l_cmd_t) +#define A4L_CANCEL _IOR(CIO,6,unsigned int) +#define A4L_INSNLIST _IOR(CIO,7,unsigned int) +#define A4L_INSN _IOR(CIO,8,unsigned int) +#define A4L_BUFCFG _IOR(CIO,9,a4l_bufcfg_t) +#define A4L_BUFINFO _IOWR(CIO,10,a4l_bufinfo_t) +#define A4L_POLL _IOR(CIO,11,unsigned int) +#define A4L_MMAP _IOWR(CIO,12,unsigned int) +#define A4L_NBCHANINFO _IOR(CIO,13,a4l_chinfo_arg_t) +#define A4L_NBRNGINFO _IOR(CIO,14,a4l_rnginfo_arg_t) + +/* These IOCTLs are bound to be merged with A4L_BUFCFG and A4L_BUFINFO + at the next major release */ +#define A4L_BUFCFG2 _IOR(CIO,15,a4l_bufcfg_t) +#define A4L_BUFINFO2 _IOWR(CIO,16,a4l_bufcfg_t) + +/*! + * @addtogroup analogy_lib_async1 + * @{ + */ + +/*! + * @anchor ANALOGY_CMD_xxx @name ANALOGY_CMD_xxx + * @brief Common command flags definitions + * @{ + */ + +/** + * Do not execute the command, just check it + */ +#define A4L_CMD_SIMUL 0x1 +/** + * Perform data recovery / transmission in bulk mode + */ +#define A4L_CMD_BULK 0x2 +/** + * Perform a command which will write data to the device + */ +#define A4L_CMD_WRITE 0x4 + + /*! @} ANALOGY_CMD_xxx */ + +/*! 
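 * The command flags above are passed through the flags field of
 * struct a4l_cmd_desc (defined below), combined with the trigger types
 * listed next. A minimal, unchecked sketch (the subdevice index and
 * timing values are assumptions):
 * @code
 * #include <string.h>
 * #include <rtdm/uapi/analogy.h>
 *
 * void build_cmd(struct a4l_cmd_desc *cmd, unsigned int *chans,
 *		  unsigned char nb_chan)
 * {
 *	memset(cmd, 0, sizeof(*cmd));
 *	cmd->idx_subd = 0;		// hypothetical AI subdevice
 *	cmd->flags = A4L_CMD_SIMUL;	// only check the command
 *	cmd->start_src = TRIG_NOW;
 *	cmd->scan_begin_src = TRIG_TIMER;
 *	cmd->scan_begin_arg = 1000000;	// one scan every 1 ms (in ns)
 *	cmd->convert_src = TRIG_TIMER;
 *	cmd->convert_arg = 500000;	// 500 us between conversions
 *	cmd->scan_end_src = TRIG_COUNT;
 *	cmd->scan_end_arg = nb_chan;
 *	cmd->stop_src = TRIG_NONE;	// run until cancelled
 *	cmd->nb_chan = nb_chan;
 *	cmd->chan_descs = chans;
 * }
 * @endcode
 *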
+ * @anchor TRIG_xxx @name TRIG_xxx + * @brief Command triggers flags definitions + * @{ + */ + +/** + * Never trigger + */ +#define TRIG_NONE 0x00000001 +/** + * Trigger now + N ns + */ +#define TRIG_NOW 0x00000002 +/** + * Trigger on next lower level trig + */ +#define TRIG_FOLLOW 0x00000004 +/** + * Trigger at time N ns + */ +#define TRIG_TIME 0x00000008 +/** + * Trigger at rate N ns + */ +#define TRIG_TIMER 0x00000010 +/** + * Trigger when count reaches N + */ +#define TRIG_COUNT 0x00000020 +/** + * Trigger on external signal N + */ +#define TRIG_EXT 0x00000040 +/** + * Trigger on analogy-internal signal N + */ +#define TRIG_INT 0x00000080 +/** + * Driver defined trigger + */ +#define TRIG_OTHER 0x00000100 +/** + * Wake up on end-of-scan + */ +#define TRIG_WAKE_EOS 0x0020 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_MASK 0x00030000 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_NEAREST 0x00000000 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_DOWN 0x00010000 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_UP 0x00020000 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_UP_NEXT 0x00030000 + + /*! @} TRIG_xxx */ + +/*! + * @anchor CHAN_RNG_AREF @name Channel macros + * @brief Specific precompilation macros and constants useful for the + * channels descriptors tab located in the command structure + * @{ + */ + +/** + * Channel indication macro + */ +#define CHAN(a) ((a) & 0xffff) +/** + * Range definition macro + */ +#define RNG(a) (((a) & 0xff) << 16) +/** + * Reference definition macro + */ +#define AREF(a) (((a) & 0x03) << 24) +/** + * Flags definition macro + */ +#define FLAGS(a) ((a) & CR_FLAGS_MASK) +/** + * Channel + range + reference definition macro + */ +#define PACK(a, b, c) (a | RNG(b) | AREF(c)) +/** + * Channel + range + reference + flags definition macro + */ +#define PACK_FLAGS(a, b, c, d) (PACK(a, b, c) | FLAGS(d)) + +/** + * Analog reference is analog ground + */ +#define AREF_GROUND 0x00 +/** + * Analog reference is analog common + */ +#define AREF_COMMON 0x01 +/** + * Analog reference is differential + */ +#define AREF_DIFF 0x02 +/** + * Analog reference is undefined + */ +#define AREF_OTHER 0x03 + + /*! @} CHAN_RNG_AREF */ + +#if !defined(DOXYGEN_CPP) + +#define CR_FLAGS_MASK 0xfc000000 +#define CR_ALT_FILTER (1<<26) +#define CR_DITHER CR_ALT_FILTER +#define CR_DEGLITCH CR_ALT_FILTER +#define CR_ALT_SOURCE (1<<27) +#define CR_EDGE (1<<30) +#define CR_INVERT (1<<31) + +#endif /* !DOXYGEN_CPP */ + +/*! + * @brief Structure describing the asynchronous instruction + * @see a4l_snd_command() + */ + +struct a4l_cmd_desc { + unsigned char idx_subd; + /**< Subdevice to which the command will be applied. 
*/ + + unsigned long flags; + /**< Command flags */ + + /* Command trigger characteristics */ + unsigned int start_src; + /**< Start trigger type */ + unsigned int start_arg; + /**< Start trigger argument */ + unsigned int scan_begin_src; + /**< Scan begin trigger type */ + unsigned int scan_begin_arg; + /**< Scan begin trigger argument */ + unsigned int convert_src; + /**< Convert trigger type */ + unsigned int convert_arg; + /**< Convert trigger argument */ + unsigned int scan_end_src; + /**< Scan end trigger type */ + unsigned int scan_end_arg; + /**< Scan end trigger argument */ + unsigned int stop_src; + /**< Stop trigger type */ + unsigned int stop_arg; + /**< Stop trigger argument */ + + unsigned char nb_chan; + /**< Count of channels related with the command */ + unsigned int *chan_descs; + /**< Tab containing channels descriptors */ + + /* Driver specific fields */ + unsigned int valid_simul_stages; + /** < cmd simulation valid stages (driver dependent) */ + + unsigned int data_len; + /**< Driver specific buffer size */ + sampl_t *data; + /**< Driver specific buffer pointer */ +}; +typedef struct a4l_cmd_desc a4l_cmd_t; + +/*! @} analogy_lib_async1 */ + +/* --- Range section --- */ + +/** Constant for internal use only (must not be used by driver + developer). */ +#define A4L_RNG_FACTOR 1000000 + +/** + * Volt unit range flag + */ +#define A4L_RNG_VOLT_UNIT 0x0 +/** + * MilliAmpere unit range flag + */ +#define A4L_RNG_MAMP_UNIT 0x1 +/** + * No unit range flag + */ +#define A4L_RNG_NO_UNIT 0x2 +/** + * External unit range flag + */ +#define A4L_RNG_EXT_UNIT 0x4 + +/** + * Macro to retrieve the range unit from the range flags + */ +#define A4L_RNG_UNIT(x) (x & (A4L_RNG_VOLT_UNIT | \ + A4L_RNG_MAMP_UNIT | \ + A4L_RNG_NO_UNIT | \ + A4L_RNG_EXT_UNIT)) + +/* --- Subdevice flags desc stuff --- */ + +/* TODO: replace ANALOGY_SUBD_AI with ANALOGY_SUBD_ANALOG + and ANALOGY_SUBD_INPUT */ + +/* Subdevice types masks */ +#define A4L_SUBD_MASK_READ 0x80000000 +#define A4L_SUBD_MASK_WRITE 0x40000000 +#define A4L_SUBD_MASK_SPECIAL 0x20000000 + +/*! + * @addtogroup analogy_subdevice + * @{ + */ + +/*! 
+ * @anchor ANALOGY_SUBD_xxx @name Subdevice types
+ * @brief Flags to define the subdevice type
+ * @{
+ */
+
+/**
+ * Unused subdevice
+ */
+#define A4L_SUBD_UNUSED (A4L_SUBD_MASK_SPECIAL|0x1)
+/**
+ * Analog input subdevice
+ */
+#define A4L_SUBD_AI (A4L_SUBD_MASK_READ|0x2)
+/**
+ * Analog output subdevice
+ */
+#define A4L_SUBD_AO (A4L_SUBD_MASK_WRITE|0x4)
+/**
+ * Digital input subdevice
+ */
+#define A4L_SUBD_DI (A4L_SUBD_MASK_READ|0x8)
+/**
+ * Digital output subdevice
+ */
+#define A4L_SUBD_DO (A4L_SUBD_MASK_WRITE|0x10)
+/**
+ * Digital input/output subdevice
+ */
+#define A4L_SUBD_DIO (A4L_SUBD_MASK_SPECIAL|0x20)
+/**
+ * Counter subdevice
+ */
+#define A4L_SUBD_COUNTER (A4L_SUBD_MASK_SPECIAL|0x40)
+/**
+ * Timer subdevice
+ */
+#define A4L_SUBD_TIMER (A4L_SUBD_MASK_SPECIAL|0x80)
+/**
+ * Memory, EEPROM, DPRAM
+ */
+#define A4L_SUBD_MEMORY (A4L_SUBD_MASK_SPECIAL|0x100)
+/**
+ * Calibration subdevice DACs
+ */
+#define A4L_SUBD_CALIB (A4L_SUBD_MASK_SPECIAL|0x200)
+/**
+ * Processor, DSP
+ */
+#define A4L_SUBD_PROC (A4L_SUBD_MASK_SPECIAL|0x400)
+/**
+ * Serial IO subdevice
+ */
+#define A4L_SUBD_SERIAL (A4L_SUBD_MASK_SPECIAL|0x800)
+/**
+ * Mask which gathers all the types
+ */
+#define A4L_SUBD_TYPES (A4L_SUBD_UNUSED | \
+			A4L_SUBD_AI | \
+			A4L_SUBD_AO | \
+			A4L_SUBD_DI | \
+			A4L_SUBD_DO | \
+			A4L_SUBD_DIO | \
+			A4L_SUBD_COUNTER | \
+			A4L_SUBD_TIMER | \
+			A4L_SUBD_MEMORY | \
+			A4L_SUBD_CALIB | \
+			A4L_SUBD_PROC | \
+			A4L_SUBD_SERIAL)
+
+/*! @} ANALOGY_SUBD_xxx */
+
+/*!
+ * @anchor ANALOGY_SUBD_FT_xxx @name Subdevice features
+ * @brief Flags to define the subdevice's capabilities
+ * @{
+ */
+
+/* Subdevice capabilities */
+/**
+ * The subdevice can handle commands (i.e. it can perform asynchronous
+ * acquisitions)
+ */
+#define A4L_SUBD_CMD 0x1000
+/**
+ * The subdevice supports mmap operations (technically, any driver can
+ * do it; however, the developer may prefer his driver to be accessed
+ * only through read / write)
+ */
+#define A4L_SUBD_MMAP 0x8000
+
+/*! @} ANALOGY_SUBD_FT_xxx */
+
+/*!
+ * @anchor ANALOGY_SUBD_ST_xxx @name Subdevice status
+ * @brief Flags to define the subdevice's status
+ * @{
+ */
+
+/* Subdevice status flag(s) */
+/**
+ * The subdevice is busy, a synchronous or an asynchronous acquisition
+ * is occurring
+ */
+#define A4L_SUBD_BUSY_NR 0
+#define A4L_SUBD_BUSY (1 << A4L_SUBD_BUSY_NR)
+
+/**
+ * The subdevice is about to be cleaned in the middle of the detach
+ * procedure
+ */
+#define A4L_SUBD_CLEAN_NR 1
+#define A4L_SUBD_CLEAN (1 << A4L_SUBD_CLEAN_NR)
+
+/*! @} ANALOGY_SUBD_ST_xxx */
+
+/* --- Subdevice related IOCTL arguments structures --- */
+
+/* SUBDINFO IOCTL argument */
+struct a4l_subd_info {
+	unsigned long flags;
+	unsigned long status;
+	unsigned char nb_chan;
+};
+typedef struct a4l_subd_info a4l_sbinfo_t;
+
+/* CHANINFO / NBCHANINFO IOCTL arguments */
+struct a4l_chan_info {
+	unsigned long chan_flags;
+	unsigned char nb_rng;
+	unsigned char nb_bits;
+};
+typedef struct a4l_chan_info a4l_chinfo_t;
+
+struct a4l_chinfo_arg {
+	unsigned int idx_subd;
+	void *info;
+};
+typedef struct a4l_chinfo_arg a4l_chinfo_arg_t;
+
+/* RNGINFO / NBRNGINFO IOCTL arguments */
+struct a4l_rng_info {
+	long min;
+	long max;
+	unsigned long flags;
+};
+typedef struct a4l_rng_info a4l_rnginfo_t;
+
+struct a4l_rng_info_arg {
+	unsigned int idx_subd;
+	unsigned int idx_chan;
+	void *info;
+};
+typedef struct a4l_rng_info_arg a4l_rnginfo_arg_t;
+
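+/*
+ * For illustration, user programs usually reach these descriptors
+ * through the level-1 helpers declared in rtdm/analogy.h rather than
+ * through the raw ioctls; a minimal sketch (the device name "analogy0"
+ * and subdevice index 0 are assumptions):
+ *
+ *	#include <stdio.h>
+ *	#include <stdlib.h>
+ *	#include <errno.h>
+ *	#include <rtdm/analogy.h>
+ *
+ *	int show_subd_info(void)
+ *	{
+ *		a4l_desc_t dsc;
+ *		a4l_sbinfo_t *sbinfo;
+ *		int err;
+ *
+ *		err = a4l_open(&dsc, "analogy0");
+ *		if (err < 0)
+ *			return err;
+ *
+ *		dsc.sbdata = malloc(dsc.sbsize);  // subdevice data area
+ *		err = dsc.sbdata ? a4l_fill_desc(&dsc) : -ENOMEM;
+ *		if (!err) {
+ *			err = a4l_get_subdinfo(&dsc, 0, &sbinfo);
+ *			if (!err)
+ *				printf("flags=0x%lx, channels=%u\n",
+ *				       sbinfo->flags,
+ *				       (unsigned int)sbinfo->nb_chan);
+ *		}
+ *
+ *		free(dsc.sbdata);
+ *		a4l_close(&dsc);
+ *		return err;
+ *	}
+ */
+
+/*!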
@} */ + +#define A4L_INSN_MASK_READ 0x8000000 +#define A4L_INSN_MASK_WRITE 0x4000000 +#define A4L_INSN_MASK_SPECIAL 0x2000000 + +/*! + * @addtogroup analogy_lib_sync1 + * @{ + */ + +/*! + * @anchor ANALOGY_INSN_xxx @name Instruction type + * @brief Flags to define the type of instruction + * @{ + */ + +/** + * Read instruction + */ +#define A4L_INSN_READ (0 | A4L_INSN_MASK_READ) +/** + * Write instruction + */ +#define A4L_INSN_WRITE (1 | A4L_INSN_MASK_WRITE) +/** + * "Bits" instruction + */ +#define A4L_INSN_BITS (2 | A4L_INSN_MASK_READ | \ + A4L_INSN_MASK_WRITE) +/** + * Configuration instruction + */ +#define A4L_INSN_CONFIG (3 | A4L_INSN_MASK_READ | \ + A4L_INSN_MASK_WRITE) +/** + * Get time instruction + */ +#define A4L_INSN_GTOD (4 | A4L_INSN_MASK_READ | \ + A4L_INSN_MASK_SPECIAL) +/** + * Wait instruction + */ +#define A4L_INSN_WAIT (5 | A4L_INSN_MASK_WRITE | \ + A4L_INSN_MASK_SPECIAL) +/** + * Trigger instruction (to start asynchronous acquisition) + */ +#define A4L_INSN_INTTRIG (6 | A4L_INSN_MASK_WRITE | \ + A4L_INSN_MASK_SPECIAL) + + /*! @} ANALOGY_INSN_xxx */ + +/** + * Maximal wait duration + */ +#define A4L_INSN_WAIT_MAX 100000 + +/*! + * @anchor INSN_CONFIG_xxx @name Configuration instruction type + * @brief Values to define the type of configuration instruction + * @{ + */ + +#define A4L_INSN_CONFIG_DIO_INPUT 0 +#define A4L_INSN_CONFIG_DIO_OUTPUT 1 +#define A4L_INSN_CONFIG_DIO_OPENDRAIN 2 +#define A4L_INSN_CONFIG_ANALOG_TRIG 16 +#define A4L_INSN_CONFIG_ALT_SOURCE 20 +#define A4L_INSN_CONFIG_DIGITAL_TRIG 21 +#define A4L_INSN_CONFIG_BLOCK_SIZE 22 +#define A4L_INSN_CONFIG_TIMER_1 23 +#define A4L_INSN_CONFIG_FILTER 24 +#define A4L_INSN_CONFIG_CHANGE_NOTIFY 25 +#define A4L_INSN_CONFIG_SERIAL_CLOCK 26 +#define A4L_INSN_CONFIG_BIDIRECTIONAL_DATA 27 +#define A4L_INSN_CONFIG_DIO_QUERY 28 +#define A4L_INSN_CONFIG_PWM_OUTPUT 29 +#define A4L_INSN_CONFIG_GET_PWM_OUTPUT 30 +#define A4L_INSN_CONFIG_ARM 31 +#define A4L_INSN_CONFIG_DISARM 32 +#define A4L_INSN_CONFIG_GET_COUNTER_STATUS 33 +#define A4L_INSN_CONFIG_RESET 34 +#define A4L_INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR 1001 /* Use CTR as single pulsegenerator */ +#define A4L_INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR 1002 /* Use CTR as pulsetraingenerator */ +#define A4L_INSN_CONFIG_GPCT_QUADRATURE_ENCODER 1003 /* Use the counter as encoder */ +#define A4L_INSN_CONFIG_SET_GATE_SRC 2001 /* Set gate source */ +#define A4L_INSN_CONFIG_GET_GATE_SRC 2002 /* Get gate source */ +#define A4L_INSN_CONFIG_SET_CLOCK_SRC 2003 /* Set master clock source */ +#define A4L_INSN_CONFIG_GET_CLOCK_SRC 2004 /* Get master clock source */ +#define A4L_INSN_CONFIG_SET_OTHER_SRC 2005 /* Set other source */ +#define A4L_INSN_CONFIG_SET_COUNTER_MODE 4097 +#define A4L_INSN_CONFIG_SET_ROUTING 4099 +#define A4L_INSN_CONFIG_GET_ROUTING 4109 + +/*! @} INSN_CONFIG_xxx */ + +/*! + * @anchor ANALOGY_COUNTER_xxx @name Counter status bits + * @brief Status bits for INSN_CONFIG_GET_COUNTER_STATUS + * @{ + */ + +#define A4L_COUNTER_ARMED 0x1 +#define A4L_COUNTER_COUNTING 0x2 +#define A4L_COUNTER_TERMINAL_COUNT 0x4 + + /*! @} ANALOGY_COUNTER_xxx */ + +/*! + * @anchor ANALOGY_IO_DIRECTION @name IO direction + * @brief Values to define the IO polarity + * @{ + */ + +#define A4L_INPUT 0 +#define A4L_OUTPUT 1 +#define A4L_OPENDRAIN 2 + + /*! @} ANALOGY_IO_DIRECTION */ + + +/*! + * @anchor ANALOGY_EV_xxx @name Events types + * @brief Values to define the Analogy events. They might used to send + * some specific events through the instruction interface. 
+ * @{
+ */
+
+#define A4L_EV_START 0x00040000
+#define A4L_EV_SCAN_BEGIN 0x00080000
+#define A4L_EV_CONVERT 0x00100000
+#define A4L_EV_SCAN_END 0x00200000
+#define A4L_EV_STOP 0x00400000
+
+/*! @} ANALOGY_EV_xxx */
+
+/*!
+ * @brief Structure describing the synchronous instruction
+ * @see a4l_snd_insn()
+ */
+
+struct a4l_instruction {
+	unsigned int type;
+	/**< Instruction type */
+	unsigned int idx_subd;
+	/**< Subdevice to which the instruction will be applied. */
+	unsigned int chan_desc;
+	/**< Channel descriptor */
+	unsigned int data_size;
+	/**< Size of the instruction data */
+	void *data;
+	/**< Instruction data */
+};
+typedef struct a4l_instruction a4l_insn_t;
+
+/*!
+ * @brief Structure describing the list of synchronous instructions
+ * @see a4l_snd_insnlist()
+ */
+
+struct a4l_instruction_list {
+	unsigned int count;
+	/**< Instructions count */
+	a4l_insn_t *insns;
+	/**< Array containing the instruction pointers */
+};
+typedef struct a4l_instruction_list a4l_insnlst_t;
+
+/*! @} analogy_lib_sync1 */
+
+struct a4l_calibration_subdev {
+	a4l_sbinfo_t *info;
+	char *name;
+	int slen;
+	int idx;
+};
+
+struct a4l_calibration_subdev_data {
+	int index;
+	int channel;
+	int range;
+	int expansion;
+	int nb_coeff;
+	double *coeff;
+};
+
+struct a4l_calibration_data {
+	char *driver_name;
+	char *board_name;
+	int nb_ai;
+	struct a4l_calibration_subdev_data *ai;
+	int nb_ao;
+	struct a4l_calibration_subdev_data *ao;
+};
+
+struct a4l_polynomial {
+	int expansion;
+	int order;
+	int nb_coeff;
+	double *coeff;
+};
+
+#endif /* _RTDM_UAPI_ANALOGY_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h
new file mode 100644
index 0000000..ab6cab1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h
@@ -0,0 +1,40 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */ +#ifndef _RTDM_UAPI_AUTOTUNE_H +#define _RTDM_UAPI_AUTOTUNE_H + +#include <linux/types.h> + +#define RTDM_CLASS_AUTOTUNE RTDM_CLASS_MISC +#define RTDM_SUBCLASS_AUTOTUNE 0 + +struct autotune_setup { + __u32 period; + __u32 quiet; +}; + +#define AUTOTUNE_RTIOC_IRQ _IOW(RTDM_CLASS_AUTOTUNE, 0, struct autotune_setup) +#define AUTOTUNE_RTIOC_KERN _IOW(RTDM_CLASS_AUTOTUNE, 1, struct autotune_setup) +#define AUTOTUNE_RTIOC_USER _IOW(RTDM_CLASS_AUTOTUNE, 2, struct autotune_setup) +#define AUTOTUNE_RTIOC_PULSE _IOW(RTDM_CLASS_AUTOTUNE, 3, __u64) +#define AUTOTUNE_RTIOC_RUN _IOR(RTDM_CLASS_AUTOTUNE, 4, __u32) +#define AUTOTUNE_RTIOC_RESET _IO(RTDM_CLASS_AUTOTUNE, 5) + +#endif /* !_RTDM_UAPI_AUTOTUNE_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h new file mode 100644 index 0000000..8d0d837 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h @@ -0,0 +1,905 @@ +/** + * @file + * Real-Time Driver Model for RT-Socket-CAN, CAN device profile header + * + * @note Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * @note Copyright (C) 2005, 2006 Sebastian Smolorz + * <Sebastian.Smolorz@stud.uni-hannover.de> + * + * This RTDM CAN device profile header is based on: + * + * include/linux/can.h, include/linux/socket.h, net/can/pf_can.h in + * linux-can.patch, a CAN socket framework for Linux + * + * Copyright (C) 2004, 2005, + * Robert Schwebel, Benedikt Spranger, Marc Kleine-Budde, Pengutronix + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _RTDM_UAPI_CAN_H +#define _RTDM_UAPI_CAN_H + +/** + * @addtogroup rtdm_can + * @{ + */ + +#define RTCAN_PROFILE_VER 2 + +#ifndef AF_CAN + +/** CAN address family */ +#define AF_CAN 29 + +/** CAN protocol family */ +#define PF_CAN AF_CAN + +#endif + +/** CAN socket levels + * + * Used for @ref Sockopts for the particular protocols. + */ +#define SOL_CAN_RAW 103 + +/** Type of CAN id (see @ref CAN_xxx_MASK and @ref CAN_xxx_FLAG) */ +typedef uint32_t can_id_t; +typedef uint32_t canid_t; + +/** Type of CAN error mask */ +typedef can_id_t can_err_mask_t; + +/*! + * @anchor CAN_xxx_MASK @name CAN ID masks + * Bit masks for masking CAN IDs + * @{ */ + +/** Bit mask for extended CAN IDs */ +#define CAN_EFF_MASK 0x1FFFFFFF + +/** Bit mask for standard CAN IDs */ +#define CAN_SFF_MASK 0x000007FF + +/** @} */ + +/*! + * @anchor CAN_xxx_FLAG @name CAN ID flags + * Flags within a CAN ID indicating special CAN frame attributes + * @{ */ +/** Extended frame */ +#define CAN_EFF_FLAG 0x80000000 +/** Remote transmission frame */ +#define CAN_RTR_FLAG 0x40000000 +/** Error frame (see @ref Errors), not valid in struct can_filter */ +#define CAN_ERR_FLAG 0x20000000 +/** Invert CAN filter definition, only valid in struct can_filter */ +#define CAN_INV_FILTER CAN_ERR_FLAG + +/** @} */ + +/*! 
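 * For illustration, the masks and flags above are typically combined as
 * follows to extract the plain identifier from a received frame (a sketch):
 * @code
 * #include <stdint.h>
 *
 * static inline uint32_t plain_id(canid_t can_id)
 * {
 *	return (can_id & CAN_EFF_FLAG) ?
 *		(can_id & CAN_EFF_MASK) :	// 29-bit extended ID
 *		(can_id & CAN_SFF_MASK);	// 11-bit standard ID
 * }
 * @endcode
 *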
+ * @anchor CAN_PROTO @name Particular CAN protocols + * Possible protocols for the PF_CAN protocol family + * + * Currently only the RAW protocol is supported. + * @{ */ +/** Raw protocol of @c PF_CAN, applicable to socket type @c SOCK_RAW */ +#define CAN_RAW 1 +/** @} */ + +#define CAN_BAUDRATE_UNKNOWN ((uint32_t)-1) +#define CAN_BAUDRATE_UNCONFIGURED 0 + +/** + * Baudrate definition in bits per second + */ +typedef uint32_t can_baudrate_t; + +/** + * Supported CAN bit-time types + */ +enum CAN_BITTIME_TYPE { + /** Standard bit-time definition according to Bosch */ + CAN_BITTIME_STD, + /** Hardware-specific BTR bit-time definition */ + CAN_BITTIME_BTR +}; + +/** + * See @ref CAN_BITTIME_TYPE + */ +typedef enum CAN_BITTIME_TYPE can_bittime_type_t; + +/** + * Standard bit-time parameters according to Bosch + */ +struct can_bittime_std { + uint32_t brp; /**< Baud rate prescaler */ + uint8_t prop_seg; /**< from 1 to 8 */ + uint8_t phase_seg1; /**< from 1 to 8 */ + uint8_t phase_seg2; /**< from 1 to 8 */ + uint8_t sjw:7; /**< from 1 to 4 */ + uint8_t sam:1; /**< 1 - enable triple sampling */ +}; + +/** + * Hardware-specific BTR bit-times + */ +struct can_bittime_btr { + + uint8_t btr0; /**< Bus timing register 0 */ + uint8_t btr1; /**< Bus timing register 1 */ +}; + +/** + * Custom CAN bit-time definition + */ +struct can_bittime { + /** Type of bit-time definition */ + can_bittime_type_t type; + + union { + /** Standard bit-time */ + struct can_bittime_std std; + /** Hardware-spcific BTR bit-time */ + struct can_bittime_btr btr; + }; +}; + +/*! + * @anchor CAN_MODE @name CAN operation modes + * Modes into which CAN controllers can be set + * @{ */ +enum CAN_MODE { + /*! Set controller in Stop mode (no reception / transmission possible) */ + CAN_MODE_STOP = 0, + + /*! Set controller into normal operation. @n + * Coming from stopped mode or bus off, the controller begins with no + * errors in @ref CAN_STATE_ACTIVE. */ + CAN_MODE_START, + + /*! Set controller into Sleep mode. @n + * This is only possible if the controller is not stopped or bus-off. @n + * Notice that sleep mode will only be entered when there is no bus + * activity. If the controller detects bus activity while "sleeping" + * it will go into operating mode again. @n + * To actively leave sleep mode again trigger @c CAN_MODE_START. */ + CAN_MODE_SLEEP +}; +/** @} */ + +/** See @ref CAN_MODE */ +typedef enum CAN_MODE can_mode_t; + +/*! + * @anchor CAN_CTRLMODE @name CAN controller modes + * Special CAN controllers modes, which can be or'ed together. + * + * @note These modes are hardware-dependent. Please consult the hardware + * manual of the CAN controller for more detailed information. + * + * @{ */ + +/*! Listen-Only mode + * + * In this mode the CAN controller would give no acknowledge to the CAN-bus, + * even if a message is received successfully and messages would not be + * transmitted. This mode might be useful for bus-monitoring, hot-plugging + * or throughput analysis. */ +#define CAN_CTRLMODE_LISTENONLY 0x1 + +/*! Loopback mode + * + * In this mode the CAN controller does an internal loop-back, a message is + * transmitted and simultaneously received. That mode can be used for self + * test operation. */ +#define CAN_CTRLMODE_LOOPBACK 0x2 + +/*! Triple sampling mode + * + * In this mode the CAN controller uses Triple sampling. */ +#define CAN_CTRLMODE_3_SAMPLES 0x4 + +/** @} */ + +/** See @ref CAN_CTRLMODE */ +typedef int can_ctrlmode_t; + +/*! 
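 * For illustration, a standard (Bosch) bit-time description might be
 * filled in as follows (a sketch; the timing values are placeholders,
 * not validated settings):
 * @code
 * static void fill_bittime(struct can_bittime *bt)
 * {
 *	bt->type = CAN_BITTIME_STD;
 *	bt->std.brp = 4;		// baud rate prescaler
 *	bt->std.prop_seg = 2;		// propagation segment, 1..8
 *	bt->std.phase_seg1 = 7;		// phase segment 1, 1..8
 *	bt->std.phase_seg2 = 6;		// phase segment 2, 1..8
 *	bt->std.sjw = 1;		// resynchronization jump width, 1..4
 *	bt->std.sam = 0;		// no triple sampling
 * }
 * @endcode
 *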
+ * @anchor CAN_STATE @name CAN controller states + * States a CAN controller can be in. + * @{ */ +enum CAN_STATE { + /** CAN controller is error active */ + CAN_STATE_ERROR_ACTIVE = 0, + /** CAN controller is active */ + CAN_STATE_ACTIVE = 0, + + /** CAN controller is error active, warning level is reached */ + CAN_STATE_ERROR_WARNING = 1, + /** CAN controller is error active, warning level is reached */ + CAN_STATE_BUS_WARNING = 1, + + /** CAN controller is error passive */ + CAN_STATE_ERROR_PASSIVE = 2, + /** CAN controller is error passive */ + CAN_STATE_BUS_PASSIVE = 2, + + /** CAN controller went into Bus Off */ + CAN_STATE_BUS_OFF, + + /** CAN controller is scanning to get the baudrate */ + CAN_STATE_SCANNING_BAUDRATE, + + /** CAN controller is in stopped mode */ + CAN_STATE_STOPPED, + + /** CAN controller is in Sleep mode */ + CAN_STATE_SLEEPING, +}; +/** @} */ + +/** See @ref CAN_STATE */ +typedef enum CAN_STATE can_state_t; + +#define CAN_STATE_OPERATING(state) ((state) < CAN_STATE_BUS_OFF) + +/** + * Filter for reception of CAN messages. + * + * This filter works as follows: + * A received CAN ID is AND'ed bitwise with @c can_mask and then compared to + * @c can_id. This also includes the @ref CAN_EFF_FLAG and @ref CAN_RTR_FLAG + * of @ref CAN_xxx_FLAG. If this comparison is true, the message will be + * received by the socket. The logic can be inverted with the @c can_id flag + * @ref CAN_INV_FILTER : + * + * @code + * if (can_id & CAN_INV_FILTER) { + * if ((received_can_id & can_mask) != (can_id & ~CAN_INV_FILTER)) + * accept-message; + * } else { + * if ((received_can_id & can_mask) == can_id) + * accept-message; + * } + * @endcode + * + * Multiple filters can be arranged in a filter list and set with + * @ref Sockopts. If one of these filters matches a CAN ID upon reception + * of a CAN frame, this frame is accepted. + * + */ +typedef struct can_filter { + /** CAN ID which must match with incoming IDs after passing the mask. + * The filter logic can be inverted with the flag @ref CAN_INV_FILTER. */ + uint32_t can_id; + + /** Mask which is applied to incoming IDs. See @ref CAN_xxx_MASK + * "CAN ID masks" if exactly one CAN ID should come through. */ + uint32_t can_mask; +} can_filter_t; + +/** + * Socket address structure for the CAN address family + */ +struct sockaddr_can { + /** CAN address family, must be @c AF_CAN */ + sa_family_t can_family; + + /** Interface index of CAN controller. See @ref SIOCGIFINDEX. */ + int can_ifindex; +}; + +/** + * Raw CAN frame + * + * Central structure for receiving and sending CAN frames. + */ +typedef struct can_frame { + /** CAN ID of the frame + * + * See @ref CAN_xxx_FLAG "CAN ID flags" for special bits. + */ + can_id_t can_id; + + /** Size of the payload in bytes */ + uint8_t can_dlc; + + /** Payload data bytes */ + uint8_t data[8] __attribute__ ((aligned(8))); +} can_frame_t; + +/** + * CAN interface request descriptor + * + * Parameter block for submitting CAN control requests. + */ +struct can_ifreq { + union { + char ifrn_name[IFNAMSIZ]; + } ifr_ifrn; + + union { + struct can_bittime bittime; + can_baudrate_t baudrate; + can_ctrlmode_t ctrlmode; + can_mode_t mode; + can_state_t state; + int ifru_ivalue; + } ifr_ifru; +}; + +/*! 
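 * For illustration, struct can_frame and struct sockaddr_can come
 * together when transmitting over a bound socket (a sketch; the CAN ID
 * and payload are arbitrary):
 * @code
 * #include <string.h>
 * #include <sys/socket.h>
 *
 * int send_one_frame(int s)	// s: bound RAW socket
 * {
 *	struct can_frame frame;
 *
 *	memset(&frame, 0, sizeof(frame));
 *	frame.can_id = 0x123;	// hypothetical standard ID
 *	frame.can_dlc = 2;	// two payload bytes
 *	frame.data[0] = 0xde;
 *	frame.data[1] = 0xad;
 *
 *	// No destination address given: the bound interface is used.
 *	return send(s, &frame, sizeof(frame), 0);
 * }
 * @endcode
 *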
+ * @anchor RTCAN_TIMESTAMPS @name Timestamp switches
+ * Arguments to pass to @ref RTCAN_RTIOC_TAKE_TIMESTAMP
+ * @{ */
+#define RTCAN_TAKE_NO_TIMESTAMPS	0 /**< Switch off taking timestamps */
+#define RTCAN_TAKE_TIMESTAMPS		1 /**< Do take timestamps */
+/** @} */
+
+#define RTIOC_TYPE_CAN RTDM_CLASS_CAN
+
+/*!
+ * @anchor Rawsockopts @name RAW socket options
+ * Setting and getting CAN RAW socket options.
+ * @{ */
+
+/**
+ * CAN filter definition
+ *
+ * A CAN raw filter list with elements of struct can_filter can be installed
+ * with @c setsockopt. This list is used upon reception of CAN frames to
+ * decide whether the bound socket will receive a frame. An empty filter list
+ * can also be defined using optlen = 0, which is recommended for write-only
+ * sockets.
+ * @n
+ * If the socket was already bound with @ref Bind, the old filter list
+ * gets replaced with the new one. Be aware that already received, but
+ * not read out CAN frames may stay in the socket buffer.
+ * @n
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_FILTER
+ *
+ * @param [in] optval Pointer to array of struct can_filter.
+ *
+ * @param [in] optlen Size of filter list: count * sizeof(struct can_filter).
+ * @n
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -ENOMEM (Not enough memory to fulfill the operation)
+ * - -EINVAL (Invalid length "optlen")
+ * - -ENOSPC (No space to store filter list, check RT-Socket-CAN kernel
+ *            parameters)
+ * .
+ */
+#define CAN_RAW_FILTER 0x1
+
+/**
+ * CAN error mask
+ *
+ * A CAN error mask (see @ref Errors) can be set with @c setsockopt. This
+ * mask is then used to decide if error frames are delivered to this socket
+ * in case of error conditions. The error frames are marked with the
+ * @ref CAN_ERR_FLAG of @ref CAN_xxx_FLAG and must be handled by the
+ * application properly. A detailed description of the errors can be
+ * found in the @c can_id and the @c data fields of struct can_frame
+ * (see @ref Errors for further details).
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_ERR_FILTER
+ *
+ * @param [in] optval Pointer to error mask of type can_err_mask_t.
+ *
+ * @param [in] optlen Size of error mask: sizeof(can_err_mask_t).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * .
+ */
+#define CAN_RAW_ERR_FILTER 0x2
+
+/**
+ * CAN TX loopback
+ *
+ * The TX loopback to other local sockets can be selected with this
+ * @c setsockopt.
+ *
+ * @note The TX loopback feature must be enabled in the kernel and then
+ * the loopback to other local TX sockets is enabled by default.
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_LOOPBACK
+ *
+ * @param [in] optval Pointer to integer value.
+ *
+ * @param [in] optlen Size of int: sizeof(int).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * - -EOPNOTSUPP (not supported, check RT-Socket-CAN kernel parameters).
+ */
+#define CAN_RAW_LOOPBACK 0x3
+
+/**
+ * CAN receive own messages
+ *
+ * Not supported by RT-Socket-CAN, but defined for compatibility with
+ * Socket-CAN.
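+ * @n
+ * For illustration, installing a single-ID filter with the options
+ * above might read as follows (a sketch; the ID is arbitrary):
+ * @code
+ * #include <sys/socket.h>
+ *
+ * int install_filter(int s)	// s: RAW socket
+ * {
+ *	struct can_filter flt = {
+ *		.can_id = 0x42,			// hypothetical ID
+ *		.can_mask = CAN_SFF_MASK,	// match all 11 ID bits
+ *	};
+ *
+ *	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
+ *			  &flt, sizeof(flt));
+ * }
+ * @endcode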
+ */
+#define CAN_RAW_RECV_OWN_MSGS 0x4
+
+/** @} */
+
+/*!
+ * @anchor CANIOCTLs @name IOCTLs
+ * CAN device IOCTLs
+ *
+ * @deprecated Passing \c struct \c ifreq as a request descriptor
+ * for CAN IOCTLs is still accepted for backward compatibility,
+ * however it is recommended to switch to \c struct \c can_ifreq at
+ * the first opportunity.
+ *
+ * @{ */
+
+/**
+ * Get CAN interface index by name
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                     (<TT>struct can_ifreq</TT>). If
+ *                     <TT>ifr_name</TT> holds a valid CAN interface
+ *                     name <TT>ifr_ifindex</TT> will be filled with
+ *                     the corresponding interface index.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ *
+ * @coretags{task-unrestricted}
+ */
+#ifdef DOXYGEN_CPP /* For Doxygen only, already defined by kernel headers */
+#define SIOCGIFINDEX defined_by_kernel_header_file
+#endif
+
+/**
+ * Set baud rate
+ *
+ * The baudrate must be specified in bits per second. The driver will
+ * try to calculate reasonable CAN bit-timing parameters. You can use
+ * @ref SIOCSCANCUSTOMBITTIME to set custom bit-timing.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EDOM: Baud rate not possible.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the baud rate is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANBAUDRATE	_IOW(RTIOC_TYPE_CAN, 0x01, struct can_ifreq)
+
+/**
+ * Get baud rate
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                     (<TT>struct can_ifreq</TT>).
+ *                     <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                     <TT>ifr_ifru</TT> will be filled with an instance of
+ *                     @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANBAUDRATE	_IOWR(RTIOC_TYPE_CAN, 0x02, struct can_ifreq)
+
+/**
+ * Set custom bit-time parameters
+ *
+ * A custom bit-time can be defined in various formats (see
+ * struct can_bittime).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the bit-time is a configuration task. It should
+ * be done deliberately; otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCUSTOMBITTIME _IOW(RTIOC_TYPE_CAN, 0x03, struct can_ifreq)
+
+/**
+ * Get custom bit-time parameters
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> will be filled with an instance of
+ *                 struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *   specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANCUSTOMBITTIME _IOWR(RTIOC_TYPE_CAN, 0x04, struct can_ifreq)
+
+/**
+ * Set operation mode of CAN controller
+ *
+ * See @ref CAN_MODE "CAN controller modes" for available modes.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *             (<TT>struct can_ifreq</TT>).
+ *             <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *             <TT>ifr_ifru</TT> must be filled with an instance of
+ *             @ref can_mode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *   specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EAGAIN: (@ref CAN_MODE_START, @ref CAN_MODE_STOP) Could not successfully
+ *   set mode, hardware is busy. Try again.
+ * - -EINVAL: (@ref CAN_MODE_START) Cannot start controller,
+ *   set baud rate first.
+ * - -ENETDOWN: (@ref CAN_MODE_SLEEP) Cannot go into sleep mode because
+ *   controller is stopped or bus off.
+ * - -EOPNOTSUPP: unknown mode
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting a CAN controller into normal operation after a bus-off can
+ * take some time (128 occurrences of 11 consecutive recessive bits).
+ * In such a case, although this IOCTL will return immediately with success
+ * and @ref SIOCGCANSTATE will report @ref CAN_STATE_ACTIVE,
+ * bus-off recovery may still be in progress. @n
+ * If a controller is bus-off, setting it into stop mode will return no error
+ * but the controller remains bus-off.
+ */
+#define SIOCSCANMODE _IOW(RTIOC_TYPE_CAN, 0x05, struct can_ifreq)
+
+/**
+ * Get current state of CAN controller
+ *
+ * States are divided into main states and additional error indicators. A CAN
+ * controller is always in exactly one main state. CAN bus errors are
+ * registered by the CAN hardware and collected by the driver. There is one
+ * error indicator (bit) per error type. If this IOCTL is triggered, the error
+ * types which occurred since the last call of this IOCTL are reported, and
+ * the error indicators are then cleared. See also
+ * @ref CAN_STATE "CAN controller states".
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> will be filled with an instance of
+ *                 @ref can_state_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *   specified address.
+ * - -ENODEV: No device with specified name exists.
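+ *
+ * @par Example
+ * An illustrative sketch; the union member name assumes the struct
+ * can_ifreq layout of this profile, and the state constant follows
+ * @ref CAN_STATE "CAN controller states":
+ * @code
+ * struct can_ifreq ifr;
+ * strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ * if (ioctl(s, SIOCGCANSTATE, &ifr) == 0 &&
+ *     ifr.ifr_ifru.state == CAN_STATE_BUS_OFF) {
+ *     // restart the controller, see SIOCSCANMODE with CAN_MODE_START
+ * }
+ * @endcode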
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANSTATE _IOWR(RTIOC_TYPE_CAN, 0x06, struct can_ifreq)
+
+/**
+ * Set special controller modes
+ *
+ * Various special controller modes can be OR'ed together (see
+ * @ref CAN_CTRLMODE for further information).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *             (<TT>struct can_ifreq</TT>).
+ *             <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *             <TT>ifr_ifru</TT> must be filled with an instance of
+ *             @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *   specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting special controller modes is a configuration task. It should
+ * be done deliberately; otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCTRLMODE _IOW(RTIOC_TYPE_CAN, 0x07, struct can_ifreq)
+
+/**
+ * Get special controller modes
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> will be filled with an instance of
+ *                 @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *   specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANCTRLMODE _IOWR(RTIOC_TYPE_CAN, 0x08, struct can_ifreq)
+
+/**
+ * Enable or disable storing a high precision timestamp upon reception of
+ * a CAN frame.
+ *
+ * A newly created socket takes no timestamps by default.
+ *
+ * @param [in] arg int variable, see @ref RTCAN_TIMESTAMPS "Timestamp switches"
+ *
+ * @return 0 on success.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Enabling timestamps only affects CAN messages received after the
+ * change. Frames already in the socket buffer carry no timestamps if
+ * timestamping was disabled when they arrived. See @ref Recv "Receive"
+ * for more details.
+ */
+#define RTCAN_RTIOC_TAKE_TIMESTAMP _IOW(RTIOC_TYPE_CAN, 0x09, int)
+
+/**
+ * Specify a reception timeout for a socket
+ *
+ * Defines a timeout for all receive operations via a
+ * socket which will take effect when one of the @ref Recv "receive functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before receiving messages from the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. A positive
+ *             value is interpreted as a relative timeout in nanoseconds.
+ *             See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *   specified address.
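+ *
+ * @par Example
+ * A minimal sketch setting a 100 ms receive timeout (illustrative only;
+ * error handling omitted):
+ * @code
+ * nanosecs_rel_t timeout = 100000000LL;  // 100 ms, in nanoseconds
+ * ioctl(s, RTCAN_RTIOC_RCV_TIMEOUT, &timeout);
+ * @endcode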
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_RCV_TIMEOUT _IOW(RTIOC_TYPE_CAN, 0x0A, nanosecs_rel_t)
+
+/**
+ * Specify a transmission timeout for a socket
+ *
+ * Defines a timeout for all send operations via a
+ * socket which will take effect when one of the @ref Send "send functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before sending messages to the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. A positive
+ *             value is interpreted as a relative timeout in nanoseconds.
+ *             See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *   specified address.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_SND_TIMEOUT _IOW(RTIOC_TYPE_CAN, 0x0B, nanosecs_rel_t)
+/** @} */
+
+#define CAN_ERR_DLC 8 /* dlc for error frames */
+
+/*!
+ * @anchor Errors @name Error mask
+ * Error class (mask) in @c can_id field of struct can_frame to
+ * be used with @ref CAN_RAW_ERR_FILTER.
+ *
+ * @b Note: Error reporting is hardware dependent and most CAN controllers
+ * report less detailed error conditions than the SJA1000.
+ *
+ * @b Note: In case of a bus-off error condition (@ref CAN_ERR_BUSOFF), the
+ * CAN controller is @b not restarted automatically. It is the application's
+ * responsibility to react appropriately, e.g. by calling @ref CAN_MODE_START.
+ *
+ * @b Note: Bus error interrupts (@ref CAN_ERR_BUSERROR) are enabled when an
+ * application is calling a @ref Recv function on a socket listening
+ * on bus errors (using @ref CAN_RAW_ERR_FILTER). After one bus error has
+ * occurred, the interrupt will be disabled to allow the application time for
+ * error processing and to efficiently avoid bus error interrupt flooding.
+ * @{ */
+
+/** TX timeout (netdevice driver) */
+#define CAN_ERR_TX_TIMEOUT 0x00000001U
+
+/** Lost arbitration (see @ref Error0 "data[0]") */
+#define CAN_ERR_LOSTARB 0x00000002U
+
+/** Controller problems (see @ref Error1 "data[1]") */
+#define CAN_ERR_CRTL 0x00000004U
+
+/** Protocol violations (see @ref Error2 "data[2]",
+    @ref Error3 "data[3]") */
+#define CAN_ERR_PROT 0x00000008U
+
+/** Transceiver status (see @ref Error4 "data[4]") */
+#define CAN_ERR_TRX 0x00000010U
+
+/** Received no ACK on transmission */
+#define CAN_ERR_ACK 0x00000020U
+
+/** Bus off */
+#define CAN_ERR_BUSOFF 0x00000040U
+
+/** Bus error (may flood!) */
+#define CAN_ERR_BUSERROR 0x00000080U
+
+/** Controller restarted */
+#define CAN_ERR_RESTARTED 0x00000100U
+
+/** Omit EFF, RTR, ERR flags */
+#define CAN_ERR_MASK 0x1FFFFFFFU
+
+/** @} */
+
+/*!
+ * @anchor Error0 @name Arbitration lost error
+ * Error in the data[0] field of struct can_frame.
+ * @{ */
+/* arbitration lost in bit ... / data[0] */
+#define CAN_ERR_LOSTARB_UNSPEC 0x00 /**< unspecified */
+ /**< else bit number in bitstream */
+/** @} */
+
+/*!
+ * @anchor Error1 @name Controller problems
+ * Error in the data[1] field of struct can_frame.
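+ * An illustrative check, assuming a received struct can_frame @c frame
+ * whose @c can_id has @ref CAN_ERR_FLAG set:
+ * @code
+ * if ((frame.can_id & CAN_ERR_CRTL) &&
+ *     (frame.data[1] & CAN_ERR_CRTL_RX_OVERFLOW)) {
+ *     // the receive buffer overflowed, frames were lost
+ * }
+ * @endcode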
+ * @{ */
+/* error status of CAN-controller / data[1] */
+#define CAN_ERR_CRTL_UNSPEC 0x00 /**< unspecified */
+#define CAN_ERR_CRTL_RX_OVERFLOW 0x01 /**< RX buffer overflow */
+#define CAN_ERR_CRTL_TX_OVERFLOW 0x02 /**< TX buffer overflow */
+#define CAN_ERR_CRTL_RX_WARNING 0x04 /**< reached warning level for RX errors */
+#define CAN_ERR_CRTL_TX_WARNING 0x08 /**< reached warning level for TX errors */
+#define CAN_ERR_CRTL_RX_PASSIVE 0x10 /**< reached passive level for RX errors */
+#define CAN_ERR_CRTL_TX_PASSIVE 0x20 /**< reached passive level for TX errors */
+/** @} */
+
+/*!
+ * @anchor Error2 @name Protocol error type
+ * Error in the data[2] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (type) / data[2] */
+#define CAN_ERR_PROT_UNSPEC 0x00 /**< unspecified */
+#define CAN_ERR_PROT_BIT 0x01 /**< single bit error */
+#define CAN_ERR_PROT_FORM 0x02 /**< frame format error */
+#define CAN_ERR_PROT_STUFF 0x04 /**< bit stuffing error */
+#define CAN_ERR_PROT_BIT0 0x08 /**< unable to send dominant bit */
+#define CAN_ERR_PROT_BIT1 0x10 /**< unable to send recessive bit */
+#define CAN_ERR_PROT_OVERLOAD 0x20 /**< bus overload */
+#define CAN_ERR_PROT_ACTIVE 0x40 /**< active error announcement */
+#define CAN_ERR_PROT_TX 0x80 /**< error occurred on transmission */
+/** @} */
+
+/*!
+ * @anchor Error3 @name Protocol error location
+ * Error in the data[3] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (location) / data[3] */
+#define CAN_ERR_PROT_LOC_UNSPEC 0x00 /**< unspecified */
+#define CAN_ERR_PROT_LOC_SOF 0x03 /**< start of frame */
+#define CAN_ERR_PROT_LOC_ID28_21 0x02 /**< ID bits 28 - 21 (SFF: 10 - 3) */
+#define CAN_ERR_PROT_LOC_ID20_18 0x06 /**< ID bits 20 - 18 (SFF: 2 - 0) */
+#define CAN_ERR_PROT_LOC_SRTR 0x04 /**< substitute RTR (SFF: RTR) */
+#define CAN_ERR_PROT_LOC_IDE 0x05 /**< identifier extension */
+#define CAN_ERR_PROT_LOC_ID17_13 0x07 /**< ID bits 17-13 */
+#define CAN_ERR_PROT_LOC_ID12_05 0x0F /**< ID bits 12-5 */
+#define CAN_ERR_PROT_LOC_ID04_00 0x0E /**< ID bits 4-0 */
+#define CAN_ERR_PROT_LOC_RTR 0x0C /**< RTR */
+#define CAN_ERR_PROT_LOC_RES1 0x0D /**< reserved bit 1 */
+#define CAN_ERR_PROT_LOC_RES0 0x09 /**< reserved bit 0 */
+#define CAN_ERR_PROT_LOC_DLC 0x0B /**< data length code */
+#define CAN_ERR_PROT_LOC_DATA 0x0A /**< data section */
+#define CAN_ERR_PROT_LOC_CRC_SEQ 0x08 /**< CRC sequence */
+#define CAN_ERR_PROT_LOC_CRC_DEL 0x18 /**< CRC delimiter */
+#define CAN_ERR_PROT_LOC_ACK 0x19 /**< ACK slot */
+#define CAN_ERR_PROT_LOC_ACK_DEL 0x1B /**< ACK delimiter */
+#define CAN_ERR_PROT_LOC_EOF 0x1A /**< end of frame */
+#define CAN_ERR_PROT_LOC_INTERM 0x12 /**< intermission */
+/** @} */
+
+/*!
+ * @anchor Error4 @name Transceiver status
+ * Error in the data[4] field of struct can_frame.
+ * @{ */ +/* error status of CAN-transceiver / data[4] */ +/* CANH CANL */ +#define CAN_ERR_TRX_UNSPEC 0x00 /**< 0000 0000 */ +#define CAN_ERR_TRX_CANH_NO_WIRE 0x04 /**< 0000 0100 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_BAT 0x05 /**< 0000 0101 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_VCC 0x06 /**< 0000 0110 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_GND 0x07 /**< 0000 0111 */ +#define CAN_ERR_TRX_CANL_NO_WIRE 0x40 /**< 0100 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_BAT 0x50 /**< 0101 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_VCC 0x60 /**< 0110 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_GND 0x70 /**< 0111 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_CANH 0x80 /**< 1000 0000 */ +/** @} */ + +/** @} */ + +#endif /* !_RTDM_UAPI_CAN_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h new file mode 100644 index 0000000..82612a5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h @@ -0,0 +1,43 @@ +/** + * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _RTDM_UAPI_GPIO_H +#define _RTDM_UAPI_GPIO_H + +struct rtdm_gpio_readout { + nanosecs_abs_t timestamp; + __s32 value; +}; + +#define GPIO_RTIOC_DIR_OUT _IOW(RTDM_CLASS_GPIO, 0, int) +#define GPIO_RTIOC_DIR_IN _IO(RTDM_CLASS_GPIO, 1) +#define GPIO_RTIOC_IRQEN _IOW(RTDM_CLASS_GPIO, 2, int) /* GPIO trigger */ +#define GPIO_RTIOC_IRQDIS _IO(RTDM_CLASS_GPIO, 3) +#define GPIO_RTIOC_REQS _IO(RTDM_CLASS_GPIO, 4) +#define GPIO_RTIOC_RELS _IO(RTDM_CLASS_GPIO, 5) +#define GPIO_RTIOC_TS_MONO _IOR(RTDM_CLASS_GPIO, 7, int) +#define GPIO_RTIOC_TS_REAL _IOR(RTDM_CLASS_GPIO, 8, int) +#define GPIO_RTIOC_TS GPIO_RTIOC_TS_REAL + +#define GPIO_TRIGGER_NONE 0x0 /* unspecified */ +#define GPIO_TRIGGER_EDGE_RISING 0x1 +#define GPIO_TRIGGER_EDGE_FALLING 0x2 +#define GPIO_TRIGGER_LEVEL_HIGH 0x4 +#define GPIO_TRIGGER_LEVEL_LOW 0x8 +#define GPIO_TRIGGER_MASK 0xf + +#endif /* !_RTDM_UAPI_GPIO_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h new file mode 100644 index 0000000..512c89c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h @@ -0,0 +1,56 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, pwm header + * + * @note Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * @ingroup rttesting
+ */
+#ifndef _RTDM_UAPI_PWM_H
+#define _RTDM_UAPI_PWM_H
+
+#include <linux/types.h>
+
+#define RTPWM_PROFILE_VER 1
+
+struct gpiopwm {
+	unsigned int duty_cycle;
+	unsigned int range_min;
+	unsigned int range_max;
+	unsigned int period;
+	unsigned int gpio;
+};
+
+#define RTIOC_TYPE_PWM RTDM_CLASS_PWM
+
+#define GPIOPWM_RTIOC_SET_CONFIG \
+	_IOW(RTIOC_TYPE_PWM, 0x00, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_GET_CONFIG \
+	_IOR(RTIOC_TYPE_PWM, 0x10, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_START \
+	_IO(RTIOC_TYPE_PWM, 0x20)
+
+#define GPIOPWM_RTIOC_STOP \
+	_IO(RTIOC_TYPE_PWM, 0x30)
+
+#define GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE \
+	_IOW(RTIOC_TYPE_PWM, 0x40, unsigned int)
+
+#endif /* !_RTDM_UAPI_PWM_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h
new file mode 100644
index 0000000..432cd9b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h
@@ -0,0 +1,881 @@
+/**
+ * @file
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef _RTDM_UAPI_IPC_H
+#define _RTDM_UAPI_IPC_H
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_ipc Real-time IPC
+ *
+ * @b Profile @b Revision: 1
+ * @n
+ * @n
+ * @par Device Characteristics
+ * @n
+ * @ref rtdm_driver_flags "Device Flags": @c RTDM_PROTOCOL_DEVICE @n
+ * @n
+ * @ref rtdm_driver.protocol_family "Protocol Family": @c PF_RTIPC @n
+ * @n
+ * @ref rtdm_driver.socket_type "Socket Type": @c SOCK_DGRAM @n
+ * @n
+ * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_RTIPC @n
+ * @n
+ * @{
+ *
+ * @anchor rtipc_operations @name Supported operations
+ * Standard socket operations supported by the RTIPC protocols.
+ * @{
+ */
+
+/** Create an endpoint for communication in the AF_RTIPC domain.
+ *
+ * @param[in] domain The communication domain. Must be AF_RTIPC.
+ *
+ * @param[in] type The socket type. Must be SOCK_DGRAM.
+ *
+ * @param [in] protocol Any of @ref IPCPROTO_XDDP, @ref IPCPROTO_IDDP,
+ * or @ref IPCPROTO_BUFP. @ref IPCPROTO_IPC is also valid, and refers
+ * to the default RTIPC protocol, namely @ref IPCPROTO_IDDP.
+ *
+ * @return In addition to the standard error codes for @c socket(2),
+ * the following specific error code may be returned:
+ * - -ENOPROTOOPT (Protocol is known, but not compiled in the RTIPC driver).
+ *   See @ref RTIPC_PROTO "RTIPC protocols"
+ *   for available protocols.
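+ *
+ * @par Example
+ * A minimal sketch (illustrative only; error handling reduced to a check):
+ * @code
+ * int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+ * if (s < 0)
+ *     return -errno;
+ * @endcode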
+ * + * @par Calling context: + * non-RT + */ +#ifdef DOXYGEN_CPP +int socket__AF_RTIPC(int domain =AF_RTIPC, int type =SOCK_DGRAM, int protocol); +#endif + +/** + * Close a RTIPC socket descriptor. + * + * Blocking calls to any of the @ref sendmsg__AF_RTIPC "sendmsg" or @ref + * recvmsg__AF_RTIPC "recvmsg" functions will be unblocked when the socket + * is closed and return with an error. + * + * @param[in] sockfd The socket descriptor to close. + * + * @return In addition to the standard error codes for @c close(2), + * the following specific error code may be returned: + * none + * + * @par Calling context: + * non-RT + */ +#ifdef DOXYGEN_CPP +int close__AF_RTIPC(int sockfd); +#endif + +/** + * Bind a RTIPC socket to a port. + * + * Bind the socket to a destination port. + * + * @param[in] sockfd The RTDM file descriptor obtained from the socket + * creation call. + * + * @param [in] addr The address to bind the socket to (see struct + * sockaddr_ipc). The meaning of such address depends on the RTIPC + * protocol in use for the socket: + * + * - IPCPROTO_XDDP + * + * This action creates an endpoint for channelling traffic between + * the Xenomai and Linux domains. + * + * @em sipc_family must be AF_RTIPC, @em sipc_port is either -1, + * or a valid free port number between 0 and + * CONFIG_XENO_OPT_PIPE_NRDEV-1. + * + * If @em sipc_port is -1, a free port will be assigned automatically. + * + * Upon success, the pseudo-device /dev/rtp@em N will be reserved + * for this communication channel, where @em N is the assigned port + * number. The non real-time side shall open this device to exchange + * data over the bound socket. + * + * @anchor xddp_label_binding + * If a label was assigned (see @ref XDDP_LABEL) prior to + * binding the socket to a port, a registry link referring to the + * created pseudo-device will be automatically set up as + * @c /proc/xenomai/registry/rtipc/xddp/@em label, where @em label is the + * label string passed to setsockopt() for the @ref XDDP_LABEL option. + * + * - IPCPROTO_IDDP + * + * This action creates an endpoint for exchanging datagrams within + * the Xenomai domain. + * + * @em sipc_family must be AF_RTIPC, @em sipc_port is either -1, + * or a valid free port number between 0 and + * CONFIG_XENO_OPT_IDDP_NRPORT-1. + * + * If @em sipc_port is -1, a free port will be assigned + * automatically. The real-time peer shall connect to the same port + * for exchanging data over the bound socket. + * + * @anchor iddp_label_binding + * If a label was assigned (see @ref IDDP_LABEL) prior to binding + * the socket to a port, a registry link referring to the assigned + * port number will be automatically set up as @c + * /proc/xenomai/registry/rtipc/iddp/@em label, where @em label is + * the label string passed to setsockopt() for the @ref IDDP_LABEL + * option. + * + * - IPCPROTO_BUFP + * + * This action creates an endpoint for a one-way byte + * stream within the Xenomai domain. + * + * @em sipc_family must be AF_RTIPC, @em sipc_port is either -1, + * or a valid free port number between 0 and CONFIG_XENO_OPT_BUFP_NRPORT-1. + * + * If @em sipc_port is -1, an available port will be assigned + * automatically. The real-time peer shall connect to the same port + * for exchanging data over the bound socket. 
+ *
+ * @anchor bufp_label_binding
+ * If a label was assigned (see @ref BUFP_LABEL) prior to binding
+ * the socket to a port, a registry link referring to the assigned
+ * port number will be automatically set up as @c
+ * /proc/xenomai/registry/rtipc/bufp/@em label, where @em label is
+ * the label string passed to setsockopt() for the @a BUFP_LABEL
+ * option.
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c
+ * bind(2), the following specific error code may be returned:
+ * - -EFAULT (Invalid data address given)
+ * - -ENOMEM (Not enough memory)
+ * - -EINVAL (Invalid parameter)
+ * - -EADDRINUSE (Socket already bound to a port, or no port available)
+ * - -EAGAIN (no registry slot available, check/raise
+ *   CONFIG_XENO_OPT_REGISTRY_NRSLOTS)
+ * .
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int bind__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		   socklen_t addrlen);
+#endif
+
+/**
+ * Initiate a connection on a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param [in] addr The address to connect the socket to (see struct
+ * sockaddr_ipc).
+ *
+ * - If sipc_port is a valid port for the protocol, it is used
+ * verbatim and the connection succeeds immediately, regardless of
+ * whether the destination is bound at the time of the call.
+ *
+ * - If sipc_port is -1 and a label was assigned to the socket,
+ * connect() blocks for the requested amount of time (see @ref
+ * SO_RCVTIMEO) until a socket is bound to the same label via @c
+ * bind(2) (see @ref XDDP_LABEL, @ref IDDP_LABEL, @ref BUFP_LABEL), in
+ * which case a connection is established between both endpoints.
+ *
+ * - If sipc_port is -1 and no label was assigned to the socket, the
+ * default destination address is cleared, meaning that any subsequent
+ * write to the socket will return -EDESTADDRREQ, until a valid
+ * destination address is set via @c connect(2) or @c bind(2).
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c connect(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int connect__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		      socklen_t addrlen);
+#endif
+
+/**
+ * Set options on RTIPC sockets.
+ *
+ * These functions allow setting various socket options.
+ * Supported Levels and Options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * setsockopt(2), the following specific error code may
+ * be returned:
+ * follow the option links above.
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int setsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 const void *optval, socklen_t optlen);
+#endif
+/**
+ * Get options on RTIPC sockets.
+ *
+ * These functions allow getting various socket options.
+ * Supported Levels and Options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * getsockopt(2), the following specific error code may
+ * be returned:
+ * follow the option links above.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 void *optval, socklen_t *optlen);
+#endif
+
+/**
+ * Send a message on a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[in] msg The address of the message header conveying the
+ * datagram.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_OOB Send out-of-band message. For all RTIPC protocols except
+ * @ref IPCPROTO_BUFP, sending out-of-band data actually means
+ * pushing them to the head of the receiving queue, so that the
+ * reader will always receive them before normal messages. @ref
+ * IPCPROTO_BUFP does not support out-of-band sending.
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ * blocked whenever the message cannot be sent immediately at the
+ * time of the call (e.g. memory shortage), but will rather return
+ * with -EWOULDBLOCK. Unlike other RTIPC protocols, @ref
+ * IPCPROTO_XDDP accepts but never considers MSG_DONTWAIT since
+ * writing to a real-time XDDP endpoint is inherently a non-blocking
+ * operation.
+ *
+ * - MSG_MORE Accumulate data before sending. This flag is accepted by
+ * the @ref IPCPROTO_XDDP protocol only, and tells the send service
+ * to accumulate the outgoing data into an internal streaming
+ * buffer, instead of issuing a datagram immediately for it. See
+ * @ref XDDP_BUFSZ for more.
+ *
+ * @note No RTIPC protocol allows for short writes, and only complete
+ * messages are sent to the peer.
+ *
+ * @return In addition to the standard error codes for @c sendmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t sendmsg__AF_RTIPC(int sockfd, const struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Receive a message from a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[out] msg The address the message header will be copied at.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ * blocked whenever no message is immediately available for receipt
+ * at the time of the call, but will rather return with
+ * -EWOULDBLOCK.
+ *
+ * @note @ref IPCPROTO_BUFP does not allow for short reads and always
+ * returns the requested amount of bytes, except in one situation:
+ * whenever a writer is blocked waiting to send data because the buffer
+ * is full, while the caller would have to wait to receive a complete
+ * message. This is usually the sign of a pathological use of the BUFP
+ * socket, like defining an incorrect buffer size via @ref
+ * BUFP_BUFSZ. In that case, a short read is allowed to prevent a
+ * deadlock.
+ *
+ * @return In addition to the standard error codes for @c recvmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t recvmsg__AF_RTIPC(int sockfd, struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Get socket name.
+ *
+ * The name of the local endpoint for the socket is copied back (see
+ * struct sockaddr_ipc).
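+ *
+ * @par Example
+ * A minimal sketch retrieving the port that was auto-assigned by a
+ * bind() call issued with @c sipc_port set to -1 (illustrative only):
+ * @code
+ * struct sockaddr_ipc name;
+ * socklen_t namelen = sizeof(name);
+ * if (getsockname(s, (struct sockaddr *)&name, &namelen) == 0)
+ *     printf("bound to port %d\n", name.sipc_port);
+ * @endcode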
+ * + * @return In addition to the standard error codes for @c getsockname(2), + * the following specific error code may be returned: + * none. + * + * @par Calling context: + * RT/non-RT + */ +#ifdef DOXYGEN_CPP +int getsockname__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen); +#endif + +/** + * Get socket peer. + * + * The name of the remote endpoint for the socket is copied back (see + * struct sockaddr_ipc). This is the default destination address for + * messages sent on the socket. It can be set either explicitly via @c + * connect(2), or implicitly via @c bind(2) if no @c connect(2) was + * called prior to binding the socket to a port, in which case both + * the local and remote names are equal. + * + * @return In addition to the standard error codes for @c getpeername(2), + * the following specific error code may be returned: + * none. + * + * @par Calling context: + * RT/non-RT + */ +#ifdef DOXYGEN_CPP +int getpeername__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen); +#endif + +/** @} */ + +#include <cobalt/uapi/kernel/types.h> +#include <cobalt/uapi/kernel/pipe.h> +#include <rtdm/rtdm.h> + +/* Address family */ +#define AF_RTIPC 111 + +/* Protocol family */ +#define PF_RTIPC AF_RTIPC + +/** + * @anchor RTIPC_PROTO @name RTIPC protocol list + * protocols for the PF_RTIPC protocol family + * + * @{ */ +enum { +/** Default protocol (IDDP) */ + IPCPROTO_IPC = 0, +/** + * Cross-domain datagram protocol (RT <-> non-RT). + * + * Real-time Xenomai threads and regular Linux threads may want to + * exchange data in a way that does not require the former to leave + * the real-time domain (i.e. primary mode). The RTDM-based XDDP + * protocol is available for this purpose. + * + * On the Linux domain side, pseudo-device files named /dev/rtp@em \<minor\> + * give regular POSIX threads access to non real-time communication + * endpoints, via the standard character-based I/O interface. On the + * Xenomai domain side, sockets may be bound to XDDP ports, which act + * as proxies to send and receive data to/from the associated + * pseudo-device files. Ports and pseudo-device minor numbers are + * paired, meaning that e.g. socket port 7 will proxy the traffic to/from + * /dev/rtp7. + * + * All data sent through a bound/connected XDDP socket via @c + * sendto(2) or @c write(2) will be passed to the peer endpoint in the + * Linux domain, and made available for reading via the standard @c + * read(2) system call. Conversely, all data sent using @c write(2) + * through the non real-time endpoint will be conveyed to the + * real-time socket endpoint, and made available to the @c recvfrom(2) + * or @c read(2) system calls. + */ + IPCPROTO_XDDP = 1, +/** + * Intra-domain datagram protocol (RT <-> RT). + * + * The RTDM-based IDDP protocol enables real-time threads to exchange + * datagrams within the Xenomai domain, via socket endpoints. + */ + IPCPROTO_IDDP = 2, +/** + * Buffer protocol (RT <-> RT, byte-oriented). + * + * The RTDM-based BUFP protocol implements a lightweight, + * byte-oriented, one-way Producer-Consumer data path. All messages + * written are buffered into a single memory area in strict FIFO + * order, until read by the consumer. + * + * This protocol always prevents short writes, and only allows short + * reads when a potential deadlock situation arises (i.e. readers and + * writers waiting for each other indefinitely). + */ + IPCPROTO_BUFP = 3, + IPCPROTO_MAX +}; +/** @} */ + +/** + * Port number type for the RTIPC address family. 
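+ *
+ * As an illustration of the addressing scheme (the port value is an
+ * arbitrary placeholder; -1 requests automatic assignment):
+ * @code
+ * struct sockaddr_ipc saddr = {
+ *     .sipc_family = AF_RTIPC,
+ *     .sipc_port = 12,
+ * };
+ * bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+ * @endcode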
+ */ +typedef int16_t rtipc_port_t; + +/** + * Port label information structure. + */ +struct rtipc_port_label { + /** Port label string, null-terminated. */ + char label[XNOBJECT_NAME_LEN]; +}; + +/** + * Socket address structure for the RTIPC address family. + */ +struct sockaddr_ipc { + /** RTIPC address family, must be @c AF_RTIPC */ + sa_family_t sipc_family; + /** Port number. */ + rtipc_port_t sipc_port; +}; + +#define SOL_XDDP 311 +/** + * @anchor sockopts_xddp @name XDDP socket options + * Setting and getting XDDP socket options. + * @{ */ +/** + * XDDP label assignment + * + * ASCII label strings can be attached to XDDP ports, so that opening + * the non-RT endpoint can be done by specifying this symbolic device + * name rather than referring to a raw pseudo-device entry + * (i.e. /dev/rtp@em N). + * + * When available, this label will be registered when binding, in + * addition to the port number (see @ref xddp_label_binding + * "XDDP port binding"). + * + * It is not allowed to assign a label after the socket was + * bound. However, multiple assignment calls are allowed prior to the + * binding; the last label set will be used. + * + * @param [in] level @ref sockopts_xddp "SOL_XDDP" + * @param [in] optname @b XDDP_LABEL + * @param [in] optval Pointer to struct rtipc_port_label + * @param [in] optlen sizeof(struct rtipc_port_label) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen invalid) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define XDDP_LABEL 1 +/** + * XDDP local pool size configuration + * + * By default, the memory needed to convey the data is pulled from + * Xenomai's system pool. Setting a local pool size overrides this + * default for the socket. + * + * If a non-zero size was configured, a local pool is allocated at + * binding time. This pool will provide storage for pending datagrams. + * + * It is not allowed to configure a local pool size after the socket + * was bound. However, multiple configuration calls are allowed prior + * to the binding; the last value set will be used. + * + * @note: the pool memory is obtained from the host allocator by the + * @ref bind__AF_RTIPC "bind call". + * + * @param [in] level @ref sockopts_xddp "SOL_XDDP" + * @param [in] optname @b XDDP_POOLSZ + * @param [in] optval Pointer to a variable of type size_t, containing + * the required size of the local pool to reserve at binding time + * @param [in] optlen sizeof(size_t) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen invalid or *@a optval is zero) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define XDDP_POOLSZ 2 +/** + * XDDP streaming buffer size configuration + * + * In addition to sending datagrams, real-time threads may stream data + * in a byte-oriented mode through the port as well. This increases + * the bandwidth and reduces the overhead, when the overall data to + * send to the Linux domain is collected by bits, and keeping the + * message boundaries is not required. + * + * This feature is enabled when a non-zero buffer size is set for the + * socket. 
In that case, the real-time data accumulates into the
+ * streaming buffer when MSG_MORE is passed to any of the @ref
+ * sendmsg__AF_RTIPC "send functions", until:
+ *
+ * - the receiver from the Linux domain wakes up and consumes it,
+ * - a different source port attempts to send data to the same
+ *   destination port,
+ * - MSG_MORE is absent from the send flags,
+ * - the buffer is full,
+ * .
+ * whichever comes first.
+ *
+ * Setting *@a optval to zero disables the streaming buffer, in which
+ * case all outgoing data is conveyed in separate datagrams, regardless
+ * of MSG_MORE.
+ *
+ * @note Only a single streaming buffer exists per socket. When this
+ * buffer is full, the real-time data stops accumulating and sending
+ * operations resume in mere datagram mode. Accumulation may happen
+ * again after some or all data in the streaming buffer is consumed
+ * from the Linux domain endpoint.
+ *
+ * The streaming buffer size may be adjusted multiple times during the
+ * socket lifetime; the latest configuration change will take effect
+ * when the accumulation resumes after the previous buffer was
+ * flushed.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_BUFSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the streaming buffer
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -ENOMEM (Not enough memory)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_BUFSZ 3
+/**
+ * XDDP monitoring callback
+ *
+ * Other RTDM drivers may install a user-defined callback via the @ref
+ * rtdm_setsockopt call from the inter-driver API, in order to collect
+ * particular events occurring on the channel.
+ *
+ * This notification mechanism is particularly useful to monitor a
+ * channel asynchronously while performing other tasks.
+ *
+ * The user-provided routine will be passed the RTDM file descriptor
+ * of the socket receiving the event, the event code, and an optional
+ * argument. Four events are currently defined, see @ref XDDP_EVENTS.
+ *
+ * The XDDP_EVTIN and XDDP_EVTOUT events are fired on behalf of a
+ * fully atomic context; therefore, care must be taken to keep their
+ * overhead low. In those cases, the Xenomai services that may be
+ * called from the callback are restricted to the set allowed to a
+ * real-time interrupt handler.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_MONITOR
+ * @param [in] optval Pointer to a pointer to function of type int
+ * (*)(int fd, int event, long arg), containing the address of the
+ * user-defined callback. Passing a NULL callback pointer
+ * in @a optval disables monitoring.
+ * @param [in] optlen sizeof(int (*)(int fd, int event, long arg))
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EPERM (Operation not allowed from user-space)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT, kernel space only
+ */
+#define XDDP_MONITOR 4
+/** @} */
+
+/**
+ * @anchor XDDP_EVENTS @name XDDP events
+ * Specific events occurring on XDDP channels, which can be monitored
+ * via the @ref XDDP_MONITOR socket option.
+ *
+ * @{ */
+/**
+ * @ref XDDP_MONITOR "Monitor" writes to the non real-time endpoint.
+ * + * XDDP_EVTIN is sent when data is written to the non real-time + * endpoint the socket is bound to (i.e. via /dev/rtp@em N), which + * means that some input is pending for the real-time endpoint. The + * argument is the size of the incoming message. + */ +#define XDDP_EVTIN 1 +/** + * @ref XDDP_MONITOR "Monitor" reads from the non real-time endpoint. + * + * XDDP_EVTOUT is sent when the non real-time endpoint successfully + * reads a complete message (i.e. via /dev/rtp@em N). The argument is + * the size of the outgoing message. + */ +#define XDDP_EVTOUT 2 +/** + * @ref XDDP_MONITOR "Monitor" close from the non real-time endpoint. + * + * XDDP_EVTDOWN is sent when the non real-time endpoint is closed. The + * argument is always 0. + */ +#define XDDP_EVTDOWN 3 +/** + * @ref XDDP_MONITOR "Monitor" memory shortage for non real-time + * datagrams. + * + * XDDP_EVTNOBUF is sent when no memory is available from the pool to + * hold the message currently sent from the non real-time + * endpoint. The argument is the size of the failed allocation. Upon + * return from the callback, the caller will block and retry until + * enough space is available from the pool; during that process, the + * callback might be invoked multiple times, each time a new attempt + * to get the required memory fails. + */ +#define XDDP_EVTNOBUF 4 +/** @} */ + +#define SOL_IDDP 312 +/** + * @anchor sockopts_iddp @name IDDP socket options + * Setting and getting IDDP socket options. + * @{ */ +/** + * IDDP label assignment + * + * ASCII label strings can be attached to IDDP ports, in order to + * connect sockets to them in a more descriptive way than using plain + * numeric port values. + * + * When available, this label will be registered when binding, in + * addition to the port number (see @ref iddp_label_binding + * "IDDP port binding"). + * + * It is not allowed to assign a label after the socket was + * bound. However, multiple assignment calls are allowed prior to the + * binding; the last label set will be used. + * + * @param [in] level @ref sockopts_iddp "SOL_IDDP" + * @param [in] optname @b IDDP_LABEL + * @param [in] optval Pointer to struct rtipc_port_label + * @param [in] optlen sizeof(struct rtipc_port_label) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen is invalid) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define IDDP_LABEL 1 +/** + * IDDP local pool size configuration + * + * By default, the memory needed to convey the data is pulled from + * Xenomai's system pool. Setting a local pool size overrides this + * default for the socket. + * + * If a non-zero size was configured, a local pool is allocated at + * binding time. This pool will provide storage for pending datagrams. + * + * It is not allowed to configure a local pool size after the socket + * was bound. However, multiple configuration calls are allowed prior + * to the binding; the last value set will be used. + * + * @note: the pool memory is obtained from the host allocator by the + * @ref bind__AF_RTIPC "bind call". + * + * @param [in] level @ref sockopts_iddp "SOL_IDDP" + * @param [in] optname @b IDDP_POOLSZ + * @param [in] optval Pointer to a variable of type size_t, containing + * the required size of the local pool to reserve at binding time + * @param [in] optlen sizeof(size_t) + * + * @return 0 is returned upon success. 
Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen is invalid or *@a optval is zero) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define IDDP_POOLSZ 2 +/** @} */ + +#define SOL_BUFP 313 +/** + * @anchor sockopts_bufp @name BUFP socket options + * Setting and getting BUFP socket options. + * @{ */ +/** + * BUFP label assignment + * + * ASCII label strings can be attached to BUFP ports, in order to + * connect sockets to them in a more descriptive way than using plain + * numeric port values. + * + * When available, this label will be registered when binding, in + * addition to the port number (see @ref bufp_label_binding + * "BUFP port binding"). + * + * It is not allowed to assign a label after the socket was + * bound. However, multiple assignment calls are allowed prior to the + * binding; the last label set will be used. + * + * @param [in] level @ref sockopts_bufp "SOL_BUFP" + * @param [in] optname @b BUFP_LABEL + * @param [in] optval Pointer to struct rtipc_port_label + * @param [in] optlen sizeof(struct rtipc_port_label) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen is invalid) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define BUFP_LABEL 1 +/** + * BUFP buffer size configuration + * + * All messages written to a BUFP socket are buffered in a single + * per-socket memory area. Configuring the size of such buffer prior + * to binding the socket to a destination port is mandatory. + * + * It is not allowed to configure a buffer size after the socket was + * bound. However, multiple configuration calls are allowed prior to + * the binding; the last value set will be used. + * + * @note: the buffer memory is obtained from the host allocator by the + * @ref bind__AF_RTIPC "bind call". + * + * @param [in] level @ref sockopts_bufp "SOL_BUFP" + * @param [in] optname @b BUFP_BUFSZ + * @param [in] optval Pointer to a variable of type size_t, containing + * the required size of the buffer to reserve at binding time + * @param [in] optlen sizeof(size_t) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen is invalid or *@a optval is zero) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define BUFP_BUFSZ 2 +/** @} */ + +/** + * @anchor sockopts_socket @name Socket level options + * Setting and getting supported standard socket level options. + * @{ */ +/** + * + * @ref IPCPROTO_IDDP and @ref IPCPROTO_BUFP protocols support the + * standard SO_SNDTIMEO socket option, from the @c SOL_SOCKET level. + * + * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399/ + */ +#ifdef DOXYGEN_CPP +#define SO_SNDTIMEO defined_by_kernel_header_file +#endif +/** + * + * All RTIPC protocols support the standard SO_RCVTIMEO socket option, + * from the @c SOL_SOCKET level. 
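+ *
+ * A hedged sketch using the standard option semantics (the timeout
+ * value is an arbitrary placeholder):
+ * @code
+ * struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
+ * setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
+ * @endcode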
+ * + * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399/ + */ +#ifdef DOXYGEN_CPP +#define SO_RCVTIMEO defined_by_kernel_header_file +#endif +/** @} */ + +/** + * @anchor rtdm_ipc_examples @name RTIPC examples + * @{ */ +/** @example bufp-readwrite.c */ +/** @example bufp-label.c */ +/** @example iddp-label.c */ +/** @example iddp-sendrecv.c */ +/** @example xddp-echo.c */ +/** @example xddp-label.c */ +/** @example xddp-stream.c */ +/** @} */ + +/** @} */ + +#endif /* !_RTDM_UAPI_IPC_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h new file mode 100644 index 0000000..65a0e79 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h @@ -0,0 +1,75 @@ +/*** + * + * RTnet - real-time networking subsystem + * Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * As a special exception to the GNU General Public license, the RTnet + * project allows you to use this header file in unmodified form to produce + * application programs executing in user-space which use RTnet services by + * normal system calls. The resulting executable will not be covered by the + * GNU General Public License merely as a result of this header file use. + * Instead, this header file use will be considered normal use of RTnet and + * not a "derived work" in the sense of the GNU General Public License. + * + * This exception does not apply when the application code is built as a + * static or dynamically loadable portion of the Linux kernel nor does the + * exception override other reasons justifying application of the GNU General + * Public License. + * + * This exception applies only to the code released by the RTnet project + * under the name RTnet and bearing this exception notice. If you copy code + * from other sources into a copy of RTnet, the exception does not apply to + * the code that you add in this way. + * + */ + +#ifndef _RTDM_UAPI_NET_H +#define _RTDM_UAPI_NET_H + +/* sub-classes: RTDM_CLASS_NETWORK */ +#define RTDM_SUBCLASS_RTNET 0 + +#define RTIOC_TYPE_NETWORK RTDM_CLASS_NETWORK + +/* RTnet-specific IOCTLs */ +#define RTNET_RTIOC_XMITPARAMS _IOW(RTIOC_TYPE_NETWORK, 0x10, unsigned int) +#define RTNET_RTIOC_PRIORITY RTNET_RTIOC_XMITPARAMS /* legacy */ +#define RTNET_RTIOC_TIMEOUT _IOW(RTIOC_TYPE_NETWORK, 0x11, int64_t) +/* RTNET_RTIOC_CALLBACK _IOW(RTIOC_TYPE_NETWORK, 0x12, ... + * IOCTL only usable inside the kernel. */ +/* RTNET_RTIOC_NONBLOCK _IOW(RTIOC_TYPE_NETWORK, 0x13, unsigned int) + * This IOCTL is no longer supported (and it was buggy anyway). + * Use RTNET_RTIOC_TIMEOUT with any negative timeout value instead. 
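+ * For instance (illustrative only):
+ *   int64_t timeout = -1;              // negative => non-blocking
+ *   ioctl(sockfd, RTNET_RTIOC_TIMEOUT, &timeout);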
*/ +#define RTNET_RTIOC_EXTPOOL _IOW(RTIOC_TYPE_NETWORK, 0x14, unsigned int) +#define RTNET_RTIOC_SHRPOOL _IOW(RTIOC_TYPE_NETWORK, 0x15, unsigned int) + +/* socket transmission priorities */ +#define SOCK_MAX_PRIO 0 +#define SOCK_DEF_PRIO SOCK_MAX_PRIO + \ + (SOCK_MIN_PRIO-SOCK_MAX_PRIO+1)/2 +#define SOCK_MIN_PRIO SOCK_NRT_PRIO - 1 +#define SOCK_NRT_PRIO 31 + +/* socket transmission channels */ +#define SOCK_DEF_RT_CHANNEL 0 /* default rt xmit channel */ +#define SOCK_DEF_NRT_CHANNEL 1 /* default non-rt xmit channel */ +#define SOCK_USER_CHANNEL 2 /* first user-defined channel */ + +/* argument construction for RTNET_RTIOC_XMITPARAMS */ +#define SOCK_XMIT_PARAMS(priority, channel) ((priority) | ((channel) << 16)) + +#endif /* !_RTDM_UAPI_NET_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h new file mode 100644 index 0000000..80c789a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h @@ -0,0 +1,203 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, user API header. + * + * @note Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de> + * @note Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * @ingroup rtdm_user_api + */ +#ifndef _RTDM_UAPI_RTDM_H +#define _RTDM_UAPI_RTDM_H + +/*! + * @addtogroup rtdm + * @{ + */ + +/*! + * @anchor rtdm_api_versioning @name API Versioning + * @{ */ +/** Common user and driver API version */ +#define RTDM_API_VER 9 + +/** Minimum API revision compatible with the current release */ +#define RTDM_API_MIN_COMPAT_VER 9 +/** @} API Versioning */ + +/** RTDM type for representing absolute dates. Its base type is a 64 bit + * unsigned integer. The unit is 1 nanosecond. */ +typedef uint64_t nanosecs_abs_t; + +/** RTDM type for representing relative intervals. Its base type is a 64 bit + * signed integer. The unit is 1 nanosecond. Relative intervals can also + * encode the special timeouts "infinite" and "non-blocking", see + * @ref RTDM_TIMEOUT_xxx. */ +typedef int64_t nanosecs_rel_t; + +/*! + * @anchor RTDM_TIMEOUT_xxx @name RTDM_TIMEOUT_xxx + * Special timeout values + * @{ */ +/** Block forever. */ +#define RTDM_TIMEOUT_INFINITE 0 + +/** Any negative timeout means non-blocking. */ +#define RTDM_TIMEOUT_NONE (-1) +/** @} RTDM_TIMEOUT_xxx */ +/** @} rtdm */ + +/*! + * @addtogroup rtdm_profiles + * @{ + */ + +/*! 
+ * @anchor RTDM_CLASS_xxx @name RTDM_CLASS_xxx + * Device classes + * @{ */ +#define RTDM_CLASS_PARPORT 1 +#define RTDM_CLASS_SERIAL 2 +#define RTDM_CLASS_CAN 3 +#define RTDM_CLASS_NETWORK 4 +#define RTDM_CLASS_RTMAC 5 +#define RTDM_CLASS_TESTING 6 +#define RTDM_CLASS_RTIPC 7 +#define RTDM_CLASS_COBALT 8 +#define RTDM_CLASS_UDD 9 +#define RTDM_CLASS_MEMORY 10 +#define RTDM_CLASS_GPIO 11 +#define RTDM_CLASS_SPI 12 +#define RTDM_CLASS_PWM 13 + +#define RTDM_CLASS_MISC 223 +#define RTDM_CLASS_EXPERIMENTAL 224 +#define RTDM_CLASS_MAX 255 +/** @} RTDM_CLASS_xxx */ + +#define RTDM_SUBCLASS_GENERIC (-1) + +#define RTIOC_TYPE_COMMON 0 + +/*! + * @anchor device_naming @name Device Naming + * Maximum length of device names (excluding the final null character) + * @{ + */ +#define RTDM_MAX_DEVNAME_LEN 31 +/** @} Device Naming */ + +/** + * Device information + */ +typedef struct rtdm_device_info { + /** Device flags, see @ref dev_flags "Device Flags" for details */ + int device_flags; + + /** Device class ID, see @ref RTDM_CLASS_xxx */ + int device_class; + + /** Device sub-class, either RTDM_SUBCLASS_GENERIC or a + * RTDM_SUBCLASS_xxx definition of the related @ref rtdm_profiles + * "Device Profile" */ + int device_sub_class; + + /** Supported device profile version */ + int profile_version; +} rtdm_device_info_t; + +/*! + * @anchor RTDM_PURGE_xxx_BUFFER @name RTDM_PURGE_xxx_BUFFER + * Flags selecting buffers to be purged + * @{ */ +#define RTDM_PURGE_RX_BUFFER 0x0001 +#define RTDM_PURGE_TX_BUFFER 0x0002 +/** @} RTDM_PURGE_xxx_BUFFER*/ + +/*! + * @anchor common_IOCTLs @name Common IOCTLs + * The following IOCTLs are common to all device rtdm_profiles. + * @{ + */ + +/** + * Retrieve information about a device or socket. + * @param[out] arg Pointer to information buffer (struct rtdm_device_info) + */ +#define RTIOC_DEVICE_INFO \ + _IOR(RTIOC_TYPE_COMMON, 0x00, struct rtdm_device_info) + +/** + * Purge internal device or socket buffers. 
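+ *
+ * @par Example
+ * An illustrative sketch, first checking the device class via
+ * @ref RTIOC_DEVICE_INFO; the purge mask is passed as the IOCTL
+ * argument as described below:
+ * @code
+ * struct rtdm_device_info info;
+ * if (ioctl(fd, RTIOC_DEVICE_INFO, &info) == 0 &&
+ *     info.device_class == RTDM_CLASS_SERIAL)
+ *     ioctl(fd, RTIOC_PURGE, RTDM_PURGE_RX_BUFFER | RTDM_PURGE_TX_BUFFER);
+ * @endcode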
+ * @param[in] arg Purge mask, see @ref RTDM_PURGE_xxx_BUFFER + */ +#define RTIOC_PURGE _IOW(RTIOC_TYPE_COMMON, 0x10, int) +/** @} Common IOCTLs */ +/** @} rtdm */ + +/* Internally used for mapping socket functions on IOCTLs */ +struct _rtdm_getsockopt_args { + int level; + int optname; + void *optval; + socklen_t *optlen; +}; + +struct _rtdm_setsockopt_args { + int level; + int optname; + const void *optval; + socklen_t optlen; +}; + +struct _rtdm_getsockaddr_args { + struct sockaddr *addr; + socklen_t *addrlen; +}; + +struct _rtdm_setsockaddr_args { + const struct sockaddr *addr; + socklen_t addrlen; +}; + +#define _RTIOC_GETSOCKOPT _IOW(RTIOC_TYPE_COMMON, 0x20, \ + struct _rtdm_getsockopt_args) +#define _RTIOC_SETSOCKOPT _IOW(RTIOC_TYPE_COMMON, 0x21, \ + struct _rtdm_setsockopt_args) +#define _RTIOC_BIND _IOW(RTIOC_TYPE_COMMON, 0x22, \ + struct _rtdm_setsockaddr_args) +#define _RTIOC_CONNECT _IOW(RTIOC_TYPE_COMMON, 0x23, \ + struct _rtdm_setsockaddr_args) +#define _RTIOC_LISTEN _IOW(RTIOC_TYPE_COMMON, 0x24, \ + int) +#define _RTIOC_ACCEPT _IOW(RTIOC_TYPE_COMMON, 0x25, \ + struct _rtdm_getsockaddr_args) +#define _RTIOC_GETSOCKNAME _IOW(RTIOC_TYPE_COMMON, 0x26, \ + struct _rtdm_getsockaddr_args) +#define _RTIOC_GETPEERNAME _IOW(RTIOC_TYPE_COMMON, 0x27, \ + struct _rtdm_getsockaddr_args) +#define _RTIOC_SHUTDOWN _IOW(RTIOC_TYPE_COMMON, 0x28, \ + int) + +/* Internally used for mmap() */ +struct _rtdm_mmap_request { + __u64 offset; + size_t length; + int prot; + int flags; +}; + +#endif /* !_RTDM_UAPI_RTDM_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h new file mode 100644 index 0000000..9ac691b --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h @@ -0,0 +1,407 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, serial device profile header + * + * @note Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * @ingroup rtserial + */ +#ifndef _RTDM_UAPI_SERIAL_H +#define _RTDM_UAPI_SERIAL_H + +#define RTSER_PROFILE_VER 3 + +/*! + * @anchor RTSER_DEF_BAUD @name RTSER_DEF_BAUD + * Default baud rate + * @{ */ +#define RTSER_DEF_BAUD 9600 +/** @} */ + +/*! + * @anchor RTSER_xxx_PARITY @name RTSER_xxx_PARITY + * Number of parity bits + * @{ */ +#define RTSER_NO_PARITY 0x00 +#define RTSER_ODD_PARITY 0x01 +#define RTSER_EVEN_PARITY 0x03 +#define RTSER_DEF_PARITY RTSER_NO_PARITY +/** @} */ + +/*! + * @anchor RTSER_xxx_BITS @name RTSER_xxx_BITS + * Number of data bits + * @{ */ +#define RTSER_5_BITS 0x00 +#define RTSER_6_BITS 0x01 +#define RTSER_7_BITS 0x02 +#define RTSER_8_BITS 0x03 +#define RTSER_DEF_BITS RTSER_8_BITS +/** @} */ + +/*! 
+ * @anchor RTSER_xxx_STOPB @name RTSER_xxx_STOPB + * Number of stop bits + * @{ */ +#define RTSER_1_STOPB 0x00 +/** valid only in combination with 5 data bits */ +#define RTSER_1_5_STOPB 0x01 +#define RTSER_2_STOPB 0x01 +#define RTSER_DEF_STOPB RTSER_1_STOPB +/** @} */ + +/*! + * @anchor RTSER_xxx_HAND @name RTSER_xxx_HAND + * Handshake mechanisms + * @{ */ +#define RTSER_NO_HAND 0x00 +#define RTSER_RTSCTS_HAND 0x01 +#define RTSER_DEF_HAND RTSER_NO_HAND +/** @} */ + +/*! + * @anchor RTSER_RS485_xxx @name RTSER_RS485_xxx + * RS485 mode with automatic RTS handling + * @{ */ +#define RTSER_RS485_DISABLE 0x00 +#define RTSER_RS485_ENABLE 0x01 +#define RTSER_DEF_RS485 RTSER_RS485_DISABLE +/** @} */ + +/*! + * @anchor RTSER_FIFO_xxx @name RTSER_FIFO_xxx + * Reception FIFO interrupt threshold + * @{ */ +#define RTSER_FIFO_DEPTH_1 0x00 +#define RTSER_FIFO_DEPTH_4 0x40 +#define RTSER_FIFO_DEPTH_8 0x80 +#define RTSER_FIFO_DEPTH_14 0xC0 +#define RTSER_DEF_FIFO_DEPTH RTSER_FIFO_DEPTH_1 +/** @} */ + +/*! + * @anchor RTSER_TIMEOUT_xxx @name RTSER_TIMEOUT_xxx + * Special timeout values, see also @ref RTDM_TIMEOUT_xxx + * @{ */ +#define RTSER_TIMEOUT_INFINITE RTDM_TIMEOUT_INFINITE +#define RTSER_TIMEOUT_NONE RTDM_TIMEOUT_NONE +#define RTSER_DEF_TIMEOUT RTDM_TIMEOUT_INFINITE +/** @} */ + +/*! + * @anchor RTSER_xxx_TIMESTAMP_HISTORY @name RTSER_xxx_TIMESTAMP_HISTORY + * Timestamp history control + * @{ */ +#define RTSER_RX_TIMESTAMP_HISTORY 0x01 +#define RTSER_DEF_TIMESTAMP_HISTORY 0x00 +/** @} */ + +/*! + * @anchor RTSER_EVENT_xxx @name RTSER_EVENT_xxx + * Events bits + * @{ */ +#define RTSER_EVENT_RXPEND 0x01 +#define RTSER_EVENT_ERRPEND 0x02 +#define RTSER_EVENT_MODEMHI 0x04 +#define RTSER_EVENT_MODEMLO 0x08 +#define RTSER_EVENT_TXEMPTY 0x10 +#define RTSER_DEF_EVENT_MASK 0x00 +/** @} */ + + +/*! + * @anchor RTSER_SET_xxx @name RTSER_SET_xxx + * Configuration mask bits + * @{ */ +#define RTSER_SET_BAUD 0x0001 +#define RTSER_SET_PARITY 0x0002 +#define RTSER_SET_DATA_BITS 0x0004 +#define RTSER_SET_STOP_BITS 0x0008 +#define RTSER_SET_HANDSHAKE 0x0010 +#define RTSER_SET_FIFO_DEPTH 0x0020 +#define RTSER_SET_TIMEOUT_RX 0x0100 +#define RTSER_SET_TIMEOUT_TX 0x0200 +#define RTSER_SET_TIMEOUT_EVENT 0x0400 +#define RTSER_SET_TIMESTAMP_HISTORY 0x0800 +#define RTSER_SET_EVENT_MASK 0x1000 +#define RTSER_SET_RS485 0x2000 +/** @} */ + + +/*! + * @anchor RTSER_LSR_xxx @name RTSER_LSR_xxx + * Line status bits + * @{ */ +#define RTSER_LSR_DATA 0x01 +#define RTSER_LSR_OVERRUN_ERR 0x02 +#define RTSER_LSR_PARITY_ERR 0x04 +#define RTSER_LSR_FRAMING_ERR 0x08 +#define RTSER_LSR_BREAK_IND 0x10 +#define RTSER_LSR_THR_EMTPY 0x20 +#define RTSER_LSR_TRANSM_EMPTY 0x40 +#define RTSER_LSR_FIFO_ERR 0x80 +#define RTSER_SOFT_OVERRUN_ERR 0x0100 +/** @} */ + + +/*! + * @anchor RTSER_MSR_xxx @name RTSER_MSR_xxx + * Modem status bits + * @{ */ +#define RTSER_MSR_DCTS 0x01 +#define RTSER_MSR_DDSR 0x02 +#define RTSER_MSR_TERI 0x04 +#define RTSER_MSR_DDCD 0x08 +#define RTSER_MSR_CTS 0x10 +#define RTSER_MSR_DSR 0x20 +#define RTSER_MSR_RI 0x40 +#define RTSER_MSR_DCD 0x80 +/** @} */ + + +/*! + * @anchor RTSER_MCR_xxx @name RTSER_MCR_xxx + * Modem control bits + * @{ */ +#define RTSER_MCR_DTR 0x01 +#define RTSER_MCR_RTS 0x02 +#define RTSER_MCR_OUT1 0x04 +#define RTSER_MCR_OUT2 0x08 +#define RTSER_MCR_LOOP 0x10 +/** @} */ + + +/*! 
+ * @anchor RTSER_BREAK_xxx @name RTSER_BREAK_xxx + * Break control + * @{ */ +#define RTSER_BREAK_CLR 0x00 +#define RTSER_BREAK_SET 0x01 + + +/** + * Serial device configuration + */ +typedef struct rtser_config { + /** mask specifying valid fields, see @ref RTSER_SET_xxx */ + int config_mask; + + /** baud rate, default @ref RTSER_DEF_BAUD */ + int baud_rate; + + /** number of parity bits, see @ref RTSER_xxx_PARITY */ + int parity; + + /** number of data bits, see @ref RTSER_xxx_BITS */ + int data_bits; + + /** number of stop bits, see @ref RTSER_xxx_STOPB */ + int stop_bits; + + /** handshake mechanisms, see @ref RTSER_xxx_HAND */ + int handshake; + + /** reception FIFO interrupt threshold, see @ref RTSER_FIFO_xxx */ + int fifo_depth; + + int reserved; + + /** reception timeout, see @ref RTSER_TIMEOUT_xxx for special + * values */ + nanosecs_rel_t rx_timeout; + + /** transmission timeout, see @ref RTSER_TIMEOUT_xxx for special + * values */ + nanosecs_rel_t tx_timeout; + + /** event timeout, see @ref RTSER_TIMEOUT_xxx for special values */ + nanosecs_rel_t event_timeout; + + /** enable timestamp history, see @ref RTSER_xxx_TIMESTAMP_HISTORY */ + int timestamp_history; + + /** event mask to be used with @ref RTSER_RTIOC_WAIT_EVENT, see + * @ref RTSER_EVENT_xxx */ + int event_mask; + + /** enable RS485 mode, see @ref RTSER_RS485_xxx */ + int rs485; +} rtser_config_t; + +/** + * Serial device status + */ +typedef struct rtser_status { + /** line status register, see @ref RTSER_LSR_xxx */ + int line_status; + + /** modem status register, see @ref RTSER_MSR_xxx */ + int modem_status; +} rtser_status_t; + +/** + * Additional information about serial device events + */ +typedef struct rtser_event { + /** signalled events, see @ref RTSER_EVENT_xxx */ + int events; + + /** number of pending input characters */ + int rx_pending; + + /** last interrupt timestamp */ + nanosecs_abs_t last_timestamp; + + /** reception timestamp of oldest character in input queue */ + nanosecs_abs_t rxpend_timestamp; +} rtser_event_t; + + +#define RTIOC_TYPE_SERIAL RTDM_CLASS_SERIAL + + +/*! + * @name Sub-Classes of RTDM_CLASS_SERIAL + * @{ */ +#define RTDM_SUBCLASS_16550A 0 +/** @} */ + + +/*! + * @anchor SERIOCTLs @name IOCTLs + * Serial device IOCTLs + * @{ */ + +/** + * Get serial device configuration + * + * @param[out] arg Pointer to configuration buffer (struct rtser_config) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + */ +#define RTSER_RTIOC_GET_CONFIG \ + _IOR(RTIOC_TYPE_SERIAL, 0x00, struct rtser_config) + +/** + * Set serial device configuration + * + * @param[in] arg Pointer to configuration buffer (struct rtser_config) + * + * @return 0 on success, otherwise: + * + * - -EPERM is returned if the caller's context is invalid, see note below. + * + * - -ENOMEM is returned if a new history buffer for timestamps cannot be + * allocated. + * + * @coretags{task-unrestricted} + * + * @note If rtser_config contains a valid timestamp_history and the + * addressed device has been opened in non-real-time context, this IOCTL must + * be issued in non-real-time context as well. Otherwise, this command will + * fail. 
+ */ +#define RTSER_RTIOC_SET_CONFIG \ + _IOW(RTIOC_TYPE_SERIAL, 0x01, struct rtser_config) + +/** + * Get serial device status + * + * @param[out] arg Pointer to status buffer (struct rtser_status) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + * + * @note The error states @c RTSER_LSR_OVERRUN_ERR, @c RTSER_LSR_PARITY_ERR, + * @c RTSER_LSR_FRAMING_ERR, and @c RTSER_SOFT_OVERRUN_ERR that may have + * occurred during previous read accesses to the device will be saved to be + * reported via this IOCTL. Upon return from @c RTSER_RTIOC_GET_STATUS, the + * saved state will be cleared. + */ +#define RTSER_RTIOC_GET_STATUS \ + _IOR(RTIOC_TYPE_SERIAL, 0x02, struct rtser_status) + +/** + * Get serial device's modem control register + * + * @param[out] arg Pointer to variable receiving the content (int, see + * @ref RTSER_MCR_xxx) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + */ +#define RTSER_RTIOC_GET_CONTROL \ + _IOR(RTIOC_TYPE_SERIAL, 0x03, int) + +/** + * Set serial device's modem control register + * + * @param[in] arg New control register content (int, see @ref RTSER_MCR_xxx) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + */ +#define RTSER_RTIOC_SET_CONTROL \ + _IOW(RTIOC_TYPE_SERIAL, 0x04, int) + +/** + * Wait on serial device events according to the previously set mask + * + * @param[out] arg Pointer to event information buffer (struct rtser_event) + * + * @return 0 on success, otherwise: + * + * - -EBUSY is returned if another task is already waiting on events of this + * device. + * + * - -EBADF is returned if the file descriptor is invalid or the device has + * just been closed. + * + * @coretags{mode-unrestricted} + */ +#define RTSER_RTIOC_WAIT_EVENT \ + _IOR(RTIOC_TYPE_SERIAL, 0x05, struct rtser_event) +/** @} */ + +/** + * Set or clear break on UART output line + * + * @param[in] arg @c RTSER_BREAK_SET or @c RTSER_BREAK_CLR (int) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + * + * @note A set break condition may also be cleared on UART line + * reconfiguration. + */ +#define RTSER_RTIOC_BREAK_CTL \ + _IOR(RTIOC_TYPE_SERIAL, 0x06, int) +/** @} */ + +/*! + * @anchor SERutils @name RT Serial example and utility programs + * @{ */ +/** @example cross-link.c */ +/** @} */ + +#endif /* !_RTDM_UAPI_SERIAL_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h new file mode 100644 index 0000000..184a2b0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h @@ -0,0 +1,42 @@ +/** + * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
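With the serial profile complete, reconfiguring a port is a single ioctl() carrying a struct rtser_config whose config_mask selects the fields to apply. A usage sketch (not part of the patch); the device node name is an assumption:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rtdm/serial.h>

/* Switch a 16550A-class port to 115200 8N1; /dev/rtdm/rtser0 is assumed. */
static int configure_port(void)
{
	struct rtser_config cfg = {
		.config_mask = RTSER_SET_BAUD | RTSER_SET_PARITY |
			       RTSER_SET_DATA_BITS | RTSER_SET_STOP_BITS,
		.baud_rate = 115200,
		.parity = RTSER_NO_PARITY,
		.data_bits = RTSER_8_BITS,
		.stop_bits = RTSER_1_STOPB,
	};
	int fd, ret;

	fd = open("/dev/rtdm/rtser0", O_RDWR);
	if (fd < 0)
		return -errno;

	ret = ioctl(fd, RTSER_RTIOC_SET_CONFIG, &cfg);
	close(fd);
	return ret;
}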
+ */ +#ifndef _RTDM_UAPI_SPI_H +#define _RTDM_UAPI_SPI_H + +#include <linux/types.h> + +struct rtdm_spi_config { + __u32 speed_hz; + __u16 mode; + __u8 bits_per_word; +}; + +struct rtdm_spi_iobufs { + __u32 io_len; + __u32 i_offset; + __u32 o_offset; + __u32 map_len; +}; + +#define SPI_RTIOC_SET_CONFIG _IOW(RTDM_CLASS_SPI, 0, struct rtdm_spi_config) +#define SPI_RTIOC_GET_CONFIG _IOR(RTDM_CLASS_SPI, 1, struct rtdm_spi_config) +#define SPI_RTIOC_SET_IOBUFS _IOR(RTDM_CLASS_SPI, 2, struct rtdm_spi_iobufs) +#define SPI_RTIOC_TRANSFER _IO(RTDM_CLASS_SPI, 3) +#define SPI_RTIOC_TRANSFER_N _IOR(RTDM_CLASS_SPI, 4, int) + +#endif /* !_RTDM_UAPI_SPI_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h new file mode 100644 index 0000000..40512c9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h @@ -0,0 +1,212 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, testing device profile header + * + * @note Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * @ingroup rttesting + */ +#ifndef _RTDM_UAPI_TESTING_H +#define _RTDM_UAPI_TESTING_H + +#include <linux/types.h> + +#define RTTST_PROFILE_VER 2 + +typedef struct rttst_bench_res { + __s32 avg; + __s32 min; + __s32 max; + __s32 overruns; + __s32 test_loops; +} rttst_bench_res_t; + +typedef struct rttst_interm_bench_res { + struct rttst_bench_res last; + struct rttst_bench_res overall; +} rttst_interm_bench_res_t; + +typedef struct rttst_overall_bench_res { + struct rttst_bench_res result; + __s32 *histogram_avg; + __s32 *histogram_min; + __s32 *histogram_max; +} rttst_overall_bench_res_t; + +#define RTTST_TMBENCH_INVALID -1 /* internal use only */ +#define RTTST_TMBENCH_TASK 0 +#define RTTST_TMBENCH_HANDLER 1 + +typedef struct rttst_tmbench_config { + int mode; + int priority; + __u64 period; + int warmup_loops; + int histogram_size; + int histogram_bucketsize; + int freeze_max; +} rttst_tmbench_config_t; + +struct rttst_swtest_task { + unsigned int index; + unsigned int flags; +}; + +/* Possible values for struct rttst_swtest_task::flags. */ +#define RTTST_SWTEST_FPU 0x1 +#define RTTST_SWTEST_USE_FPU 0x2 /* Only for kernel-space tasks. */ +#define RTTST_SWTEST_FREEZE 0x4 /* Only for kernel-space tasks. */ + +/** + * @brief parameter for the RTTST_RTIOC_SWTEST_SWITCH_TO syscall + * @anchor rttst_swtest_dir + * + * This structure is used to tell the RTTST_RTIOC_SWTEST_SWITCH_TO syscall + * which threads should be exchanged and if the mode (primary/secondary) of the + * from thread should be switched. + */ +struct rttst_swtest_dir { + /** Index of the thread that should be replaced. */ + unsigned int from; + + /** Index of the thread that should run. 
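The whole SPI profile above reduces to one configuration structure and a handful of requests. A hedged sketch of setting the transfer parameters; the include paths and the device node are assumptions, since only the UAPI header is shown here:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rtdm/rtdm.h>		/* RTDM_CLASS_SPI */
#include <rtdm/uapi/spi.h>

static int set_spi_config(const char *path)
{
	struct rtdm_spi_config cfg = {
		.speed_hz = 1000000,	/* 1 MHz clock */
		.mode = 0,		/* CPOL=0, CPHA=0 */
		.bits_per_word = 8,
	};
	int fd, ret;

	fd = open(path, O_RDWR);	/* e.g. a hypothetical SPI slave node */
	if (fd < 0)
		return -errno;

	ret = ioctl(fd, SPI_RTIOC_SET_CONFIG, &cfg);
	close(fd);
	return ret;
}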
*/ + unsigned int to; + + /** If the mode should be switched: 0 for no switch, 1 for switch. */ + unsigned int switch_mode; +}; + +struct rttst_swtest_error { + struct rttst_swtest_dir last_switch; + unsigned int fp_val; +}; + +#define RTTST_RTDM_NORMAL_CLOSE 0 +#define RTTST_RTDM_DEFER_CLOSE_CONTEXT 1 + +#define RTTST_RTDM_MAGIC_PRIMARY 0xfefbfefb +#define RTTST_RTDM_MAGIC_SECONDARY 0xa5b9a5b9 + +#define RTTST_HEAPCHECK_ZEROOVRD 1 +#define RTTST_HEAPCHECK_SHUFFLE 2 +#define RTTST_HEAPCHECK_PATTERN 4 +#define RTTST_HEAPCHECK_HOT 8 + +struct rttst_heap_parms { + __u64 heap_size; + __u64 block_size; + int flags; + int nrstats; +}; + +struct rttst_heap_stats { + __u64 heap_size; + __u64 user_size; + __u64 block_size; + __s64 alloc_avg_ns; + __s64 alloc_max_ns; + __s64 free_avg_ns; + __s64 free_max_ns; + __u64 maximum_free; + __u64 largest_free; + int nrblocks; + int flags; +}; + +struct rttst_heap_stathdr { + int nrstats; + struct rttst_heap_stats *buf; +}; + +#define RTIOC_TYPE_TESTING RTDM_CLASS_TESTING + +/*! + * @name Sub-Classes of RTDM_CLASS_TESTING + * @{ */ +/** subclass name: "timerbench" */ +#define RTDM_SUBCLASS_TIMERBENCH 0 +/** subclass name: "irqbench" */ +#define RTDM_SUBCLASS_IRQBENCH 1 +/** subclass name: "switchtest" */ +#define RTDM_SUBCLASS_SWITCHTEST 2 +/** subclass name: "rtdm" */ +#define RTDM_SUBCLASS_RTDMTEST 3 +/** subclass name: "heapcheck" */ +#define RTDM_SUBCLASS_HEAPCHECK 4 +/** @} */ + +/*! + * @anchor TSTIOCTLs @name IOCTLs + * Testing device IOCTLs + * @{ */ +#define RTTST_RTIOC_INTERM_BENCH_RES \ + _IOWR(RTIOC_TYPE_TESTING, 0x00, struct rttst_interm_bench_res) + +#define RTTST_RTIOC_TMBENCH_START \ + _IOW(RTIOC_TYPE_TESTING, 0x10, struct rttst_tmbench_config) + +#define RTTST_RTIOC_TMBENCH_STOP \ + _IOWR(RTIOC_TYPE_TESTING, 0x11, struct rttst_overall_bench_res) + +#define RTTST_RTIOC_SWTEST_SET_TASKS_COUNT \ + _IOW(RTIOC_TYPE_TESTING, 0x30, __u32) + +#define RTTST_RTIOC_SWTEST_SET_CPU \ + _IOW(RTIOC_TYPE_TESTING, 0x31, __u32) + +#define RTTST_RTIOC_SWTEST_REGISTER_UTASK \ + _IOW(RTIOC_TYPE_TESTING, 0x32, struct rttst_swtest_task) + +#define RTTST_RTIOC_SWTEST_CREATE_KTASK \ + _IOWR(RTIOC_TYPE_TESTING, 0x33, struct rttst_swtest_task) + +#define RTTST_RTIOC_SWTEST_PEND \ + _IOR(RTIOC_TYPE_TESTING, 0x34, struct rttst_swtest_task) + +#define RTTST_RTIOC_SWTEST_SWITCH_TO \ + _IOR(RTIOC_TYPE_TESTING, 0x35, struct rttst_swtest_dir) + +#define RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT \ + _IOR(RTIOC_TYPE_TESTING, 0x36, __u32) + +#define RTTST_RTIOC_SWTEST_GET_LAST_ERROR \ + _IOR(RTIOC_TYPE_TESTING, 0x37, struct rttst_swtest_error) + +#define RTTST_RTIOC_SWTEST_SET_PAUSE \ + _IOW(RTIOC_TYPE_TESTING, 0x38, __u32) + +#define RTTST_RTIOC_RTDM_DEFER_CLOSE \ + _IOW(RTIOC_TYPE_TESTING, 0x40, __u32) + +#define RTTST_RTIOC_RTDM_ACTOR_GET_CPU \ + _IOR(RTIOC_TYPE_TESTING, 0x41, __u32) + +#define RTTST_RTIOC_RTDM_PING_PRIMARY \ + _IOR(RTIOC_TYPE_TESTING, 0x42, __u32) + +#define RTTST_RTIOC_RTDM_PING_SECONDARY \ + _IOR(RTIOC_TYPE_TESTING, 0x43, __u32) + +#define RTTST_RTIOC_HEAP_CHECK \ + _IOR(RTIOC_TYPE_TESTING, 0x44, struct rttst_heap_parms) + +#define RTTST_RTIOC_HEAP_STAT_COLLECT \ + _IOR(RTIOC_TYPE_TESTING, 0x45, int) + +/** @} */ + +#endif /* !_RTDM_UAPI_TESTING_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h new file mode 100644 index 0000000..065df12 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h @@ -0,0 +1,98 @@ +/** + * @file + * This file is part of the Xenomai project.
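The timer-benchmark requests above mirror what the stock latency utility does. A reduced sketch (not part of the patch), assuming the usual /dev/rtdm/timerbench node and a 100 us sampling period:

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rtdm/testing.h>

static int sample_latency(void)
{
	struct rttst_tmbench_config cfg = {
		.mode = RTTST_TMBENCH_TASK,
		.priority = 99,
		.period = 100000,	/* 100 us, in ns */
		.warmup_loops = 1000,
		.histogram_size = 0,	/* no histogram collection */
		.histogram_bucketsize = 1000,
		.freeze_max = 0,
	};
	struct rttst_overall_bench_res res;
	int fd, ret;

	memset(&res, 0, sizeof(res));	/* histogram pointers stay NULL */

	fd = open("/dev/rtdm/timerbench", O_RDWR);
	if (fd < 0)
		return -errno;

	ret = ioctl(fd, RTTST_RTIOC_TMBENCH_START, &cfg);
	if (ret == 0) {
		sleep(1);	/* let the benchmark sample for a while */
		ret = ioctl(fd, RTTST_RTIOC_TMBENCH_STOP, &res);
	}

	close(fd);
	return ret;
}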
+ * + * @author Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _RTDM_UAPI_UDD_H +#define _RTDM_UAPI_UDD_H + +/** + * @addtogroup rtdm_udd + * + * @{ + */ + +/** + * @anchor udd_signotify + * @brief UDD event notification descriptor + * + * This structure shall be used to pass the information required to + * enable/disable the notification by signal upon interrupt receipt. + * + * If PID is zero or negative, the notification is disabled. + * Otherwise, the Cobalt thread whose PID is given will receive the + * Cobalt signal also mentioned, along with the count of interrupts at + * the time of the receipt stored in siginfo.si_int. A Cobalt thread + * must explicitly wait for notifications using the sigwaitinfo() or + * sigtimedwait() services (no asynchronous mode available). + */ +struct udd_signotify { + /** + * PID of the Cobalt thread to notify upon interrupt + * receipt. If @a pid is zero or negative, the notification is + * disabled. + */ + pid_t pid; + /** + * Signal number to send to PID for notifying, which must be + * in the range [SIGRTMIN .. SIGRTMAX] inclusive. This value + * is not considered if @a pid is zero or negative. + */ + int sig; +}; + +/** + * @anchor udd_ioctl_codes @name UDD_IOCTL + * IOCTL requests + * + * @{ + */ + +/** + * Enable the interrupt line. The UDD-class mini-driver should handle + * this request when received through its ->ioctl() handler if + * provided. Otherwise, the UDD core enables the interrupt line in the + * interrupt controller before returning to the caller. + */ +#define UDD_RTIOC_IRQEN _IO(RTDM_CLASS_UDD, 0) +/** + * Disable the interrupt line. The UDD-class mini-driver should handle + * this request when received through its ->ioctl() handler if + * provided. Otherwise, the UDD core disables the interrupt line in + * the interrupt controller before returning to the caller. + * + * @note The mini-driver must handle the UDD_RTIOC_IRQEN request for a + * custom IRQ from its ->ioctl() handler, otherwise such request + * receives -EIO from the UDD core. + */ +#define UDD_RTIOC_IRQDIS _IO(RTDM_CLASS_UDD, 1) +/** + * Enable/Disable signal notification upon interrupt event. A valid + * @ref udd_signotify "notification descriptor" must be passed along + * with this request, which is handled by the UDD core directly. + * + * @note The mini-driver must handle the UDD_RTIOC_IRQDIS request for + * a custom IRQ from its ->ioctl() handler, otherwise such request + * receives -EIO from the UDD core. 
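Putting the notification descriptor together with the request defined next: a sketch (not part of the patch) of a Cobalt thread arming the signal and then waiting synchronously, as the text above prescribes. The descriptor is assumed to reference an already opened UDD device, and getpid() stands in for the Cobalt thread PID (valid for the main thread):

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rtdm/udd.h>

static int wait_one_irq(int fd)
{
	struct udd_signotify sn = {
		.pid = getpid(),	/* main thread: process PID works */
		.sig = SIGRTMIN + 1,
	};
	sigset_t set;
	siginfo_t si;

	if (ioctl(fd, UDD_RTIOC_IRQSIG, &sn))
		return -1;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN + 1);
	if (sigwaitinfo(&set, &si) < 0)
		return -1;

	return si.si_int;	/* interrupt count at receipt */
}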
+ */ +#define UDD_RTIOC_IRQSIG _IOW(RTDM_CLASS_UDD, 2, struct udd_signotify) + +/** @} */ +/** @} */ + +#endif /* !_RTDM_UAPI_UDD_H */ diff --git a/kernel/xenomai-v3.2.4/include/rtdm/udd.h b/kernel/xenomai-v3.2.4/include/rtdm/udd.h new file mode 100644 index 0000000..41e028f --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/rtdm/udd.h @@ -0,0 +1,26 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _RTDM_UDD_H +#define _RTDM_UDD_H + +#include <rtdm/rtdm.h> +#include <rtdm/uapi/udd.h> + +#endif /* !_RTDM_UDD_H */ diff --git a/kernel/xenomai-v3.2.4/include/smokey/Makefile.am b/kernel/xenomai-v3.2.4/include/smokey/Makefile.am new file mode 100644 index 0000000..a0074fb --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/smokey/Makefile.am @@ -0,0 +1,3 @@ +includesubdir = $(includedir)/smokey + +includesub_HEADERS = smokey.h diff --git a/kernel/xenomai-v3.2.4/include/smokey/smokey.h b/kernel/xenomai-v3.2.4/include/smokey/smokey.h new file mode 100644 index 0000000..0ac1e8d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/smokey/smokey.h @@ -0,0 +1,274 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_SMOKEY_SMOKEY_H +#define _XENOMAI_SMOKEY_SMOKEY_H + +#include <stdarg.h> +#include <pthread.h> +#include <boilerplate/list.h> +#include <boilerplate/libc.h> +#include <copperplate/clockobj.h> +#include <xenomai/init.h> + +#ifdef HAVE_FORK +#define do_fork fork +#else +#define do_fork vfork +#endif + +#define SMOKEY_INT(__name) { \ + .name = # __name, \ + .parser = smokey_int, \ + .matched = 0, \ + } + +#define SMOKEY_BOOL(__name) { \ + .name = # __name, \ + .parser = smokey_bool, \ + .matched = 0, \ + } + +#define SMOKEY_STRING(__name) { \ + .name = # __name, \ + .parser = smokey_string, \ + .matched = 0, \ + } + +#define SMOKEY_SIZE(__name) { \ + .name = # __name, \ + .parser = smokey_size, \ + .matched = 0, \ + } + +#define SMOKEY_ARGLIST(__args...) 
((struct smokey_arg[]){ __args }) + +#define SMOKEY_NOARGS (((struct smokey_arg[]){ { .name = NULL } })) + +struct smokey_arg { + const char *name; + int (*parser)(const char *s, + struct smokey_arg *arg); + union { + int n_val; + char *s_val; + size_t l_val; + } u; + int matched; +}; + +struct smokey_test { + const char *name; + struct smokey_arg *args; + int nargs; + const char *description; + int (*run)(struct smokey_test *t, + int argc, char *const argv[]); + struct { + int id; + struct pvholder next; + } __reserved; +}; + +#define for_each_smokey_test(__pos) \ + pvlist_for_each_entry((__pos), &smokey_test_list, __reserved.next) + +#define __smokey_arg_count(__args) \ + (sizeof(__args) / sizeof(__args[0])) + +#define smokey_test_plugin(__plugin, __args, __desc) \ + static int run_ ## __plugin(struct smokey_test *t, \ + int argc, char *const argv[]); \ + static struct smokey_test __plugin = { \ + .name = #__plugin, \ + .args = (__args), \ + .nargs = __smokey_arg_count(__args), \ + .description = (__desc), \ + .run = run_ ## __plugin, \ + }; \ + __early_ctor void smokey_plugin_ ## __plugin(void); \ + void smokey_plugin_ ## __plugin(void) \ + { \ + smokey_register_plugin(&(__plugin)); \ + } + +#define SMOKEY_ARG(__plugin, __arg) (smokey_lookup_arg(&(__plugin), # __arg)) +#define SMOKEY_ARG_ISSET(__plugin, __arg) (SMOKEY_ARG(__plugin, __arg)->matched) +#define SMOKEY_ARG_INT(__plugin, __arg) (SMOKEY_ARG(__plugin, __arg)->u.n_val) +#define SMOKEY_ARG_BOOL(__plugin, __arg) (!!SMOKEY_ARG_INT(__plugin, __arg)) +#define SMOKEY_ARG_STRING(__plugin, __arg) (SMOKEY_ARG(__plugin, __arg)->u.s_val) +#define SMOKEY_ARG_SIZE(__plugin, __arg) (SMOKEY_ARG(__plugin, __arg)->u.l_val) + +#define smokey_arg_isset(__t, __name) (smokey_lookup_arg(__t, __name)->matched) +#define smokey_arg_int(__t, __name) (smokey_lookup_arg(__t, __name)->u.n_val) +#define smokey_arg_bool(__t, __name) (!!smokey_arg_int(__t, __name)) +#define smokey_arg_string(__t, __name) (smokey_lookup_arg(__t, __name)->u.s_val) +#define smokey_arg_size(__t, __name) (smokey_lookup_arg(__t, __name)->u.l_val) + +#define smokey_check_errno(__expr) \ + ({ \ + int __ret = (__expr); \ + if (__ret < 0) { \ + __ret = -errno; \ + __smokey_warning(__FILE__, __LINE__, "%s: %s", \ + #__expr, strerror(errno)); \ + } \ + __ret; \ + }) + +#define smokey_check_status(__expr) \ + ({ \ + int __ret = (__expr); \ + if (__ret) { \ + __smokey_warning(__FILE__, __LINE__, "%s: %s", \ + #__expr, strerror(__ret)); \ + __ret = -__ret; \ + } \ + __ret; \ + }) + +#define smokey_assert(__expr) \ + ({ \ + int __ret = (__expr); \ + if (!__ret) \ + __smokey_warning(__FILE__, __LINE__, \ + "assertion failed: %s", #__expr); \ + __ret; \ + }) + +#define smokey_warning(__fmt, __args...) 
\ + __smokey_warning(__FILE__, __LINE__, __fmt, ##__args) + +#define __T(__ret, __action) \ + ({ \ + (__ret) = (__action); \ + if (__ret) { \ + if ((__ret) > 0) \ + (__ret) = -(__ret); \ + smokey_warning("FAILED: %s (=%s)", \ + __stringify(__action), \ + symerror(__ret)); \ + } \ + (__ret) == 0; \ + }) + +#define __F(__ret, __action) \ + ({ \ + (__ret) = (__action); \ + if ((__ret) == 0) \ + smokey_warning("FAILED: %s (=0)", \ + __stringify(__action)); \ + else if ((__ret) > 0) \ + (__ret) = -(__ret); \ + (__ret) != 0; \ + }) + +#define __Terrno(__ret, __action) \ + ({ \ + (__ret) = (__action); \ + if (__ret) { \ + (__ret) = -errno; \ + smokey_warning("FAILED: %s (=%s)", \ + __stringify(__action), \ + symerror(__ret)); \ + } \ + (__ret) == 0; \ + }) + +#define __Tassert(__expr) \ + ({ \ + int __ret = !!(__expr); \ + if (!__ret) \ + smokey_warning("FAILED: %s (=false)", \ + __stringify(__expr)); \ + __ret; \ + }) + +#define __Fassert(__expr) \ + ({ \ + int __ret = (__expr); \ + if (__ret) \ + smokey_warning("FAILED: %s (=true)", \ + __stringify(__expr)); \ + !__ret; \ + }) + +struct smokey_barrier { + pthread_mutex_t lock; + pthread_cond_t barrier; + int signaled; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +void smokey_register_plugin(struct smokey_test *t); + +int smokey_int(const char *s, struct smokey_arg *arg); + +int smokey_bool(const char *s, struct smokey_arg *arg); + +int smokey_string(const char *s, struct smokey_arg *arg); + +int smokey_size(const char *s, struct smokey_arg *arg); + +struct smokey_arg *smokey_lookup_arg(struct smokey_test *t, + const char *arg); + +int smokey_parse_args(struct smokey_test *t, + int argc, char *const argv[]); + +void smokey_vatrace(const char *fmt, va_list ap); + +void smokey_trace(const char *fmt, ...); + +void smokey_note(const char *fmt, ...); + +void __smokey_warning(const char *file, int lineno, + const char *fmt, ...); + +int smokey_barrier_init(struct smokey_barrier *b); + +void smokey_barrier_destroy(struct smokey_barrier *b); + +int smokey_barrier_wait(struct smokey_barrier *b); + +int smokey_barrier_timedwait(struct smokey_barrier *b, + struct timespec *ts); + +void smokey_barrier_release(struct smokey_barrier *b); + +int smokey_fork_exec(const char *path, const char *arg); + +int smokey_modprobe(const char *name, bool silent); + +int smokey_rmmod(const char *name); + +#ifdef __cplusplus +} +#endif + +extern struct pvlistobj smokey_test_list; + +extern int smokey_keep_going; + +extern int smokey_verbose_mode; + +extern int smokey_on_vm; + +#endif /* _XENOMAI_SMOKEY_SMOKEY_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/Makefile.am b/kernel/xenomai-v3.2.4/include/trank/Makefile.am new file mode 100644 index 0000000..69721d9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/Makefile.am @@ -0,0 +1,10 @@ +includesubdir = $(includedir)/trank + +includesub_HEADERS = trank.h + +if XENO_COBALT +includesub_HEADERS += rtdk.h +SUBDIRS = posix native rtdm +endif + +DIST_SUBDIRS = posix native rtdm diff --git a/kernel/xenomai-v3.2.4/include/trank/native/Makefile.am b/kernel/xenomai-v3.2.4/include/trank/native/Makefile.am new file mode 100644 index 0000000..98dc104 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/Makefile.am @@ -0,0 +1,16 @@ +includesubdir = $(includedir)/trank/native + +includesub_HEADERS = \ + alarm.h \ + buffer.h \ + cond.h \ + event.h \ + heap.h \ + misc.h \ + mutex.h \ + pipe.h \ + queue.h \ + sem.h \ + task.h \ + timer.h \ + types.h diff --git a/kernel/xenomai-v3.2.4/include/trank/native/alarm.h 
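For reference, this is roughly how the registration macros above are meant to be combined; a hedged sketch, not from the patch, with an invented test name and argument:

#include <errno.h>
#include <smokey/smokey.h>

smokey_test_plugin(dummy_check,
		   SMOKEY_ARGLIST(
			   SMOKEY_INT(loops)
		   ),
		   "Run a trivial self-check, looping <loops> times."
);

/* The plugin macro forward-declares this handler under run_<name>. */
static int run_dummy_check(struct smokey_test *t,
			   int argc, char *const argv[])
{
	int loops = 1, i, ret;

	ret = smokey_parse_args(t, argc, argv);
	if (ret < 0)
		return ret;

	if (SMOKEY_ARG_ISSET(dummy_check, loops))
		loops = SMOKEY_ARG_INT(dummy_check, loops);

	for (i = 0; i < loops; i++)
		if (!smokey_assert(i >= 0))
			return -EINVAL;

	smokey_trace("%d loop(s) completed", loops);
	return 0;
}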
b/kernel/xenomai-v3.2.4/include/trank/native/alarm.h new file mode 100644 index 0000000..688d625 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/alarm.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_ALARM_H +#define _XENOMAI_TRANK_NATIVE_ALARM_H + +#include <trank/trank.h> +#include <alchemy/alarm.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COMPAT_DECL(int, rt_alarm_create(RT_ALARM *alarm, const char *name)); + +COMPAT_DECL(int, rt_alarm_wait(RT_ALARM *alarm)); + +COMPAT_DECL(int, rt_alarm_delete(RT_ALARM *alarm)); + +#ifdef __cplusplus +} +#endif + +#endif /* _XENOMAI_TRANK_NATIVE_ALARM_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/buffer.h b/kernel/xenomai-v3.2.4/include/trank/native/buffer.h new file mode 100644 index 0000000..b32affc --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/buffer.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_BUFFER_H +#define _XENOMAI_TRANK_NATIVE_BUFFER_H + +#include <alchemy/buffer.h> + +#endif /* _XENOMAI_TRANK_NATIVE_BUFFER_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/cond.h b/kernel/xenomai-v3.2.4/include/trank/native/cond.h new file mode 100644 index 0000000..49e1e40 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/cond.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_COND_H +#define _XENOMAI_TRANK_NATIVE_COND_H + +#include <alchemy/cond.h> + +#endif /* _XENOMAI_TRANK_NATIVE_COND_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/event.h b/kernel/xenomai-v3.2.4/include/trank/native/event.h new file mode 100644 index 0000000..800cbca --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/event.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_EVENT_H +#define _XENOMAI_TRANK_NATIVE_EVENT_H + +#include <alchemy/event.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COMPAT_DECL(int, rt_event_create(RT_EVENT *event, const char *name, + unsigned long ivalue, int mode)); + +COMPAT_DECL(int, rt_event_signal(RT_EVENT *event, unsigned long mask)); + +COMPAT_DECL(int, rt_event_clear(RT_EVENT *event, unsigned long mask, + unsigned long *mask_r)); + +#ifdef __XENO_COMPAT__ + +static inline +int rt_event_wait_until(RT_EVENT *event, + unsigned long mask, unsigned long *mask_r, + int mode, RTIME timeout) +{ + struct timespec ts; + unsigned int _mask; + int ret; + + ret = rt_event_wait_timed(event, mask, &_mask, mode, + alchemy_abs_timeout(timeout, &ts)); + if (ret) + return ret; + + *mask_r = _mask; + + return 0; +} + +static inline +int rt_event_wait(RT_EVENT *event, + unsigned long mask, unsigned long *mask_r, + int mode, RTIME timeout) +{ + struct timespec ts; + unsigned int _mask; + int ret; + + ret = rt_event_wait_timed(event, mask, &_mask, mode, + alchemy_rel_timeout(timeout, &ts)); + if (ret) + return ret; + + *mask_r = _mask; + + return 0; +} + +#endif /* __XENO_COMPAT__ */ + +#ifdef __cplusplus +} +#endif + +#endif /* _XENOMAI_TRANK_NATIVE_EVENT_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/heap.h b/kernel/xenomai-v3.2.4/include/trank/native/heap.h new file mode 100644 index 0000000..8beed18 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/heap.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
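Under __XENO_COMPAT__, the wrappers above restore the Xenomai 2.x calling convention for event groups. A sketch, not from the patch, with the bit mask and the 1 ms relative timeout as illustrative values (ns-based RTIME assumed):

#include <trank/native/event.h>

static int drain_events(RT_EVENT *event)
{
	unsigned long mask;
	int ret;

	/* Wait up to 1 ms for any of bits 0-1 to be signaled. */
	ret = rt_event_wait(event, 0x3, &mask, EV_ANY, 1000000);
	if (ret)
		return ret;

	/* Consume whatever fired. */
	return rt_event_clear(event, mask, NULL);
}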
+ + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_HEAP_H +#define _XENOMAI_TRANK_NATIVE_HEAP_H + +#include <alchemy/heap.h> + +#define H_MAPPABLE 0 +#define H_SHARED 0 +#define H_NONCACHED 0 +#define H_DMA 0 + +#endif /* _XENOMAI_TRANK_NATIVE_HEAP_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/misc.h b/kernel/xenomai-v3.2.4/include/trank/native/misc.h new file mode 100644 index 0000000..ea242d2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/misc.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_MISC_H +#define _XENOMAI_TRANK_NATIVE_MISC_H + +#include <stdint.h> +#include <errno.h> +#include <trank/trank.h> + +#define IORN_IOPORT 0 +#define IORN_IOMEM 0 + +typedef struct rt_ioregion { +} RT_IOREGION; + +#ifdef __cplusplus extern "C" { +#endif + +__deprecated +static inline int rt_io_get_region(RT_IOREGION *iorn, + const char *name, + uint64_t start, + uint64_t len, + int flags) +{ + trank_warning("service should be provided by a RTDM driver"); + return -ENOSYS; +} + +__deprecated +static inline int rt_io_put_region(RT_IOREGION *iorn) +{ + trank_warning("service should be provided by a RTDM driver"); + return -ENOSYS; +} + +#ifdef __cplusplus } +#endif + +#endif /* _XENOMAI_TRANK_NATIVE_MISC_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/mutex.h b/kernel/xenomai-v3.2.4/include/trank/native/mutex.h new file mode 100644 index 0000000..9c8683b --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/mutex.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */ +#ifndef _XENOMAI_TRANK_NATIVE_MUTEX_H +#define _XENOMAI_TRANK_NATIVE_MUTEX_H + +#include <alchemy/mutex.h> + +#endif /* _XENOMAI_TRANK_NATIVE_MUTEX_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/pipe.h b/kernel/xenomai-v3.2.4/include/trank/native/pipe.h new file mode 100644 index 0000000..42f56a5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/pipe.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_PIPE_H +#define _XENOMAI_TRANK_NATIVE_PIPE_H + +#include <alchemy/pipe.h> +#include <trank/trank.h> + +#ifdef __cplusplus +extern "C" { +#endif + +COMPAT_DECL(int, rt_pipe_create(RT_PIPE *pipe, + const char *name, + int minor, size_t poolsize)); +#ifdef __cplusplus +} +#endif + +#endif /* _XENOMAI_TRANK_NATIVE_PIPE_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/queue.h b/kernel/xenomai-v3.2.4/include/trank/native/queue.h new file mode 100644 index 0000000..d144a22 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/queue.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_QUEUE_H +#define _XENOMAI_TRANK_NATIVE_QUEUE_H + +#include <alchemy/queue.h> + +#define Q_SHARED 0 +#define Q_DMA 0 + +#endif /* _XENOMAI_TRANK_NATIVE_QUEUE_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/sem.h b/kernel/xenomai-v3.2.4/include/trank/native/sem.h new file mode 100644 index 0000000..34494a4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/sem.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_SEM_H +#define _XENOMAI_TRANK_NATIVE_SEM_H + +#include <alchemy/sem.h> + +#endif /* _XENOMAI_TRANK_NATIVE_SEM_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/task.h b/kernel/xenomai-v3.2.4/include/trank/native/task.h new file mode 100644 index 0000000..85c3cd4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/task.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_TASK_H +#define _XENOMAI_TRANK_NATIVE_TASK_H + +#include <errno.h> +#include <alchemy/task.h> +#include <trank/trank.h> +#include <trank/native/types.h> + +#define T_FPU 0 +#define T_NOSIG 0 +#define T_SUSP __THREAD_M_SPARE7 + +/* bit #24 onward are otherwise unused. */ +#define T_CPU(cpu) (1 << (24 + (cpu & 7))) +#define T_CPUMASK 0xff000000 + +#ifdef __cplusplus +extern "C" { +#endif + +__deprecated +static inline int rt_task_notify(RT_TASK *task, rt_sigset_t sigs) +{ + trank_warning("in-kernel native API is gone, rebase over RTDM"); + return -ENOSYS; +} + +COMPAT_DECL(int, rt_task_create(RT_TASK *task, const char *name, + int stksize, int prio, int mode)); + +COMPAT_DECL(int, rt_task_spawn(RT_TASK *task, const char *name, + int stksize, int prio, int mode, + void (*entry)(void *arg), void *arg)); + +COMPAT_DECL(int, rt_task_set_periodic(RT_TASK *task, + RTIME idate, RTIME period)); +#ifdef __cplusplus +} +#endif + +#endif /* _XENOMAI_TRANK_NATIVE_TASK_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/timer.h b/kernel/xenomai-v3.2.4/include/trank/native/timer.h new file mode 100644 index 0000000..9cb606d --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/timer.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
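The compat task calls above combine in the familiar 2.x pattern; a sketch, not from the patch, where the priority, the 1 ms period and the T_CPU pinning are illustrative values:

#include <trank/native/task.h>

static void worker(void *arg)
{
	/* Make the current task periodic: 1 ms period, starting now. */
	if (rt_task_set_periodic(NULL, TM_NOW, 1000000))
		return;

	for (;;) {
		if (rt_task_wait_period(NULL))
			break;
		/* periodic work goes here */
	}
}

static int start_worker(RT_TASK *task)
{
	/* 0 = default stack size, prio 50, pinned on CPU 0. */
	return rt_task_spawn(task, "worker", 0, 50, T_CPU(0), worker, NULL);
}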
+ + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_TIMER_H +#define _XENOMAI_TRANK_NATIVE_TIMER_H + +#include <errno.h> +#include <trank/trank.h> +#include <alchemy/timer.h> + +#define TM_ONESHOT 0 + +#ifdef __cplusplus +extern "C" { +#endif + +__deprecated +static inline int rt_timer_set_mode(RTIME nstick) +{ +#ifdef CONFIG_XENO_LORES_CLOCK_DISABLED + if (nstick != TM_ONESHOT) { + trank_warning("start program with --alchemy-clock-resolution option instead"); + return -ENODEV; + } +#endif + return 0; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _XENOMAI_TRANK_NATIVE_TIMER_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/native/types.h b/kernel/xenomai-v3.2.4/include/trank/native/types.h new file mode 100644 index 0000000..f525c15 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/native/types.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_NATIVE_TYPES_H +#define _XENOMAI_TRANK_NATIVE_TYPES_H + +#include <alchemy/timer.h> + +typedef unsigned long rt_sigset_t; + +#endif /* _XENOMAI_TRANK_NATIVE_TYPES_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/posix/Makefile.am b/kernel/xenomai-v3.2.4/include/trank/posix/Makefile.am new file mode 100644 index 0000000..87b4500 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/posix/Makefile.am @@ -0,0 +1,3 @@ +includesubdir = $(includedir)/trank/posix + +includesub_HEADERS = pthread.h diff --git a/kernel/xenomai-v3.2.4/include/trank/posix/pthread.h b/kernel/xenomai-v3.2.4/include/trank/posix/pthread.h new file mode 100644 index 0000000..a7364c5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/posix/pthread.h @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#pragma GCC system_header +#include_next <pthread.h> + +#ifndef _XENOMAI_TRANK_POSIX_PTHREAD_H +#define _XENOMAI_TRANK_POSIX_PTHREAD_H + +#ifdef __cplusplus extern "C" { +#endif + +/** + * Set the mode of the current thread (compatibility service) + * + * This service is a non-portable extension of the POSIX interface. + * + * @param clrmask set of bits to be cleared; + * + * @param setmask set of bits to be set. + * + * @param mode_r If non-NULL, @a mode_r must be a pointer to a memory + * location which will be written upon success with the previous set + * of active mode bits. If NULL, the previous set of active mode bits + * will not be returned. + * + * @return 0 on success; + * @return an error number if: + * - EINVAL, some bit in @a clrmask or @a setmask is invalid. + * + * @note Setting @a clrmask and @a setmask to zero leads to a nop, + * only returning the previous mode if @a mode_r is a valid address. + * + * @deprecated This service is an alias to pthread_setmode_np() for + * source compatibility with Xenomai 2.x. + */ +static inline int pthread_set_mode_np(int clrmask, int setmask, + int *mode_r) +{ + return pthread_setmode_np(clrmask, setmask, mode_r); +} + +/** + * Set a thread name (compatibility service) + * + * This service sets the name of @a thread to @a name. This name is + * used for displaying information in /proc/xenomai/sched. + * + * This service is a non-portable extension of the POSIX interface. + * + * @param thread target thread; + * + * @param name name of the thread. + * + * @return 0 on success; + * @return an error number if: + * - ESRCH, @a thread is invalid. + * + * @deprecated This service is an alias to pthread_setname_np() for + * source compatibility with Xenomai 2.x. + */ +static inline int pthread_set_name_np(pthread_t thread, + const char *name) +{ + return pthread_setname_np(thread, name); +} + +int pthread_make_periodic_np(pthread_t thread, + struct timespec *starttp, + struct timespec *periodtp); + +int pthread_wait_np(unsigned long *overruns_r); + +#ifdef __cplusplus } +#endif + +#endif /* _XENOMAI_TRANK_POSIX_PTHREAD_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdk.h b/kernel/xenomai-v3.2.4/include/trank/rtdk.h new file mode 100644 index 0000000..e8bb6d1 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/rtdk.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_RTDK_H +#define _XENOMAI_TRANK_RTDK_H + +#include <stdio.h> + +#ifdef __cplusplus extern "C" { +#endif + +static inline void rt_print_auto_init(int enable) +{ + /* stdio support is automatically enabled by libcobalt.
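Both aliases above are one-liners over the Cobalt services, so typical 2.x-era setup code keeps building; a sketch, not part of the patch. PTHREAD_WARNSW is the Cobalt mode bit requesting SIGDEBUG on spurious mode switches:

#include <pthread.h>

static int name_and_watch_thread(void)
{
	int old, ret;

	ret = pthread_set_name_np(pthread_self(), "rt-worker");
	if (ret)
		return ret;

	/* Get SIGDEBUG whenever this thread drops to secondary mode. */
	return pthread_set_mode_np(0, PTHREAD_WARNSW, &old);
}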
*/ +} + +static inline void rt_print_cleanup(void) { } + +#ifdef __cplusplus +} +#endif + +#endif /* _XENOMAI_TRANK_RTDK_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/Makefile.am b/kernel/xenomai-v3.2.4/include/trank/rtdm/Makefile.am new file mode 100644 index 0000000..2eae0f6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/Makefile.am @@ -0,0 +1,8 @@ +includesubdir = $(includedir)/trank/rtdm + +includesub_HEADERS = \ + rtcan.h \ + rtdm.h \ + rtipc.h \ + rtserial.h \ + rttesting.h diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rtcan.h b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtcan.h new file mode 100644 index 0000000..e87e5e1 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtcan.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_RTDM_RTCAN_H +#define _XENOMAI_TRANK_RTDM_RTCAN_H + +#include <rtdm/can.h> + +#endif /* _XENOMAI_TRANK_RTDM_RTCAN_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rtdm.h b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtdm.h new file mode 100644 index 0000000..80d874a --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtdm.h @@ -0,0 +1,177 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_RTDM_RTDM_H +#define _XENOMAI_TRANK_RTDM_RTDM_H + +#include_next <rtdm/rtdm.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef RTDM_NO_DEFAULT_USER_API + +#define rt_dev_call(__call, __args...) \ +({ \ + int __ret; \ + __ret = __RT(__call(__args)); \ + __ret < 0 ? -errno : __ret; \ +}) + +#define rt_dev_open(__args...) rt_dev_call(open, __args) +#define rt_dev_ioctl(__args...) rt_dev_call(ioctl, __args) +#define rt_dev_recvfrom(__args...) 
rt_dev_call(recvfrom, __args) + +static inline int rt_dev_socket(int domain, int type, int protocol) +{ + return rt_dev_call(socket, domain, type, protocol); +} + +static inline ssize_t rt_dev_recvmsg(int fd, struct msghdr *msg, int flags) +{ + return rt_dev_call(recvmsg, fd, msg, flags); +} + +static inline ssize_t rt_dev_recv(int fd, void *buf, size_t len, int flags) +{ + return rt_dev_call(recvfrom, fd, buf, len, flags, NULL, NULL); +} + +static inline ssize_t rt_dev_sendmsg(int fd, const struct msghdr *msg, int flags) +{ + return rt_dev_call(sendmsg, fd, msg, flags); +} + +static inline ssize_t rt_dev_sendto(int fd, const void *buf, size_t len, + int flags, const struct sockaddr *to, + socklen_t tolen) +{ + struct iovec iov; + struct msghdr msg; + + iov.iov_base = (void *)buf; + iov.iov_len = len; + + msg.msg_name = (struct sockaddr *)to; + msg.msg_namelen = tolen; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + + return rt_dev_call(sendmsg, fd, &msg, flags); +} + +static inline ssize_t rt_dev_send(int fd, const void *buf, size_t len, + int flags) +{ + return rt_dev_call(sendto, fd, buf, len, flags, NULL, 0); +} + +static inline int rt_dev_getsockopt(int fd, int level, int optname, + void *optval, socklen_t *optlen) +{ + struct _rtdm_getsockopt_args args = { + level, optname, optval, optlen + }; + + return rt_dev_call(ioctl, fd, _RTIOC_GETSOCKOPT, &args); +} + +static inline int rt_dev_setsockopt(int fd, int level, int optname, + const void *optval, socklen_t optlen) +{ + struct _rtdm_setsockopt_args args = { + level, optname, (void *)optval, optlen + }; + + return rt_dev_call(ioctl, fd, _RTIOC_SETSOCKOPT, &args); +} + +static inline int rt_dev_bind(int fd, const struct sockaddr *my_addr, + socklen_t addrlen) +{ + struct _rtdm_setsockaddr_args args = { my_addr, addrlen }; + + return rt_dev_call(ioctl, fd, _RTIOC_BIND, &args); +} + +static inline int rt_dev_connect(int fd, const struct sockaddr *serv_addr, + socklen_t addrlen) +{ + struct _rtdm_setsockaddr_args args = { serv_addr, addrlen }; + + return rt_dev_call(ioctl, fd, _RTIOC_CONNECT, &args); +} + +static inline int rt_dev_listen(int fd, int backlog) +{ + return rt_dev_call(ioctl, fd, _RTIOC_LISTEN, backlog); +} + +static inline int rt_dev_accept(int fd, struct sockaddr *addr, + socklen_t *addrlen) +{ + struct _rtdm_getsockaddr_args args = { addr, addrlen }; + + return rt_dev_call(ioctl, fd, _RTIOC_ACCEPT, &args); +} + +static inline int rt_dev_getsockname(int fd, struct sockaddr *name, + socklen_t *namelen) +{ + struct _rtdm_getsockaddr_args args = { name, namelen }; + + return rt_dev_call(ioctl, fd, _RTIOC_GETSOCKNAME, &args); +} + +static inline int rt_dev_getpeername(int fd, struct sockaddr *name, + socklen_t *namelen) +{ + struct _rtdm_getsockaddr_args args = { name, namelen }; + + return rt_dev_call(ioctl, fd, _RTIOC_GETPEERNAME, &args); +} + +static inline int rt_dev_shutdown(int fd, int how) +{ + return rt_dev_call(ioctl, fd, _RTIOC_SHUTDOWN, how); +} + +static inline int rt_dev_close(int fd) +{ + return rt_dev_call(close, fd); +} + +static inline ssize_t rt_dev_write(int fd, const void *buf, size_t len) +{ + return rt_dev_call(write, fd, buf, len); +} + +static inline ssize_t rt_dev_read(int fd, void *buf, size_t len) +{ + return rt_dev_call(read, fd, buf, len); +} + +#endif /* !RTDM_NO_DEFAULT_USER_API */ + +#ifdef __cplusplus +} +#endif + +#endif /* _XENOMAI_TRANK_RTDM_RTDM_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rtipc.h 
b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtipc.h new file mode 100644 index 0000000..4e626f8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtipc.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_RTDM_RTIPC_H +#define _XENOMAI_TRANK_RTDM_RTIPC_H + +#include <rtdm/ipc.h> + +#endif /* _XENOMAI_TRANK_RTDM_RTIPC_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rtserial.h b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtserial.h new file mode 100644 index 0000000..7f2a7fb --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtserial.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_RTDM_RTSERIAL_H +#define _XENOMAI_TRANK_RTDM_RTSERIAL_H + +#include <rtdm/serial.h> + +#endif /* _XENOMAI_TRANK_RTDM_RTSERIAL_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rttesting.h b/kernel/xenomai-v3.2.4/include/trank/rtdm/rttesting.h new file mode 100644 index 0000000..b774fd0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rttesting.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _XENOMAI_TRANK_RTDM_RTTESTING_H +#define _XENOMAI_TRANK_RTDM_RTTESTING_H + +#include <rtdm/testing.h> + +#endif /* _XENOMAI_TRANK_RTDM_RTTESTING_H */ diff --git a/kernel/xenomai-v3.2.4/include/trank/trank.h b/kernel/xenomai-v3.2.4/include/trank/trank.h new file mode 100644 index 0000000..cc68837 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/trank/trank.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_TRANK_TRANK_H +#define _XENOMAI_TRANK_TRANK_H + +#include <boilerplate/compiler.h> + +#ifdef __XENO_COMPAT__ + +#ifdef __cplusplus +extern "C" { +#endif + +void warning(const char *fmt, ...); + +#ifdef __cplusplus +} +#endif + +#define trank_warning(__fmt, __args...) \ + warning("%s: " __fmt, __func__, ##__args) + +#define __CURRENT(call) __current_ ## call + +#define COMPAT_DECL(T, P) __typeof__(T) P +#define CURRENT_DECL(T, P) __typeof__(T) __CURRENT(P) + +#else /* !__XENO_COMPAT__ */ + +#define __CURRENT(call) call + +#define COMPAT_DECL(T, P) +#define CURRENT_DECL(T, P) __typeof__(T) P; \ + __typeof__(T) __current_ ## P + +#define CURRENT_IMPL(T, I, A) \ +__typeof__(T) I A __attribute__((alias("__current_" __stringify(I)), weak)); \ +__typeof__(T) __current_ ## I A + +#endif /* !__XENO_COMPAT__ */ + +#endif /* _XENOMAI_TRANK_TRANK_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/Makefile.am b/kernel/xenomai-v3.2.4/include/vxworks/Makefile.am new file mode 100644 index 0000000..849fbdf --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/Makefile.am @@ -0,0 +1,18 @@ +includesubdir = $(includedir)/vxworks + +includesub_HEADERS = \ + errnoLib.h \ + intLib.h \ + kernLib.h \ + lstLib.h \ + memPartLib.h \ + msgQLib.h \ + rngLib.h \ + semLib.h \ + sysLib.h \ + taskHookLib.h \ + taskInfo.h \ + taskLib.h \ + tickLib.h \ + types.h \ + wdLib.h diff --git a/kernel/xenomai-v3.2.4/include/vxworks/errnoLib.h b/kernel/xenomai-v3.2.4/include/vxworks/errnoLib.h new file mode 100644 index 0000000..f434927 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/errnoLib.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. + */ + +#ifndef _XENOMAI_VXWORKS_ERRNOLIB_H +#define _XENOMAI_VXWORKS_ERRNOLIB_H + +#include <errno.h> +#include <vxworks/taskLib.h> + +#define OK 0 +#define ERROR (-1) + +#define WIND_TASK_ERR_BASE 0x00030000 +#define WIND_MEM_ERR_BASE 0x00110000 +#define WIND_SEM_ERR_BASE 0x00160000 +#define WIND_OBJ_ERR_BASE 0x003d0000 +#define WIND_MSGQ_ERR_BASE 0x00410000 +#define WIND_INT_ERR_BASE 0x00430000 + +#define S_objLib_OBJ_ID_ERROR (WIND_OBJ_ERR_BASE + 0x0001) +#define S_objLib_OBJ_UNAVAILABLE (WIND_OBJ_ERR_BASE + 0x0002) +#define S_objLib_OBJ_DELETED (WIND_OBJ_ERR_BASE + 0x0003) +#define S_objLib_OBJ_TIMEOUT (WIND_OBJ_ERR_BASE + 0x0004) +#define S_objLib_OBJ_NO_METHOD (WIND_OBJ_ERR_BASE + 0x0005) + +#define S_taskLib_NAME_NOT_FOUND (WIND_TASK_ERR_BASE + 0x0065) +#define S_taskLib_TASK_HOOK_TABLE_FULL (WIND_TASK_ERR_BASE + 0x0066) +#define S_taskLib_TASK_HOOK_NOT_FOUND (WIND_TASK_ERR_BASE + 0x0067) +#define S_taskLib_ILLEGAL_PRIORITY (WIND_TASK_ERR_BASE + 0x006d) + +#define S_semLib_INVALID_STATE (WIND_SEM_ERR_BASE + 0x0065) +#define S_semLib_INVALID_OPTION (WIND_SEM_ERR_BASE + 0x0066) +#define S_semLib_INVALID_QUEUE_TYPE (WIND_SEM_ERR_BASE + 0x0067) +#define S_semLib_INVALID_OPERATION (WIND_SEM_ERR_BASE + 0x0068) + +#define S_msgQLib_INVALID_MSG_LENGTH (WIND_MSGQ_ERR_BASE + 0x0001) +#define S_msgQLib_NON_ZERO_TIMEOUT_AT_INT_LEVEL (WIND_MSGQ_ERR_BASE + 0x0002) +#define S_msgQLib_INVALID_QUEUE_TYPE (WIND_MSGQ_ERR_BASE + 0x0003) + +#define S_intLib_NOT_ISR_CALLABLE (WIND_INT_ERR_BASE + 0x0001) + +#define S_memLib_NOT_ENOUGH_MEMORY (WIND_MEM_ERR_BASE + 0x0001) +#define S_memLib_INVALID_NBYTES (WIND_MEM_ERR_BASE + 0x0002) + +#ifdef __cplusplus +extern "C" { +#endif + +void printErrno(int status); + +STATUS errnoSet(int status); + +int errnoGet(void); + +int errnoOfTaskGet(TASK_ID task_id); + +STATUS errnoOfTaskSet(TASK_ID task_id, int status); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_ERRNOLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/intLib.h b/kernel/xenomai-v3.2.4/include/vxworks/intLib.h new file mode 100644 index 0000000..9fd0e76 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/intLib.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. + */ + +#ifndef _XENOMAI_VXWORKS_INTLIB_H +#define _XENOMAI_VXWORKS_INTLIB_H + +#include <vxworks/types.h> + +#ifdef __cplusplus +extern "C" { +#endif + +BOOL intContext(void); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_INTLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/kernLib.h b/kernel/xenomai-v3.2.4/include/vxworks/kernLib.h new file mode 100644 index 0000000..dd9d5d3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/kernLib.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. + */ + +#ifndef _XENOMAI_VXWORKS_KERNLIB_H +#define _XENOMAI_VXWORKS_KERNLIB_H + +#include <vxworks/types.h> + +#ifdef __cplusplus +extern "C" { +#endif + +STATUS kernelTimeSlice(int ticks); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_KERNLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/lstLib.h b/kernel/xenomai-v3.2.4/include/vxworks/lstLib.h new file mode 100644 index 0000000..9644ca5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/lstLib.h @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. 
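+ *
+ * Minimal usage sketch (the embedding structure is hypothetical):
+ *
+ *   struct item { NODE node; int payload; };
+ *
+ *   LIST l;
+ *   struct item a, b;
+ *
+ *   lstInit(&l);
+ *   lstAdd(&l, &a.node);
+ *   lstAdd(&l, &b.node);
+ *   NODE *first = lstFirst(&l);  -- yields &a.node
+ *   lstDelete(&l, first);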
+ */ + +#ifndef _XENOMAI_VXWORKS_LSTLIB_H +#define _XENOMAI_VXWORKS_LSTLIB_H + +#include <boilerplate/list.h> +#include <vxworks/types.h> + +typedef struct LIST { + struct pvlistobj list; + int count; +} LIST; + +typedef struct NODE { + struct pvholder link; + struct LIST *list; +} NODE; + +static inline void lstInit(LIST *l) +{ + pvlist_init(&l->list); + l->count = 0; +} + +static inline void lstAdd(LIST *l, NODE *n) +{ + pvholder_init(&n->link); + pvlist_append(&n->link, &l->list); + n->list = l; + l->count++; +} + +static inline int lstCount(LIST *l) +{ + return l->count; +} + +static inline void lstDelete(LIST *l, NODE *n) +{ + pvlist_remove(&n->link); + n->list = NULL; + l->count--; +} + +static inline NODE *lstFirst(LIST *l) +{ + if (l == NULL || pvlist_empty(&l->list)) + return NULL; + + return pvlist_first_entry(&l->list, struct NODE, link); +} + +static inline NODE *lstGet(LIST *l) +{ + struct NODE *n; + + if (l == NULL || pvlist_empty(&l->list)) + return NULL; + + n = pvlist_pop_entry(&l->list, struct NODE, link); + n->list = NULL; + l->count--; + + return n; +} + +static inline void lstInsert(LIST *l, NODE *nprev, NODE *n) +{ + pvholder_init(&n->link); + + if (nprev == NULL) + pvlist_prepend(&n->link, &l->list); + else + pvlist_insert(&n->link, &nprev->link); + + n->list = l; + l->count++; +} + +static inline NODE *lstLast(LIST *l) +{ + if (l == NULL || pvlist_empty(&l->list)) + return NULL; + + return pvlist_last_entry(&l->list, struct NODE, link); +} + +static inline NODE *lstNext(NODE *n) +{ + if (n->list == NULL || &n->link == n->list->list.head.prev) + return NULL; + + return container_of(n->link.next, struct NODE, link); +} + +static inline NODE *lstPrevious(NODE *n) +{ + if (n->list == NULL || &n->link == n->list->list.head.next) + return NULL; + + return container_of(n->link.prev, struct NODE, link); +} + +static inline void lstFree(LIST *l) +{ + lstInit(l); +} + +#ifdef __cplusplus +extern "C" { +#endif + +void lstExtract(LIST *lsrc, NODE *nstart, NODE *nend, LIST *ldst); + +NODE *lstNth(LIST *l, int nodenum); + +NODE *lstNStep(NODE *n, int steps); + +int lstFind(LIST *l, NODE *n); + +void lstConcat(LIST *ldst, LIST *lsrc); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_LSTLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/memPartLib.h b/kernel/xenomai-v3.2.4/include/vxworks/memPartLib.h new file mode 100644 index 0000000..139714c --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/memPartLib.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. 
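+ *
+ * Minimal usage sketch (pool size chosen arbitrarily):
+ *
+ *   static char pool[65536];
+ *
+ *   PART_ID part = memPartCreate(pool, sizeof(pool));
+ *   char *blk = memPartAlloc(part, 256);
+ *   memPartFree(part, blk);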
+ */
+
+#ifndef _XENOMAI_VXWORKS_MEMPARTLIB_H
+#define _XENOMAI_VXWORKS_MEMPARTLIB_H
+
+#include <vxworks/types.h>
+
+typedef uintptr_t PART_ID;
+
+struct wind_part_stats {
+	unsigned long numBytesFree;
+	unsigned long numBlocksFree;
+	unsigned long numBytesAlloc;
+	unsigned long numBlocksAlloc;
+	unsigned long maxBytesAlloc;
+};
+
+typedef struct wind_part_stats MEM_PART_STATS;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+PART_ID memPartCreate(char *pPool, unsigned int poolSize);
+
+STATUS memPartAddToPool(PART_ID partId,
+			char *pPool, unsigned int poolSize);
+
+void *memPartAlignedAlloc(PART_ID partId,
+			  unsigned int nBytes, unsigned int alignment);
+
+void *memPartAlloc(PART_ID partId, unsigned int nBytes);
+
+STATUS memPartFree(PART_ID partId, char *pBlock);
+
+void memAddToPool(char *pPool, unsigned int poolSize);
+
+STATUS memPartInfoGet(PART_ID partId,
+		      MEM_PART_STATS *ppartStats);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_MEMPARTLIB_H */
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/msgQLib.h b/kernel/xenomai-v3.2.4/include/vxworks/msgQLib.h
new file mode 100644
index 0000000..e8e44a6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/msgQLib.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_MSGQLIB_H
+#define _XENOMAI_VXWORKS_MSGQLIB_H
+
+#include <vxworks/types.h>
+
+typedef uintptr_t MSG_Q_ID;
+
+#define MSG_PRI_NORMAL 0
+#define MSG_PRI_URGENT 1
+
+#define MSG_Q_FIFO     0x0
+#define MSG_Q_PRIORITY 0x1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MSG_Q_ID msgQCreate(int maxMsgs, int maxMsgLength, int options);
+
+STATUS msgQDelete(MSG_Q_ID msgQId);
+
+int msgQNumMsgs(MSG_Q_ID msgQId);
+
+int msgQReceive(MSG_Q_ID msgQId, char *buf, UINT bytes, int timeout);
+
+STATUS msgQSend(MSG_Q_ID msgQId, const char *buf, UINT bytes,
+		int timeout, int prio);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_MSGQLIB_H */
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/rngLib.h b/kernel/xenomai-v3.2.4/include/vxworks/rngLib.h
new file mode 100644
index 0000000..561358d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/rngLib.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2008 Niklaus Giger <niklaus.giger@member.fsf.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. + */ + +#ifndef _XENOMAI_VXWORKS_RNGLIB_H +#define _XENOMAI_VXWORKS_RNGLIB_H + +#include <vxworks/types.h> + +typedef uintptr_t RING_ID; + +#ifdef __cplusplus +extern "C" { +#endif + +RING_ID rngCreate(int nbytes); + +void rngDelete(RING_ID ringId); + +void rngFlush(RING_ID ringId); + +int rngBufGet(RING_ID rngId, char *buffer, int maxbytes); + +int rngBufPut(RING_ID rngId, char *buffer, int nbytes); + +BOOL rngIsEmpty(RING_ID ringId); + +BOOL rngIsFull(RING_ID ringId); + +int rngFreeBytes(RING_ID ringId); + +int rngNBytes(RING_ID ringId); + +void rngPutAhead(RING_ID ringId, char byte, int offset); + +void rngMoveAhead(RING_ID ringId, int n); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_RNGLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/semLib.h b/kernel/xenomai-v3.2.4/include/vxworks/semLib.h new file mode 100644 index 0000000..1ff9f97 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/semLib.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. 
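+ *
+ * Minimal usage sketch (binary semaphore, hypothetical flow):
+ *
+ *   SEM_ID sem = semBCreate(SEM_Q_FIFO, SEM_EMPTY);
+ *
+ *   semGive(sem);               -- post from one task
+ *   semTake(sem, WAIT_FOREVER); -- pend from another
+ *   semDelete(sem);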
+ */
+
+#ifndef _XENOMAI_VXWORKS_SEMLIB_H
+#define _XENOMAI_VXWORKS_SEMLIB_H
+
+#include <vxworks/types.h>
+
+#define SEM_Q_FIFO         0x0
+#define SEM_Q_PRIORITY     0x1
+#define SEM_DELETE_SAFE    0x4
+#define SEM_INVERSION_SAFE 0x8
+
+typedef uintptr_t SEM_ID;
+
+typedef enum {
+	SEM_EMPTY = 0,
+	SEM_FULL
+} SEM_B_STATE;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+STATUS semGive(SEM_ID sem_id);
+
+STATUS semTake(SEM_ID sem_id, int timeout);
+
+STATUS semFlush(SEM_ID sem_id);
+
+STATUS semDelete(SEM_ID sem_id);
+
+SEM_ID semBCreate(int flags, SEM_B_STATE state);
+
+SEM_ID semMCreate(int flags);
+
+SEM_ID semCCreate(int flags, int count);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_SEMLIB_H */
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/sysLib.h b/kernel/xenomai-v3.2.4/include/vxworks/sysLib.h
new file mode 100644
index 0000000..caa7e03
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/sysLib.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_SYSLIB_H
+#define _XENOMAI_VXWORKS_SYSLIB_H
+
+#include <vxworks/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int sysClkRateGet(void);
+
+STATUS sysClkRateSet(int hz);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_SYSLIB_H */
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/taskHookLib.h b/kernel/xenomai-v3.2.4/include/vxworks/taskHookLib.h
new file mode 100644
index 0000000..132786e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/taskHookLib.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
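+ *
+ * Usage sketch (the hook body is deliberately left empty; the hook
+ * signature follows taskLib.h's FUNCPTR):
+ *
+ *   static void on_create(long arg, ...) { }
+ *
+ *   taskCreateHookAdd(on_create);
+ *   ...
+ *   taskCreateHookDelete(on_create);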
+ */ + +#ifndef _XENOMAI_VXWORKS_TASKHOOKLIB_H +#define _XENOMAI_VXWORKS_TASKHOOKLIB_H + +#include <vxworks/types.h> +#include <vxworks/taskLib.h> + +#ifdef __cplusplus +extern "C" { +#endif + +STATUS taskCreateHookAdd(FUNCPTR createHook); + +STATUS taskCreateHookDelete(FUNCPTR createHook); + +STATUS taskDeleteHookAdd(FUNCPTR deleteHook); + +STATUS taskDeleteHookDelete(FUNCPTR deleteHook); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_TASKHOOKLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/taskInfo.h b/kernel/xenomai-v3.2.4/include/vxworks/taskInfo.h new file mode 100644 index 0000000..0416efc --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/taskInfo.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. + */ + +#ifndef _XENOMAI_VXWORKS_TASKINFO_H +#define _XENOMAI_VXWORKS_TASKINFO_H + +#include <vxworks/types.h> +#include <vxworks/taskLib.h> + +typedef struct TASK_DESC { + TASK_ID td_tid; + int td_priority; + int td_status; + int td_flags; + char td_name[32]; + FUNCPTR td_entry; + int td_errorStatus; + + int td_stacksize; + char *td_pStackBase; + char *td_pStackEnd; +} TASK_DESC; + +#ifdef __cplusplus +extern "C" { +#endif + +const char *taskName(TASK_ID task_id); + +TASK_ID taskNameToId(const char *name); + +TASK_ID taskIdDefault(TASK_ID task_id); + +BOOL taskIsReady(TASK_ID task_id); + +BOOL taskIsSuspended (TASK_ID task_id); + +STATUS taskGetInfo(TASK_ID task_id, TASK_DESC *desc); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_TASKINFO_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/taskLib.h b/kernel/xenomai-v3.2.4/include/vxworks/taskLib.h new file mode 100644 index 0000000..ebbe7c6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/taskLib.h @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
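+ *
+ * Spawning and deleting a task (sketch; the priority, stack size
+ * and entry point below are hypothetical):
+ *
+ *   static void entry(long a0, ...) { }
+ *
+ *   TASK_ID tid = taskSpawn("t1", 100, 0, 0x8000, entry,
+ *                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ *   taskDelete(tid);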
+ * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. + */ + +#ifndef _XENOMAI_VXWORKS_TASKLIB_H +#define _XENOMAI_VXWORKS_TASKLIB_H + +#include <pthread.h> +#include <vxworks/types.h> + +/* Task options: none applicable - only there for code compatibility. */ +#define VX_UNBREAKABLE 0x0002 +#define VX_FP_TASK 0x0008 +#define VX_PRIVATE_ENV 0x0080 +#define VX_NO_STACK_FILL 0x0100 + +#define WIND_READY 0x0 +#define WIND_SUSPEND 0x1 +#define WIND_PEND 0x2 +#define WIND_DELAY 0x4 +#define WIND_DEAD 0x8 +#define WIND_STOP 0x10 /* Never reported. */ + +typedef uintptr_t TASK_ID; + +typedef void (*FUNCPTR)(long arg, ...); + +typedef struct WIND_TCB { + void *opaque; + int status; + int safeCnt; + int flags; + FUNCPTR entry; +} WIND_TCB; + +#ifdef __cplusplus +extern "C" { +#endif + +TASK_ID taskSpawn(const char *name, + int prio, + int flags, + int stacksize, + FUNCPTR entry, + long arg0, long arg1, long arg2, long arg3, long arg4, + long arg5, long arg6, long arg7, long arg8, long arg9); + +STATUS taskInit(WIND_TCB *pTcb, + const char *name, + int prio, + int flags, + char * stack __attribute__ ((unused)), + int stacksize, + FUNCPTR entry, + long arg0, long arg1, long arg2, long arg3, long arg4, + long arg5, long arg6, long arg7, long arg8, long arg9); + +STATUS taskActivate(TASK_ID tid); + +STATUS taskDelete(TASK_ID tid); + +STATUS taskDeleteForce(TASK_ID tid); + +STATUS taskSuspend(TASK_ID tid); + +STATUS taskResume(TASK_ID tid); + +STATUS taskPrioritySet(TASK_ID tid, + int prio); + +STATUS taskPriorityGet(TASK_ID tid, + int *pprio); + +void taskExit(int code); + +STATUS taskLock(void); + +STATUS taskUnlock(void); + +TASK_ID taskIdSelf(void); + +STATUS taskSafe(void); + +STATUS taskUnsafe(void); + +STATUS taskDelay(int ticks); + +STATUS taskIdVerify(TASK_ID tid); + +struct WIND_TCB *taskTcb(TASK_ID tid); + +int wind_task_normalize_priority(int wind_prio); + +int wind_task_denormalize_priority(int core_prio); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_TASKLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/tickLib.h b/kernel/xenomai-v3.2.4/include/vxworks/tickLib.h new file mode 100644 index 0000000..52c96f0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/tickLib.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. 
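+ *
+ * Usage sketch (assumes sysLib.h and taskLib.h are also included):
+ *
+ *   ULONG start = tickGet();
+ *   taskDelay(sysClkRateGet());  -- wait about one second of ticks
+ *   ULONG spent = tickGet() - start;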
+ */ + +#ifndef _XENOMAI_VXWORKS_TICKLIB_H +#define _XENOMAI_VXWORKS_TICKLIB_H + +#include <vxworks/types.h> + +#ifdef __cplusplus +extern "C" { +#endif + +ULONG tickGet(void); + +void tickSet(ULONG ticks); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_TICKLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/types.h b/kernel/xenomai-v3.2.4/include/vxworks/types.h new file mode 100644 index 0000000..0790831 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/types.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. + */ + +#ifndef _XENOMAI_VXWORKS_TYPES_H +#define _XENOMAI_VXWORKS_TYPES_H + +#include <stdint.h> + +#undef STATUS +typedef int STATUS; +typedef int BOOL; + +typedef unsigned int UINT; + +typedef unsigned long ULONG; + +#define NO_WAIT 0 +#define WAIT_FOREVER (-1) + +#endif /* !_XENOMAI_VXWORKS_TYPES_H */ diff --git a/kernel/xenomai-v3.2.4/include/vxworks/wdLib.h b/kernel/xenomai-v3.2.4/include/vxworks/wdLib.h new file mode 100644 index 0000000..bbe76aa --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/vxworks/wdLib.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * This file satisfies the references within the emulator code + * mimicking a VxWorks-like API built upon the copperplate library. + * + * VxWorks is a registered trademark of Wind River Systems, Inc. 
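+ *
+ * Usage sketch (handler and delay are hypothetical; sysClkRateGet()
+ * comes from sysLib.h):
+ *
+ *   static void expired(long arg) { }
+ *
+ *   WDOG_ID wd = wdCreate();
+ *   wdStart(wd, sysClkRateGet(), expired, 0);  -- fires in ~1s
+ *   wdCancel(wd);
+ *   wdDelete(wd);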
+ */ + +#ifndef _XENOMAI_VXWORKS_WDLIB_H +#define _XENOMAI_VXWORKS_WDLIB_H + +#include <vxworks/types.h> + +typedef uintptr_t WDOG_ID; + +#ifdef __cplusplus +extern "C" { +#endif + +WDOG_ID wdCreate(void); + +STATUS wdDelete(WDOG_ID wdog_id); + +STATUS wdStart(WDOG_ID wdog_id, + int delay, + void (*handler)(long), + long arg); + +STATUS wdCancel(WDOG_ID wdog_id); + +#ifdef __cplusplus +} +#endif + +#endif /* !_XENOMAI_VXWORKS_WDLIB_H */ diff --git a/kernel/xenomai-v3.2.4/include/xenomai/Makefile.am b/kernel/xenomai-v3.2.4/include/xenomai/Makefile.am new file mode 100644 index 0000000..f4d0c16 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/xenomai/Makefile.am @@ -0,0 +1,6 @@ +includesubdir = $(includedir)/xenomai + +includesub_HEADERS = \ + init.h \ + tunables.h \ + version.h diff --git a/kernel/xenomai-v3.2.4/include/xenomai/init.h b/kernel/xenomai-v3.2.4/include/xenomai/init.h new file mode 100644 index 0000000..598bf53 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/xenomai/init.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_INIT_H +#define _XENOMAI_INIT_H + +#include <boilerplate/setup.h> +#include <boilerplate/ancillaries.h> + +#ifdef __cplusplus +extern "C" { +#endif + +void xenomai_init(int *argcp, char *const **argvp); + +void xenomai_init_dso(int *argcp, char *const **argvp); + +int xenomai_main(int argc, char *const argv[]); + +void xenomai_usage(void); + +void application_usage(void); + +void application_version(void); + +extern const char *xenomai_version_string; + +extern const int xenomai_auto_bootstrap; + +#ifdef __cplusplus +} +#endif + +#endif /* _XENOMAI_INIT_H */ diff --git a/kernel/xenomai-v3.2.4/include/xenomai/tunables.h b/kernel/xenomai-v3.2.4/include/xenomai/tunables.h new file mode 100644 index 0000000..9eebc22 --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/xenomai/tunables.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _XENOMAI_TUNABLES_H +#define _XENOMAI_TUNABLES_H + +#include <boilerplate/tunables.h> +#include <copperplate/tunables.h> + +#endif /* !_XENOMAI_TUNABLES_H */ diff --git a/kernel/xenomai-v3.2.4/include/xenomai/version.h b/kernel/xenomai-v3.2.4/include/xenomai/version.h new file mode 100644 index 0000000..bf603be --- /dev/null +++ b/kernel/xenomai-v3.2.4/include/xenomai/version.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_VERSION_H +#define _XENOMAI_VERSION_H + +#ifndef __KERNEL__ +#include <xeno_config.h> +#include <boilerplate/compiler.h> +#endif + +#define XENO_VERSION(maj, min, rev) (((maj)<<16)|((min)<<8)|(rev)) + +#define XENO_VERSION_CODE XENO_VERSION(CONFIG_XENO_VERSION_MAJOR, \ + CONFIG_XENO_VERSION_MINOR, \ + CONFIG_XENO_REVISION_LEVEL) + +#define XENO_VERSION_STRING CONFIG_XENO_VERSION_STRING + +#endif /* _XENOMAI_VERSION_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/COPYING b/kernel/xenomai-v3.2.4/kernel/cobalt/COPYING new file mode 100644 index 0000000..0d72637 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/COPYING @@ -0,0 +1,281 @@ + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig new file mode 100644 index 0000000..3233de1 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig @@ -0,0 +1,489 @@ +menu "Core features" + +config XENO_OPT_SCHED_CLASSES + bool "Extra scheduling classes" + default n + help + The Cobalt kernel implements a set of scheduling classes. + Each scheduling class defines its own set of rules for + determining when and how to select a new thread to run. + + Cobalt has a built-in real-time class, which supports both + preemptive fixed-priority FIFO and round-robin scheduling. + + Enabling CONFIG_XENO_OPT_SCHED_CLASSES allows you to select + additional scheduling classes to enable in the Cobalt kernel. + + If in doubt, say N. + +config XENO_OPT_SCHED_WEAK + bool "Weak scheduling class" + default n + depends on XENO_OPT_SCHED_CLASSES + help + This option creates a Cobalt scheduling class for mapping + members of the regular POSIX SCHED_FIFO/RR policies to a low + priority class of the Cobalt kernel, providing no real-time + guarantee. Therefore, up to a hundred non real-time priority + levels are available from the SCHED_WEAK policy. + + When CONFIG_XENO_OPT_SCHED_WEAK is disabled, Cobalt still + supports a single non real-time priority level (i.e. zero + priority), assigned to members of the SCHED_OTHER class. + + SCHED_WEAK/SCHED_OTHER threads can access Cobalt resources and + wait on Cobalt synchronization objects, but cannot compete for + the CPU with members of the real-time Cobalt classes. + + Since Cobalt assumes no real-time requirement for + SCHED_WEAK/SCHED_OTHER threads, they are automatically moved + back to secondary mode upon return from any Cobalt syscall if + necessary, unless they hold a Cobalt mutex, which defers the + transition until that mutex is released. + + If in doubt, say N. + +config XENO_OPT_SCHED_TP + bool "Temporal partitioning" + default n + depends on XENO_OPT_SCHED_CLASSES + help + This option enables support for temporal partitioning. + + If in doubt, say N. + +config XENO_OPT_SCHED_TP_NRPART + int "Number of partitions" + default 4 + range 1 1024 + depends on XENO_OPT_SCHED_TP + help + Define here the maximum number of temporal partitions the TP + scheduler may have to handle. + +config XENO_OPT_SCHED_SPORADIC + bool "Sporadic scheduling" + default n + depends on XENO_OPT_SCHED_CLASSES + help + This option enables support for the sporadic scheduling policy + in the Cobalt kernel (SCHED_SPORADIC), also known as the POSIX + sporadic server. + + It can be used to cap the execution time of a thread within a + given period of time. + + If in doubt, say N. + +config XENO_OPT_SCHED_SPORADIC_MAXREPL + int "Maximum number of pending replenishments" + default 8 + range 4 16 + depends on XENO_OPT_SCHED_SPORADIC + help + For performance reasons, the budget replenishment information + is statically stored on a per-thread basis. This parameter + defines the maximum number of replenishment requests that can + be pending concurrently for any given thread that undergoes + sporadic scheduling (system minimum is 4). + +config XENO_OPT_SCHED_QUOTA + bool "Thread groups with runtime quota" + default n + depends on XENO_OPT_SCHED_CLASSES + help + This option enables the SCHED_QUOTA scheduling policy in the + Cobalt kernel. + + This policy enforces a limitation on the CPU consumption of + threads over a globally defined period, known as the quota + interval.
This is done by pooling threads with common + requirements in groups, and giving each group a share of the + global period (see CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD). + + When threads have entirely consumed the quota allotted to the + group they belong to, that group is suspended as a whole until + the next quota interval starts. At this point, a new runtime + budget is given to each group, in accordance with its share. + + If in doubt, say N. + +config XENO_OPT_SCHED_QUOTA_PERIOD + int "Quota interval (us)" + default 10000 + range 100 1000000000 + depends on XENO_OPT_SCHED_QUOTA + help + The global period which thread groups get a share of. + +config XENO_OPT_SCHED_QUOTA_NR_GROUPS + int "Number of thread groups" + default 32 + range 1 1024 + depends on XENO_OPT_SCHED_QUOTA + help + The overall number of thread groups which may be defined + across all CPUs. + +config XENO_OPT_STATS + bool "Runtime statistics" + depends on XENO_OPT_VFILE + default y + help + This option causes the Cobalt kernel to collect various + per-thread runtime statistics, which are accessible through + the /proc/xenomai/sched/stat interface. + +config XENO_OPT_STATS_IRQS + bool "Account IRQ handlers separately" + depends on XENO_OPT_STATS && IPIPE + default y + help + When enabled, the runtime of interrupt handlers is accounted + separately from the threads they interrupt. Also, the + occurrence of shared interrupts is accounted on a per-handler + basis. + + This option is available to legacy I-pipe builds only. + +config XENO_OPT_SHIRQ + bool "Shared interrupts" + help + Enables support for both level- and edge-triggered shared + interrupts, so that multiple real-time interrupt handlers + are allowed to control dedicated hardware devices which are + configured to share the same interrupt line. + +config XENO_OPT_RR_QUANTUM + int "Round-robin quantum (us)" + default 1000 + help + This parameter defines the duration of the default round-robin + time quantum, expressed in microseconds. This value may be + overridden internally by Cobalt services which provide a + round-robin interval. + +config XENO_OPT_AUTOTUNE + tristate "Auto-tuning" + default y + select XENO_DRIVERS_AUTOTUNE + help + Enable auto-tuning capabilities. Auto-tuning is used for + adjusting the core timing services to the intrinsic latency of + the platform. + +config XENO_OPT_SCALABLE_SCHED + bool "O(1) scheduler" + help + This option causes a multi-level priority queue to be used in + the real-time scheduler, so that it operates in constant time + regardless of the number of _concurrently runnable_ threads + (which might be much lower than the total number of active + threads). + + Its use is recommended for large multi-threaded systems + involving more than 10 such threads; otherwise, the default + linear method usually performs better with a lower memory + footprint. + +choice + prompt "Timer indexing method" + default XENO_OPT_TIMER_LIST if !X86_64 + default XENO_OPT_TIMER_RBTREE if X86_64 + help + This option selects the underlying data structure used for + ordering the outstanding software timers managed by the Cobalt + kernel. + +config XENO_OPT_TIMER_LIST + bool "Linear" + help + Use a linked list. Albeit O(N), this simple data structure is + particularly efficient when only a few timers (< 10) may be + concurrently outstanding at any point in time. + +config XENO_OPT_TIMER_RBTREE + bool "Tree" + help + Use a red-black tree.
This data structure is efficient when a + high number of software timers may be concurrently + outstanding at any point in time. + +endchoice + +config XENO_OPT_PIPE + bool + +config XENO_OPT_MAP + bool + +config XENO_OPT_EXTCLOCK + bool + +config XENO_OPT_COBALT_EXTENSION + bool + +config XENO_OPT_VFILE + bool + depends on PROC_FS + default y + +endmenu + +menu "Sizes and static limits" + +config XENO_OPT_PIPE_NRDEV + int "Number of pipe devices" + depends on XENO_OPT_PIPE + default 32 + help + Message pipes are bi-directional FIFO communication channels + allowing data exchange between Cobalt threads and regular + POSIX threads. Pipes natively preserve message boundaries, but + can also be used in byte streaming mode from kernel to + user-space. + + This option sets the maximum number of pipe devices supported + in the system. Pipe devices are named /dev/rtpN where N is a + device minor number ranging from 0 to XENO_OPT_PIPE_NRDEV - 1. + +config XENO_OPT_REGISTRY_NRSLOTS + int "Number of registry slots" + default 512 + help + The registry is used by the Cobalt kernel to export named + resources to user-space programs via the /proc interface. + Each named resource occupies a registry slot. This option sets + the maximum number of resources the registry can handle. + +config XENO_OPT_SYS_HEAPSZ + int "Size of system heap (Kb)" + default 4096 + help + The system heap is used for various internal allocations by + the Cobalt kernel. The size is expressed in Kilobytes. + +config XENO_OPT_PRIVATE_HEAPSZ + int "Size of private heap (Kb)" + default 256 + help + The Cobalt kernel implements fast IPC mechanisms within the + scope of a process which require a private kernel memory heap + to be mapped in the address space of each Xenomai application + process. This option can be used to set the size of this + per-process heap. + + 64k is considered a large enough size for common use cases. + +config XENO_OPT_SHARED_HEAPSZ + int "Size of shared heap (Kb)" + default 256 + help + The Cobalt kernel implements fast IPC mechanisms between + processes which require a shared kernel memory heap to be + mapped in the address space of all Xenomai application + processes. This option can be used to set the size of this + system-wide heap. + + 64k is considered a large enough size for common use cases. + +config XENO_OPT_NRTIMERS + int "Maximum number of POSIX timers per process" + default 256 + help + This tunable controls how many POSIX timers can exist at any + given time for each Cobalt process (a timer is created by a + call to the timer_create() service of the Cobalt/POSIX API). + +config XENO_OPT_DEBUG_TRACE_LOGSZ + int "Trace log size" + depends on XENO_OPT_DEBUG_TRACE_RELAX + default 16 + help + The size (kilobytes) of the trace log of relax requests. Once + this limit is reached, subsequent traces will be silently + discarded. + + Writing to /proc/xenomai/debug/relax empties the trace log. + +endmenu + +menu "Latency settings" + +config XENO_OPT_TIMING_SCHEDLAT + int "User scheduling latency (ns)" + default 0 + help + The user scheduling latency is the time between the + termination of an interrupt handler and the execution of the + first instruction of the real-time application thread this + handler resumes. A default value of 0 (recommended) will cause + a pre-calibrated value to be used. + + If the auto-tuner is enabled, this value will be used as the + factory default when running "autotune --reset". 
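As an aside on the XENO_OPT_PIPE_NRDEV entry above: the non-real-time side of a message pipe is driven with plain POSIX file I/O on /dev/rtpN. A minimal sketch, assuming a real-time peer has already connected pipe minor 0, and relying on the message-boundary preservation described in that help text:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd;

	fd = open("/dev/rtp0", O_RDWR);	/* pipe device, minor 0 */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf));	/* one message per read() */
	if (n > 0)
		printf("received %zd-byte message\n", n);

	close(fd);
	return 0;
}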
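Similarly, each successful timer_create() call made through the Cobalt/POSIX API occupies one of the XENO_OPT_NRTIMERS per-process slots until timer_delete() releases it. A minimal sketch using only standard POSIX calls, assuming the program is linked against libcobalt so that these services are handled by the Cobalt kernel:

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	(void)sig;	/* just unblock pause() */
}

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo = SIGALRM,
	};
	struct itimerspec its = {
		.it_value = { .tv_sec = 1, .tv_nsec = 0 },
		.it_interval = { .tv_sec = 1, .tv_nsec = 0 },
	};
	timer_t tid;

	signal(SIGALRM, on_alarm);

	/* Consumes one XENO_OPT_NRTIMERS slot; EAGAIN may indicate
	   the per-process limit was reached. */
	if (timer_create(CLOCK_MONOTONIC, &sev, &tid)) {
		perror("timer_create");
		return 1;
	}
	if (timer_settime(tid, 0, &its, NULL)) {
		perror("timer_settime");
		return 1;
	}

	pause();		/* wait for the first SIGALRM */
	timer_delete(tid);	/* releases the slot */
	return 0;
}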
+ +config XENO_OPT_TIMING_KSCHEDLAT + int "Intra-kernel scheduling latency (ns)" + default 0 + help + The intra-kernel scheduling latency is the time between the + termination of an interrupt handler and the execution of the + first instruction of the RTDM kernel thread this handler + resumes. A default value of 0 (recommended) will cause a + pre-calibrated value to be used. + + Intra-kernel latency is usually significantly lower than user + scheduling latency on MMU-enabled platforms, due to CPU cache + latency. + + If the auto-tuner is enabled, this value will be used as the + factory default when running "autotune --reset". + +config XENO_OPT_TIMING_IRQLAT + int "Interrupt latency (ns)" + default 0 + help + The interrupt latency is the time between the occurrence of an + IRQ and the first instruction of the interrupt handler which + will service it. A default value of 0 (recommended) will cause + a pre-calibrated value to be used. + + If the auto-tuner is enabled, this value will be used as the + factory default when running "autotune --reset". + +endmenu + +menuconfig XENO_OPT_DEBUG + depends on XENO_OPT_VFILE + bool "Debug support" + help + When enabled, various debugging features can be switched + on. They can help to find problems in applications, drivers, + and the Cobalt kernel. XENO_OPT_DEBUG by itself does not have + any impact on the generated code. + +if XENO_OPT_DEBUG + +config XENO_OPT_DEBUG_COBALT + bool "Cobalt runtime assertions" + help + This option activates various assertions inside the Cobalt + kernel. It has limited overhead. + +config XENO_OPT_DEBUG_MEMORY + bool "Cobalt memory checks" + help + This option enables memory debug checks inside the Cobalt + kernel. It may induce significant overhead with large heaps. + +config XENO_OPT_DEBUG_CONTEXT + bool "Check for calling context" + help + This option enables checks for the calling context in the + Cobalt kernel, aimed at detecting when regular Linux routines + are entered from a real-time context, and conversely. + +config XENO_OPT_DEBUG_LOCKING + bool "Spinlock debugging support" + default y if SMP + help + This option activates runtime assertions and measurements of + spinlock spin time and duration in the Cobalt kernel. It helps + locating latency spots due to interrupt-masked sections. + Statistics about the longest masked section can be found in + /proc/xenomai/debug/lock. + + This option may induce a measurable overhead on low-end + machines. + +config XENO_OPT_DEBUG_USER + bool "User consistency checks" + help + This option enables a set of consistency checks for + detecting wrong runtime behavior in user applications. + + With some of the debug categories, threads can ask for + notification when a problem is detected, by turning on the + PTHREAD_WARNSW mode bit with pthread_setmode_np(). Cobalt + sends the Linux-originated SIGDEBUG signal for notifying + threads, along with a reason code passed into the associated + siginfo data (see pthread_setmode_np()). + + Some of these runtime checks may induce overhead; enable + them for debugging purposes only. + +if XENO_OPT_DEBUG_USER + +config XENO_OPT_DEBUG_MUTEX_RELAXED + bool "Detect relaxed mutex owner" + default y + help + A thread which attempts to acquire a mutex currently owned by + another thread running in secondary/relaxed mode will suffer + unwanted latencies, due to a priority inversion. If debug + notifications are enabled for such a thread, it receives a + SIGDEBUG signal.
+ + This option has some overhead in real-time mode over + contended mutexes. + +config XENO_OPT_DEBUG_MUTEX_SLEEP + bool "Detect sleeping with mutex" + default y + help + A thread which goes to sleep while holding a mutex is likely + to cause unwanted latencies to other threads serialized by + the same lock. If debug notifications are enabled for such a + thread, it receives a SIGDEBUG signal right before entering + sleep. + + This option has noticeable overhead in real-time mode as it + disables the normal fast mutex operations from user-space, + causing a system call for each mutex acquisition/release. + +config XENO_OPT_DEBUG_LEGACY + bool "Detect usage of legacy constructs/features" + default n + help + Turns on detection of legacy API usage. + +endif # XENO_OPT_DEBUG_USER + +config XENO_OPT_DEBUG_TRACE_RELAX + bool "Trace relax requests" + default n + help + This option enables recording of unwanted relax requests from + user-space applications leaving the real-time domain, logging + the thread information and code location involved. All records + are readable from /proc/xenomai/debug/relax, and can be + decoded using the "slackspot" utility. + +config XENO_OPT_WATCHDOG + bool "Watchdog support" + default y + help + This option activates a watchdog aimed at detecting runaway + Cobalt threads. If enabled, the watchdog triggers after a + given period of uninterrupted real-time activity has elapsed + without any interaction with Linux. + + In such an event, the current thread is moved out of the + real-time domain, receiving a SIGDEBUG signal from the Linux + kernel immediately after. + + The timeout value of the watchdog can be set using the + XENO_OPT_WATCHDOG_TIMEOUT parameter. + +config XENO_OPT_WATCHDOG_TIMEOUT + depends on XENO_OPT_WATCHDOG + int "Watchdog timeout" + default 4 + range 1 60 + help + Watchdog timeout value (in seconds).
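To make the SIGDEBUG machinery above concrete, here is a minimal user-space sketch: a thread opts into notifications with pthread_setmode_np(PTHREAD_WARNSW), as the XENO_OPT_DEBUG_USER help describes, and a handler reports each unwanted relax, whether it comes from the mutex checks or from the watchdog. SIGDEBUG, PTHREAD_WARNSW and pthread_setmode_np() are assumed to come from Xenomai's libcobalt headers; a real application would also create the thread with a SCHED_FIFO attribute so it runs under Cobalt.

#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void sigdebug_handler(int sig, siginfo_t *si, void *context)
{
	static const char msg[] = "SIGDEBUG: unwanted mode switch\n";

	(void)sig; (void)si; (void)context;
	/* The reason code travels in the siginfo data as described
	   above; keep the handler async-signal-safe. */
	write(STDERR_FILENO, msg, sizeof(msg) - 1);
}

static void *rt_work(void *arg)
{
	(void)arg;
	/* Ask Cobalt for a SIGDEBUG on each unwanted relax. */
	pthread_setmode_np(0, PTHREAD_WARNSW, NULL);
	/* ... time-critical section: any relax now raises SIGDEBUG ... */
	return NULL;
}

int main(void)
{
	struct sigaction sa;
	pthread_t tid;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigdebug_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGDEBUG, &sa, NULL);

	pthread_create(&tid, NULL, rt_work, NULL);
	pthread_join(tid, NULL);
	return 0;
}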
+ +endif # XENO_OPT_DEBUG diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/Makefile new file mode 100644 index 0000000..6cd2d59 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/Makefile @@ -0,0 +1,27 @@ +obj-$(CONFIG_XENOMAI) += pipeline/ xenomai.o rtdm/ posix/ + +xenomai-y := arith.o \ + bufd.o \ + clock.o \ + heap.o \ + init.o \ + lock.o \ + registry.o \ + sched-idle.o \ + sched-rt.o \ + sched.o \ + select.o \ + synch.o \ + thread.o \ + time.o \ + timer.o \ + tree.o + +xenomai-$(CONFIG_XENO_OPT_SCHED_QUOTA) += sched-quota.o +xenomai-$(CONFIG_XENO_OPT_SCHED_WEAK) += sched-weak.o +xenomai-$(CONFIG_XENO_OPT_SCHED_SPORADIC) += sched-sporadic.o +xenomai-$(CONFIG_XENO_OPT_SCHED_TP) += sched-tp.o +xenomai-$(CONFIG_XENO_OPT_DEBUG) += debug.o +xenomai-$(CONFIG_XENO_OPT_PIPE) += pipe.o +xenomai-$(CONFIG_XENO_OPT_MAP) += map.o +xenomai-$(CONFIG_PROC_FS) += vfile.o procfs.o diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/Kconfig new file mode 100644 index 0000000..b0cbdc3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/Kconfig @@ -0,0 +1,16 @@ +source "kernel/xenomai/Kconfig" +source "drivers/xenomai/Kconfig" + +config XENO_ARCH_FPU + def_bool VFP + +config XENO_ARCH_SYS3264 + def_bool n + +config XENO_ARCH_OUTOFLINE_XNLOCK + bool + default y + +config XENO_ARCH_IPIPE_COMPAT + def_bool DOVETAIL + select IPIPE_COMPAT diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/Makefile new file mode 100644 index 0000000..13cbf84 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/Makefile @@ -0,0 +1,5 @@ + +obj-$(CONFIG_XENOMAI) += xenomai.o +xenomai-y := machine.o + +ccflags-y := -I$(srctree)/arch/arm/xenomai/include -I$(srctree)/include/xenomai diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/calibration.h new file mode 100644 index 0000000..3cf5825 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/calibration.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2001-2021 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_ARM_DOVETAIL_CALIBRATION_H +#define _COBALT_ARM_DOVETAIL_CALIBRATION_H + +static inline void xnarch_get_latencies(struct xnclock_gravity *p) +{ + unsigned int sched_latency; + +#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0 + sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT; +#else + sched_latency = 5000; +#endif + p->user = xnclock_ns_to_ticks(&nkclock, sched_latency); + p->kernel = xnclock_ns_to_ticks(&nkclock, + CONFIG_XENO_OPT_TIMING_KSCHEDLAT); + p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT); +} + +#endif /* !_COBALT_ARM_DOVETAIL_CALIBRATION_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/features.h new file mode 100644 index 0000000..9c0af20 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/features.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_DOVETAIL_FEATURES_H +#define _COBALT_ARM_DOVETAIL_FEATURES_H + +struct cobalt_featinfo; +static inline void collect_arch_features(struct cobalt_featinfo *p) { } + +#include <asm/xenomai/uapi/features.h> + +#endif /* !_COBALT_ARM_DOVETAIL_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/fptest.h new file mode 100644 index 0000000..4cc0752 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/fptest.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_ARM_DOVETAIL_FPTEST_H +#define _COBALT_ARM_DOVETAIL_FPTEST_H + +#include <linux/errno.h> +#include <asm/hwcap.h> + +#ifdef CONFIG_VFP +#define have_vfp (elf_hwcap & HWCAP_VFP) +#else /* !CONFIG_VFP */ +#define have_vfp 0 +#endif /* !CONFIG_VFP */ + +#include <asm/xenomai/uapi/fptest.h> + +static inline int fp_kernel_supported(void) +{ + return 0; +} + +static inline void fp_init(void) +{ +} + +static inline int fp_linux_begin(void) +{ + return -ENOSYS; +} + +static inline void fp_linux_end(void) +{ +} + +static inline int fp_detect(void) +{ + return have_vfp ? __COBALT_HAVE_VFP : 0; +} + +#endif /* _COBALT_ARM_DOVETAIL_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/machine.h new file mode 100644 index 0000000..a694a78 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/machine.h @@ -0,0 +1,72 @@ +/** + * Copyright © 2002-2004 Philippe Gerum. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, Inc., 675 Mass Ave, + * Cambridge MA 02139, USA; either version 2 of the License, or (at + * your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_DOVETAIL_MACHINE_H +#define _COBALT_ARM_DOVETAIL_MACHINE_H + +#include <linux/version.h> +#include <asm/byteorder.h> +#include <asm/cacheflush.h> + +#define xnarch_cache_aliasing() cache_is_vivt() + +#if __LINUX_ARM_ARCH__ < 5 +static inline __attribute_const__ unsigned long ffnz(unsigned long x) +{ + int r = 0; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} +#else +static inline __attribute_const__ unsigned long ffnz(unsigned long ul) +{ + int __r; + __asm__("clz\t%0, %1" : "=r" (__r) : "r"(ul & (-ul)) : "cc"); + return 31 - __r; +} +#endif + +#include <asm-generic/xenomai/machine.h> + +#endif /* !_COBALT_ARM_DOVETAIL_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall.h new file mode 100644 index 0000000..d41b257 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_DOVETAIL_SYSCALL_H +#define _COBALT_ARM_DOVETAIL_SYSCALL_H + +#include <linux/errno.h> +#include <linux/uaccess.h> +#include <asm/unistd.h> +#include <asm/ptrace.h> +#include <asm-generic/xenomai/syscall.h> + +/* + * Cobalt syscall numbers can be fetched from ARM_ORIG_r0 with ARM_r7 + * containing the Xenomai syscall marker, Linux syscalls directly from + * ARM_r7. Since we have to work with Dovetail whilst remaining binary + * compatible with applications built for the I-pipe, we retain the + * old syscall signature based on receiving XENO_ARM_SYSCALL in + * ARM_r7, possibly ORed with __COBALT_SYSCALL_BIT by Dovetail + * (IPIPE_COMPAT mode). + * + * FIXME: We also have __COBALT_SYSCALL_BIT (equal to + * __OOB_SYSCALL_BIT) present in the actual syscall number in r0, + * which is pretty much useless. Oh, well... When support for the + * I-pipe is dropped, we may switch back to the regular convention + * Dovetail abides by, with the actual syscall number into r7 ORed + * with __OOB_SYSCALL_BIT, freeing r0 for passing a call argument. + */ +#define __xn_reg_sys(__regs) ((__regs)->ARM_ORIG_r0) +#define __xn_syscall_p(__regs) (((__regs)->ARM_r7 & ~__COBALT_SYSCALL_BIT) == XENO_ARM_SYSCALL) +#define __xn_syscall(__regs) (__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT) + +/* + * Root syscall number with predicate (valid only if + * !__xn_syscall_p(__regs)). + */ +#define __xn_rootcall_p(__regs, __code) \ + ({ \ + *(__code) = (__regs)->ARM_r7; \ + *(__code) < NR_syscalls || *(__code) >= __ARM_NR_BASE; \ + }) + +#define __xn_reg_rval(__regs) ((__regs)->ARM_r0) +#define __xn_reg_pc(__regs) ((__regs)->ARM_ip) +#define __xn_reg_sp(__regs) ((__regs)->ARM_sp) + +static inline void __xn_error_return(struct pt_regs *regs, int v) +{ + __xn_reg_rval(regs) = v; +} + +static inline void __xn_status_return(struct pt_regs *regs, long v) +{ + __xn_reg_rval(regs) = v; +} + +static inline int __xn_interrupted_p(struct pt_regs *regs) +{ + return __xn_reg_rval(regs) == -EINTR; +} + +static inline +int xnarch_local_syscall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5) +{ + /* We need none of these with Dovetail. */ + return -ENOSYS; +} + +#define pipeline_get_syscall_args pipeline_get_syscall_args +static inline void pipeline_get_syscall_args(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + args[0] = regs->ARM_r1; + args[1] = regs->ARM_r2; + args[2] = regs->ARM_r3; + args[3] = regs->ARM_r4; + args[4] = regs->ARM_r5; +} + +#endif /* !_COBALT_ARM_DOVETAIL_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall32.h new file mode 100644 index 0000000..95c5a11 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall32.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_SYSCALL32_H +#define _COBALT_ARM_ASM_SYSCALL32_H + +#include <asm-generic/xenomai/syscall32.h> + +#endif /* !_COBALT_ARM_ASM_SYSCALL32_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/thread.h new file mode 100644 index 0000000..792a3d2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/thread.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_DOVETAIL_THREAD_H +#define _COBALT_ARM_DOVETAIL_THREAD_H + +#include <asm-generic/xenomai/dovetail/thread.h> +#include <asm/traps.h> + +#define xnarch_fault_pc(__regs) ((__regs)->ARM_pc - (thumb_mode(__regs) ? 2 : 4)) +#define xnarch_fault_pf_p(__nr) ((__nr) == ARM_TRAP_ACCESS) +#define xnarch_fault_bp_p(__nr) ((current->ptrace & PT_PTRACED) && \ + ((__nr) == ARM_TRAP_BREAK || \ + (__nr) == ARM_TRAP_UNDEFINSTR)) +#define xnarch_fault_notify(__nr) (!xnarch_fault_bp_p(__nr)) + +#endif /* !_COBALT_ARM_DOVETAIL_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/wrappers.h new file mode 100644 index 0000000..fe59896 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/wrappers.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_WRAPPERS_H +#define _COBALT_ARM_ASM_WRAPPERS_H + +#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */ + +#define __put_user_inatomic __put_user +#define __get_user_inatomic __get_user + +#endif /* _COBALT_ARM_ASM_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/machine.c new file mode 100644 index 0000000..bc32f17 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/machine.c @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org> + */ + +#include <linux/mm.h> +#include <asm/xenomai/machine.h> + +static void mach_arm_prefault(struct vm_area_struct *vma) +{ + unsigned long addr; + unsigned int flags; + + if ((vma->vm_flags & VM_MAYREAD)) { + flags = (vma->vm_flags & VM_MAYWRITE) ? FAULT_FLAG_WRITE : 0; + for (addr = vma->vm_start; + addr != vma->vm_end; addr += PAGE_SIZE) + handle_mm_fault(vma, addr, flags, NULL); + } +} + +static const char *const fault_labels[] = { + [ARM_TRAP_ACCESS] = "Data or instruction access", + [ARM_TRAP_SECTION] = "Section fault", + [ARM_TRAP_DABT] = "Generic data abort", + [ARM_TRAP_PABT] = "Prefetch abort", + [ARM_TRAP_BREAK] = "Instruction breakpoint", + [ARM_TRAP_FPU] = "Floating point exception", + [ARM_TRAP_VFP] = "VFP Floating point exception", + [ARM_TRAP_UNDEFINSTR] = "Undefined instruction", + [ARM_TRAP_ALIGNMENT] = "Unaligned access exception", + [31] = NULL +}; + +struct cobalt_machine cobalt_machine = { + .name = "arm", + .init = NULL, + .late_init = NULL, + .cleanup = NULL, + .prefault = mach_arm_prefault, + .fault_labels = fault_labels, +}; diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/arith.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/arith.h new file mode 100644 index 0000000..cb7fb4a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/arith.h @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_ARM_ASM_UAPI_ARITH_H +#define _COBALT_ARM_ASM_UAPI_ARITH_H + +#include <asm/xenomai/uapi/features.h> + +#if __LINUX_ARM_ARCH__ >= 4 && (!defined(CONFIG_THUMB2_KERNEL) || !defined(CONFIG_FTRACE)) +static inline __attribute__((__const__)) unsigned long long +mach_arm_nodiv_ullimd(const unsigned long long op, + const unsigned long long frac, + const unsigned rhs_integ); + +#define xnarch_nodiv_ullimd(op, frac, integ) \ + mach_arm_nodiv_ullimd((op), (frac), (integ)) + +static inline __attribute__((__const__)) long long +mach_arm_nodiv_llimd(const long long op, + const unsigned long long frac, + const unsigned rhs_integ); + +#define xnarch_nodiv_llimd(op, frac, integ) \ + mach_arm_nodiv_llimd((op), (frac), (integ)) +#else /* arm <= v3 */ +#define xnarch_add96and64(l0, l1, l2, s0, s1) \ + do { \ + __asm__ ("adds %2, %2, %4\n\t" \ + "adcs %1, %1, %3\n\t" \ + "adc %0, %0, #0\n\t" \ + : "+r"(l0), "+r"(l1), "+r"(l2) \ + : "r"(s0), "r"(s1): "cc"); \ + } while (0) +#endif /* arm <= v3 */ + +#include <cobalt/uapi/asm-generic/arith.h> + +#if __LINUX_ARM_ARCH__ >= 4 && (!defined(CONFIG_THUMB2_KERNEL) || !defined(CONFIG_FTRACE)) +#define mach_arm_nodiv_ullimd_str \ + "umull %[tl], %[rl], %[opl], %[fracl]\n\t" \ + "umull %[rm], %[rh], %[oph], %[frach]\n\t" \ + "adds %[rl], %[rl], %[tl], lsr #31\n\t" \ + "adcs %[rm], %[rm], #0\n\t" \ + "adc %[rh], %[rh], #0\n\t" \ + "umull %[tl], %[th], %[oph], %[fracl]\n\t" \ + "adds %[rl], %[rl], %[tl]\n\t" \ + "adcs %[rm], %[rm], %[th]\n\t" \ + "adc %[rh], %[rh], #0\n\t" \ + "umull %[tl], %[th], %[opl], %[frach]\n\t" \ + "adds %[rl], %[rl], %[tl]\n\t" \ + "adcs %[rm], %[rm], %[th]\n\t" \ + "adc %[rh], %[rh], #0\n\t" \ + "umlal %[rm], %[rh], %[opl], %[integ]\n\t" \ + "mla %[rh], %[oph], %[integ], %[rh]\n\t" + +static inline __attribute__((__const__)) unsigned long long +mach_arm_nodiv_ullimd(const unsigned long long op, + const unsigned long long frac, + const unsigned rhs_integ) +{ + register unsigned rl __asm__("r5"); + register unsigned rm __asm__("r0"); + register unsigned rh __asm__("r1"); + register unsigned fracl __asm__ ("r2"); + register unsigned frach __asm__ ("r3"); + register unsigned integ __asm__("r4") = rhs_integ; + register unsigned opl __asm__ ("r6"); + register unsigned oph __asm__ ("r8"); + register unsigned tl __asm__("r9"); + register unsigned th __asm__("r10"); + + xnarch_u64tou32(op, oph, opl); + xnarch_u64tou32(frac, frach, fracl); + + __asm__ (mach_arm_nodiv_ullimd_str + : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh), + [tl]"=r"(tl), [th]"=r"(th) + : [opl]"r"(opl), [oph]"r"(oph), + [fracl]"r"(fracl), [frach]"r"(frach), + [integ]"r"(integ) + : "cc"); + + return xnarch_u64fromu32(rh, rm); +} + +static inline __attribute__((__const__)) long long +mach_arm_nodiv_llimd(const long long op, + const unsigned long long frac, + const unsigned rhs_integ) +{ + register unsigned rl __asm__("r5"); + register unsigned rm __asm__("r0"); + register unsigned rh __asm__("r1"); + register unsigned fracl __asm__ ("r2"); + register unsigned frach __asm__ ("r3"); + register unsigned integ __asm__("r4") = rhs_integ; + register unsigned opl __asm__ ("r6"); + register unsigned oph __asm__ ("r8"); + register unsigned tl __asm__("r9"); + register unsigned th __asm__("r10"); + register unsigned s __asm__("r12"); + + xnarch_u64tou32(op, oph, opl); + xnarch_u64tou32(frac, frach, fracl); + + __asm__ ("movs %[s], %[oph], lsr #30\n\t" + "beq 1f\n\t" + "rsbs %[opl], %[opl], #0\n\t" + "sbc %[oph], %[oph], %[oph], lsl #1\n" + "1:\t" + mach_arm_nodiv_ullimd_str + 
"teq %[s], #0\n\t" + "beq 2f\n\t" + "rsbs %[rm], %[rm], #0\n\t" + "sbc %[rh], %[rh], %[rh], lsl #1\n" + "2:\t" + : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh), + [tl]"=r"(tl), [th]"=r"(th), [s]"=r"(s) + : [opl]"r"(opl), [oph]"r"(oph), + [fracl]"r"(fracl), [frach]"r"(frach), + [integ]"r"(integ) + : "cc"); + + return xnarch_u64fromu32(rh, rm); +} +#endif /* arm >= v4 */ + +#endif /* _COBALT_ARM_ASM_UAPI_ARITH_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/features.h new file mode 100644 index 0000000..43b7afb --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/features.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_ARM_ASM_UAPI_FEATURES_H +#define _COBALT_ARM_ASM_UAPI_FEATURES_H + +/* The ABI revision level we use on this arch. */ +#define XENOMAI_ABI_REV 18UL + +#define XENOMAI_FEAT_DEP (__xn_feat_generic_mask) + +#define XENOMAI_FEAT_MAN (__xn_feat_generic_man_mask) + +#define XNARCH_HAVE_LLMULSHFT 1 +#define XNARCH_HAVE_NODIV_LLIMD 1 + +struct cobalt_featinfo_archdep { /* no arch-specific feature */ }; + +#include <cobalt/uapi/asm-generic/features.h> + +static inline const char *get_feature_label(unsigned int feature) +{ + return get_generic_feature_label(feature); +} + +#endif /* !_COBALT_ARM_ASM_UAPI_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/fptest.h new file mode 100644 index 0000000..d5c2c75 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/fptest.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_ARM_ASM_UAPI_FPTEST_H +#define _COBALT_ARM_ASM_UAPI_FPTEST_H + +#define __COBALT_HAVE_VFP 0x1 + +static inline void fp_regs_set(int features, unsigned int val) +{ + unsigned long long e[16]; + unsigned int i; + + if (features & __COBALT_HAVE_VFP) { + for (i = 0; i < 16; i++) + e[i] = val; + + /* vldm %0!, {d0-d15}, + AKA fldmiax %0!, {d0-d15} */ + __asm__ __volatile__("ldc p11, cr0, [%0],#32*4": + "=r"(i) + : "0"(&e[0]) + : "d0", "d1", "d2", "d3", "d4", "d5", + "d6", "d7", "d8", "d9", "d10", "d11", + "d12", "d13", "d14", "d15", + "memory"); + } +} + +static inline unsigned int fp_regs_check(int features, unsigned int val, + int (*report)(const char *fmt, ...)) +{ + unsigned int result = val, i; + unsigned long long e[16]; + + if (features & __COBALT_HAVE_VFP) { + /* vstm %0!, {d0-d15}, + AKA fstmiax %0!, {d0-d15} */ + __asm__ __volatile__("stc p11, cr0, [%0],#32*4": + "=r"(i): "0"(&e[0]): "memory"); + + for (i = 0; i < 16; i++) + if (e[i] != val) { + report("d%d: %llu != %u\n", i, e[i], val); + result = e[i]; + } + } + + return result; +} + +#endif /* !_COBALT_ARM_ASM_UAPI_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/syscall.h new file mode 100644 index 0000000..c079a35 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/syscall.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_ARM_ASM_UAPI_SYSCALL_H +#define _COBALT_ARM_ASM_UAPI_SYSCALL_H + +#define __xn_syscode(__nr) (__COBALT_SYSCALL_BIT | (__nr)) + +#define XENO_ARM_SYSCALL 0x000F0042 /* carefully chosen... */ + +#define XENOMAI_SYSARCH_TSCINFO 4 + +#endif /* !_COBALT_ARM_ASM_UAPI_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/tsc.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/tsc.h new file mode 100644 index 0000000..b17cfb2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/tsc.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2013 Gilles Chanteperdrix <gch@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_ARM_ASM_UAPI_TSC_H +#define _COBALT_ARM_ASM_UAPI_TSC_H + +struct __xn_tscinfo { + volatile unsigned int *counter; +}; + +#endif /* !_COBALT_ARM_ASM_UAPI_TSC_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/Makefile new file mode 100644 index 0000000..c482fb3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_XENOMAI) += xenomai.o + +xenomai-$(CONFIG_IPIPE) := machine.o thread.o switch.o syscall.o + +ccflags-y := -I$(srctree)/arch/arm/xenomai/include -I$(srctree)/include/xenomai diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/README b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/README new file mode 100644 index 0000000..80f954a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/README @@ -0,0 +1,3 @@ +Get the interrupt pipeline code for the target kernel from +http://xenomai.org/downloads/ipipe/, or +git://git.xenomai.org/ipipe.git diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/calibration.h new file mode 100644 index 0000000..d227cae --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/calibration.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_CALIBRATION_H +#define _COBALT_ARM_ASM_CALIBRATION_H + +unsigned int omap_rev(void); +#define cpu_is_omap44xx() ((omap_rev() & 0xff) == 0x44) + +static inline void xnarch_get_latencies(struct xnclock_gravity *p) +{ + unsigned int ulat; +#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0 + ulat = CONFIG_XENO_OPT_TIMING_SCHEDLAT; +#elif defined(CONFIG_ARCH_AT91RM9200) + ulat = 8500; +#elif defined(CONFIG_ARCH_AT91SAM9263) + ulat = 11000; +#elif defined(CONFIG_SOC_IMX6Q) + ulat = 6000; +#elif defined(CONFIG_ARCH_MX51) + ulat = 5000; +#elif defined(CONFIG_ARCH_MX53) + ulat = 5000; +#elif defined(CONFIG_ARCH_MX6) + ulat = 2000; +#elif defined(CONFIG_SOC_IMX7) + ulat = 2000; +#elif defined(CONFIG_SOC_LS1021A) + ulat = 2800; +#elif defined(CONFIG_ARCH_OMAP) + ulat = cpu_is_omap44xx() ? 2500 : 5000; +#elif defined(CONFIG_ARCH_STI) + ulat = 6000; +#elif defined(CONFIG_ARCH_SOCFPGA) + ulat = 4500; +#else + ulat = 9500; /* XXX sane? 
*/ +#endif + p->user = xnclock_ns_to_ticks(&nkclock, ulat); + p->kernel = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_KSCHEDLAT); + p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT); +} + +#endif /* !_COBALT_ARM_ASM_CALIBRATION_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/features.h new file mode 100644 index 0000000..d485286 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/features.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_FEATURES_H +#define _COBALT_ARM_ASM_FEATURES_H + +struct cobalt_featinfo; +static inline void collect_arch_features(struct cobalt_featinfo *p) { } + +#include <asm/xenomai/uapi/features.h> + +#endif /* !_COBALT_ARM_ASM_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/fptest.h new file mode 100644 index 0000000..d3f335f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/fptest.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_FPTEST_H +#define _COBALT_ARM_ASM_FPTEST_H + +#include <linux/errno.h> +#include <asm/hwcap.h> + +#ifdef CONFIG_VFP +#define have_vfp (ELF_HWCAP & HWCAP_VFP) +#else /* !CONFIG_VFP */ +#define have_vfp 0 +#endif /* !CONFIG_VFP */ + +#include <asm/xenomai/uapi/fptest.h> + +static inline int fp_kernel_supported(void) +{ + return 1; +} + +static inline void fp_init(void) +{ +} + +static inline int fp_linux_begin(void) +{ + return -ENOSYS; +} + +static inline void fp_linux_end(void) +{ +} + +static inline int fp_detect(void) +{ + return have_vfp ? 
__COBALT_HAVE_VFP : 0; +} + +#endif /* _COBALT_ARM_ASM_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/machine.h new file mode 100644 index 0000000..d6e965f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/machine.h @@ -0,0 +1,85 @@ +/** + * Copyright © 2002-2004 Philippe Gerum. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, Inc., 675 Mass Ave, + * Cambridge MA 02139, USA; either version 2 of the License, or (at + * your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_MACHINE_H +#define _COBALT_ARM_ASM_MACHINE_H + +#include <linux/version.h> +#include <asm/byteorder.h> + +#define XNARCH_HOST_TICK_IRQ __ipipe_hrtimer_irq + +#include <asm/barrier.h> +#include <asm/compiler.h> +#include <asm/cmpxchg.h> +#include <asm/switch_to.h> +#include <asm/system_info.h> +#include <asm/system_misc.h> +#include <asm/timex.h> +#include <asm/processor.h> +#include <asm/ipipe.h> +#include <asm/mach/irq.h> +#include <asm/cacheflush.h> + +#define xnarch_cache_aliasing() cache_is_vivt() + +#if __LINUX_ARM_ARCH__ < 5 +static inline __attribute_const__ unsigned long ffnz(unsigned long x) +{ + int r = 0; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} +#else +static inline __attribute_const__ unsigned long ffnz(unsigned long ul) +{ + int __r; + __asm__("clz\t%0, %1" : "=r" (__r) : "r"(ul & (-ul)) : "cc"); + return 31 - __r; +} +#endif + +#include <asm-generic/xenomai/machine.h> + +#endif /* !_COBALT_ARM_ASM_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall.h new file mode 100644 index 0000000..362f0eb --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_SYSCALL_H +#define _COBALT_ARM_ASM_SYSCALL_H + +#include <linux/errno.h> +#include <linux/uaccess.h> +#include <asm/unistd.h> +#include <asm/ptrace.h> +#include <asm-generic/xenomai/syscall.h> + +#ifndef __ARM_NR_ipipe +/* Legacy pipelines do not define this. */ +#define __ARM_NR_ipipe (__NR_SYSCALL_BASE + XENO_ARM_SYSCALL) +#endif + +/* + * Cobalt syscall numbers can be fetched from ARM_ORIG_r0 with ARM_r7 + * containing the Xenomai syscall marker, Linux syscalls directly from + * ARM_r7 (may require the OABI tweak). + */ +#define __xn_reg_sys(__regs) ((__regs)->ARM_ORIG_r0) +/* In OABI_COMPAT mode, handle both OABI and EABI userspace syscalls */ +#ifdef CONFIG_OABI_COMPAT +#define __xn_syscall_p(__regs) (((__regs)->ARM_r7 == __NR_OABI_SYSCALL_BASE + XENO_ARM_SYSCALL) || \ + ((__regs)->ARM_r7 == __ARM_NR_ipipe)) +#define __xn_abi_decode(__regs) ((__regs)->ARM_r7 - __NR_OABI_SYSCALL_BASE) +#else /* !CONFIG_OABI_COMPAT */ +#define __xn_syscall_p(__regs) ((__regs)->ARM_r7 == __ARM_NR_ipipe) +#define __xn_abi_decode(__regs) ((__regs)->ARM_r7) +#endif /* !CONFIG_OABI_COMPAT */ +#define __xn_syscall(__regs) (__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT) + +/* + * Root syscall number with predicate (valid only if + * !__xn_syscall_p(__regs)). + */ +#define __xn_rootcall_p(__regs, __code) \ + ({ \ + *(__code) = __xn_abi_decode(__regs); \ + *(__code) < NR_syscalls || *(__code) >= __ARM_NR_BASE; \ + }) + +#define __xn_reg_rval(__regs) ((__regs)->ARM_r0) +#define __xn_reg_arg1(__regs) ((__regs)->ARM_r1) +#define __xn_reg_arg2(__regs) ((__regs)->ARM_r2) +#define __xn_reg_arg3(__regs) ((__regs)->ARM_r3) +#define __xn_reg_arg4(__regs) ((__regs)->ARM_r4) +#define __xn_reg_arg5(__regs) ((__regs)->ARM_r5) +#define __xn_reg_pc(__regs) ((__regs)->ARM_ip) +#define __xn_reg_sp(__regs) ((__regs)->ARM_sp) + +static inline void __xn_error_return(struct pt_regs *regs, int v) +{ + __xn_reg_rval(regs) = v; +} + +static inline void __xn_status_return(struct pt_regs *regs, long v) +{ + __xn_reg_rval(regs) = v; +} + +static inline int __xn_interrupted_p(struct pt_regs *regs) +{ + return __xn_reg_rval(regs) == -EINTR; +} + +int xnarch_local_syscall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5); + +#endif /* !_COBALT_ARM_ASM_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall32.h new file mode 100644 index 0000000..95c5a11 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall32.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
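To make the decoding rules above concrete: ARM_r7 tells the handler whether the trap is a Xenomai request at all, while ARM_ORIG_r0 carries the Cobalt syscall number. A sketch of the EABI-side test with a mocked-up register frame; the struct layout and marker value below are placeholders, not the real pt_regs or __ARM_NR_ipipe definitions:

#include <stdio.h>

struct mock_regs {
        unsigned long ARM_ORIG_r0;      // Cobalt syscall number
        unsigned long ARM_r7;           // Linux syscall number, or the Xenomai marker
};

#define MOCK_NR_ipipe   0x42UL          // stands in for __ARM_NR_ipipe

// EABI flavor of __xn_syscall_p(): r7 carries the Xenomai marker.
static int mock_is_cobalt(const struct mock_regs *regs)
{
        return regs->ARM_r7 == MOCK_NR_ipipe;
}

int main(void)
{
        struct mock_regs regs = { .ARM_ORIG_r0 = 7, .ARM_r7 = MOCK_NR_ipipe };

        printf("cobalt? %d nr=%lu\n", mock_is_cobalt(&regs), regs.ARM_ORIG_r0);
        return 0;
}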
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_SYSCALL32_H +#define _COBALT_ARM_ASM_SYSCALL32_H + +#include <asm-generic/xenomai/syscall32.h> + +#endif /* !_COBALT_ARM_ASM_SYSCALL32_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/thread.h new file mode 100644 index 0000000..93346fd --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/thread.h @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_THREAD_H +#define _COBALT_ARM_ASM_THREAD_H + +#include <asm-generic/xenomai/ipipe/thread.h> + +#ifdef CONFIG_XENO_ARCH_FPU +#ifdef CONFIG_VFP +#include <asm/vfp.h> +#endif /* CONFIG_VFP */ +#endif /* !CONFIG_XENO_ARCH_FPU */ + +struct xnarchtcb { + struct xntcb core; +#ifdef CONFIG_XENO_ARCH_FPU +#ifdef CONFIG_VFP + union vfp_state *fpup; +#define xnarch_fpu_ptr(tcb) ((tcb)->fpup) +#else +#define xnarch_fpu_ptr(tcb) NULL +#endif +#endif +}; + +#define xnarch_fault_regs(d) ((d)->regs) +#define xnarch_fault_trap(d) ((d)->exception) +#define xnarch_fault_code(d) (0) +#define xnarch_fault_pc(d) ((d)->regs->ARM_pc - (thumb_mode((d)->regs) ? 2 : 4)) /* XXX ? 
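The xnarch_fault_pc() definition above (note the authors' own XXX doubt marker) steps the saved ARM_pc back by one instruction width so it points at the trapping instruction: 2 bytes in Thumb mode, 4 bytes in ARM mode. The same arithmetic as a standalone sketch:

// Recover the trapping instruction address from a saved PC assumed
// to point one instruction past it.
static unsigned long fault_addr(unsigned long saved_pc, int is_thumb)
{
        return saved_pc - (is_thumb ? 2 : 4);
}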
*/ + +#define xnarch_fault_pf_p(d) ((d)->exception == IPIPE_TRAP_ACCESS) +#define xnarch_fault_bp_p(d) ((current->ptrace & PT_PTRACED) && \ + ((d)->exception == IPIPE_TRAP_BREAK || \ + (d)->exception == IPIPE_TRAP_UNDEFINSTR)) + +#define xnarch_fault_notify(d) (!xnarch_fault_bp_p(d)) + +static inline +struct task_struct *xnarch_host_task(struct xnarchtcb *tcb) +{ + return tcb->core.host_task; +} + +void xnarch_switch_to(struct xnthread *out, struct xnthread *in); + +static inline void xnarch_enter_root(struct xnthread *root) { } + +static inline int xnarch_escalate(void) +{ + if (ipipe_root_p) { + ipipe_raise_irq(cobalt_pipeline.escalate_virq); + return 1; + } + + return 0; +} + +#if defined(CONFIG_XENO_ARCH_FPU) && defined(CONFIG_VFP) + +void xnarch_init_root_tcb(struct xnthread *thread); + +void xnarch_init_shadow_tcb(struct xnthread *thread); + +int xnarch_fault_fpu_p(struct ipipe_trap_data *d); + +void xnarch_leave_root(struct xnthread *root); + +void xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread); + +int xnarch_handle_fpu_fault(struct xnthread *from, + struct xnthread *to, struct ipipe_trap_data *d); + +#else /* !CONFIG_XENO_ARCH_FPU || !CONFIG_VFP */ + +static inline void xnarch_init_root_tcb(struct xnthread *thread) { } +static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { } + +/* + * Userland may raise FPU faults with FPU-enabled kernels, regardless + * of whether real-time threads actually use FPU, so we simply ignore + * these faults. + */ +static inline int xnarch_fault_fpu_p(struct ipipe_trap_data *d) +{ + return 0; +} + +static inline void xnarch_leave_root(struct xnthread *root) { } + +static inline void xnarch_switch_fpu(struct xnthread *f, struct xnthread *t) { } + +static inline int xnarch_handle_fpu_fault(struct xnthread *from, + struct xnthread *to, struct ipipe_trap_data *d) +{ + return 0; +} +#endif /* !CONFIG_XENO_ARCH_FPU || !CONFIG_VFP */ + +static inline void xnarch_enable_kfpu(void) { } + +static inline void xnarch_disable_kfpu(void) { } + +#endif /* !_COBALT_ARM_ASM_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/wrappers.h new file mode 100644 index 0000000..fe59896 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/wrappers.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM_ASM_WRAPPERS_H +#define _COBALT_ARM_ASM_WRAPPERS_H + +#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. 
*/ + +#define __put_user_inatomic __put_user +#define __get_user_inatomic __get_user + +#endif /* _COBALT_ARM_ASM_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/machine.c new file mode 100644 index 0000000..0fd48ca --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/machine.c @@ -0,0 +1,63 @@ +/** + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, Inc., 675 Mass Ave, + * Cambridge MA 02139, USA; either version 2 of the License, or (at + * your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include <linux/mm.h> +#include <asm/xenomai/machine.h> + +static void mach_arm_prefault(struct vm_area_struct *vma) +{ + unsigned long addr; + unsigned int flags; + + if ((vma->vm_flags & VM_MAYREAD)) { + flags = (vma->vm_flags & VM_MAYWRITE) ? FAULT_FLAG_WRITE : 0; + for (addr = vma->vm_start; + addr != vma->vm_end; addr += PAGE_SIZE) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) + handle_mm_fault(vma->vm_mm, vma, addr, flags); +#else + handle_mm_fault(vma, addr, flags); +#endif + } +} + +static const char *const fault_labels[] = { + [IPIPE_TRAP_ACCESS] = "Data or instruction access", + [IPIPE_TRAP_SECTION] = "Section fault", + [IPIPE_TRAP_DABT] = "Generic data abort", + [IPIPE_TRAP_UNKNOWN] = "Unknown exception", + [IPIPE_TRAP_BREAK] = "Instruction breakpoint", + [IPIPE_TRAP_FPU] = "Floating point exception", + [IPIPE_TRAP_VFP] = "VFP Floating point exception", + [IPIPE_TRAP_UNDEFINSTR] = "Undefined instruction", +#ifdef IPIPE_TRAP_ALIGNMENT + [IPIPE_TRAP_ALIGNMENT] = "Unaligned access exception", +#endif /* IPIPE_TRAP_ALIGNMENT */ + [IPIPE_NR_FAULTS] = NULL +}; + +struct cobalt_machine cobalt_machine = { + .name = "arm", + .init = NULL, + .late_init = NULL, + .cleanup = NULL, + .prefault = mach_arm_prefault, + .fault_labels = fault_labels, +}; diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/switch.S b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/switch.S new file mode 100644 index 0000000..9fc847a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/switch.S @@ -0,0 +1,219 @@ +/* + * Copyright (C) 2005 Stelian Pop. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/linkage.h> +#include <linux/version.h> +#include <asm/assembler.h> +#include <asm/asm-offsets.h> +#include <asm/tls.h> +#ifdef CONFIG_VFP +#include <asm/vfpmacros.h> +#endif + + .macro fpu_switch tmp +#ifdef CONFIG_VFP +#if __LINUX_ARM_ARCH__ <= 6 +#ifdef CONFIG_JUMP_LABEL +9998: nop + .pushsection __jump_table, "aw" + .word 9998b, 9999f, __xeno_vfp_key + .popsection +#else + ldr \tmp, =elf_hwcap + ldr \tmp, [\tmp] + tst \tmp, #HWCAP_VFP + beq 9999f +#endif +#endif + @ Always disable VFP so we can lazily save/restore the old + @ state. This occurs in the context of the previous thread. + VFPFMRX \tmp, FPEXC + bic \tmp, \tmp, #FPEXC_EN + VFPFMXR FPEXC, \tmp +#if __LINUX_ARM_ARCH__ <= 6 +9999: +#endif +#endif + .endm + + .text + +#if defined(CONFIG_VFP) && defined(CONFIG_XENO_ARCH_FPU) +/* Copied from vfp_save_state in arch/arm/vfp/vfphw.S + * r0 = pointer to union vfp_state, r1 = fpexc + */ +ENTRY(__asm_vfp_save) + VFPFSTMIA r0, r2 @ save the working registers + VFPFMRX r2, FPSCR @ current status + tst r1, #FPEXC_EX @ is there additional state to save? + beq 1f + VFPFMRX r3, FPINST @ FPINST (only if FPEXC.EX is set) + tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? + beq 1f + VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present) +1: + stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 + mov pc, lr +ENDPROC(__asm_vfp_save) + +/* Copied from no_old_VFP_process in arch/arm/vfp/vfphw.S + * r0 = pointer to union vfp_state + * r1 = current cpu + */ +ENTRY(__asm_vfp_load) +#ifdef CONFIG_SMP + str r1, [r0, #VFP_CPU] +#endif + VFPFLDMIA r0, r2 @ reload the working registers while + @ FPEXC is in a safe state + ldmia r0, {r1, r2, r3, r12} @ load FPEXC, FPSCR, FPINST, FPINST2 + tst r1, #FPEXC_EX @ is there additional state to restore? + beq 1f + VFPFMXR FPINST, r3 @ restore FPINST (only if FPEXC.EX is set) + tst r1, #FPEXC_FP2V @ is there an FPINST2 to write? + beq 1f + VFPFMXR FPINST2, r12 @ FPINST2 if needed (and present) +1: + VFPFMXR FPSCR, r2 @ restore status + mov pc, lr +ENDPROC(__asm_vfp_load) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) + .macro load_tls base, tp, tpuser + ldr \tp, [\base, #TI_TP_VALUE] + .endm + + .macro switch_tls base, tp, tpuser, tmp1, tmp2 + set_tls \tp, \tmp1, \tmp2 + .endm +#else + .macro load_tls base, tp, tpuser + ldr \tp, [\base, #TI_TP_VALUE] + ldr \tpuser, [\base, #TI_TP_VALUE + 4] + .endm +#endif + +/* + * Switch context routine. + * + * Registers according to the ARM procedure call standard: + * Reg Description + * r0-r3 argument/scratch registers + * r4-r9 variable register + * r10=sl stack limit/variable register + * r11=fp frame pointer/variable register + * r12=ip intra-procedure-call scratch register + * r13=sp stack pointer (auto preserved) + * r14=lr link register + * r15=pc program counter (auto preserved) + * + * Copied from __switch_to, arch/arm/kernel/entry-armv.S. + * Right now it is identical, but who knows what the + * future reserves us... 
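One detail of the fpu_switch macro above worth spelling out: on ARMv6 and earlier, the VFP disable is guarded by the __xeno_vfp_key jump label when CONFIG_JUMP_LABEL is on (a patched-out branch on VFP-less CPUs), and by an explicit elf_hwcap load-and-test otherwise. A reduced kernel-style sketch of that jump-label pattern, with illustrative names; the key is downgraded once at init, as thread.c further down does with static_key_slow_dec():

#include <linux/jump_label.h>

static struct static_key demo_vfp_key = STATIC_KEY_INIT_TRUE;

static void demo_switch_path(void)
{
        // Compiles to a patchable jump site rather than a runtime
        // load of elf_hwcap on every context switch.
        if (static_key_true(&demo_vfp_key)) {
                // ... disable VFP so the next user faults and the
                // lazy save/restore logic can kick in ...
        }
}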
+ * + * XXX: All the following config options are NOT tested: + * CONFIG_IWMMXT + * + * Calling args: + * r0 = previous thread_info, r1 = next thread_info + * + * FIXME: this is ugly, at some point we should stop duplicating + * what __switch_to() does, dropping specific fpu management from + * Cobalt in the same move; this will have to wait until Dovetail + * is substituted to the I-pipe though, since the former already + * provides the support we need for this. --rpm + */ +ENTRY(__asm_thread_switch) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) + add ip, r0, #TI_CPU_SAVE + ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack + THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack + THUMB( str sp, [ip], #4 ) + THUMB( str lr, [ip], #4 ) + load_tls r1, r4, r5 +#ifdef CONFIG_CPU_USE_DOMAINS + ldr r6, [r1, #TI_CPU_DOMAIN] +#endif + switch_tls r0, r4, r5, r3, r7 +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + ldr r7, [r1, #TI_TASK] + ldr r8, =__stack_chk_guard + ldr r7, [r7, #TSK_STACK_CANARY] +#endif +#ifdef CONFIG_CPU_USE_DOMAINS + mcr p15, 0, r6, c3, c0, 0 @ Set domain register +#endif + fpu_switch r4 +#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) + str r7, [r8] +#endif + ARM( add r4, r1, #TI_CPU_SAVE ) + ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously + THUMB( add ip, r1, #TI_CPU_SAVE ) + THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously + THUMB( ldr sp, [ip], #4 ) + THUMB( ldr pc, [ip] ) +ENDPROC(__asm_thread_switch) + +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) */ + +#include <asm/unwind.h> + + UNWIND(.fnstart ) + UNWIND(.cantunwind ) + add ip, r0, #TI_CPU_SAVE + ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack + THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack + THUMB( str sp, [ip], #4 ) + THUMB( str lr, [ip], #4 ) + ldr r4, [r1, #TI_TP_VALUE] + ldr r5, [r1, #TI_TP_VALUE + 4] +#ifdef CONFIG_CPU_USE_DOMAINS + mrc p15, 0, r6, c3, c0, 0 @ Get domain register + str r6, [r0, #TI_CPU_DOMAIN] @ Save old domain register + ldr r6, [r1, #TI_CPU_DOMAIN] +#endif + switch_tls r0, r4, r5, r3, r7 +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) + ldr r7, [r1, #TI_TASK] + ldr r8, =__stack_chk_guard + .if (TSK_STACK_CANARY > IMM12_MASK) + add r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK + .endif + ldr r7, [r7, #TSK_STACK_CANARY & IMM12_MASK] +#endif +#ifdef CONFIG_CPU_USE_DOMAINS + mcr p15, 0, r6, c3, c0, 0 @ Set domain register +#endif + mov r5, r0 + fpu_switch r4 + add r4, r1, #TI_CPU_SAVE +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) + str r7, [r8] +#endif + THUMB( mov ip, r4 ) + mov r0, r5 + ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously + THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously + THUMB( ldr sp, [ip], #4 ) + THUMB( ldr pc, [ip] ) + UNWIND(.fnend ) + +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/syscall.c new file mode 100644 index 0000000..ee78243 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/syscall.c @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2005 Stelian Pop + * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 
2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include <linux/ipipe.h> +#include <asm/xenomai/syscall.h> +#include <asm/xenomai/uapi/tsc.h> + +int xnarch_local_syscall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5) +{ + struct ipipe_sysinfo ipipe_info; + struct __ipipe_tscinfo *p = &ipipe_info.arch.tsc; + struct __xn_tscinfo info; + int ret; + + if (a1 != XENOMAI_SYSARCH_TSCINFO) + return -EINVAL; + + ret = ipipe_get_sysinfo(&ipipe_info); + if (ret) + return ret; + + switch (p->type) { + case IPIPE_TSC_TYPE_DECREMENTER: + info.counter = p->u.dec.counter; + break; + case IPIPE_TSC_TYPE_NONE: + return -ENOSYS; + default: + info.counter = p->u.fr.counter; + break; + } + + return cobalt_copy_to_user((void *)a2, &info, sizeof(info)); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/thread.c new file mode 100644 index 0000000..c68b5e3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/thread.c @@ -0,0 +1,343 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */
+
+#include <linux/sched.h>
+#include <linux/ipipe.h>
+#include <linux/mm.h>
+#include <linux/jump_label.h>
+#include <asm/mmu_context.h>
+#include <cobalt/kernel/thread.h>
+
+struct static_key __xeno_vfp_key = STATIC_KEY_INIT_TRUE;
+
+asmlinkage void __asm_thread_switch(struct thread_info *out,
+				struct thread_info *in);
+
+asmlinkage void __asm_thread_trampoline(void);
+
+#if defined(CONFIG_XENO_ARCH_FPU) && defined(CONFIG_VFP)
+
+static unsigned int vfp_checked;
+static DEFINE_MUTEX(vfp_check_lock);
+
+asmlinkage void __asm_vfp_save(union vfp_state *vfp, unsigned int fpexc);
+
+asmlinkage void __asm_vfp_load(union vfp_state *vfp, unsigned int cpu);
+
+#define do_vfp_fmrx(_vfp_)						\
+	({								\
+		u32 __v;						\
+		asm volatile("mrc p10, 7, %0, " __stringify(_vfp_)	\
+			     ", cr0, 0 @ fmrx %0, " #_vfp_:		\
+			     "=r" (__v));				\
+		__v;							\
+	})
+
+#define do_vfp_fmxr(_vfp_,_var_)				\
+	asm volatile("mcr p10, 7, %0, " __stringify(_vfp_)	\
+		     ", cr0, 0 @ fmxr " #_vfp_ ", %0":		\
+		     /* */ : "r" (_var_))
+
+extern union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+static inline union vfp_state *get_fpu_owner(void)
+{
+	union vfp_state *vfp_owner;
+	unsigned int cpu;
+#ifdef CONFIG_SMP
+	unsigned int fpexc;
+#endif
+
+#if __LINUX_ARM_ARCH__ <= 6
+	if (!static_key_true(&__xeno_vfp_key))
+		return NULL;
+#endif
+
+#ifdef CONFIG_SMP
+	fpexc = do_vfp_fmrx(FPEXC);
+	if (!(fpexc & FPEXC_EN))
+		return NULL;
+#endif
+
+	cpu = raw_smp_processor_id();
+	vfp_owner = vfp_current_hw_state[cpu];
+	if (!vfp_owner)
+		return NULL;
+
+#ifdef CONFIG_SMP
+	if (vfp_owner->hard.cpu != cpu)
+		return NULL;
+#endif /* SMP */
+
+	return vfp_owner;
+}
+
+#define do_disable_vfp(fpexc)					\
+	do_vfp_fmxr(FPEXC, fpexc & ~FPEXC_EN)
+
+#define XNARCH_VFP_ANY_EXC						\
+	(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK)
+
+#define do_enable_vfp()							\
+	({								\
+		unsigned _fpexc = do_vfp_fmrx(FPEXC) | FPEXC_EN;	\
+		do_vfp_fmxr(FPEXC, _fpexc & ~XNARCH_VFP_ANY_EXC);	\
+		_fpexc;							\
+	})
+
+int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+{
+	/*
+	 * This function decodes the faulting instruction the same way
+	 * as "call_fpe" in arch/arm/kernel/entry-armv.S does.
+	 */
+	static unsigned copro_to_exc[16] = {
+		IPIPE_TRAP_UNDEFINSTR,
+		/* FPE */
+		IPIPE_TRAP_FPU, IPIPE_TRAP_FPU,
+		IPIPE_TRAP_UNDEFINSTR,
+#ifdef CONFIG_CRUNCH
+		IPIPE_TRAP_FPU, IPIPE_TRAP_FPU, IPIPE_TRAP_FPU,
+#else /* !CONFIG_CRUNCH */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+#endif /* !CONFIG_CRUNCH */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+#ifdef CONFIG_VFP
+		IPIPE_TRAP_VFP, IPIPE_TRAP_VFP,
+#else /* !CONFIG_VFP */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+#endif /* !CONFIG_VFP */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+	};
+	unsigned instr, exc, cp;
+	char *pc;
+
+	if (d->exception == IPIPE_TRAP_FPU)
+		return 1;
+
+	if (d->exception == IPIPE_TRAP_VFP)
+		goto trap_vfp;
+
+	if (d->exception != IPIPE_TRAP_UNDEFINSTR)
+		return 0;
+
+	pc = (char *) xnarch_fault_pc(d);
+	if (unlikely(thumb_mode(d->regs))) {
+		unsigned short thumbh, thumbl;
+
+#if defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)
+#if __LINUX_ARM_ARCH__ < 7
+		if (cpu_architecture() < CPU_ARCH_ARMv7)
+#else
+		if (0)
+#endif /* arch < 7 */
+#endif /* thumb && arch >= 6 && cpu_v7 */
+			return 0;
+
+		thumbh = *(unsigned short *) pc;
+		thumbl = *((unsigned short *) pc + 1);
+
+		if ((thumbh & 0x0000f800) < 0x0000e800)
+			return 0;
+		instr = (thumbh << 16) | thumbl;
+
+#ifdef CONFIG_NEON
+		if ((instr & 0xef000000) == 0xef000000
+		    || (instr & 0xff100000) == 0xf9000000)
+			goto trap_vfp;
+#endif
+	} else {
+		instr = *(unsigned *) pc;
+
+#ifdef CONFIG_NEON
+		if ((instr & 0xfe000000) == 0xf2000000
+		    || (instr & 0xff100000) == 0xf4000000)
+			goto trap_vfp;
+#endif
+	}
+
+	if ((instr & 0x0c000000) != 0x0c000000)
+		return 0;
+
+	cp = (instr & 0x00000f00) >> 8;
+#ifdef CONFIG_IWMMXT
+	/*
+	 * We need something equivalent to _TIF_USING_IWMMXT for
+	 * Xenomai kernel threads.
+	 */
+	if (cp <= 1) {
+		d->exception = IPIPE_TRAP_FPU;
+		return 1;
+	}
+#endif
+
+	exc = copro_to_exc[cp];
+	if (exc == IPIPE_TRAP_VFP) {
+	  trap_vfp:
+		/*
+		 * If an exception is pending, the VFP fault is not
+		 * really an "FPU unavailable" fault, so we return
+		 * undefinstr in that case; the nucleus will then let
+		 * Linux handle the fault.
+		 */
+		exc = do_vfp_fmrx(FPEXC);
+		if (exc & (FPEXC_EX|FPEXC_DEX)
+		    || ((exc & FPEXC_EN) && do_vfp_fmrx(FPSCR) & FPSCR_IXE))
+			exc = IPIPE_TRAP_UNDEFINSTR;
+		else
+			exc = IPIPE_TRAP_VFP;
+	}
+
+	d->exception = exc;
+	return exc != IPIPE_TRAP_UNDEFINSTR;
+}
+
+void xnarch_leave_root(struct xnthread *root)
+{
+	struct xnarchtcb *rootcb = xnthread_archtcb(root);
+	rootcb->fpup = get_fpu_owner();
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	union vfp_state *const from_fpup = from ? from->tcb.fpup : NULL;
+	unsigned cpu = raw_smp_processor_id();
+
+	if (xnthread_test_state(to, XNROOT) == 0) {
+		union vfp_state *const to_fpup = to->tcb.fpup;
+		unsigned fpexc = do_enable_vfp();
+
+		if (from_fpup == to_fpup)
+			return;
+
+		if (from_fpup)
+			__asm_vfp_save(from_fpup, fpexc);
+
+		__asm_vfp_load(to_fpup, cpu);
+	} else {
+		/*
+		 * We are restoring the Linux current thread. The FPU
+		 * is left disabled, so that a fault occurs if the
+		 * newly switched-in thread uses the FPU; the kernel
+		 * handler can then pick the correct FPU context,
+		 * saving the last used RT context in the same move.
+		 */
+		vfp_current_hw_state[cpu] = from_fpup;
+#ifdef CONFIG_SMP
+		/*
+		 * On SMP, returning to FPU-disabled mode means that
+		 * we would have to save the FPU state; avoid doing so
+		 * if the current FPU context belongs to the task we
+		 * are switching to.
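The undefined-instruction decode earlier in xnarch_fault_fpu_p() follows call_fpe: an instruction word with bits 27-26 set is treated as a coprocessor access, and the coprocessor number in bits 11-8 indexes copro_to_exc[] (cp10/cp11 map to VFP). The extraction step, as a standalone sketch:

// Return the coprocessor number encoded in an ARM instruction word,
// or -1 if the word fails the coprocessor test used above.
static int copro_number(unsigned int instr)
{
        if ((instr & 0x0c000000) != 0x0c000000)
                return -1;                      // not a coprocessor access
        return (instr & 0x00000f00) >> 8;       // 10 or 11 => VFP
}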
+		 */
+		if (from_fpup) {
+			union vfp_state *const current_task_fpup =
+				&to->tcb.core.tip->vfpstate;
+			const unsigned fpdis = do_vfp_fmrx(FPEXC);
+			const unsigned fpen = fpdis | FPEXC_EN;
+
+			do_vfp_fmxr(FPEXC, fpen & ~XNARCH_VFP_ANY_EXC);
+			if (from_fpup == current_task_fpup)
+				return;
+
+			__asm_vfp_save(from_fpup, fpen);
+			do_vfp_fmxr(FPEXC, fpdis);
+		}
+#endif
+	}
+}
+
+int xnarch_handle_fpu_fault(struct xnthread *from,
+			struct xnthread *to, struct ipipe_trap_data *d)
+{
+	if (xnthread_test_state(to, XNFPU))
+		/* FPU already enabled: this is likely a genuine exception. */
+		return 0;
+
+#if __LINUX_ARM_ARCH__ <= 6
+	if (!static_key_true(&__xeno_vfp_key))
+		/*
+		 * A VFP instruction was issued on a CPU without VFP:
+		 * this is an error.
+		 */
+		return 0;
+#endif
+
+	xnlock_get(&nklock);
+	xnthread_set_state(to, XNFPU);
+	xnlock_put(&nklock);
+
+	xnarch_switch_fpu(from, to);
+
+	/* Retry the faulting instruction. */
+	d->regs->ARM_pc = xnarch_fault_pc(d);
+	return 1;
+}
+
+void xnarch_init_shadow_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+
+	tcb->fpup = &task_thread_info(tcb->core.host_task)->vfpstate;
+
+	if (vfp_checked == 0) {
+		mutex_lock(&vfp_check_lock);
+		if (vfp_checked == 0) {
+			if ((elf_hwcap & HWCAP_VFP) == 0)
+				static_key_slow_dec(&__xeno_vfp_key);
+			vfp_checked = 1;
+		}
+		mutex_unlock(&vfp_check_lock);
+	}
+
+	/* XNFPU is set upon the first FPU fault. */
+	xnthread_clear_state(thread, XNFPU);
+}
+
+void xnarch_init_root_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = &thread->tcb;
+	tcb->fpup = NULL;
+}
+
+#endif /* CONFIG_XENO_ARCH_FPU && CONFIG_VFP */
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
+{
+	struct xnarchtcb *out_tcb = &out->tcb, *in_tcb = &in->tcb;
+	struct mm_struct *prev_mm, *next_mm;
+	struct task_struct *next;
+
+	next = in_tcb->core.host_task;
+	prev_mm = out_tcb->core.active_mm;
+
+	next_mm = in_tcb->core.mm;
+	if (next_mm == NULL) {
+		in_tcb->core.active_mm = prev_mm;
+		enter_lazy_tlb(prev_mm, next);
+	} else {
+		ipipe_switch_mm_head(prev_mm, next_mm, next);
+		/*
+		 * We might be switching back to the root thread,
+		 * which we preempted earlier, shortly after "current"
+		 * dropped its mm context in the do_exit() path
+		 * (next->mm == NULL). In that particular case, the
+		 * kernel expects a lazy TLB state for leaving the mm.
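The one-shot VFP probe in xnarch_init_shadow_tcb() above is a classic check/lock/re-check sequence: the unlocked test keeps the common path cheap, the mutex serializes the first callers, and the re-check guarantees the hwcap probe and static-key downgrade run exactly once. Its skeleton, reduced to illustrative names:

static unsigned int demo_done;
static DEFINE_MUTEX(demo_lock);

static void demo_init_once(void)
{
        if (demo_done == 0) {                   // cheap unlocked test
                mutex_lock(&demo_lock);
                if (demo_done == 0) {           // re-check under the lock
                        // ... one-time probe goes here ...
                        demo_done = 1;
                }
                mutex_unlock(&demo_lock);
        }
}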
+ */ + if (next->mm == NULL) + enter_lazy_tlb(prev_mm, next); + } + + __asm_thread_switch(out_tcb->core.tip, in_tcb->core.tip); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig new file mode 100644 index 0000000..bdf5f16 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig @@ -0,0 +1,12 @@ +source "kernel/xenomai/Kconfig" +source "drivers/xenomai/Kconfig" + +config XENO_ARCH_FPU + def_bool y + +config XENO_ARCH_SYS3264 + def_bool n + +config XENO_ARCH_OUTOFLINE_XNLOCK + bool + default y diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile new file mode 100644 index 0000000..6c872fd --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_XENOMAI) += xenomai.o + +xenomai-y := machine.o + +ccflags-y := -I$(srctree)/arch/arm64/xenomai/include -I$(srctree)/include/xenomai diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h new file mode 100644 index 0000000..cd9496b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_ARM64_DOVETAIL_CALIBRATION_H +#define _COBALT_ARM64_DOVETAIL_CALIBRATION_H + +static inline void xnarch_get_latencies(struct xnclock_gravity *p) +{ + unsigned int sched_latency; + +#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0 + sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT; +#else + sched_latency = 5000; +#endif + p->user = xnclock_ns_to_ticks(&nkclock, sched_latency); + p->kernel = xnclock_ns_to_ticks(&nkclock, + CONFIG_XENO_OPT_TIMING_KSCHEDLAT); + p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT); +} + +#endif /* !_COBALT_ARM64_DOVETAIL_CALIBRATION_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h new file mode 100644 index 0000000..d5a438b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_ARM64_DOVETAIL_FEATURES_H +#define _COBALT_ARM64_DOVETAIL_FEATURES_H + +struct cobalt_featinfo; +static inline void collect_arch_features(struct cobalt_featinfo *p) { } + +#include <asm/xenomai/uapi/features.h> + +#endif /* !_COBALT_ARM64_DOVETAIL_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h new file mode 100644 index 0000000..8c4228d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + */ + +#ifndef _COBALT_ARM64_DOVETAIL_FPTEST_H +#define _COBALT_ARM64_DOVETAIL_FPTEST_H + +#include <linux/errno.h> +#include <asm/xenomai/uapi/fptest.h> +#include <asm/hwcap.h> + +#define have_fp (ELF_HWCAP & HWCAP_FP) + +static 
inline int fp_kernel_supported(void) +{ + return 0; +} + +static inline void fp_init(void) +{ +} + +static inline int fp_linux_begin(void) +{ + return -ENOSYS; +} + +static inline void fp_linux_end(void) +{ +} + +static inline int fp_detect(void) +{ + return have_fp ? __COBALT_HAVE_FPU : 0; +} + +#endif /* !_COBALT_ARM64_DOVETAIL_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h new file mode 100644 index 0000000..e71a5b7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com> + */ + +#ifndef _COBALT_ARM64_DOVETAIL_MACHINE_H +#define _COBALT_ARM64_DOVETAIL_MACHINE_H + +#include <linux/version.h> +#include <asm/byteorder.h> +#include <cobalt/kernel/assert.h> + +/* D-side always behaves as PIPT on AArch64 (see arch/arm64/include/asm/cachetype.h) */ +#define xnarch_cache_aliasing() 0 + +static inline __attribute_const__ unsigned long ffnz(unsigned long ul) +{ + int __r; + + /* zero input is not valid */ + XENO_WARN_ON(COBALT, ul == 0); + + __asm__ ("rbit\t%0, %1\n" + "clz\t%0, %0\n" + : "=r" (__r) : "r"(ul) : "cc"); + + return __r; +} + +#include <asm-generic/xenomai/machine.h> + +#endif /* !_COBALT_ARM64_DOVETAIL_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h new file mode 100644 index 0000000..96871e2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com> + * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_ARM64_DOVETAIL_SYSCALL_H +#define _COBALT_ARM64_DOVETAIL_SYSCALL_H + +#include <linux/errno.h> +#include <linux/uaccess.h> +#include <asm/unistd.h> +#include <asm/ptrace.h> +#include <asm-generic/xenomai/syscall.h> + +/* + * Cobalt and Linux syscall numbers can be fetched from syscallno, + * masking out the __COBALT_SYSCALL_BIT marker. + */ +#define __xn_reg_sys(__regs) ((unsigned long)(__regs)->syscallno) +#define __xn_syscall_p(regs) ((__xn_reg_sys(regs) & __COBALT_SYSCALL_BIT) != 0) +#define __xn_syscall(__regs) ((unsigned long)(__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT)) + +#define __xn_reg_rval(__regs) ((__regs)->regs[0]) +#define __xn_reg_pc(__regs) ((__regs)->pc) +#define __xn_reg_sp(__regs) ((__regs)->sp) + +/* + * Root syscall number with predicate (valid only if + * !__xn_syscall_p(__regs)). + */ +#define __xn_rootcall_p(__regs, __code) \ + ({ \ + *(__code) = __xn_syscall(__regs); \ + *(__code) < NR_syscalls; \ + }) + +static inline void __xn_error_return(struct pt_regs *regs, int v) +{ + __xn_reg_rval(regs) = v; +} + +static inline void __xn_status_return(struct pt_regs *regs, long v) +{ + __xn_reg_rval(regs) = v; +} + +static inline int __xn_interrupted_p(struct pt_regs *regs) +{ + return __xn_reg_rval(regs) == -EINTR; +} + +static inline +int xnarch_local_syscall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5) +{ + /* We need none of these with Dovetail. 
*/ + return -ENOSYS; +} + +#endif /* !_COBALT_ARM64_DOVETAIL_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h new file mode 100644 index 0000000..cd0f392 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_ARM64_DOVETAIL_SYSCALL32_H +#define _COBALT_ARM64_DOVETAIL_SYSCALL32_H + +#include <asm-generic/xenomai/syscall32.h> + +#endif /* !_COBALT_ARM64_DOVETAIL_SYSCALL32_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h new file mode 100644 index 0000000..5b60ff3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com> + * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_ARM64_DOVETAIL_THREAD_H +#define _COBALT_ARM64_DOVETAIL_THREAD_H + +#include <asm-generic/xenomai/dovetail/thread.h> +#include <asm/dovetail.h> + +#define xnarch_fault_pc(__regs) ((unsigned long)((__regs)->pc - 4)) /* XXX ? */ + +#define xnarch_fault_pf_p(__nr) ((__nr) == ARM64_TRAP_ACCESS) +#define xnarch_fault_bp_p(__nr) ((current->ptrace & PT_PTRACED) && \ + ((__nr) == ARM64_TRAP_DEBUG || (__nr) == ARM64_TRAP_UNDI)) + +#define xnarch_fault_notify(__nr) (!xnarch_fault_bp_p(__nr)) + +#endif /* !_COBALT_ARM64_DOVETAIL_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h new file mode 100644 index 0000000..7a1122f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_ARM64_ASM_WRAPPERS_H +#define _COBALT_ARM64_ASM_WRAPPERS_H + +#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */ + +#define __put_user_inatomic __put_user +#define __get_user_inatomic __get_user + +#endif /* _COBALT_ARM64_ASM_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c new file mode 100644 index 0000000..e03d7b9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> +// Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org> + +#include <linux/mm.h> +#include <asm/xenomai/machine.h> + +static void mach_arm64_prefault(struct vm_area_struct *vma) +{ + unsigned long addr; + unsigned int flags; + + if ((vma->vm_flags & VM_MAYREAD)) { + flags = (vma->vm_flags & VM_MAYWRITE) ? 
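The arm64 convention above multiplexes Cobalt and Linux syscalls through the same syscallno slot, distinguished by the __COBALT_SYSCALL_BIT marker that __xn_syscode() (defined in the uapi header further down) sets and __xn_syscall() strips. A round-trip sketch; the bit value below is a placeholder, the real constant comes from the generic UAPI headers:

#define DEMO_COBALT_BIT 0x10000000UL    // placeholder for __COBALT_SYSCALL_BIT

static inline unsigned long demo_syscode(unsigned long nr)
{
        return DEMO_COBALT_BIT | nr;            // tag, like __xn_syscode()
}

static inline int demo_is_cobalt(unsigned long sysno)
{
        return (sysno & DEMO_COBALT_BIT) != 0;  // like __xn_syscall_p()
}

static inline unsigned long demo_nr(unsigned long sysno)
{
        return sysno & ~DEMO_COBALT_BIT;        // like __xn_syscall()
}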
FAULT_FLAG_WRITE : 0; + for (addr = vma->vm_start; + addr != vma->vm_end; addr += PAGE_SIZE) + handle_mm_fault(vma, addr, flags, NULL); + } +} + +static const char *const fault_labels[] = { + [ARM64_TRAP_ACCESS] = "Data or instruction abort", + [ARM64_TRAP_ALIGN] = "SP/PC alignment abort", + [ARM64_TRAP_SEA] = "Synchronous external abort", + [ARM64_TRAP_DEBUG] = "Debug trap", + [ARM64_TRAP_UNDI] = "Undefined instruction", + [ARM64_TRAP_UNDSE] = "Undefined synchronous exception", + [ARM64_TRAP_FPE] = "FPSIMD exception", + [ARM64_TRAP_SVE] = "SVE access trap", + [ARM64_TRAP_BTI] = "Branch target identification trap", + [31] = NULL +}; + +struct cobalt_machine cobalt_machine = { + .name = "arm64", + .init = NULL, + .late_init = NULL, + .cleanup = NULL, + .prefault = mach_arm64_prefault, + .fault_labels = fault_labels, +}; diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h new file mode 100644 index 0000000..3d81f6e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_ARM64_ASM_UAPI_ARITH_H +#define _COBALT_ARM64_ASM_UAPI_ARITH_H + +#include <asm/xenomai/uapi/features.h> + +#if !defined(CONFIG_FTRACE) +static inline __attribute__((__const__)) unsigned long long +mach_arm_nodiv_ullimd(const unsigned long long op, + const unsigned long long frac, + const unsigned rhs_integ); + +#define xnarch_nodiv_ullimd(op, frac, integ) \ + mach_arm_nodiv_ullimd((op), (frac), (integ)) + +static inline __attribute__((__const__)) long long +mach_arm_nodiv_llimd(const long long op, + const unsigned long long frac, + const unsigned rhs_integ); + +#define xnarch_nodiv_llimd(op, frac, integ) \ + mach_arm_nodiv_llimd((op), (frac), (integ)) +#else /* arm <= v3 */ +#define xnarch_add96and64(l0, l1, l2, s0, s1) \ + do { \ + __asm__ ("adds %2, %2, %4\n\t" \ + "adcs %1, %1, %3\n\t" \ + "adc %0, %0, #0\n\t" \ + : "+r"(l0), "+r"(l1), "+r"(l2) \ + : "r"(s0), "r"(s1): "cc"); \ + } while (0) +#endif /* arm <= v3 */ + +#include <cobalt/uapi/asm-generic/arith.h> + +#if !defined(CONFIG_FTRACE) +#define mach_arm_nodiv_ullimd_str \ + "umull %[tl], %[rl], %[opl], %[fracl]\n\t" \ + "umull %[rm], %[rh], %[oph], %[frach]\n\t" \ + "adds %[rl], %[rl], %[tl], lsr #31\n\t" \ + "adcs %[rm], %[rm], #0\n\t" \ + "adc %[rh], %[rh], #0\n\t" \ + "umull %[tl], %[th], %[oph], %[fracl]\n\t" \ + "adds %[rl], %[rl], %[tl]\n\t" \ + "adcs %[rm], %[rm], %[th]\n\t" \ + "adc %[rh], %[rh], #0\n\t" \ + "umull %[tl], %[th], %[opl], %[frach]\n\t" \ + "adds %[rl], %[rl], %[tl]\n\t" \ + "adcs %[rm], %[rm], %[th]\n\t" \ + "adc %[rh], %[rh], #0\n\t" \ + "umlal %[rm], %[rh], %[opl], %[integ]\n\t" \ + "mla %[rh], %[oph], %[integ], %[rh]\n\t" + +static inline __attribute__((__const__)) unsigned long long +mach_arm_nodiv_ullimd(const unsigned long long op, + const unsigned long long frac, + const unsigned rhs_integ) +{ + register unsigned rl __asm__("r5"); + register unsigned rm __asm__("r0"); + register unsigned rh __asm__("r1"); + register unsigned fracl __asm__ ("r2"); + register unsigned frach __asm__ ("r3"); + register unsigned integ __asm__("r4") = rhs_integ; + register unsigned opl __asm__ ("r6"); + register unsigned oph __asm__ ("r7"); + register unsigned tl __asm__("r8"); + register unsigned th __asm__("r9"); + + xnarch_u64tou32(op, oph, opl); + xnarch_u64tou32(frac, frach, fracl); + + __asm__ (mach_arm_nodiv_ullimd_str + : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh), + [tl]"=r"(tl), [th]"=r"(th) + : [opl]"r"(opl), [oph]"r"(oph), + [fracl]"r"(fracl), [frach]"r"(frach), + [integ]"r"(integ) + : "cc"); + + return xnarch_u64fromu32(rh, rm); +} + +static inline __attribute__((__const__)) long long +mach_arm_nodiv_llimd(const long long op, + const unsigned long long frac, + const unsigned rhs_integ) +{ + register unsigned rl __asm__("r5"); + register unsigned rm __asm__("r0"); + register unsigned rh __asm__("r1"); + register unsigned fracl __asm__ ("r2"); + register unsigned frach __asm__ ("r3"); + register unsigned integ __asm__("r4") = rhs_integ; + register unsigned opl __asm__ ("r6"); + register unsigned oph __asm__ ("r7"); + register unsigned tl __asm__("r8"); + register unsigned th __asm__("r9"); + register unsigned s __asm__("r10"); + + xnarch_u64tou32(op, oph, opl); + xnarch_u64tou32(frac, frach, fracl); + + __asm__ ("movs %[s], %[oph], lsr #30\n\t" + "beq 1f\n\t" + "rsbs %[opl], %[opl], #0\n\t" + "sbc %[oph], %[oph], %[oph], lsl #1\n" + "1:\t" + mach_arm_nodiv_ullimd_str + "teq %[s], #0\n\t" + "beq 2f\n\t" + "rsbs %[rm], %[rm], #0\n\t" + "sbc %[rh], %[rh], %[rh], lsl #1\n" + "2:\t" + : 
[rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh), + [tl]"=r"(tl), [th]"=r"(th), [s]"=r"(s) + : [opl]"r"(opl), [oph]"r"(oph), + [fracl]"r"(fracl), [frach]"r"(frach), + [integ]"r"(integ) + : "cc"); + + return xnarch_u64fromu32(rh, rm); +} +#endif /* arm >= v4 */ + +#endif /* _COBALT_ARM64_ASM_UAPI_ARITH_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h new file mode 100644 index 0000000..b98a963 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_ARM64_ASM_UAPI_FEATURES_H +#define _COBALT_ARM64_ASM_UAPI_FEATURES_H + +/* The ABI revision level we use on this arch. */ +#define XENOMAI_ABI_REV 2UL + +#define XENOMAI_FEAT_DEP (__xn_feat_generic_mask) + +#define XENOMAI_FEAT_MAN (__xn_feat_generic_man_mask) + +#undef XNARCH_HAVE_LLMULSHFT + +#undef XNARCH_HAVE_NODIV_LLIMD + +struct cobalt_featinfo_archdep { /* no arch-specific feature */ }; + +#include <cobalt/uapi/asm-generic/features.h> + +static inline const char *get_feature_label(unsigned int feature) +{ + return get_generic_feature_label(feature); +} + +#endif /* !_COBALT_ARM64_ASM_UAPI_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h new file mode 100644 index 0000000..7a2cb92 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_ARM64_ASM_UAPI_FPTEST_H +#define _COBALT_ARM64_ASM_UAPI_FPTEST_H + +#define __COBALT_HAVE_FPU 0x1 + +/* + * CAUTION: keep this code strictly inlined in macros: we don't want + * GCC to apply any callee-saved logic to fpsimd registers in + * fp_regs_set() before fp_regs_check() can verify their contents, but + * we still want GCC to know about the registers we have clobbered. + */ + +#define fp_regs_set(__features, __val) \ + do { \ + unsigned long long __e[32]; \ + unsigned int __i; \ + \ + if (__features & __COBALT_HAVE_FPU) { \ + \ + for (__i = 0; __i < 32; __i++) \ + __e[__i] = (__val); \ + \ + __asm__ __volatile__("ldp d0, d1, [%0, #8 * 0] \n" \ + "ldp d2, d3, [%0, #8 * 2] \n" \ + "ldp d4, d5, [%0, #8 * 4]\n" \ + "ldp d6, d7, [%0, #8 * 6]\n" \ + "ldp d8, d9, [%0, #8 * 8]\n" \ + "ldp d10, d11, [%0, #8 * 10]\n" \ + "ldp d12, d13, [%0, #8 * 12]\n" \ + "ldp d14, d15, [%0, #8 * 14]\n" \ + "ldp d16, d17, [%0, #8 * 16]\n" \ + "ldp d18, d19, [%0, #8 * 18]\n" \ + "ldp d20, d21, [%0, #8 * 20]\n" \ + "ldp d22, d23, [%0, #8 * 22]\n" \ + "ldp d24, d25, [%0, #8 * 24]\n" \ + "ldp d26, d27, [%0, #8 * 26]\n" \ + "ldp d28, d29, [%0, #8 * 28]\n" \ + "ldp d30, d31, [%0, #8 * 30]\n" \ + : /* No outputs. */ \ + : "r"(&__e[0]) \ + : "d0", "d1", "d2", "d3", "d4", "d5", "d6", \ + "d7", "d8", "d9", "d10", "d11", "d12", "d13", \ + "d14", "d15", "d16", "d17", "d18", "d19", \ + "d20", "d21", "d22", "d23", "d24", "d25", \ + "d26", "d27", "d28", "d29", "d30", "d31", \ + "memory"); \ + } \ + } while (0) + +#define fp_regs_check(__features, __val, __report) \ + ({ \ + unsigned int __result = (__val), __i; \ + unsigned long long __e[32]; \ + \ + if (__features & __COBALT_HAVE_FPU) { \ + \ + __asm__ __volatile__("stp d0, d1, [%0, #8 * 0] \n" \ + "stp d2, d3, [%0, #8 * 2] \n" \ + "stp d4, d5, [%0, #8 * 4]\n" \ + "stp d6, d7, [%0, #8 * 6]\n" \ + "stp d8, d9, [%0, #8 * 8]\n" \ + "stp d10, d11, [%0, #8 * 10]\n" \ + "stp d12, d13, [%0, #8 * 12]\n" \ + "stp d14, d15, [%0, #8 * 14]\n" \ + "stp d16, d17, [%0, #8 * 16]\n" \ + "stp d18, d19, [%0, #8 * 18]\n" \ + "stp d20, d21, [%0, #8 * 20]\n" \ + "stp d22, d23, [%0, #8 * 22]\n" \ + "stp d24, d25, [%0, #8 * 24]\n" \ + "stp d26, d27, [%0, #8 * 26]\n" \ + "stp d28, d29, [%0, #8 * 28]\n" \ + "stp d30, d31, [%0, #8 * 30]\n" \ + : /* No outputs. */ \ + : "r"(&__e[0]) \ + : "memory"); \ + \ + for (__i = 0; __i < 32; __i++) \ + if (__e[__i] != __val) { \ + __report("d%d: %llu != %u\n", \ + __i, __e[__i], __val); \ + __result = __e[__i]; \ + } \ + } \ + \ + __result; \ + }) + +#endif /* !_COBALT_ARM64_ASM_UAPI_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h new file mode 100644 index 0000000..5b319d6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
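Usage-wise, the two macros above are meant to bracket a scenario: fill d0-d31 with a known pattern, run the preemption or switch under test, then verify nothing leaked. A sketch of that round trip, with printf standing in for the report callback (hypothetical driver code, not from the testsuite):

#include <stdio.h>

static void demo_fpu_roundtrip(unsigned int features)
{
        unsigned int val = 0xa5a5a5a5;

        fp_regs_set(features, val);
        // ... run the scheduling scenario under test here ...
        if (fp_regs_check(features, val, printf) != val)
                fprintf(stderr, "FPU register bank was clobbered\n");
}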
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_ARM64_ASM_UAPI_SYSCALL_H +#define _COBALT_ARM64_ASM_UAPI_SYSCALL_H + +#define __xn_syscode(__nr) (__COBALT_SYSCALL_BIT | (__nr)) + +#define XENOMAI_SYSARCH_TSCINFO 0 + +#endif /* !_COBALT_ARM64_ASM_UAPI_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h new file mode 100644 index 0000000..20a4eaa --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2013 Gilles Chanteperdrix <gch@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_ARM64_ASM_UAPI_TSC_H +#define _COBALT_ARM64_ASM_UAPI_TSC_H + +struct __xn_tscinfo { + volatile unsigned int *counter; +}; + +#endif /* !_COBALT_ARM64_ASM_UAPI_TSC_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile new file mode 100644 index 0000000..cf12a18 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_XENOMAI) += xenomai.o + +xenomai-y := machine.o thread.o syscall.o + +ccflags-y := -I$(srctree)/arch/arm64/xenomai/include -I$(srctree)/include/xenomai diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/README b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/README new file mode 100644 index 0000000..80f954a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/README @@ -0,0 +1,3 @@ +Get the interrupt pipeline code for the target kernel from +http://xenomai.org/downloads/ipipe/, or +git://git.xenomai.org/ipipe.git diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h new file mode 100644 index 0000000..e85521e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM64_ASM_CALIBRATION_H +#define _COBALT_ARM64_ASM_CALIBRATION_H + +static inline void xnarch_get_latencies(struct xnclock_gravity *p) +{ + unsigned int ulat; +#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0 + ulat = CONFIG_XENO_OPT_TIMING_SCHEDLAT; +#elif defined(CONFIG_ARCH_HISI) + ulat = 4000; +#else + ulat = 4000; +#endif + p->user = xnclock_ns_to_ticks(&nkclock, ulat); + p->kernel = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_KSCHEDLAT); + p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT); +} + +#endif /* !_COBALT_ARM64_ASM_CALIBRATION_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h new file mode 100644 index 0000000..112408f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM64_ASM_FEATURES_H +#define _COBALT_ARM64_ASM_FEATURES_H + +struct cobalt_featinfo; +static inline void collect_arch_features(struct cobalt_featinfo *p) { } + +#include <asm/xenomai/uapi/features.h> + +#endif /* !_COBALT_ARM64_ASM_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h new file mode 100644 index 0000000..39903a0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM64_ASM_FPTEST_H +#define _COBALT_ARM64_ASM_FPTEST_H + +#include <linux/errno.h> +#include <asm/xenomai/uapi/fptest.h> +#include <asm/hwcap.h> + +#define have_fp (ELF_HWCAP & HWCAP_FP) + +static inline int fp_kernel_supported(void) +{ + return 0; +} + +static inline void fp_init(void) +{ +} + +static inline int fp_linux_begin(void) +{ + return -ENOSYS; +} + +static inline void fp_linux_end(void) +{ +} + +static inline int fp_detect(void) +{ + return have_fp ? __COBALT_HAVE_FPU : 0; +} + +#endif /* _COBALT_ARM64_ASM_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h new file mode 100644 index 0000000..c91c8f5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h @@ -0,0 +1,68 @@ +/** + * Copyright © 2002-2004 Philippe Gerum. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * ARM64 port + * Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com> + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, Inc., 675 Mass Ave, + * Cambridge MA 02139, USA; either version 2 of the License, or (at + * your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM64_ASM_MACHINE_H +#define _COBALT_ARM64_ASM_MACHINE_H + +#include <linux/version.h> +#include <asm/byteorder.h> + +#define XNARCH_HOST_TICK_IRQ __ipipe_hrtimer_irq + +#include <asm/barrier.h> + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) +#include <asm/compiler.h> +#endif + +#include <asm/cmpxchg.h> +#include <asm/switch_to.h> +#include <asm/system_misc.h> +#include <asm/timex.h> +#include <asm/processor.h> +#include <asm/ipipe.h> +#include <asm/cacheflush.h> +#include <cobalt/kernel/assert.h> + +/* D-side always behaves as PIPT on AArch64 (see arch/arm64/include/asm/cachetype.h) */ +#define xnarch_cache_aliasing() 0 + +static inline __attribute_const__ unsigned long ffnz(unsigned long ul) +{ + int __r; + + /* zero input is not valid */ + XENO_WARN_ON(COBALT, ul == 0); + + __asm__ ("rbit\t%0, %1\n" + "clz\t%0, %0\n" + : "=r" (__r) : "r"(ul) : "cc"); + + return __r; +} + +#include <asm-generic/xenomai/machine.h> + +#endif /* !_COBALT_ARM64_ASM_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h new file mode 100644 index 0000000..6b8b71d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>. 
+ * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM64_ASM_SYSCALL_H +#define _COBALT_ARM64_ASM_SYSCALL_H + +#include <linux/errno.h> +#include <linux/uaccess.h> +#include <asm/unistd.h> +#include <asm/ptrace.h> +#include <asm-generic/xenomai/syscall.h> + +/* + * Cobalt and Linux syscall numbers can be fetched from syscallno, + * masking out the __COBALT_SYSCALL_BIT marker. + */ +#define __xn_reg_sys(__regs) ((unsigned long)(__regs)->syscallno) +#define __xn_syscall_p(regs) ((__xn_reg_sys(regs) & __COBALT_SYSCALL_BIT) != 0) +#define __xn_syscall(__regs) ((unsigned long)(__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT)) + +#define __xn_reg_rval(__regs) ((__regs)->regs[0]) +#define __xn_reg_arg1(__regs) ((__regs)->regs[0]) +#define __xn_reg_arg2(__regs) ((__regs)->regs[1]) +#define __xn_reg_arg3(__regs) ((__regs)->regs[2]) +#define __xn_reg_arg4(__regs) ((__regs)->regs[3]) +#define __xn_reg_arg5(__regs) ((__regs)->regs[4]) +#define __xn_reg_pc(__regs) ((__regs)->pc) +#define __xn_reg_sp(__regs) ((__regs)->sp) + +/* + * Root syscall number with predicate (valid only if + * !__xn_syscall_p(__regs)). + */ +#define __xn_rootcall_p(__regs, __code) \ + ({ \ + *(__code) = __xn_syscall(__regs); \ + *(__code) < NR_syscalls; \ + }) + +static inline void __xn_error_return(struct pt_regs *regs, int v) +{ + __xn_reg_rval(regs) = v; +} + +static inline void __xn_status_return(struct pt_regs *regs, long v) +{ + __xn_reg_rval(regs) = v; +} + +static inline int __xn_interrupted_p(struct pt_regs *regs) +{ + return __xn_reg_rval(regs) == -EINTR; +} + +int xnarch_local_syscall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5); + +#endif /* !_COBALT_ARM64_ASM_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h new file mode 100644 index 0000000..a66ddd6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM64_ASM_SYSCALL32_H +#define _COBALT_ARM64_ASM_SYSCALL32_H + +#include <asm-generic/xenomai/syscall32.h> + +#endif /* !_COBALT_ARM64_ASM_SYSCALL32_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h new file mode 100644 index 0000000..7899a49 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2005 Stelian Pop + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM64_ASM_THREAD_H +#define _COBALT_ARM64_ASM_THREAD_H + +#include <linux/version.h> +#include <asm-generic/xenomai/ipipe/thread.h> + +#if defined(CONFIG_XENO_ARCH_FPU) && LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) +#define ARM64_XENO_OLD_SWITCH +#endif + +struct xnarchtcb { + struct xntcb core; +#ifdef ARM64_XENO_OLD_SWITCH + struct fpsimd_state xnfpsimd_state; + struct fpsimd_state *fpup; +#define xnarch_fpu_ptr(tcb) ((tcb)->fpup) +#endif +}; + +#define xnarch_fault_regs(d) ((d)->regs) +#define xnarch_fault_trap(d) ((d)->exception) +#define xnarch_fault_code(d) (0) +#define xnarch_fault_pc(d) ((unsigned long)((d)->regs->pc - 4)) /* XXX ? 
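- presumably backing up one fixed-width 4-byte AArch64 instruction from the reported pc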
*/ + +#define xnarch_fault_pf_p(d) ((d)->exception == IPIPE_TRAP_ACCESS) +#define xnarch_fault_bp_p(d) ((current->ptrace & PT_PTRACED) && \ + ((d)->exception == IPIPE_TRAP_BREAK || \ + (d)->exception == IPIPE_TRAP_UNDEFINSTR)) + +#define xnarch_fault_notify(d) (!xnarch_fault_bp_p(d)) + +static inline +struct task_struct *xnarch_host_task(struct xnarchtcb *tcb) +{ + return tcb->core.host_task; +} + +void xnarch_switch_to(struct xnthread *out, struct xnthread *in); + +static inline void xnarch_enter_root(struct xnthread *root) { } + +int xnarch_escalate(void); + +#ifdef ARM64_XENO_OLD_SWITCH + +void xnarch_init_root_tcb(struct xnthread *thread); + +void xnarch_init_shadow_tcb(struct xnthread *thread); + +void xnarch_leave_root(struct xnthread *root); + +void xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread); + +#else /* !ARM64_XENO_OLD_SWITCH */ + +static inline void xnarch_init_root_tcb(struct xnthread *thread) { } +static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { } +static inline void xnarch_leave_root(struct xnthread *root) { } +static inline void xnarch_switch_fpu(struct xnthread *f, struct xnthread *t) { } + +#endif /* !ARM64_XENO_OLD_SWITCH */ + +static inline int xnarch_fault_fpu_p(struct ipipe_trap_data *d) +{ + return xnarch_fault_trap(d) == IPIPE_TRAP_FPU_ACC; +} + +static inline int +xnarch_handle_fpu_fault(struct xnthread *from, + struct xnthread *to, struct ipipe_trap_data *d) +{ + return 0; +} + +static inline void xnarch_enable_kfpu(void) { } + +static inline void xnarch_disable_kfpu(void) { } + +#endif /* !_COBALT_ARM64_ASM_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h new file mode 100644 index 0000000..5a5754f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ARM64_ASM_WRAPPERS_H +#define _COBALT_ARM64_ASM_WRAPPERS_H + +#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. 
*/ + +#define __put_user_inatomic __put_user +#define __get_user_inatomic __get_user + +#endif /* _COBALT_ARM64_ASM_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c new file mode 100644 index 0000000..521b734 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c @@ -0,0 +1,66 @@ +/** + * Copyright (C) 2005 Stelian Pop + * + * ARM64 port + * Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com> + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, Inc., 675 Mass Ave, + * Cambridge MA 02139, USA; either version 2 of the License, or (at + * your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include <linux/mm.h> +#include <asm/xenomai/machine.h> + +static void mach_arm_prefault(struct vm_area_struct *vma) +{ + unsigned long addr; + unsigned int flags; + + if ((vma->vm_flags & VM_MAYREAD)) { + flags = (vma->vm_flags & VM_MAYWRITE) ? FAULT_FLAG_WRITE : 0; + for (addr = vma->vm_start; + addr != vma->vm_end; addr += PAGE_SIZE) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) + handle_mm_fault(vma->vm_mm, vma, addr, flags); +#else + handle_mm_fault(vma, addr, flags); +#endif + } +} + +static const char *const fault_labels[] = { + [IPIPE_TRAP_ACCESS] = "Data or instruction access", + [IPIPE_TRAP_SECTION] = "Section fault", + [IPIPE_TRAP_DABT] = "Generic data abort", + [IPIPE_TRAP_UNKNOWN] = "Unknown exception", + [IPIPE_TRAP_BREAK] = "Instruction breakpoint", + [IPIPE_TRAP_FPU_ACC] = "Floating point access", + [IPIPE_TRAP_FPU_EXC] = "Floating point exception", + [IPIPE_TRAP_UNDEFINSTR] = "Undefined instruction", +#ifdef IPIPE_TRAP_ALIGNMENT + [IPIPE_TRAP_ALIGNMENT] = "Unaligned access exception", +#endif /* IPIPE_TRAP_ALIGNMENT */ + [IPIPE_NR_FAULTS] = NULL +}; + +struct cobalt_machine cobalt_machine = { + .name = "arm", + .init = NULL, + .late_init = NULL, + .cleanup = NULL, + .prefault = mach_arm_prefault, + .fault_labels = fault_labels, +}; diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c new file mode 100644 index 0000000..ee78243 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2005 Stelian Pop + * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include <linux/ipipe.h> +#include <asm/xenomai/syscall.h> +#include <asm/xenomai/uapi/tsc.h> + +int xnarch_local_syscall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5) +{ + struct ipipe_sysinfo ipipe_info; + struct __ipipe_tscinfo *p = &ipipe_info.arch.tsc; + struct __xn_tscinfo info; + int ret; + + if (a1 != XENOMAI_SYSARCH_TSCINFO) + return -EINVAL; + + ret = ipipe_get_sysinfo(&ipipe_info); + if (ret) + return ret; + + switch (p->type) { + case IPIPE_TSC_TYPE_DECREMENTER: + info.counter = p->u.dec.counter; + break; + case IPIPE_TSC_TYPE_NONE: + return -ENOSYS; + default: + info.counter = p->u.fr.counter; + break; + } + + return cobalt_copy_to_user((void *)a2, &info, sizeof(info)); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c new file mode 100644 index 0000000..1068f80 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>. + * + * ARM port + * Copyright (C) 2005 Stelian Pop + * + * ARM64 port + * Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com> + * Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ + +#include <linux/sched.h> +#include <linux/ipipe.h> +#include <linux/mm.h> +#include <linux/jump_label.h> +#include <asm/mmu_context.h> +#include <cobalt/kernel/thread.h> +#include <asm/fpsimd.h> +#include <asm/processor.h> +#include <asm/hw_breakpoint.h> + +#ifdef ARM64_XENO_OLD_SWITCH + +#include <asm/fpsimd.h> + +#define FPSIMD_EN (0x3 << 20) + +static inline unsigned long get_cpacr(void) +{ + unsigned long result; + __asm__ ("mrs %0, cpacr_el1": "=r"(result)); + return result; +} + +static inline void set_cpacr(long val) +{ + __asm__ __volatile__ ( + "msr cpacr_el1, %0\n\t" + "isb" + : /* */ : "r"(val)); +} + +static inline void enable_fpsimd(void) +{ + set_cpacr(get_cpacr() | FPSIMD_EN); +} + +static inline struct fpsimd_state *get_fpu_owner(struct xnarchtcb *rootcb) +{ + struct task_struct *curr = rootcb->core.host_task; + + if (test_ti_thread_flag(task_thread_info(curr), TIF_FOREIGN_FPSTATE)) + /* Foreign fpu state, use auxiliary backup area */ + return &rootcb->xnfpsimd_state; + + return &curr->thread.fpsimd_state; +} + +void xnarch_leave_root(struct xnthread *root) +{ + struct xnarchtcb *rootcb = xnthread_archtcb(root); + rootcb->fpup = get_fpu_owner(rootcb); +} + +void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) +{ + struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL; + struct fpsimd_state *const to_fpup = to->tcb.fpup; + + enable_fpsimd(); + + if (from_fpup == to_fpup) + return; + + fpsimd_save_state(from_fpup); + + fpsimd_load_state(to_fpup); + to_fpup->cpu = raw_smp_processor_id(); +} + +void xnarch_init_shadow_tcb(struct xnthread *thread) +{ + struct xnarchtcb *tcb = xnthread_archtcb(thread); + tcb->fpup = &tcb->core.host_task->thread.fpsimd_state; +} + +void xnarch_init_root_tcb(struct xnthread *thread) +{ + struct xnarchtcb *tcb = &thread->tcb; + tcb->fpup = NULL; +} + +#endif /* ARM64_XENO_OLD_SWITCH */ + +void xnarch_switch_to(struct xnthread *out, struct xnthread *in) +{ + struct xnarchtcb *out_tcb = &out->tcb, *in_tcb = &in->tcb; + struct task_struct *prev, *next, *last; + struct mm_struct *prev_mm, *next_mm; + + next = in_tcb->core.host_task; + prev = out_tcb->core.host_task; + prev_mm = out_tcb->core.active_mm; + + next_mm = in_tcb->core.mm; + if (next_mm == NULL) { + in_tcb->core.active_mm = prev_mm; + enter_lazy_tlb(prev_mm, next); + } else { + ipipe_switch_mm_head(prev_mm, next_mm, next); + /* + * We might be switching back to the root thread, + * which we preempted earlier, shortly after "current" + * dropped its mm context in the do_exit() path + * (next->mm == NULL). In that particular case, the + * kernel expects a lazy TLB state for leaving the mm. 
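+ * enter_lazy_tlb() below only marks "next" as borrowing prev_mm; + * it performs no additional mm switch on top of ipipe_switch_mm_head().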
+ */ + if (next->mm == NULL) + enter_lazy_tlb(prev_mm, next); + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) + ipipe_switch_to(prev, next); + (void)last; +#else + switch_to(prev, next, last); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) + fpsimd_restore_current_state(); +#endif +#endif +} + +int xnarch_escalate(void) +{ + if (ipipe_root_p) { + ipipe_raise_irq(cobalt_pipeline.escalate_virq); + return 1; + } + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/Kconfig new file mode 100644 index 0000000..6ce3440 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/Kconfig @@ -0,0 +1,8 @@ +source "kernel/xenomai/Kconfig" +source "drivers/xenomai/Kconfig" + +config XENO_ARCH_FPU + def_bool PPC_FPU + +config XENO_ARCH_SYS3264 + def_bool n diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/arith.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/arith.h new file mode 100644 index 0000000..160a7d8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/arith.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_POWERPC_ASM_UAPI_ARITH_H +#define _COBALT_POWERPC_ASM_UAPI_ARITH_H + +#include <asm/xenomai/uapi/features.h> + +#define xnarch_add96and64(l0, l1, l2, s0, s1) \ + do { \ + __asm__ ("addc %2, %2, %4\n\t" \ + "adde %1, %1, %3\n\t" \ + "addze %0, %0\n\t" \ + : "+r"(l0), "+r"(l1), "+r"(l2) \ + : "r"(s0), "r"(s1) : "cc"); \ + } while (0) + +#include <cobalt/uapi/asm-generic/arith.h> + +#endif /* _COBALT_POWERPC_ASM_UAPI_ARITH_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/features.h new file mode 100644 index 0000000..ed54882 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/features.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_POWERPC_ASM_UAPI_FEATURES_H +#define _COBALT_POWERPC_ASM_UAPI_FEATURES_H + +/* The ABI revision level we use on this arch. */ +#define XENOMAI_ABI_REV 18UL + +#define XENOMAI_FEAT_DEP __xn_feat_generic_mask + +#define XENOMAI_FEAT_MAN __xn_feat_generic_man_mask + +#define XNARCH_HAVE_LLMULSHFT 1 +#define XNARCH_HAVE_NODIV_LLIMD 1 + +struct cobalt_featinfo_archdep { /* no arch-specific feature */ }; + +#include <cobalt/uapi/asm-generic/features.h> + +static inline const char *get_feature_label(unsigned feature) +{ + return get_generic_feature_label(feature); +} + +#endif /* !_COBALT_POWERPC_ASM_UAPI_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/fptest.h new file mode 100644 index 0000000..e6f89c9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/fptest.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_POWERPC_ASM_UAPI_FPTEST_H +#define _COBALT_POWERPC_ASM_UAPI_FPTEST_H + +#ifndef __NO_FPRS__ /* i.e. 
has FPU, not SPE */ + +static inline void fp_regs_set(int features, unsigned int val) +{ + unsigned long long fpval = val; + + __asm__ __volatile__("lfd 0, %0\n" + " fmr 1, 0\n" + " fmr 2, 0\n" + " fmr 3, 0\n" + " fmr 4, 0\n" + " fmr 5, 0\n" + " fmr 6, 0\n" + " fmr 7, 0\n" + " fmr 8, 0\n" + " fmr 9, 0\n" + " fmr 10, 0\n" + " fmr 11, 0\n" + " fmr 12, 0\n" + " fmr 13, 0\n" + " fmr 14, 0\n" + " fmr 15, 0\n" + " fmr 16, 0\n" + " fmr 17, 0\n" + " fmr 18, 0\n" + " fmr 19, 0\n" + " fmr 20, 0\n" + " fmr 21, 0\n" + " fmr 22, 0\n" + " fmr 23, 0\n" + " fmr 24, 0\n" + " fmr 25, 0\n" + " fmr 26, 0\n" + " fmr 27, 0\n" + " fmr 28, 0\n" + " fmr 29, 0\n" + " fmr 30, 0\n" + " fmr 31, 0\n"::"m"(fpval)); +} + +#define FPTEST_REGVAL(n) { \ + unsigned long long t; \ + __asm__ __volatile__(" stfd " #n ", %0" : "=m" (t)); \ + e[n] = (unsigned)t; \ + } + +static inline unsigned int fp_regs_check(int features, unsigned int val, + int (*report)(const char *fmt, ...)) +{ + unsigned int i, result = val; + unsigned int e[32]; + + FPTEST_REGVAL(0); + FPTEST_REGVAL(1); + FPTEST_REGVAL(2); + FPTEST_REGVAL(3); + FPTEST_REGVAL(4); + FPTEST_REGVAL(5); + FPTEST_REGVAL(6); + FPTEST_REGVAL(7); + FPTEST_REGVAL(8); + FPTEST_REGVAL(9); + FPTEST_REGVAL(10); + FPTEST_REGVAL(11); + FPTEST_REGVAL(12); + FPTEST_REGVAL(13); + FPTEST_REGVAL(14); + FPTEST_REGVAL(15); + FPTEST_REGVAL(16); + FPTEST_REGVAL(17); + FPTEST_REGVAL(18); + FPTEST_REGVAL(19); + FPTEST_REGVAL(20); + FPTEST_REGVAL(21); + FPTEST_REGVAL(22); + FPTEST_REGVAL(23); + FPTEST_REGVAL(24); + FPTEST_REGVAL(25); + FPTEST_REGVAL(26); + FPTEST_REGVAL(27); + FPTEST_REGVAL(28); + FPTEST_REGVAL(29); + FPTEST_REGVAL(30); + FPTEST_REGVAL(31); + + for (i = 0; i < 32; i++) + if (e[i] != val) { + report("r%d: %u != %u\n", i, e[i], val); + result = e[i]; + } + + return result; +} + +#else /* __NO_FPRS__ */ + +static inline void fp_regs_set(int features, unsigned int val) { } + +static inline unsigned int fp_regs_check(int features, unsigned int val, + int (*report)(const char *fmt, ...)) +{ + return val; +} + +#endif /* __NO_FPRS__ */ + +#endif /* !_COBALT_POWERPC_ASM_UAPI_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/syscall.h new file mode 100644 index 0000000..243aeab --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/syscall.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_POWERPC_ASM_UAPI_SYSCALL_H +#define _COBALT_POWERPC_ASM_UAPI_SYSCALL_H + +#define __xn_syscode(__nr) (__COBALT_SYSCALL_BIT | (__nr)) + +#endif /* !_COBALT_POWERPC_ASM_UAPI_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/Makefile new file mode 100644 index 0000000..e175d0a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/Makefile @@ -0,0 +1,8 @@ + +obj-$(CONFIG_XENOMAI) += xenomai.o + +xenomai-y := machine.o thread.o + +xenomai-$(CONFIG_XENO_ARCH_FPU) += fpu.o + +ccflags-y := -I$(srctree)/arch/powerpc/xenomai/include -I$(srctree)/include/xenomai diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/README b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/README new file mode 100644 index 0000000..80f954a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/README @@ -0,0 +1,3 @@ +Get the interrupt pipeline code for the target kernel from +http://xenomai.org/downloads/ipipe/, or +git://git.xenomai.org/ipipe.git diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/fpu.S b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/fpu.S new file mode 100644 index 0000000..186e922 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/fpu.S @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2004-2009 Philippe Gerum. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/version.h> +#include <asm/processor.h> +#include <asm/cputable.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + +#ifdef THREAD_FPSTATE +#define FIXUP_REG_FPSTATE(__reg) addi __reg, __reg, THREAD_FPSTATE +#else +/* + * v3.10 -> v3.13 do not have THREAD_FPSTATE yet, but still + * define THREAD_FPSCR. + */ +#define FIXUP_REG_FPSTATE(__reg) +#define FPSTATE_FPSCR(__base) THREAD_FPSCR(__base) +#endif + +/* r3 = &thread_struct (tcb->fpup) */ +_GLOBAL(__asm_save_fpu) + mfmsr r5 + ori r5,r5,MSR_FP + SYNC + MTMSRD(r5) + isync + FIXUP_REG_FPSTATE(r3) + SAVE_32FPRS(0,r3) + mffs fr0 + stfd fr0,FPSTATE_FPSCR(r3) + blr + +/* r3 = &thread_struct */ +_GLOBAL(__asm_init_fpu) + mfmsr r5 + ori r5,r5,MSR_FP|MSR_FE0|MSR_FE1 + SYNC + MTMSRD(r5) + + /* Fallback wanted. 
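Execution deliberately falls through into __asm_restore_fpu below; no blr here.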
*/ + +/* r3 = &thread_struct (tcb->fpup) */ +_GLOBAL(__asm_restore_fpu) + mfmsr r5 + ori r5,r5,MSR_FP + SYNC + MTMSRD(r5) + isync + FIXUP_REG_FPSTATE(r3) + lfd fr0,FPSTATE_FPSCR(r3) + MTFSF_L(fr0) + REST_32FPRS(0,r3) + blr + +_GLOBAL(__asm_disable_fpu) + mfmsr r5 + li r3,MSR_FP + andc r5,r5,r3 + SYNC + MTMSRD(r5) + isync + blr + +_GLOBAL(__asm_enable_fpu) + mfmsr r5 + ori r5,r5,MSR_FP + SYNC + MTMSRD(r5) + isync + blr diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/calibration.h new file mode 100644 index 0000000..9f06c3f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/calibration.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>. + * + * 64-bit PowerPC adoption + * copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_POWERPC_ASM_CALIBRATION_H +#define _COBALT_POWERPC_ASM_CALIBRATION_H + +static inline void xnarch_get_latencies(struct xnclock_gravity *p) +{ +#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0 +#define __sched_latency CONFIG_XENO_OPT_TIMING_SCHEDLAT +#elif defined(CONFIG_PPC_PASEMI) +#define __sched_latency 1000 +#elif defined(CONFIG_WALNUT) +#define __sched_latency 11000 +#elif defined(CONFIG_YOSEMITE) +#define __sched_latency 2000 +#elif defined(CONFIG_BUBINGA) +#define __sched_latency 8000 +#elif defined(CONFIG_SYCAMORE) +#define __sched_latency 8000 +#elif defined(CONFIG_SEQUOIA) +#define __sched_latency 3000 +#elif defined(CONFIG_LWMON5) +#define __sched_latency 2800 +#elif defined(CONFIG_OCOTEA) +#define __sched_latency 2700 +#elif defined(CONFIG_BAMBOO) +#define __sched_latency 4000 +#elif defined(CONFIG_TAISHAN) +#define __sched_latency 1800 +#elif defined(CONFIG_RAINIER) +#define __sched_latency 2300 +#elif defined(CONFIG_YUCCA) +#define __sched_latency 2780 +#elif defined(CONFIG_YELLOWSTONE) +#define __sched_latency 2700 +#elif defined(CONFIG_YOSEMITE) +#define __sched_latency 2500 +#elif defined(CONFIG_MPC8349_ITX) +#define __sched_latency 2500 +#elif defined(CONFIG_MPC836x_MDS) +#define __sched_latency 2900 +#elif defined(CONFIG_MPC5121_ADS) +#define __sched_latency 4000 +#elif defined(CONFIG_MPC8272_ADS) +#define __sched_latency 5500 +#elif defined(CONFIG_MPC85xx_RDB) +#define __sched_latency 2000 +#elif defined(CONFIG_MVME7100) +#define __sched_latency 1500 +#elif defined(CONFIG_TQM8548) +#define __sched_latency 500 +#elif defined(CONFIG_TQM8560) +#define __sched_latency 1000 +#elif defined(CONFIG_TQM8555) +#define __sched_latency 2000 +#elif defined(CONFIG_KUP4K) +#define __sched_latency 22000 +#elif defined(CONFIG_P1022_DS) +#define __sched_latency 3000 +/* + * Check for the most generic configs at the bottom of 
this list, so + * that the most specific choices available are picked first. + */ +#elif defined(CONFIG_CORENET_GENERIC) +#define __sched_latency 2800 +#elif defined(CONFIG_MPC85xx) || defined(CONFIG_PPC_85xx) +#define __sched_latency 1000 +#elif defined(CONFIG_405GPR) +#define __sched_latency 9000 +#elif defined(CONFIG_PPC_MPC52xx) +#define __sched_latency 4500 +#elif defined(CONFIG_PPC_8xx) +#define __sched_latency 25000 +#endif + +#ifndef __sched_latency +/* Platform is unknown: pick a default value. */ +#define __sched_latency 4000 +#endif + p->user = xnclock_ns_to_ticks(&nkclock, __sched_latency); + p->kernel = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_KSCHEDLAT); + p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT); +} + +#undef __sched_latency + +#endif /* !_COBALT_POWERPC_ASM_CALIBRATION_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/features.h new file mode 100644 index 0000000..03f93a2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/features.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_POWERPC_ASM_FEATURES_H +#define _COBALT_POWERPC_ASM_FEATURES_H + +struct cobalt_featinfo; +static inline void collect_arch_features(struct cobalt_featinfo *p) { } + +#include <asm/xenomai/uapi/features.h> + +#endif /* !_COBALT_POWERPC_ASM_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/fptest.h new file mode 100644 index 0000000..a9d93fe --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/fptest.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_POWERPC_ASM_FPTEST_H +#define _COBALT_POWERPC_ASM_FPTEST_H + +#include <linux/errno.h> +#include <linux/printk.h> +#include <asm/xenomai/uapi/fptest.h> + +static inline int fp_kernel_supported(void) +{ +/* + * CAUTION: some architectures have a hardware FP unit, but a + * restricted set of supported FP instructions. Those may enable + * CONFIG_MATH_EMULATION and MATH_EMULATION_HW_UNIMPLEMENTED at the + * same time to provide an emulation of the missing instruction set. + */ +#ifdef CONFIG_PPC_FPU + return 1; +#else +#ifdef CONFIG_MATH_EMULATION + printk_once(XENO_WARNING "kernel-based FPU support is disabled\n"); +#endif /* !CONFIG_MATH_EMULATION */ + return 0; +#endif /* !CONFIG_PPC_FPU */ +} + +static inline void fp_init(void) +{ +} + +static inline int fp_linux_begin(void) +{ + return -ENOSYS; +} + +static inline void fp_linux_end(void) +{ +} + +static inline int fp_detect(void) +{ + return 0; +} + +#endif /* !_COBALT_POWERPC_ASM_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/machine.h new file mode 100644 index 0000000..0e41fd6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/machine.h @@ -0,0 +1,39 @@ +/** + * Copyright © 2002-2004 Philippe Gerum. + * + * 64-bit PowerPC adoption + * copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, Inc., 675 Mass Ave, + * Cambridge MA 02139, USA; either version 2 of the License, or (at + * your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_POWERPC_ASM_MACHINE_H +#define _COBALT_POWERPC_ASM_MACHINE_H + +#include <linux/compiler.h> + +#define XNARCH_HOST_TICK_IRQ __ipipe_hrtimer_irq + +static inline __attribute_const__ unsigned long ffnz(unsigned long ul) +{ + __asm__ ("cntlzw %0, %1":"=r"(ul):"r"(ul & (-ul))); + return 31 - ul; +} + +/* Read this last to enable default settings. */ +#include <asm-generic/xenomai/machine.h> + +#endif /* !_COBALT_POWERPC_ASM_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall.h new file mode 100644 index 0000000..9b166ad --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>. + * + * 64-bit PowerPC adoption + * copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_POWERPC_ASM_SYSCALL_H +#define _COBALT_POWERPC_ASM_SYSCALL_H + +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <asm-generic/xenomai/syscall.h> + +/* + * Cobalt and Linux syscall numbers can be fetched from GPR0, masking + * out the __COBALT_SYSCALL_BIT marker. + */ +#define __xn_reg_sys(__regs) ((__regs)->gpr[0]) +#define __xn_syscall_p(__regs) (__xn_reg_sys(__regs) & __COBALT_SYSCALL_BIT) +#define __xn_syscall(__regs) (__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT) + +#define __xn_reg_rval(__regs) ((__regs)->gpr[3]) +#define __xn_reg_arg1(__regs) ((__regs)->gpr[3]) +#define __xn_reg_arg2(__regs) ((__regs)->gpr[4]) +#define __xn_reg_arg3(__regs) ((__regs)->gpr[5]) +#define __xn_reg_arg4(__regs) ((__regs)->gpr[6]) +#define __xn_reg_arg5(__regs) ((__regs)->gpr[7]) +#define __xn_reg_pc(__regs) ((__regs)->nip) +#define __xn_reg_sp(__regs) ((__regs)->gpr[1]) + +/* + * Root syscall number with predicate (valid only if + * !__xn_syscall_p(__regs)). + */ +#define __xn_rootcall_p(__regs, __code) \ + ({ \ + *(__code) = __xn_syscall(__regs); \ + *(__code) < NR_syscalls; \ + }) + +static inline void __xn_error_return(struct pt_regs *regs, int v) +{ + /* + * We currently never set the SO bit for marking errors, even + * if we always test it upon syscall return. + */ + __xn_reg_rval(regs) = v; +} + +static inline void __xn_status_return(struct pt_regs *regs, long v) +{ + __xn_reg_rval(regs) = v; +} + +static inline int __xn_interrupted_p(struct pt_regs *regs) +{ + return __xn_reg_rval(regs) == -EINTR; +} + +static inline +int xnarch_local_syscall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5) +{ + return -ENOSYS; +} + +#endif /* !_COBALT_POWERPC_ASM_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall32.h new file mode 100644 index 0000000..15c977c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall32.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_POWERPC_ASM_SYSCALL32_H +#define _COBALT_POWERPC_ASM_SYSCALL32_H + +#include <asm-generic/xenomai/syscall32.h> + +#endif /* !_COBALT_POWERPC_ASM_SYSCALL32_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/thread.h new file mode 100644 index 0000000..f91e26b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/thread.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2004-2013 Philippe Gerum. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_POWERPC_ASM_THREAD_H +#define _COBALT_POWERPC_ASM_THREAD_H + +#include <asm-generic/xenomai/ipipe/thread.h> + +struct xnarchtcb { + struct xntcb core; +#ifdef CONFIG_XENO_ARCH_FPU + struct thread_struct *fpup; +#define xnarch_fpu_ptr(tcb) ((tcb)->fpup) +#else +#define xnarch_fpu_ptr(tcb) NULL +#endif +}; + +#define xnarch_fault_regs(d) ((d)->regs) +#define xnarch_fault_trap(d) ((unsigned int)(d)->regs->trap) +#define xnarch_fault_code(d) ((d)->regs->dar) +#define xnarch_fault_pc(d) ((d)->regs->nip) +#define xnarch_fault_fpu_p(d) 0 +#define xnarch_fault_pf_p(d) ((d)->exception == IPIPE_TRAP_ACCESS) +#define xnarch_fault_bp_p(d) ((current->ptrace & PT_PTRACED) && \ + ((d)->exception == IPIPE_TRAP_IABR || \ + (d)->exception == IPIPE_TRAP_SSTEP || \ + (d)->exception == IPIPE_TRAP_DEBUG)) +#define xnarch_fault_notify(d) (xnarch_fault_bp_p(d) == 0) + +static inline +struct task_struct *xnarch_host_task(struct xnarchtcb *tcb) +{ + return tcb->core.host_task; +} + +static inline void xnarch_enter_root(struct xnthread *root) { } + +#ifdef CONFIG_XENO_ARCH_FPU + +void xnarch_init_root_tcb(struct xnthread *thread); + +void xnarch_init_shadow_tcb(struct xnthread *thread); + +void xnarch_leave_root(struct xnthread *root); + +#else /* !CONFIG_XENO_ARCH_FPU */ + +static inline void xnarch_init_root_tcb(struct xnthread *thread) { } +static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { } +static inline void xnarch_leave_root(struct xnthread *root) { } + +#endif /* !CONFIG_XENO_ARCH_FPU */ + +static inline int +xnarch_handle_fpu_fault(struct xnthread *from, + struct xnthread *to, struct ipipe_trap_data *d) +{ + return 0; +} + +static inline int xnarch_escalate(void) +{ + if (ipipe_root_p) { + ipipe_raise_irq(cobalt_pipeline.escalate_virq); + return 1; + } + + return 0; +} + +void xnarch_switch_to(struct xnthread *out, struct xnthread *in); + +void xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread); + +#endif /* !_COBALT_POWERPC_ASM_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/wrappers.h new file 
mode 100644 index 0000000..f0ae0e4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/wrappers.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_POWERPC_ASM_WRAPPERS_H +#define _COBALT_POWERPC_ASM_WRAPPERS_H + +#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */ + +#endif /* _COBALT_POWERPC_ASM_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/machine.c new file mode 100644 index 0000000..14e2c4f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/machine.c @@ -0,0 +1,67 @@ +/** + * Copyright (C) 2004-2006 Philippe Gerum. + * + * 64-bit PowerPC adoption + * copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, Inc., 675 Mass Ave, + * Cambridge MA 02139, USA; either version 2 of the License, or (at + * your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#include <linux/stddef.h> +#include <asm/cputable.h> +#include <asm/xenomai/machine.h> + +static int mach_powerpc_init(void) +{ +#ifdef CONFIG_ALTIVEC + if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { + printk("Xenomai: ALTIVEC support enabled in kernel but no hardware found.\n" + " Disable CONFIG_ALTIVEC in the kernel configuration.\n"); + return -ENODEV; + } +#endif /* CONFIG_ALTIVEC */ + + return 0; +} + +static const char *const fault_labels[] = { + [0] = "Data or instruction access", + [1] = "Alignment", + [2] = "Altivec unavailable", + [3] = "Program check exception", + [4] = "Machine check exception", + [5] = "Unknown", + [6] = "Instruction breakpoint", + [7] = "Run mode exception", + [8] = "Single-step exception", + [9] = "Non-recoverable exception", + [10] = "Software emulation", + [11] = "Debug", + [12] = "SPE", + [13] = "Altivec assist", + [14] = "Cache-locking exception", + [15] = "Kernel FP unavailable", + [16] = NULL +}; + +struct cobalt_machine cobalt_machine = { + .name = "powerpc", + .init = mach_powerpc_init, + .late_init = NULL, + .cleanup = NULL, + .prefault = NULL, + .fault_labels = fault_labels, +}; diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/thread.c new file mode 100644 index 0000000..6ce2787 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/thread.c @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>. + * + * 64-bit PowerPC adoption + * copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include <linux/sched.h> +#include <linux/ipipe.h> +#include <linux/mm.h> +#include <asm/mmu_context.h> +#include <cobalt/kernel/thread.h> + +asmlinkage struct task_struct * +_switch(struct thread_struct *prev, struct thread_struct *next); + +void xnarch_switch_to(struct xnthread *out, struct xnthread *in) +{ + struct xnarchtcb *out_tcb = &out->tcb, *in_tcb = &in->tcb; + struct mm_struct *prev_mm, *next_mm; + struct task_struct *next; + + next = in_tcb->core.host_task; + prev_mm = out_tcb->core.active_mm; + + next_mm = in_tcb->core.mm; + if (next_mm == NULL) { + in_tcb->core.active_mm = prev_mm; + enter_lazy_tlb(prev_mm, next); + } else { + ipipe_switch_mm_head(prev_mm, next_mm, next); + /* + * We might be switching back to the root thread, + * which we preempted earlier, shortly after "current" + * dropped its mm context in the do_exit() path + * (next->mm == NULL). In that particular case, the + * kernel expects a lazy TLB state for leaving the mm. 
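+ * Further down, hard_local_irq_disable() precedes _switch(), + * presumably to keep the register-state switch atomic from the + * head domain's viewpoint.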
+ */ + if (next->mm == NULL) + enter_lazy_tlb(prev_mm, next); + } + + hard_local_irq_disable(); + _switch(out_tcb->core.tsp, in_tcb->core.tsp); +} + +#ifdef CONFIG_XENO_ARCH_FPU + +asmlinkage void __asm_init_fpu(struct thread_struct *ts); + +asmlinkage void __asm_save_fpu(struct thread_struct *ts); + +asmlinkage void __asm_restore_fpu(struct thread_struct *ts); + +asmlinkage void __asm_disable_fpu(void); + +asmlinkage void __asm_enable_fpu(void); + +#if !defined(CONFIG_SMP) && LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0) +#define get_fpu_owner(cur) last_task_used_math +#else /* CONFIG_SMP */ +#define get_fpu_owner(cur) ({ \ + struct task_struct * _cur = (cur); \ + ((_cur->thread.regs && (_cur->thread.regs->msr & MSR_FP)) \ + ? _cur : NULL); \ +}) +#endif /* CONFIG_SMP */ + +static void xnarch_enable_fpu(struct xnthread *thread) +{ + struct xnarchtcb *tcb = xnthread_archtcb(thread); + struct task_struct *task = tcb->core.host_task; + + if (task && task != tcb->core.user_fpu_owner) + __asm_disable_fpu(); + else + __asm_enable_fpu(); +} + +static void do_save_fpu(struct xnthread *thread) +{ + struct xnarchtcb *tcb = xnthread_archtcb(thread); + + if (tcb->fpup) { + __asm_save_fpu(tcb->fpup); + + if (tcb->core.user_fpu_owner && + tcb->core.user_fpu_owner->thread.regs) + tcb->core.user_fpu_owner->thread.regs->msr &= ~(MSR_FP|MSR_FE0|MSR_FE1); + } +} + +static void xnarch_restore_fpu(struct xnthread *thread) +{ + struct xnarchtcb *tcb = xnthread_archtcb(thread); + struct thread_struct *ts; + struct pt_regs *regs; + + if (tcb->fpup) { + __asm_restore_fpu(tcb->fpup); + /* + * Note: Only enable FP in MSR, if it was enabled when + * we saved the fpu state. + */ + if (tcb->core.user_fpu_owner) { + ts = &tcb->core.user_fpu_owner->thread; + regs = ts->regs; + if (regs) { + regs->msr &= ~(MSR_FE0|MSR_FE1); + regs->msr |= (MSR_FP|ts->fpexc_mode); + } + } + } + /* + * FIXME: We restore FPU "as it was" when Xenomai preempted Linux, + * whereas we could be much lazier. + */ + if (tcb->core.host_task && + tcb->core.host_task != tcb->core.user_fpu_owner) + __asm_disable_fpu(); +} + +void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to) +{ + if (from == to || + xnarch_fpu_ptr(xnthread_archtcb(from)) == + xnarch_fpu_ptr(xnthread_archtcb(to))) { + xnarch_enable_fpu(to); + return; + } + + if (from) + do_save_fpu(from); + + xnarch_restore_fpu(to); +} + +void xnarch_leave_root(struct xnthread *root) +{ + struct xnarchtcb *rootcb = xnthread_archtcb(root); + rootcb->core.user_fpu_owner = get_fpu_owner(rootcb->core.host_task); + /* So that do_save_fpu() operates on the right FPU area. */ + rootcb->fpup = rootcb->core.user_fpu_owner ? 
+ &rootcb->core.user_fpu_owner->thread : NULL; +} + +void xnarch_init_root_tcb(struct xnthread *thread) +{ + struct xnarchtcb *tcb = &thread->tcb; + tcb->fpup = NULL; +} + +void xnarch_init_shadow_tcb(struct xnthread *thread) +{ + struct xnarchtcb *tcb = &thread->tcb; + tcb->fpup = &tcb->core.host_task->thread; +} + +#endif /* CONFIG_XENO_ARCH_FPU */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/Kconfig new file mode 100644 index 0000000..9adbbf7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/Kconfig @@ -0,0 +1,8 @@ +config XENO_ARCH_FPU + def_bool y + +config XENO_ARCH_SYS3264 + def_bool IA32_EMULATION + +source "kernel/xenomai/Kconfig" +source "drivers/xenomai/Kconfig" diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/Makefile new file mode 100644 index 0000000..93929b6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/Makefile @@ -0,0 +1,5 @@ + +obj-$(CONFIG_XENOMAI) += xenomai.o +xenomai-y := machine.o smi.o c1e.o + +ccflags-y := -I$(srctree)/arch/x86/xenomai/include -I$(srctree)/include/xenomai diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/c1e.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/c1e.c new file mode 120000 index 0000000..5dc924e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/c1e.c @@ -0,0 +1 @@ +../ipipe/c1e.c \ No newline at end of file diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/calibration.h new file mode 100644 index 0000000..7f2dde7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/calibration.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_CALIBRATION_H +#define _COBALT_X86_ASM_CALIBRATION_H + +static inline void xnarch_get_latencies(struct xnclock_gravity *p) +{ + unsigned long sched_latency; + +#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0 + sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT; +#else /* !CONFIG_XENO_OPT_TIMING_SCHEDLAT */ + sched_latency = num_online_cpus() > 1 ? 
3350 : 2000; +#endif /* !CONFIG_XENO_OPT_TIMING_SCHEDLAT */ + + p->user = xnclock_ns_to_ticks(&nkclock, sched_latency); + p->kernel = xnclock_ns_to_ticks(&nkclock, + CONFIG_XENO_OPT_TIMING_KSCHEDLAT); + p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT); +} + +#endif /* !_COBALT_X86_ASM_CALIBRATION_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/fptest.h new file mode 100644 index 0000000..463d9d3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/fptest.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_FPTEST_H +#define _COBALT_X86_ASM_FPTEST_H + +#include <linux/errno.h> +#include <asm/processor.h> +#include <asm/xenomai/wrappers.h> +#include <asm/xenomai/uapi/fptest.h> + +/* + * We do NOT support out-of-band FPU operations in kernel space for a + * reason: this is a mess. Out-of-band FPU is just fine and makes a + * lot of sense for many real-time applications, but you have to do + * that from userland. + */ +static inline int fp_kernel_supported(void) +{ + return 0; +} + +static inline void fp_init(void) +{ +} + +static inline int fp_linux_begin(void) +{ + kernel_fpu_begin(); + /* + * We need a clean context for testing the sanity of the FPU + * register stack across switches in fp_regs_check() + * (fildl->fistpl), which kernel_fpu_begin() does not + * guarantee us. Force this manually. + */ + asm volatile("fninit"); + + return true; +} + +static inline void fp_linux_end(void) +{ + kernel_fpu_end(); +} + +static inline int fp_detect(void) +{ + int features = 0; + + if (boot_cpu_has(X86_FEATURE_XMM2)) + features |= __COBALT_HAVE_SSE2; + + if (boot_cpu_has(X86_FEATURE_AVX)) + features |= __COBALT_HAVE_AVX; + + return features; +} + +#endif /* _COBALT_X86_ASM_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/machine.h new file mode 100644 index 0000000..56b1c48 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/machine.h @@ -0,0 +1,34 @@ +/** + * Copyright (C) 2007-2012 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
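*/

For a concrete sense of the xnarch_get_latencies() fallback above: with CONFIG_XENO_OPT_TIMING_SCHEDLAT left at 0, the scheduling gravity defaults to 3350 ns on SMP and 2000 ns on uniprocessor. A stand-alone sketch of that selection (values copied from the code; xnclock_ns_to_ticks() and nkclock are kernel-side and omitted here):

#include <stdio.h>

/* Mirrors the dovetail fallback above: 3350 ns if SMP, else 2000 ns. */
static unsigned long pick_sched_latency(int online_cpus)
{
	return online_cpus > 1 ? 3350 : 2000;
}

int main(void)
{
	printf("gravity.user = %lu ns\n", pick_sched_latency(4)); /* 3350 */
	return 0;
}

/*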
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_MACHINE_H +#define _COBALT_X86_ASM_MACHINE_H + +#include <linux/compiler.h> + +static inline __attribute_const__ unsigned long ffnz(unsigned long ul) +{ + __asm__("bsfq %1, %0":"=r" (ul) : "rm" (ul)); + + return ul; +} + +/* Read this last to enable default settings. */ +#include <asm-generic/xenomai/machine.h> + +#endif /* !_COBALT_X86_ASM_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/syscall.h new file mode 100644 index 0000000..b2e1582 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/syscall.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_SYSCALL_H +#define _COBALT_X86_ASM_SYSCALL_H + +#include <linux/errno.h> +#include <asm/ptrace.h> +#include <asm-generic/xenomai/syscall.h> + +/* + * Cobalt and Linux syscall numbers can be fetched from ORIG_AX, + * masking out the __COBALT_SYSCALL_BIT marker. + */ +#define __xn_reg_sys(regs) ((regs)->orig_ax) +#define __xn_reg_rval(regs) ((regs)->ax) +#define __xn_reg_pc(regs) ((regs)->ip) +#define __xn_reg_sp(regs) ((regs)->sp) + +#define __xn_syscall_p(regs) (__xn_reg_sys(regs) & __COBALT_SYSCALL_BIT) +#ifdef CONFIG_XENO_ARCH_SYS3264 +#define __xn_syscall(regs) __COBALT_CALL32_SYSNR(__xn_reg_sys(regs) \ + & ~__COBALT_SYSCALL_BIT) +#else +#define __xn_syscall(regs) (__xn_reg_sys(regs) & ~__COBALT_SYSCALL_BIT) +#endif + +#ifdef CONFIG_IA32_EMULATION +#define __xn_nr_root_syscalls \ + ({ \ + struct thread_info *__ti = current_thread_info(); \ + __ti->status & TS_COMPAT ? IA32_NR_syscalls : NR_syscalls; \ + }) +#else +#define __xn_nr_root_syscalls NR_syscalls +#endif +/* + * Root syscall number with predicate (valid only if + * !__xn_syscall_p(__regs)). 
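*/

ffnz() above maps to a single bsfq instruction and returns the index of the least significant set bit; like the instruction itself, the result is undefined for a zero argument. A user-space reference using a compiler builtin, for illustration only:

#include <stdio.h>

/* Reference for ffnz(): lowest set bit index; caller must pass ul != 0. */
static unsigned long ffnz_ref(unsigned long ul)
{
	return (unsigned long)__builtin_ctzl(ul);
}

int main(void)
{
	printf("%lu %lu\n", ffnz_ref(0x8), ffnz_ref(0x1)); /* prints: 3 0 */
	return 0;
}

/*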
+ */ +#define __xn_rootcall_p(__regs, __code) \ + ({ \ + *(__code) = __xn_reg_sys(__regs); \ + *(__code) < __xn_nr_root_syscalls; \ + }) + +static inline void __xn_error_return(struct pt_regs *regs, int v) +{ + __xn_reg_rval(regs) = v; +} + +static inline void __xn_status_return(struct pt_regs *regs, long v) +{ + __xn_reg_rval(regs) = v; +} + +static inline int __xn_interrupted_p(struct pt_regs *regs) +{ + return __xn_reg_rval(regs) == -EINTR; +} + +static inline +int xnarch_local_syscall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5) +{ + return -ENOSYS; +} + +#endif /* !_COBALT_X86_ASM_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/thread.h new file mode 100644 index 0000000..6eb71e2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/thread.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2004-2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_THREAD_H +#define _COBALT_X86_ASM_THREAD_H + +#include <asm-generic/xenomai/dovetail/thread.h> +#include <asm/traps.h> + +#define xnarch_fault_pc(__regs) ((__regs)->ip) +#define xnarch_fault_pf_p(__nr) ((__nr) == X86_TRAP_PF) +#define xnarch_fault_bp_p(__nr) ((current->ptrace & PT_PTRACED) && \ + ((__nr) == X86_TRAP_DB || (__nr) == X86_TRAP_BP)) +#define xnarch_fault_notify(__nr) (!xnarch_fault_bp_p(__nr)) + +#endif /* !_COBALT_X86_ASM_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/machine.c new file mode 100644 index 0000000..562de40 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/machine.c @@ -0,0 +1,70 @@ +/** + * Copyright (C) 2007-2012 Philippe Gerum. + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, Inc., 675 Mass Ave, + * Cambridge MA 02139, USA; either version 2 of the License, or (at + * your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
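*/

A minimal sketch of how the __xn_syscall_p()/ORIG_AX convention in syscall.h above partitions trapped syscalls; the register frame and the __COBALT_SYSCALL_BIT value below are stand-ins for illustration only (the real definitions live in the Cobalt UAPI headers):

#include <stdio.h>

#define __COBALT_SYSCALL_BIT 0x10000000UL /* stand-in value */

struct regs_sketch { unsigned long orig_ax, ax; };

/* Cobalt syscalls carry the marker bit in ORIG_AX; Linux ones do not. */
static const char *classify(const struct regs_sketch *regs)
{
	return (regs->orig_ax & __COBALT_SYSCALL_BIT) ? "cobalt" : "linux";
}

int main(void)
{
	struct regs_sketch r = { .orig_ax = __COBALT_SYSCALL_BIT | 42 };
	printf("%s, nr=%lu\n", classify(&r),
	       r.orig_ax & ~__COBALT_SYSCALL_BIT); /* cobalt, nr=42 */
	return 0;
}

/*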
+ */ + +#include <asm/xenomai/machine.h> +#include <asm/xenomai/smi.h> +#include <asm/xenomai/c1e.h> + +static int mach_x86_init(void) +{ + mach_x86_c1e_disable(); + mach_x86_smi_init(); + mach_x86_smi_disable(); + + return 0; +} + +static void mach_x86_cleanup(void) +{ + mach_x86_smi_restore(); +} + +static const char *const fault_labels[] = { + [0] = "Divide error", + [1] = "Debug", + [2] = "", /* NMI is not pipelined. */ + [3] = "Int3", + [4] = "Overflow", + [5] = "Bounds", + [6] = "Invalid opcode", + [7] = "FPU not available", + [8] = "Double fault", + [9] = "FPU segment overrun", + [10] = "Invalid TSS", + [11] = "Segment not present", + [12] = "Stack segment", + [13] = "General protection", + [14] = "Page fault", + [15] = "Spurious interrupt", + [16] = "FPU error", + [17] = "Alignment check", + [18] = "Machine check", + [19] = "SIMD error", + [20] = NULL, +}; + +struct cobalt_machine cobalt_machine = { + .name = "x86", + .init = mach_x86_init, + .late_init = NULL, + .cleanup = mach_x86_cleanup, + .prefault = NULL, + .fault_labels = fault_labels, +}; diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/smi.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/smi.c new file mode 120000 index 0000000..8d19721 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/smi.c @@ -0,0 +1 @@ +../ipipe/smi.c \ No newline at end of file diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/c1e.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/c1e.h new file mode 100644 index 0000000..7e06014 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/c1e.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef C1E_H +#define C1E_H + +void mach_x86_c1e_disable(void); + +#endif /* C1E_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/features.h new file mode 100644 index 0000000..a37c186 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/features.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2005-2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
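*/

The NULL entry terminates fault_labels above, and index 2 is deliberately empty since NMIs are not pipelined. A sketch of how a core might turn a trap number into a printable label (abridged table; the bounds and fallback logic are an assumption, not code from the Cobalt core):

#include <stdio.h>

static const char *const fault_labels_demo[] = {
	[0] = "Divide error",
	[13] = "General protection",
	[14] = "Page fault",
	[20] = NULL,	/* sentinel, as in the table above */
};

static const char *fault_name(unsigned int trapnr)
{
	const char *s = trapnr < 20 ? fault_labels_demo[trapnr] : NULL;
	return (s && *s) ? s : "?";
}

int main(void)
{
	printf("%s\n", fault_name(14)); /* Page fault */
	return 0;
}

/*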
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_FEATURES_H
+#define _COBALT_X86_ASM_FEATURES_H
+
+struct cobalt_featinfo;
+static inline void collect_arch_features(struct cobalt_featinfo *p) { }
+
+#include <asm/xenomai/uapi/features.h>
+
+#endif /* !_COBALT_X86_ASM_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/smi.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/smi.h
new file mode 100644
index 0000000..1ea90fb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/smi.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright © 2005 Gilles Chanteperdrix.
+ *
+ * SMI workaround for x86.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_SMI_H
+#define _COBALT_X86_ASM_SMI_H
+
+#ifndef _COBALT_X86_ASM_MACHINE_H
+#error "please don't include asm/smi.h directly"
+#endif
+
+void mach_x86_smi_disable(void);
+void mach_x86_smi_restore(void);
+void mach_x86_smi_init(void);
+
+#endif /* !_COBALT_X86_ASM_SMI_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32-table.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32-table.h
new file mode 100644
index 0000000..3986b22
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32-table.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_SYSCALL32_TABLE_H
+#define _COBALT_X86_ASM_SYSCALL32_TABLE_H
+
+/*
+ * CAUTION: This file is read verbatim into the main syscall
+ * table. Only preprocessor stuff and syscall entries here.
+ */ + +__COBALT_CALL32emu_THUNK(thread_create) +__COBALT_CALL32emu_THUNK(thread_setschedparam_ex) +__COBALT_CALL32emu_THUNK(thread_getschedparam_ex) +__COBALT_CALL32emu_THUNK(thread_setschedprio) +__COBALT_CALL32emu_THUNK(sem_open) +__COBALT_CALL32emu_THUNK(sem_timedwait) +__COBALT_CALL32emu_THUNK(clock_getres) +__COBALT_CALL32emu_THUNK(clock_gettime) +__COBALT_CALL32emu_THUNK(clock_settime) +__COBALT_CALL32emu_THUNK(clock_nanosleep) +__COBALT_CALL32emu_THUNK(mutex_timedlock) +__COBALT_CALL32emu_THUNK(cond_wait_prologue) +__COBALT_CALL32emu_THUNK(mq_open) +__COBALT_CALL32emu_THUNK(mq_getattr) +__COBALT_CALL32emu_THUNK(mq_timedsend) +__COBALT_CALL32emu_THUNK(mq_timedreceive) +__COBALT_CALL32emu_THUNK(mq_notify) +__COBALT_CALL32emu_THUNK(sched_weightprio) +__COBALT_CALL32emu_THUNK(sched_setconfig_np) +__COBALT_CALL32emu_THUNK(sched_getconfig_np) +__COBALT_CALL32emu_THUNK(sched_setscheduler_ex) +__COBALT_CALL32emu_THUNK(sched_getscheduler_ex) +__COBALT_CALL32emu_THUNK(timer_create) +__COBALT_CALL32emu_THUNK(timer_settime) +__COBALT_CALL32emu_THUNK(timer_gettime) +__COBALT_CALL32emu_THUNK(timerfd_settime) +__COBALT_CALL32emu_THUNK(timerfd_gettime) +__COBALT_CALL32emu_THUNK(sigwait) +__COBALT_CALL32emu_THUNK(sigtimedwait) +__COBALT_CALL32emu_THUNK(sigwaitinfo) +__COBALT_CALL32emu_THUNK(sigpending) +__COBALT_CALL32emu_THUNK(sigqueue) +__COBALT_CALL32emu_THUNK(monitor_wait) +__COBALT_CALL32emu_THUNK(event_wait) +__COBALT_CALL32emu_THUNK(select) +__COBALT_CALL32emu_THUNK(recvmsg) +__COBALT_CALL32emu_THUNK(sendmsg) +__COBALT_CALL32emu_THUNK(mmap) +__COBALT_CALL32emu_THUNK(backtrace) +__COBALT_CALL32emu_THUNK(mq_timedreceive64) +__COBALT_CALL32emu_THUNK(sigtimedwait64) +__COBALT_CALL32emu_THUNK(recvmmsg64) + +#endif /* !_COBALT_X86_ASM_SYSCALL32_TABLE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h new file mode 100644 index 0000000..f023de3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_SYSCALL32_H +#define _COBALT_X86_ASM_SYSCALL32_H + +#include <asm/unistd.h> + +#ifdef CONFIG_IA32_EMULATION + +#define __COBALT_IA32_BASE 256 /* Power of two. */ + +#define __COBALT_SYSNR32emu(__reg) \ + ({ \ + long __nr = __reg; \ + if (in_ia32_syscall()) \ + __nr += __COBALT_IA32_BASE; \ + __nr; \ + }) + +#define __COBALT_COMPAT32emu(__reg) \ + (in_ia32_syscall() ? 
__COBALT_COMPAT_BIT : 0) + +#if __NR_COBALT_SYSCALLS > __COBALT_IA32_BASE +#error "__NR_COBALT_SYSCALLS > __COBALT_IA32_BASE" +#endif + +#define __syshand32emu__(__name) \ + ((cobalt_syshand)(void (*)(void))(CoBaLt32emu_ ## __name)) + +#define __COBALT_CALL32emu_INITHAND(__handler) \ + [__COBALT_IA32_BASE ... __COBALT_IA32_BASE + __NR_COBALT_SYSCALLS-1] = __handler, + +#define __COBALT_CALL32emu_INITMODE(__mode) \ + [__COBALT_IA32_BASE ... __COBALT_IA32_BASE + __NR_COBALT_SYSCALLS-1] = __mode, + +/* ia32 default entry (no thunk) */ +#define __COBALT_CALL32emu_ENTRY(__name, __handler) \ + [sc_cobalt_ ## __name + __COBALT_IA32_BASE] = __handler, + +/* ia32 thunk installation */ +#define __COBALT_CALL32emu_THUNK(__name) \ + __COBALT_CALL32emu_ENTRY(__name, __syshand32emu__(__name)) + +/* ia32 thunk implementation. */ +#define COBALT_SYSCALL32emu(__name, __mode, __args) \ + long CoBaLt32emu_ ## __name __args + +/* ia32 thunk declaration. */ +#define COBALT_SYSCALL32emu_DECL(__name, __args) \ + long CoBaLt32emu_ ## __name __args + +#else /* !CONFIG_IA32_EMULATION */ + +/* ia32 emulation support disabled. */ + +#define __COBALT_SYSNR32emu(__reg) (__reg) + +#define __COBALT_COMPAT32emu(__reg) 0 + +#define __COBALT_CALL32emu_INITHAND(__handler) + +#define __COBALT_CALL32emu_INITMODE(__mode) + +#define __COBALT_CALL32emu_ENTRY(__name, __handler) + +#define __COBALT_CALL32emu_THUNK(__name) + +#define COBALT_SYSCALL32emu_DECL(__name, __args) + +#endif /* !CONFIG_IA32_EMULATION */ + +#define __COBALT_CALL32_ENTRY(__name, __handler) \ + __COBALT_CALL32emu_ENTRY(__name, __handler) + +#define __COBALT_CALL32_INITHAND(__handler) \ + __COBALT_CALL32emu_INITHAND(__handler) + +#define __COBALT_CALL32_INITMODE(__mode) \ + __COBALT_CALL32emu_INITMODE(__mode) + +/* Already checked for __COBALT_SYSCALL_BIT */ +#define __COBALT_CALL32_SYSNR(__reg) __COBALT_SYSNR32emu(__reg) + +#define __COBALT_CALL_COMPAT(__reg) __COBALT_COMPAT32emu(__reg) + +#endif /* !_COBALT_X86_ASM_SYSCALL32_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/arith.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/arith.h new file mode 100644 index 0000000..3682736 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/arith.h @@ -0,0 +1,243 @@ +/** + * Arithmetic/conversion routines for x86. + * + * Copyright © 2005 Gilles Chanteperdrix, 32bit version. + * Copyright © 2007 Jan Kiszka, 64bit version. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
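*/

To make the macro layering above concrete: with CONFIG_IA32_EMULATION, each thunk entry lands __COBALT_IA32_BASE slots above its 64-bit sibling in the syscall table. A mock table sketch (the sc_cobalt_mmap number and handler types below are invented for illustration):

#include <stdio.h>

#define __COBALT_IA32_BASE 256
#define sc_cobalt_mmap 3	/* invented syscall number */

typedef long (*cobalt_syshand)(void);
static long mmap_native(void) { return 64; }
static long mmap_ia32(void)   { return 32; }

static const cobalt_syshand table[2 * __COBALT_IA32_BASE] = {
	[sc_cobalt_mmap] = mmap_native,			   /* 64-bit slot */
	[sc_cobalt_mmap + __COBALT_IA32_BASE] = mmap_ia32, /* ia32 thunk slot */
};

int main(void)
{
	printf("%ld %ld\n", table[sc_cobalt_mmap](),
	       table[sc_cobalt_mmap + __COBALT_IA32_BASE]()); /* 64 32 */
	return 0;
}

/*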
+ */
+#ifndef _COBALT_X86_ASM_UAPI_ARITH_H
+#define _COBALT_X86_ASM_UAPI_ARITH_H
+
+#include <asm/xenomai/uapi/features.h>
+
+#ifdef __i386__
+
+#define xnarch_u64tou32(ull, h, l) ({		\
+	unsigned long long _ull = (ull);	\
+	(l) = _ull & 0xffffffff;		\
+	(h) = _ull >> 32;			\
+})
+
+#define xnarch_u64fromu32(h, l) ({		\
+	unsigned long long _ull;		\
+	asm ( "": "=A"(_ull) : "d"(h), "a"(l));	\
+	_ull;					\
+})
+
+/* const helper for xnarch_uldivrem, so that the compiler will eliminate
+   multiple calls with same arguments, at no additional cost. */
+static inline __attribute__((__const__)) unsigned long long
+__mach_x86_32_uldivrem(const unsigned long long ull, const unsigned long d)
+{
+	unsigned long long ret;
+	__asm__ ("divl %1" : "=A,A"(ret) : "r,?m"(d), "A,A"(ull));
+	/* Exception if quotient does not fit on unsigned long. */
+	return ret;
+}
+
+/* Fast long long division: when the quotient and remainder fit on 32 bits. */
+static inline unsigned long mach_x86_32_uldivrem(unsigned long long ull,
+						 const unsigned d,
+						 unsigned long *const rp)
+{
+	unsigned long q, r;
+	ull = __mach_x86_32_uldivrem(ull, d);
+	__asm__ ( "": "=d"(r), "=a"(q) : "A"(ull));
+	if (rp)
+		*rp = r;
+	return q;
+}
+#define xnarch_uldivrem(ull, d, rp) mach_x86_32_uldivrem((ull),(d),(rp))
+
+/* Division of an unsigned 96 bits ((h << 32) + l) by an unsigned 32 bits.
+   Building block for ulldiv. */
+static inline unsigned long long mach_x86_32_div96by32(const unsigned long long h,
+						       const unsigned long l,
+						       const unsigned long d,
+						       unsigned long *const rp)
+{
+	unsigned long rh;
+	const unsigned long qh = xnarch_uldivrem(h, d, &rh);
+	const unsigned long long t = xnarch_u64fromu32(rh, l);
+	const unsigned long ql = xnarch_uldivrem(t, d, rp);
+
+	return xnarch_u64fromu32(qh, ql);
+}
+
+/* Slow long long division. Uses xnarch_uldivrem, hence has the same property:
+   the compiler removes redundant calls.
*/ +static inline unsigned long long +mach_x86_32_ulldiv(const unsigned long long ull, + const unsigned d, + unsigned long *const rp) +{ + unsigned long h, l; + xnarch_u64tou32(ull, h, l); + return mach_x86_32_div96by32(h, l, d, rp); +} +#define xnarch_ulldiv(ull,d,rp) mach_x86_32_ulldiv((ull),(d),(rp)) + +/* Fast scaled-math-based replacement for long long multiply-divide */ +#define xnarch_llmulshft(ll, m, s) \ +({ \ + long long __ret; \ + unsigned __lo, __hi; \ + \ + __asm__ ( \ + /* HI = HIWORD(ll) * m */ \ + "mov %%eax,%%ecx\n\t" \ + "mov %%edx,%%eax\n\t" \ + "imull %[__m]\n\t" \ + "mov %%eax,%[__lo]\n\t" \ + "mov %%edx,%[__hi]\n\t" \ + \ + /* LO = LOWORD(ll) * m */ \ + "mov %%ecx,%%eax\n\t" \ + "mull %[__m]\n\t" \ + \ + /* ret = (HI << 32) + LO */ \ + "add %[__lo],%%edx\n\t" \ + "adc $0,%[__hi]\n\t" \ + \ + /* ret = ret >> s */ \ + "mov %[__s],%%ecx\n\t" \ + "shrd %%cl,%%edx,%%eax\n\t" \ + "shrd %%cl,%[__hi],%%edx\n\t" \ + : "=A" (__ret), [__lo] "=&r" (__lo), [__hi] "=&r" (__hi) \ + : "A" (ll), [__m] "m" (m), [__s] "m" (s) \ + : "ecx"); \ + __ret; \ +}) + +static inline __attribute__((const)) unsigned long long +mach_x86_32_nodiv_ullimd(const unsigned long long op, + const unsigned long long frac, + unsigned rhs_integ) +{ + register unsigned rl __asm__("ecx"); + register unsigned rm __asm__("esi"); + register unsigned rh __asm__("edi"); + unsigned fracl, frach, opl, oph; + volatile unsigned integ = rhs_integ; + register unsigned long long t; + + xnarch_u64tou32(op, oph, opl); + xnarch_u64tou32(frac, frach, fracl); + + __asm__ ("mov %[oph], %%eax\n\t" + "mull %[frach]\n\t" + "mov %%eax, %[rm]\n\t" + "mov %%edx, %[rh]\n\t" + "mov %[opl], %%eax\n\t" + "mull %[fracl]\n\t" + "mov %%edx, %[rl]\n\t" + "shl $1, %%eax\n\t" + "adc $0, %[rl]\n\t" + "adc $0, %[rm]\n\t" + "adc $0, %[rh]\n\t" + "mov %[oph], %%eax\n\t" + "mull %[fracl]\n\t" + "add %%eax, %[rl]\n\t" + "adc %%edx, %[rm]\n\t" + "adc $0, %[rh]\n\t" + "mov %[opl], %%eax\n\t" + "mull %[frach]\n\t" + "add %%eax, %[rl]\n\t" + "adc %%edx, %[rm]\n\t" + "adc $0, %[rh]\n\t" + "mov %[opl], %%eax\n\t" + "mull %[integ]\n\t" + "add %[rm], %%eax\n\t" + "adc %%edx, %[rh]\n\t" + "mov %[oph], %%edx\n\t" + "imul %[integ], %%edx\n\t" + "add %[rh], %%edx\n\t" + : [rl]"=&c"(rl), [rm]"=&S"(rm), [rh]"=&D"(rh), "=&A"(t) + : [opl]"m"(opl), [oph]"m"(oph), + [fracl]"m"(fracl), [frach]"m"(frach), [integ]"m"(integ) + : "cc"); + + return t; +} + +#define xnarch_nodiv_ullimd(op, frac, integ) \ + mach_x86_32_nodiv_ullimd((op), (frac), (integ)) + +#else /* x86_64 */ + +static inline __attribute__((__const__)) long long +mach_x86_64_llimd (long long op, unsigned m, unsigned d) +{ + long long result; + + __asm__ ( + "imul %[m]\n\t" + "idiv %[d]\n\t" + : "=a" (result) + : "a" (op), [m] "r" ((unsigned long long)m), + [d] "r" ((unsigned long long)d) + : "rdx"); + + return result; +} +#define xnarch_llimd(ll,m,d) mach_x86_64_llimd((ll),(m),(d)) + +static inline __attribute__((__const__)) long long +mach_x86_64_llmulshft(long long op, unsigned m, unsigned s) +{ + long long result; + + __asm__ ( + "imulq %[m]\n\t" + "shrd %%cl,%%rdx,%%rax\n\t" + : "=a,a" (result) + : "a,a" (op), [m] "m,r" ((unsigned long long)m), + "c,c" (s) + : "rdx"); + + return result; +} +#define xnarch_llmulshft(op, m, s) mach_x86_64_llmulshft((op), (m), (s)) + +static inline __attribute__((__const__)) unsigned long long +mach_x86_64_nodiv_ullimd(unsigned long long op, + unsigned long long frac, unsigned rhs_integ) +{ + register unsigned long long rl __asm__("rax") = frac; + register unsigned long long 
rh __asm__("rdx"); + register unsigned long long integ __asm__("rsi") = rhs_integ; + register unsigned long long t __asm__("r8") = 0x80000000ULL; + + __asm__ ("mulq %[op]\n\t" + "addq %[t], %[rl]\n\t" + "adcq $0, %[rh]\n\t" + "imulq %[op], %[integ]\n\t" + "leaq (%[integ], %[rh], 1),%[rl]": + [rh]"=&d"(rh), [rl]"+&a"(rl), [integ]"+S"(integ): + [op]"D"(op), [t]"r"(t): "cc"); + + return rl; +} + +#define xnarch_nodiv_ullimd(op, frac, integ) \ + mach_x86_64_nodiv_ullimd((op), (frac), (integ)) + +#endif /* x86_64 */ + +#include <cobalt/uapi/asm-generic/arith.h> + +#endif /* _COBALT_X86_ASM_UAPI_ARITH_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/features.h new file mode 100644 index 0000000..65f8164 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/features.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2005-2013 Philippe Gerum <rpm@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_X86_ASM_UAPI_FEATURES_H +#define _COBALT_X86_ASM_UAPI_FEATURES_H + +/* The ABI revision level we use on this arch. */ +#define XENOMAI_ABI_REV 18UL + +#define XENOMAI_FEAT_DEP __xn_feat_generic_mask + +#define XENOMAI_FEAT_MAN __xn_feat_generic_man_mask + +#define XNARCH_HAVE_LLMULSHFT 1 +#define XNARCH_HAVE_NODIV_LLIMD 1 + +struct cobalt_featinfo_archdep { /* no arch-specific feature */ }; + +#include <cobalt/uapi/asm-generic/features.h> + +static inline const char *get_feature_label(unsigned int feature) +{ + return get_generic_feature_label(feature); +} + +#endif /* !_COBALT_X86_ASM_UAPI_FEATURES_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/fptest.h new file mode 100644 index 0000000..d406cc3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/fptest.h @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
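*/

Reference semantics for the two arithmetic building blocks above, in portable C (GCC/Clang __int128 on a 64-bit host): xnarch_uldivrem() is only defined when the quotient fits in 32 bits (divl raises a divide exception otherwise), and xnarch_llmulshft(ll, m, s) approximates ll * m / 2^s without any division. This is a sketch for illustration, not the kernel-side code:

#include <stdio.h>

/* Reference for xnarch_uldivrem(); quotient must fit in 32 bits. */
static unsigned long uldivrem_ref(unsigned long long ull, unsigned d,
				  unsigned long *rp)
{
	if (rp)
		*rp = (unsigned long)(ull % d);
	return (unsigned long)(ull / d);
}

/* Reference for xnarch_llmulshft(): (ll * m) >> s with a wide intermediate. */
static long long llmulshft_ref(long long ll, unsigned m, unsigned s)
{
	return (long long)(((__int128)ll * m) >> s);
}

int main(void)
{
	unsigned long r, q = uldivrem_ref(10000000007ULL, 1000, &r);
	/* Scale by ~2.4 (e.g. ns -> 2.4 GHz TSC ticks): m = 2.4 * 2^27. */
	unsigned m = (unsigned)(2.4 * (1 << 27)), s = 27;

	printf("q=%lu r=%lu\n", q, r);		     /* q=10000000 r=7 */
	printf("%lld\n", llmulshft_ref(1000, m, s)); /* ~2400 */
	return 0;
}

/*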
+ + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_X86_ASM_UAPI_FPTEST_H +#define _COBALT_X86_ASM_UAPI_FPTEST_H + +#define __COBALT_HAVE_SSE2 0x1 +#define __COBALT_HAVE_AVX 0x2 + +static inline void fp_regs_set(int features, unsigned int val) +{ + unsigned long long vec[4] = { val, 0, val, 0 }; + unsigned i; + + for (i = 0; i < 8; i++) + __asm__ __volatile__("fildl %0": /* no output */ :"m"(val)); + + if (features & __COBALT_HAVE_AVX) { + __asm__ __volatile__( + "vmovupd %0,%%ymm0;" + "vmovupd %0,%%ymm1;" + "vmovupd %0,%%ymm2;" + "vmovupd %0,%%ymm3;" + "vmovupd %0,%%ymm4;" + "vmovupd %0,%%ymm5;" + "vmovupd %0,%%ymm6;" + "vmovupd %0,%%ymm7;" + : : "m"(vec[0]), "m"(vec[1]), "m"(vec[2]), "m"(vec[3])); + } else if (features & __COBALT_HAVE_SSE2) { + __asm__ __volatile__( + "movupd %0,%%xmm0;" + "movupd %0,%%xmm1;" + "movupd %0,%%xmm2;" + "movupd %0,%%xmm3;" + "movupd %0,%%xmm4;" + "movupd %0,%%xmm5;" + "movupd %0,%%xmm6;" + "movupd %0,%%xmm7;" + : : "m"(vec[0]), "m"(vec[1]), "m"(vec[2]), "m"(vec[3])); + } +} + +static inline unsigned int fp_regs_check(int features, unsigned int val, + int (*report)(const char *fmt, ...)) +{ + unsigned long long vec[8][4]; + unsigned int i, result = val; + unsigned e[8]; + + for (i = 0; i < 8; i++) + __asm__ __volatile__("fistpl %0":"=m"(e[7 - i])); + + if (features & __COBALT_HAVE_AVX) { + __asm__ __volatile__( + "vmovupd %%ymm0,%0;" + "vmovupd %%ymm1,%1;" + "vmovupd %%ymm2,%2;" + "vmovupd %%ymm3,%3;" + "vmovupd %%ymm4,%4;" + "vmovupd %%ymm5,%5;" + "vmovupd %%ymm6,%6;" + "vmovupd %%ymm7,%7;" + : "=m" (vec[0][0]), "=m" (vec[1][0]), + "=m" (vec[2][0]), "=m" (vec[3][0]), + "=m" (vec[4][0]), "=m" (vec[5][0]), + "=m" (vec[6][0]), "=m" (vec[7][0])); + } else if (features & __COBALT_HAVE_SSE2) { + __asm__ __volatile__( + "movupd %%xmm0,%0;" + "movupd %%xmm1,%1;" + "movupd %%xmm2,%2;" + "movupd %%xmm3,%3;" + "movupd %%xmm4,%4;" + "movupd %%xmm5,%5;" + "movupd %%xmm6,%6;" + "movupd %%xmm7,%7;" + : "=m" (vec[0][0]), "=m" (vec[1][0]), + "=m" (vec[2][0]), "=m" (vec[3][0]), + "=m" (vec[4][0]), "=m" (vec[5][0]), + "=m" (vec[6][0]), "=m" (vec[7][0])); + } + + for (i = 0; i < 8; i++) + if (e[i] != val) { + report("r%d: %u != %u\n", i, e[i], val); + result = e[i]; + } + + if (features & __COBALT_HAVE_AVX) { + for (i = 0; i < 8; i++) { + int error = 0; + if (vec[i][0] != val) { + result = vec[i][0]; + error = 1; + } + if (vec[i][2] != val) { + result = vec[i][2]; + error = 1; + } + if (error) + report("ymm%d: %llu/%llu != %u/%u\n", + i, (unsigned long long)vec[i][0], + (unsigned long long)vec[i][2], + val, val); + } + } else if (features & __COBALT_HAVE_SSE2) { + for (i = 0; i < 8; i++) + if (vec[i][0] != val) { + report("xmm%d: %llu != %u\n", + i, (unsigned long long)vec[i][0], val); + result = vec[i][0]; + } + } + + return result; +} + +#endif /* _COBALT_X86_ASM_UAPI_FPTEST_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/syscall.h new file mode 100644 index 0000000..500d169 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/syscall.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>. 
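*/

fp_regs_set() and fp_regs_check() above form a corruption probe: preload the x87 (and SSE2/AVX) registers with a marker value, let a context switch happen, then verify the marker survived. A hedged user-space sketch of the pairing (the real consumers are Xenomai's switchtest-style programs; this assumes the two helpers above are in scope and that features reflects the running CPU):

#include <stdio.h>
#include <unistd.h>

/* Returns 0 if the FPU state survived a potential context switch. */
static int fpu_probe(int features, unsigned int marker)
{
	fp_regs_set(features, marker);
	usleep(1000);	/* window for a preempting context switch */
	return fp_regs_check(features, marker, printf) == marker ? 0 : -1;
}

/*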
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_X86_ASM_UAPI_SYSCALL_H +#define _COBALT_X86_ASM_UAPI_SYSCALL_H + +#define __xn_syscode(__nr) (__COBALT_SYSCALL_BIT | __nr) + +#endif /* !_COBALT_X86_ASM_UAPI_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h new file mode 100644 index 0000000..f873277 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_WRAPPERS_H +#define _COBALT_X86_ASM_WRAPPERS_H + +#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. 
*/
+
+#define __get_user_inatomic __get_user
+#define __put_user_inatomic __put_user
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(4,9,108) && \
+    LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)
+#define IPIPE_X86_FPU_EAGER
+#endif
+#if LINUX_VERSION_CODE > KERNEL_VERSION(4,4,137) && \
+    LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
+#define IPIPE_X86_FPU_EAGER
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+#define IPIPE_X86_FPU_EAGER
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0)
+#include <asm/fpu/internal.h>
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)
+
+static inline void kernel_fpu_disable(void)
+{
+	__thread_clear_has_fpu(current);
+}
+
+static inline void kernel_fpu_enable(void)
+{
+}
+
+static inline bool kernel_fpu_disabled(void)
+{
+	return __thread_has_fpu(current) == 0 && (read_cr0() & X86_CR0_TS) == 0;
+}
+#endif /* linux < 4.0.0 */
+
+#endif /* _COBALT_X86_ASM_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/Makefile
new file mode 100644
index 0000000..1ef407c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/Makefile
@@ -0,0 +1,5 @@
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+xenomai-y := machine.o thread.o smi.o c1e.o
+
+ccflags-y := -I$(srctree)/arch/x86/xenomai/include -I$(srctree)/include/xenomai
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/README b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/README
new file mode 100644
index 0000000..80f954a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/README
@@ -0,0 +1,3 @@
+Get the interrupt pipeline code for the target kernel from
+http://xenomai.org/downloads/ipipe/, or
+git://git.xenomai.org/ipipe.git
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/c1e.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/c1e.c
new file mode 100644
index 0000000..9bd4e92
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/c1e.c
@@ -0,0 +1,72 @@
+/*
+ * Disable Intel automatic promotion to C1E mode.
+ * Lifted from drivers/idle/intel_idle.c
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
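*/

The LINUX_VERSION_CODE fences in wrappers.h above hinge on KERNEL_VERSION(a,b,c) packing a release as (a<<16)+(b<<8)+c (recent kernels clamp c at 255; the simplified form is used here). A quick check of the 4.14 gate:

#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	/* 4.14.0 packs to 0x040e00; any later code selects IPIPE_X86_FPU_EAGER. */
	printf("0x%06x\n", KERNEL_VERSION(4, 14, 0));
	return 0;
}

/*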
+ */ +#include <linux/smp.h> +#include <asm/processor.h> +#include <asm/cpu_device_id.h> +#include <asm/msr.h> + +#define ICPU(model) \ + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, 1UL } + +static const struct x86_cpu_id c1e_ids[] = { + ICPU(0x1a), + ICPU(0x1e), + ICPU(0x1f), + ICPU(0x25), + ICPU(0x2c), + ICPU(0x2e), + ICPU(0x2f), + ICPU(0x2a), + ICPU(0x2d), + ICPU(0x3a), + ICPU(0x3e), + ICPU(0x3c), + ICPU(0x3f), + ICPU(0x45), + ICPU(0x46), + ICPU(0x4D), + {} +}; + +#undef ICPU + +static void c1e_promotion_disable(void *dummy) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + msr_bits &= ~0x2; + wrmsrl(MSR_IA32_POWER_CTL, msr_bits); +} + +void mach_x86_c1e_disable(void) +{ + const struct x86_cpu_id *id; + + id = x86_match_cpu(c1e_ids); + if (id) { + printk("[Xenomai] disabling automatic C1E state promotion on Intel processor\n"); + /* + * cpu uses C1E, disable this feature (copied from + * intel_idle driver) + */ + on_each_cpu(c1e_promotion_disable, NULL, 1); + } +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/calibration.h new file mode 100644 index 0000000..eaecc48 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/calibration.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_CALIBRATION_H +#define _COBALT_X86_ASM_CALIBRATION_H + +#include <asm/processor.h> + +static inline unsigned long __get_bogomips(void) +{ + return this_cpu_read(cpu_info.loops_per_jiffy)/(500000/HZ); +} + +static inline void xnarch_get_latencies(struct xnclock_gravity *p) +{ + unsigned long sched_latency; + +#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0 + sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT; +#else /* !CONFIG_XENO_OPT_TIMING_SCHEDLAT */ + + if (strcmp(ipipe_timer_name(), "lapic") == 0) { +#ifdef CONFIG_SMP + if (num_online_cpus() > 1) + sched_latency = 3350; + else + sched_latency = 2000; +#else /* !SMP */ + sched_latency = 1000; +#endif /* !SMP */ + } else if (strcmp(ipipe_timer_name(), "pit")) { /* HPET */ +#ifdef CONFIG_SMP + if (num_online_cpus() > 1) + sched_latency = 3350; + else + sched_latency = 1500; +#else /* !SMP */ + sched_latency = 1000; +#endif /* !SMP */ + } else { + sched_latency = (__get_bogomips() < 250 ? 17000 : + __get_bogomips() < 2500 ? 
4200 :
+				 3500);
+#ifdef CONFIG_SMP
+		sched_latency += 1000;
+#endif /* CONFIG_SMP */
+	}
+#endif /* !CONFIG_XENO_OPT_TIMING_SCHEDLAT */
+
+	p->user = xnclock_ns_to_ticks(&nkclock, sched_latency);
+	p->kernel = xnclock_ns_to_ticks(&nkclock,
+					CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#endif /* !_COBALT_X86_ASM_CALIBRATION_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/fptest.h
new file mode 100644
index 0000000..7a2b17d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/fptest.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_FPTEST_H
+#define _COBALT_X86_ASM_FPTEST_H
+
+#include <linux/errno.h>
+#include <asm/processor.h>
+#include <asm/xenomai/wrappers.h>
+#include <asm/xenomai/uapi/fptest.h>
+
+static inline int fp_kernel_supported(void)
+{
+	return 1;
+}
+
+static inline void fp_init(void)
+{
+	__asm__ __volatile__("fninit");
+}
+
+static inline int fp_linux_begin(void)
+{
+	kernel_fpu_begin();
+	/* kernel_fpu_begin() does not reinitialize the fpu context, but
+	   fp_regs_set() implicitly expects an initialized fpu context, so
+	   initialize it here. */
+	fp_init();
+	return 0;
+}
+
+static inline void fp_linux_end(void)
+{
+	kernel_fpu_end();
+}
+
+static inline int fp_detect(void)
+{
+	int features = 0;
+
+#ifndef cpu_has_xmm2
+#ifdef cpu_has_sse2
+#define cpu_has_xmm2 cpu_has_sse2
+#else
+#define cpu_has_xmm2 0
+#endif
+#endif
+	if (cpu_has_xmm2)
+		features |= __COBALT_HAVE_SSE2;
+
+#ifndef cpu_has_avx
+#define cpu_has_avx 0
+#endif
+	if (cpu_has_avx)
+		features |= __COBALT_HAVE_AVX;
+
+	return features;
+}
+
+#endif /* _COBALT_X86_ASM_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/machine.h
new file mode 100644
index 0000000..750eb1e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/machine.h
@@ -0,0 +1,35 @@
+/**
+ * Copyright (C) 2007-2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
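*/

fp_detect() above keys off kernel CPU feature flags; the same probe can be written in user space with compiler CPUID builtins. A minimal sketch (the feature bit values are copied from the UAPI header earlier in this patch):

#include <stdio.h>

#define __COBALT_HAVE_SSE2 0x1
#define __COBALT_HAVE_AVX  0x2

/* User-space analogue of fp_detect(), via GCC/Clang __builtin_cpu_supports(). */
static int fp_detect_user(void)
{
	int features = 0;

	if (__builtin_cpu_supports("sse2"))
		features |= __COBALT_HAVE_SSE2;
	if (__builtin_cpu_supports("avx"))
		features |= __COBALT_HAVE_AVX;

	return features;
}

int main(void)
{
	printf("features: %#x\n", fp_detect_user());
	return 0;
}

/*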
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_MACHINE_H +#define _COBALT_X86_ASM_MACHINE_H + +#include <linux/compiler.h> + +static inline __attribute_const__ unsigned long ffnz(unsigned long ul) +{ + __asm__("bsfq %1, %0":"=r" (ul) : "rm" (ul)); + return ul; +} + +#define XNARCH_HOST_TICK_IRQ __ipipe_hrtimer_irq + +/* Read this last to enable default settings. */ +#include <asm-generic/xenomai/machine.h> + +#endif /* !_COBALT_X86_ASM_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/syscall.h new file mode 100644 index 0000000..f889f5f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/syscall.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_X86_ASM_SYSCALL_H +#define _COBALT_X86_ASM_SYSCALL_H + +#include <linux/errno.h> +#include <asm/ptrace.h> +#include <asm-generic/xenomai/syscall.h> + +/* + * Cobalt and Linux syscall numbers can be fetched from ORIG_AX, + * masking out the __COBALT_SYSCALL_BIT marker. + */ +#define __xn_reg_sys(regs) ((regs)->orig_ax) +#define __xn_reg_rval(regs) ((regs)->ax) +#define __xn_reg_arg1(regs) ((regs)->di) +#define __xn_reg_arg2(regs) ((regs)->si) +#define __xn_reg_arg3(regs) ((regs)->dx) +#define __xn_reg_arg4(regs) ((regs)->r10) +#define __xn_reg_arg5(regs) ((regs)->r8) +#define __xn_reg_pc(regs) ((regs)->ip) +#define __xn_reg_sp(regs) ((regs)->sp) + +#define __xn_syscall_p(regs) (__xn_reg_sys(regs) & __COBALT_SYSCALL_BIT) +#ifdef CONFIG_XENO_ARCH_SYS3264 +#define __xn_syscall(regs) __COBALT_CALL32_SYSNR(__xn_reg_sys(regs) \ + & ~__COBALT_SYSCALL_BIT) +#else +#define __xn_syscall(regs) (__xn_reg_sys(regs) & ~__COBALT_SYSCALL_BIT) +#endif + +/* + * Root syscall number with predicate (valid only if + * !__xn_syscall_p(__regs)). 
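*/

The __xn_reg_argN() accessors above encode the x86-64 syscall ABI: arguments arrive in rdi, rsi, rdx, r10 and r8 (r10 stands in for rcx, which the syscall instruction clobbers). A sketch of a generic argument fetch over a stand-in register frame, for illustration only:

/* Stand-in pt_regs with only the fields used by __xn_reg_argN() above. */
struct regs_frame { unsigned long di, si, dx, r10, r8; };

/* Fetch syscall argument n (1..5), mirroring the macro family above. */
static unsigned long xn_arg(const struct regs_frame *r, int n)
{
	switch (n) {
	case 1: return r->di;
	case 2: return r->si;
	case 3: return r->dx;
	case 4: return r->r10;
	case 5: return r->r8;
	default: return 0;
	}
}

/*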
+ */ +#define __xn_rootcall_p(__regs, __code) \ + ({ \ + *(__code) = __xn_reg_sys(__regs); \ + *(__code) < ipipe_root_nr_syscalls(current_thread_info()); \ + }) + +static inline void __xn_error_return(struct pt_regs *regs, int v) +{ + __xn_reg_rval(regs) = v; +} + +static inline void __xn_status_return(struct pt_regs *regs, long v) +{ + __xn_reg_rval(regs) = v; +} + +static inline int __xn_interrupted_p(struct pt_regs *regs) +{ + return __xn_reg_rval(regs) == -EINTR; +} + +static inline +int xnarch_local_syscall(unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5) +{ + return -ENOSYS; +} + +#endif /* !_COBALT_X86_ASM_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/thread.h new file mode 100644 index 0000000..a1a79bb --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/thread.h @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2004-2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_X86_ASM_THREAD_H +#define _COBALT_X86_ASM_THREAD_H + +#include <asm-generic/xenomai/ipipe/thread.h> +#include <asm/xenomai/wrappers.h> +#include <asm/traps.h> + +#ifndef IPIPE_X86_FPU_EAGER +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0) +typedef union thread_xstate x86_fpustate; +#define x86_fpustate_ptr(t) ((t)->fpu.state) +#else +typedef union fpregs_state x86_fpustate; +#define x86_fpustate_ptr(t) ((t)->fpu.active_state) +#endif +#endif + +struct xnarchtcb { + struct xntcb core; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) + unsigned long sp; + unsigned long *spp; + unsigned long ip; + unsigned long *ipp; +#endif +#ifdef IPIPE_X86_FPU_EAGER + struct fpu *kfpu; +#else + x86_fpustate *fpup; + unsigned int root_used_math: 1; + x86_fpustate *kfpu_state; +#endif + unsigned int root_kfpu: 1; +}; + +#define xnarch_fpu_ptr(tcb) ((tcb)->fpup) + +#define xnarch_fault_regs(d) ((d)->regs) +#define xnarch_fault_trap(d) ((d)->exception) +#define xnarch_fault_code(d) ((d)->regs->orig_ax) +#define xnarch_fault_pc(d) ((d)->regs->ip) +#define xnarch_fault_fpu_p(d) ((d)->exception == X86_TRAP_NM) +#define xnarch_fault_pf_p(d) ((d)->exception == X86_TRAP_PF) +#define xnarch_fault_bp_p(d) ((current->ptrace & PT_PTRACED) && \ + ((d)->exception == X86_TRAP_DB || (d)->exception == X86_TRAP_BP)) +#define xnarch_fault_notify(d) (!xnarch_fault_bp_p(d)) + +static inline +struct task_struct *xnarch_host_task(struct xnarchtcb *tcb) +{ + return tcb->core.host_task; +} + +void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to); + +int xnarch_handle_fpu_fault(struct xnthread *from, + struct xnthread *to, struct ipipe_trap_data *d); + +void xnarch_leave_root(struct xnthread *root); + +void xnarch_init_root_tcb(struct xnthread *thread); + +void xnarch_init_shadow_tcb(struct xnthread *thread); + +void xnarch_switch_to(struct xnthread *out, struct xnthread *in); + +static inline void xnarch_enter_root(struct xnthread *root) { } + +static inline int xnarch_escalate(void) +{ + if (ipipe_root_p) { + ipipe_raise_irq(cobalt_pipeline.escalate_virq); + return 1; + } + + return 0; +} + +int mach_x86_thread_init(void); +void mach_x86_thread_cleanup(void); + +#endif /* !_COBALT_X86_ASM_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/machine.c new file mode 100644 index 0000000..d51a91f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/machine.c @@ -0,0 +1,78 @@ +/** + * Copyright (C) 2007-2012 Philippe Gerum. + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, Inc., 675 Mass Ave, + * Cambridge MA 02139, USA; either version 2 of the License, or (at + * your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
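*/

xnarch_escalate() above is the I-pipe idiom for re-entering the head domain: when called over the root domain it fires the escalation virq and reports that the work has been deferred to the virq handler. A hedged caller sketch (every name other than xnarch_escalate() is illustrative):

/* Typical pattern: run do_head_work() from the head domain, whatever
 * domain the caller happens to be on. */
static void run_from_head(void)
{
	if (xnarch_escalate())
		return;	/* re-run via the escalation virq handler */

	do_head_work();	/* already on the head domain */
}

/*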
+ */ +#include <asm/xenomai/machine.h> +#include <asm/xenomai/thread.h> +#include <asm/xenomai/syscall.h> +#include <asm/xenomai/smi.h> +#include <asm/xenomai/c1e.h> + +static int mach_x86_init(void) +{ + int ret; + + ret = mach_x86_thread_init(); + if (ret) + return ret; + + mach_x86_c1e_disable(); + mach_x86_smi_init(); + mach_x86_smi_disable(); + + return 0; +} + +static void mach_x86_cleanup(void) +{ + mach_x86_smi_restore(); + mach_x86_thread_cleanup(); +} + +static const char *const fault_labels[] = { + [0] = "Divide error", + [1] = "Debug", + [2] = "", /* NMI is not pipelined. */ + [3] = "Int3", + [4] = "Overflow", + [5] = "Bounds", + [6] = "Invalid opcode", + [7] = "FPU not available", + [8] = "Double fault", + [9] = "FPU segment overrun", + [10] = "Invalid TSS", + [11] = "Segment not present", + [12] = "Stack segment", + [13] = "General protection", + [14] = "Page fault", + [15] = "Spurious interrupt", + [16] = "FPU error", + [17] = "Alignment check", + [18] = "Machine check", + [19] = "SIMD error", + [20] = NULL, +}; + +struct cobalt_machine cobalt_machine = { + .name = "x86", + .init = mach_x86_init, + .late_init = NULL, + .cleanup = mach_x86_cleanup, + .prefault = NULL, + .fault_labels = fault_labels, +}; diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/smi.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/smi.c new file mode 100644 index 0000000..f28af9a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/smi.c @@ -0,0 +1,168 @@ +/** + * SMI workaround for x86. + * + * Cut/Pasted from Vitor Angelo "smi" module. + * Adapted by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/ctype.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/reboot.h> +#include <cobalt/kernel/assert.h> +#include <asm-generic/xenomai/pci_ids.h> +#include <asm/xenomai/machine.h> + +#define DEVFN 0xf8 /* device 31, function 0 */ + +#define PMBASE_B0 0x40 +#define PMBASE_B1 0x41 + +#define SMI_CTRL_ADDR 0x30 + +static int smi_state; +static char smi_state_arg[16] = "detect"; +module_param_string(smi, smi_state_arg, sizeof(smi_state_arg), 0444); + +static unsigned int smi_masked_bits = 1; /* Global disable bit */ +module_param_named(smi_mask, smi_masked_bits, int, 0400); + +static unsigned int smi_saved_bits; +static unsigned short smi_en_addr; + +#define mask_bits(v, p) outl(inl(p)&~(v),(p)) +#define set_bits(v, p) outl(inl(p)|(v), (p)) + +static int smi_reboot(struct notifier_block *nb, ulong event, void *buf); + +static struct notifier_block smi_notifier = { + .notifier_call = smi_reboot +}; + +static int smi_reboot(struct notifier_block *nb, ulong event, void *buf) +{ + if (((event == SYS_RESTART) || (event == SYS_HALT) || + (event == SYS_POWER_OFF)) && smi_en_addr) + set_bits(smi_saved_bits, smi_en_addr); + + return NOTIFY_DONE; +} + +void mach_x86_smi_disable(void) +{ + if (smi_en_addr == 0) + return; + + smi_saved_bits = inl(smi_en_addr) & smi_masked_bits; + mask_bits(smi_masked_bits, smi_en_addr); + + if (inl(smi_en_addr) & smi_masked_bits) + printk(XENO_WARNING "SMI workaround failed!\n"); + else + printk(XENO_INFO "SMI workaround enabled\n"); + + register_reboot_notifier(&smi_notifier); +} + +void mach_x86_smi_restore(void) +{ + if (smi_en_addr == 0) + return; + + printk(XENO_INFO "SMI configuration restored\n"); + + set_bits(smi_saved_bits, smi_en_addr); + + unregister_reboot_notifier(&smi_notifier); +} + +static unsigned short get_smi_en_addr(struct pci_dev *dev) +{ + u_int8_t byte0, byte1; + + pci_read_config_byte(dev, PMBASE_B0, &byte0); + pci_read_config_byte(dev, PMBASE_B1, &byte1); + return SMI_CTRL_ADDR + (((byte1 << 1) | (byte0 >> 7)) << 7); // bits 7-15 +} + + +static const char *smi_state_labels[] = { + "disabled", + "detect", + "enabled", +}; + +static void setup_smi_state(void) +{ + static char warn_bad_state[] = + XENO_WARNING "invalid SMI state '%s'\n"; + char *p; + int n; + + /* Backward compat with legacy state specifiers. */ + n = simple_strtol(smi_state_arg, &p, 10); + if (*p == '\0') { + smi_state = n; + return; + } + + for (n = 0; n < ARRAY_SIZE(smi_state_labels); n++) + if (strcmp(smi_state_labels[n], smi_state_arg) == 0) { + smi_state = n - 1; + return; + } + + printk(warn_bad_state, smi_state_arg); +} + +void mach_x86_smi_init(void) +{ + struct pci_dev *dev = NULL; + + setup_smi_state(); + + if (smi_state < 0) + return; + + /* + * Do not use pci_register_driver, pci_enable_device, ... + * Just register the used ports. + */ + dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); + if (dev == NULL || dev->bus->number || + dev->devfn != DEVFN || dev->vendor != PCI_VENDOR_ID_INTEL) { + pci_dev_put(dev); + return; + } + + if (smi_state == 0) { + printk(XENO_WARNING "SMI-enabled chipset found, but SMI workaround disabled\n" + " (see xenomai.smi parameter). 
You might encounter\n" + " high latencies!\n"); + pci_dev_put(dev); + return; + } + + printk(XENO_INFO "SMI-enabled chipset found\n"); + smi_en_addr = get_smi_en_addr(dev); + + pci_dev_put(dev); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/thread.c new file mode 100644 index 0000000..46c47af --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/thread.c @@ -0,0 +1,522 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2004-2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#include <linux/sched.h> +#include <linux/ipipe.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <cobalt/kernel/thread.h> +#include <asm/mmu_context.h> +#include <asm/processor.h> + +static struct kmem_cache *xstate_cache; + +#ifdef IPIPE_X86_FPU_EAGER +#define fpu_kernel_xstate_size sizeof(struct fpu) +#else +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) +#define fpu_kernel_xstate_size xstate_size +#endif +#endif /* IPIPE_X86_FPU_EAGER */ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) +#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) +#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) +#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) +#endif + +#ifndef IPIPE_X86_FPU_EAGER +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0) +#include <asm/i387.h> +#include <asm/fpu-internal.h> +#define x86_fpregs_active(t) __thread_has_fpu(t) +#define x86_fpregs_deactivate(t) __thread_clear_has_fpu(t) +#define x86_fpregs_activate(t) __thread_set_has_fpu(t) +#define x86_xstate_alignment __alignof__(union thread_xstate) +#else +#include <asm/fpu/internal.h> + +static inline int x86_fpregs_active(struct task_struct *t) +{ + return t->thread.fpu.fpregs_active; +} + +static inline void x86_fpregs_deactivate(struct task_struct *t) +{ + if (x86_fpregs_active(t)) + __fpregs_deactivate(&t->thread.fpu); +} + +static inline void x86_fpregs_activate(struct task_struct *t) +{ + if (!x86_fpregs_active(t)) + __fpregs_activate(&t->thread.fpu); +} + +#define x86_xstate_alignment __alignof__(union fpregs_state) + +#endif +#else /* IPIPE_X86_FPU_EAGER */ +#define x86_xstate_alignment __alignof__(union fpregs_state) +#endif /* ! IPIPE_X86_FPU_EAGER */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) +/* + * This is obsolete context switch code uselessly duplicating + * mainline's. 
+ */ +#define __SWITCH_CLOBBER_LIST , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" + +#ifdef CONFIG_CC_STACKPROTECTOR + +#define __CANARY_OUTPUT \ + , [gs_canary] "=m" (irq_stack_union.stack_canary) + +#define __CANARY_INPUT \ + , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) \ + , [current_task] "m" (current_task) + +#define __CANARY_SWITCH \ + "movq "__percpu_arg([current_task])",%%rsi\n\t" \ + "movq %P[task_canary](%%rsi),%%r8\n\t" \ + "movq %%r8,"__percpu_arg([gs_canary])"\n\t" + +#else /* !CONFIG_CC_STACKPROTECTOR */ + +#define __CANARY_OUTPUT +#define __CANARY_INPUT +#define __CANARY_SWITCH + +#endif /* !CONFIG_CC_STACKPROTECTOR */ + +#define do_switch_threads(prev, next, p_rsp, n_rsp, p_rip, n_rip) \ + ({ \ + long __rdi, __rsi, __rax, __rbx, __rcx, __rdx; \ + \ + __asm__ __volatile__("pushfq\n\t" \ + "pushq %%rbp\n\t" \ + "movq %%rsi, %%rbp\n\t" \ + "movq %%rsp, (%%rdx)\n\t" \ + "movq $1f, (%%rax)\n\t" \ + "movq (%%rcx), %%rsp\n\t" \ + "pushq (%%rbx)\n\t" \ + "jmp __switch_to\n\t" \ + "1:\n\t" \ + __CANARY_SWITCH \ + "movq %%rbp, %%rsi\n\t" \ + "popq %%rbp\n\t" \ + "popfq\n\t" \ + : "=S" (__rsi), "=D" (__rdi), "=a" (__rax), \ + "=b" (__rbx), "=c" (__rcx), "=d" (__rdx) \ + __CANARY_OUTPUT \ + : "0" (next), "1" (prev), "5" (p_rsp), "4" (n_rsp), \ + "2" (p_rip), "3" (n_rip) \ + __CANARY_INPUT \ + : "memory", "cc" __SWITCH_CLOBBER_LIST); \ + }) + +#else /* LINUX_VERSION_CODE >= 4.8 */ + +#include <asm/switch_to.h> + +#endif /* LINUX_VERSION_CODE >= 4.8 */ + +void xnarch_switch_to(struct xnthread *out, struct xnthread *in) +{ + struct xnarchtcb *out_tcb = &out->tcb, *in_tcb = &in->tcb; + struct task_struct *prev, *next, *last; + struct mm_struct *prev_mm, *next_mm; + + prev = out_tcb->core.host_task; +#ifndef IPIPE_X86_FPU_EAGER + if (x86_fpregs_active(prev)) + /* + * __switch_to will try and use __unlazy_fpu, so we + * need to clear the ts bit. + */ + clts(); +#endif /* ! IPIPE_X86_FPU_EAGER */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) + if (!xnthread_test_state(out, XNROOT | XNUSER) && + !test_thread_flag(TIF_NEED_FPU_LOAD)) { + /* + * This compensates that switch_fpu_prepare ignores kernel + * threads. + */ + struct fpu *prev_fpu = &prev->thread.fpu; + + if (!copy_fpregs_to_fpstate(prev_fpu)) + prev_fpu->last_cpu = -1; + else + prev_fpu->last_cpu = raw_smp_processor_id(); + } +#endif + + next = in_tcb->core.host_task; +#ifndef IPIPE_X86_FPU_EAGER +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0) + next->thread.fpu.counter = 0; +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0) + next->thread.fpu_counter = 0; +#else + next->fpu_counter = 0; +#endif +#endif /* ! IPIPE_X86_FPU_EAGER */ + prev_mm = out_tcb->core.active_mm; + next_mm = in_tcb->core.mm; + if (next_mm == NULL) { + in_tcb->core.active_mm = prev_mm; + enter_lazy_tlb(prev_mm, next); + } else { + ipipe_switch_mm_head(prev_mm, next_mm, next); + /* + * We might be switching back to the root thread, + * which we preempted earlier, shortly after "current" + * dropped its mm context in the do_exit() path + * (next->mm == NULL). In that particular case, the + * kernel expects a lazy TLB state for leaving the mm. + */ + if (next->mm == NULL) + enter_lazy_tlb(prev_mm, next); + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) + do_switch_threads(prev, next, + out_tcb->spp, in_tcb->spp, + out_tcb->ipp, in_tcb->ipp); + (void)last; +#else /* LINUX_VERSION_CODE >= 4.8 */ + switch_to(prev, next, last); +#endif /* LINUX_VERSION_CODE >= 4.8 */ + +#ifndef IPIPE_X86_FPU_EAGER + stts(); +#endif /* ! 
IPIPE_X86_FPU_EAGER */ +} + +#ifndef IPIPE_X86_FPU_EAGER + +#define XSAVE_PREFIX "0x48," +#define XSAVE_SUFFIX "q" + +static inline void __do_save_fpu_state(x86_fpustate *fpup) +{ +#ifdef cpu_has_xsave + if (cpu_has_xsave) { +#ifdef CONFIG_AS_AVX + __asm__ __volatile__("xsave" XSAVE_SUFFIX " %0" + : "=m" (fpup->xsave) : "a" (-1), "d" (-1) + : "memory"); +#else /* !CONFIG_AS_AVX */ + __asm __volatile__(".byte " XSAVE_PREFIX "0x0f,0xae,0x27" + : : "D" (&fpup->xsave), "m" (fpup->xsave), + "a" (-1), "d" (-1) + : "memory"); +#endif /* !CONFIG_AS_AVX */ + return; + } +#endif /* cpu_has_xsave */ +#ifdef CONFIG_AS_FXSAVEQ + __asm __volatile__("fxsaveq %0" : "=m" (fpup->fxsave)); +#else /* !CONFIG_AS_FXSAVEQ */ + __asm__ __volatile__("rex64/fxsave (%[fx])" + : "=m" (fpup->fxsave) + : [fx] "R" (&fpup->fxsave)); +#endif /* !CONFIG_AS_FXSAVEQ */ +} + +static inline void __do_restore_fpu_state(x86_fpustate *fpup) +{ +#ifdef cpu_has_xsave + if (cpu_has_xsave) { +#ifdef CONFIG_AS_AVX + __asm__ __volatile__("xrstor" XSAVE_SUFFIX " %0" + : : "m" (fpup->xsave), "a" (-1), "d" (-1) + : "memory"); +#else /* !CONFIG_AS_AVX */ + __asm__ __volatile__(".byte " XSAVE_PREFIX "0x0f,0xae,0x2f" + : : "D" (&fpup->xsave), "m" (fpup->xsave), + "a" (-1), "d" (-1) + : "memory"); +#endif /* !CONFIG_AS_AVX */ + return; + } +#endif /* cpu_has_xsave */ +#ifdef CONFIG_AS_FXSAVEQ + __asm__ __volatile__("fxrstorq %0" : : "m" (fpup->fxsave)); +#else /* !CONFIG_AS_FXSAVEQ */ + __asm__ __volatile__("rex64/fxrstor (%0)" + : : "R" (&fpup->fxsave), "m" (fpup->fxsave)); +#endif /* !CONFIG_AS_FXSAVEQ */ +} + +int xnarch_handle_fpu_fault(struct xnthread *from, + struct xnthread *to, struct ipipe_trap_data *d) +{ + struct xnarchtcb *tcb = xnthread_archtcb(to); + struct task_struct *p = tcb->core.host_task; + + if (x86_fpregs_active(p)) + return 0; + + if (!(p->flags & PF_USED_MATH)) { + /* + * The faulting task is a shadow using the FPU for the first + * time, initialize the FPU context and tell linux about it. + */ + __asm__ __volatile__("clts; fninit"); + + if (cpu_has_xmm) { + unsigned long __mxcsr = 0x1f80UL & 0xffbfUL; + __asm__ __volatile__("ldmxcsr %0"::"m"(__mxcsr)); + } + p->flags |= PF_USED_MATH; + } else { + /* + * The faulting task already used FPU in secondary + * mode. + */ + clts(); + __do_restore_fpu_state(tcb->fpup); + } + + x86_fpregs_activate(p); + + xnlock_get(&nklock); + xnthread_set_state(to, XNFPU); + xnlock_put(&nklock); + + return 1; +} +#else /* IPIPE_X86_FPU_EAGER */ + +int xnarch_handle_fpu_fault(struct xnthread *from, + struct xnthread *to, struct ipipe_trap_data *d) +{ + /* in eager mode there are no such faults */ + BUG_ON(1); +} +#endif /* ! IPIPE_X86_FPU_EAGER */ + +#define current_task_used_kfpu() kernel_fpu_disabled() + +#define tcb_used_kfpu(t) ((t)->root_kfpu) + +#ifndef IPIPE_X86_FPU_EAGER +void xnarch_leave_root(struct xnthread *root) +{ + struct xnarchtcb *const rootcb = xnthread_archtcb(root); + struct task_struct *const p = current; + x86_fpustate *const current_task_fpup = x86_fpustate_ptr(&p->thread); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) + rootcb->spp = &p->thread.sp; + rootcb->ipp = &p->thread.rip; +#endif + if (!current_task_used_kfpu()) { + rootcb->root_kfpu = 0; + rootcb->fpup = x86_fpregs_active(p) ? current_task_fpup : NULL; + return; + } + + /* + * We need to save the kernel FPU context before preempting, + * store it in our root control block. 
+	 */
+	rootcb->root_kfpu = 1;
+	rootcb->fpup = current_task_fpup;
+	rootcb->root_used_math = !!(p->flags & PF_USED_MATH);
+	x86_fpustate_ptr(&p->thread) = rootcb->kfpu_state;
+	x86_fpregs_activate(p);
+	p->flags |= PF_USED_MATH;
+	kernel_fpu_enable();
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	x86_fpustate *const prev_fpup = from ? from->tcb.fpup : NULL;
+	struct xnarchtcb *const tcb = xnthread_archtcb(to);
+	struct task_struct *const p = tcb->core.host_task;
+	x86_fpustate *const next_task_fpup = x86_fpustate_ptr(&p->thread);
+
+	/* Restore lazy mode only if root fpu owner is not current. */
+	if (xnthread_test_state(to, XNROOT) &&
+	    prev_fpup != next_task_fpup &&
+	    !tcb_used_kfpu(tcb))
+		return;
+
+	clts();
+	/*
+	 * The only case where we can skip restoring the FPU is:
+	 * - the fpu context of the next task is the current fpu
+	 *   context;
+	 * - root thread has not used fpu in kernel-space;
+	 * - cpu has fxsr (because if it does not, last context switch
+	 *   reinitialized fpu)
+	 */
+	if (prev_fpup != next_task_fpup || !cpu_has_fxsr)
+		__do_restore_fpu_state(next_task_fpup);
+
+	if (!tcb_used_kfpu(tcb)) {
+		x86_fpregs_activate(p);
+		return;
+	}
+	kernel_fpu_disable();
+
+	x86_fpustate_ptr(&p->thread) = to->tcb.fpup;
+	if (!tcb->root_used_math) {
+		x86_fpregs_deactivate(p);
+		p->flags &= ~PF_USED_MATH;
+	}
+}
+#else /* IPIPE_X86_FPU_EAGER */
+void xnarch_leave_root(struct xnthread *root)
+{
+	struct xnarchtcb *const rootcb = xnthread_archtcb(root);
+
+	rootcb->root_kfpu = current_task_used_kfpu();
+
+	if (!tcb_used_kfpu(rootcb))
+		return;
+
+	/* save fpregs from in-kernel use */
+	copy_fpregs_to_fpstate(rootcb->kfpu);
+	kernel_fpu_enable();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+	/* restore current's fpregs */
+	__cpu_invalidate_fpregs_state();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,182)
+	switch_fpu_finish(current);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+	switch_fpu_finish(&current->thread.fpu);
+#else
+	switch_fpu_finish(&current->thread.fpu, raw_smp_processor_id());
+#endif
+#else
+	/* mark current thread as not owning the FPU anymore */
+	if (fpregs_active())
+		fpregs_deactivate(&current->thread.fpu);
+#endif
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	struct xnarchtcb *const to_tcb = xnthread_archtcb(to);
+
+	if (tcb_used_kfpu(to_tcb)) {
+		copy_kernel_to_fpregs(&to_tcb->kfpu->state);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+		/* redo the invalidation done by kernel_fpu_begin */
+		__cpu_invalidate_fpregs_state();
+#endif
+		kernel_fpu_disable();
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+	else if (!xnthread_test_state(to, XNROOT) &&
+		 test_thread_flag(TIF_NEED_FPU_LOAD)) {
+		/*
+		 * This is open-coded switch_fpu_return but without a
+		 * test for PF_KTHREAD, i.e. including kernel threads.
+		 */
+		struct fpu *fpu = &current->thread.fpu;
+		int cpu = raw_smp_processor_id();
+
+		if (!fpregs_state_valid(fpu, cpu)) {
+			copy_kernel_to_fpregs(&fpu->state);
+			fpregs_activate(fpu);
+			fpu->last_cpu = cpu;
+		}
+		clear_thread_flag(TIF_NEED_FPU_LOAD);
+	}
+#endif
+}
+#endif /* !
IPIPE_X86_FPU_EAGER */ + +void xnarch_init_root_tcb(struct xnthread *thread) +{ + struct xnarchtcb *tcb = xnthread_archtcb(thread); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) + tcb->sp = 0; + tcb->spp = &tcb->sp; + tcb->ipp = &tcb->ip; +#endif +#ifndef IPIPE_X86_FPU_EAGER + tcb->fpup = NULL; + tcb->kfpu_state = kmem_cache_zalloc(xstate_cache, GFP_KERNEL); +#else /* IPIPE_X86_FPU_EAGER */ + tcb->kfpu = kmem_cache_zalloc(xstate_cache, GFP_KERNEL); +#endif /* ! IPIPE_X86_FPU_EAGER */ + tcb->root_kfpu = 0; +} + +void xnarch_init_shadow_tcb(struct xnthread *thread) +{ + struct xnarchtcb *tcb = xnthread_archtcb(thread); + struct task_struct *p = tcb->core.host_task; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) + tcb->sp = 0; + tcb->spp = &p->thread.sp; + tcb->ipp = &p->thread.rip; /* <!> raw naming intended. */ +#endif +#ifndef IPIPE_X86_FPU_EAGER + tcb->fpup = x86_fpustate_ptr(&p->thread); + tcb->kfpu_state = NULL; +#else /* IPIPE_X86_FPU_EAGER */ + tcb->kfpu = NULL; +#endif /* ! IPIPE_X86_FPU_EAGER */ + tcb->root_kfpu = 0; + +#ifndef IPIPE_X86_FPU_EAGER + /* XNFPU is set upon first FPU fault */ + xnthread_clear_state(thread, XNFPU); +#else /* IPIPE_X86_FPU_EAGER */ + /* XNFPU is always set */ + xnthread_set_state(thread, XNFPU); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + fpu__activate_fpstate_read(&p->thread.fpu); +#else + fpu__initialize(&p->thread.fpu); +#endif +#endif /* ! IPIPE_X86_FPU_EAGER */ +} + +int mach_x86_thread_init(void) +{ + xstate_cache = kmem_cache_create("cobalt_x86_xstate", + fpu_kernel_xstate_size, + x86_xstate_alignment, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + SLAB_NOTRACK, +#else + 0, +#endif + NULL); + if (xstate_cache == NULL) + return -ENOMEM; + + return 0; +} + +void mach_x86_thread_cleanup(void) +{ + kmem_cache_destroy(xstate_cache); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arith.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arith.c new file mode 100644 index 0000000..5603c2d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arith.c @@ -0,0 +1,65 @@ +/* + * Copyright © 2005 Gilles Chanteperdrix. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/module.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_arith In-kernel arithmetics + * + * A collection of helpers performing arithmetics not implicitly + * available from kernel context via GCC helpers. Many of these + * routines enable 64bit arithmetics on 32bit systems. Xenomai + * architecture ports normally implement the performance critical ones + * in hand-crafted assembly code (see + * kernel/cobalt/arch/\<arch\>/include/asm/xenomai/uapi/arith.h). + * @{ + */ + +/** + * Architecture-independent div64 operation with remainder. + * + * @param a dividend + * + * @param b divisor + * + * @param rem if non-NULL, a pointer to a 64bit variable for + * collecting the remainder from the division. 
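+ *
+ * For illustration (the operand values below are arbitrary, not
+ * taken from the surrounding code): dividing 10000000007 by one
+ * billion this way yields a quotient of 10 and a remainder of 7.
+ *
+ * @code
+ * unsigned long long rem;
+ * unsigned long long q;
+ *
+ * q = xnarch_generic_full_divmod64(10000000007ULL, 1000000000ULL, &rem);
+ * @endcode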
+ */ +unsigned long long xnarch_generic_full_divmod64(unsigned long long a, + unsigned long long b, + unsigned long long *rem) +{ + unsigned long long q = 0, r = a; + int i; + + for (i = fls(a >> 32) - fls(b >> 32), b <<= i; i >= 0; i--, b >>= 1) { + q <<= 1; + if (b <= r) { + r -= b; + q++; + } + } + + if (rem) + *rem = r; + return q; +} +EXPORT_SYMBOL_GPL(xnarch_generic_full_divmod64); + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c b/kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c new file mode 100644 index 0000000..3b79505 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c @@ -0,0 +1,653 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/bufd.h> +#include <cobalt/kernel/assert.h> +#include <asm/xenomai/syscall.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_bufd Buffer descriptor + * + * Abstraction for copying data to/from different address spaces + * + * A buffer descriptor is a simple abstraction dealing with copy + * operations to/from memory buffers which may belong to different + * address spaces. + * + * To this end, the buffer descriptor library provides a small set of + * copy routines which are aware of address space restrictions when + * moving data, and a generic container type which can hold a + * reference to - or cover - a particular memory area, either present + * in kernel space, or in any of the existing user memory contexts. + * + * The goal of the buffer descriptor abstraction is to hide address + * space specifics from Xenomai services dealing with memory areas, + * allowing them to operate on multiple address spaces seamlessly. 
+ * + * The common usage patterns are as follows: + * + * - Implementing a Xenomai syscall returning a bulk of data to the + * caller, which may have to be copied back to either kernel or user + * space: + * + * @code + * [Syscall implementation] + * ssize_t rt_bulk_read_inner(struct xnbufd *bufd) + * { + * ssize_t ret; + * size_t len; + * void *bulk; + * + * bulk = get_next_readable_bulk(&len); + * ret = xnbufd_copy_from_kmem(bufd, bulk, min(bufd->b_len, len)); + * free_bulk(bulk); + * + * ret = this_may_fail(); + * if (ret) + * xnbufd_invalidate(bufd); + * + * return ret; + * } + * + * [Kernel wrapper for in-kernel calls] + * int rt_bulk_read(void *ptr, size_t len) + * { + * struct xnbufd bufd; + * ssize_t ret; + * + * xnbufd_map_kwrite(&bufd, ptr, len); + * ret = rt_bulk_read_inner(&bufd); + * xnbufd_unmap_kwrite(&bufd); + * + * return ret; + * } + * + * [Userland trampoline for user syscalls] + * int __rt_bulk_read(struct pt_regs *regs) + * { + * struct xnbufd bufd; + * void __user *ptr; + * ssize_t ret; + * size_t len; + * + * ptr = (void __user *)__xn_reg_arg1(regs); + * len = __xn_reg_arg2(regs); + * + * xnbufd_map_uwrite(&bufd, ptr, len); + * ret = rt_bulk_read_inner(&bufd); + * xnbufd_unmap_uwrite(&bufd); + * + * return ret; + * } + * @endcode + * + * - Implementing a Xenomai syscall receiving a bulk of data from the + * caller, which may have to be read from either kernel or user + * space: + * + * @code + * [Syscall implementation] + * ssize_t rt_bulk_write_inner(struct xnbufd *bufd) + * { + * void *bulk = get_free_bulk(bufd->b_len); + * return xnbufd_copy_to_kmem(bulk, bufd, bufd->b_len); + * } + * + * [Kernel wrapper for in-kernel calls] + * int rt_bulk_write(const void *ptr, size_t len) + * { + * struct xnbufd bufd; + * ssize_t ret; + * + * xnbufd_map_kread(&bufd, ptr, len); + * ret = rt_bulk_write_inner(&bufd); + * xnbufd_unmap_kread(&bufd); + * + * return ret; + * } + * + * [Userland trampoline for user syscalls] + * int __rt_bulk_write(struct pt_regs *regs) + * { + * struct xnbufd bufd; + * void __user *ptr; + * ssize_t ret; + * size_t len; + * + * ptr = (void __user *)__xn_reg_arg1(regs); + * len = __xn_reg_arg2(regs); + * + * xnbufd_map_uread(&bufd, ptr, len); + * ret = rt_bulk_write_inner(&bufd); + * xnbufd_unmap_uread(&bufd); + * + * return ret; + * } + * @endcode + * + *@{*/ + +/** + * @fn void xnbufd_map_kread(struct xnbufd *bufd, const void *ptr, size_t len) + * @brief Initialize a buffer descriptor for reading from kernel memory. + * + * The new buffer descriptor may be used to copy data from kernel + * memory. This routine should be used in pair with + * xnbufd_unmap_kread(). + * + * @param bufd The address of the buffer descriptor which will map a + * @a len bytes kernel memory area, starting from @a ptr. + * + * @param ptr The start of the kernel buffer to map. + * + * @param len The length of the kernel buffer starting at @a ptr. + * + * @coretags{unrestricted} + */ + +/** + * @fn void xnbufd_map_kwrite(struct xnbufd *bufd, void *ptr, size_t len) + * @brief Initialize a buffer descriptor for writing to kernel memory. + * + * The new buffer descriptor may be used to copy data to kernel + * memory. This routine should be used in pair with + * xnbufd_unmap_kwrite(). + * + * @param bufd The address of the buffer descriptor which will map a + * @a len bytes kernel memory area, starting from @a ptr. + * + * @param ptr The start of the kernel buffer to map. + * + * @param len The length of the kernel buffer starting at @a ptr. 
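+ *
+ * A minimal pairing sketch, where fill_result() stands for a
+ * hypothetical producer issuing xnbufd_copy_from_kmem() calls (it is
+ * not part of this code):
+ *
+ * @code
+ * struct xnbufd bufd;
+ * char result[16];
+ * ssize_t written;
+ *
+ * xnbufd_map_kwrite(&bufd, result, sizeof(result));
+ * fill_result(&bufd);
+ * written = xnbufd_unmap_kwrite(&bufd);
+ * @endcode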
+ * + * @coretags{unrestricted} + */ +void xnbufd_map_kmem(struct xnbufd *bufd, void *ptr, size_t len) +{ + bufd->b_ptr = ptr; + bufd->b_len = len; + bufd->b_mm = NULL; + bufd->b_off = 0; + bufd->b_carry = NULL; +} +EXPORT_SYMBOL_GPL(xnbufd_map_kmem); + +/** + * @fn void xnbufd_map_uread(struct xnbufd *bufd, const void __user *ptr, size_t len) + * @brief Initialize a buffer descriptor for reading from user memory. + * + * The new buffer descriptor may be used to copy data from user + * memory. This routine should be used in pair with + * xnbufd_unmap_uread(). + * + * @param bufd The address of the buffer descriptor which will map a + * @a len bytes user memory area, starting from @a ptr. @a ptr is + * never dereferenced directly, since it may refer to a buffer that + * lives in another address space. + * + * @param ptr The start of the user buffer to map. + * + * @param len The length of the user buffer starting at @a ptr. + * + * @coretags{task-unrestricted} + */ + +/** + * @fn void xnbufd_map_uwrite(struct xnbufd *bufd, void __user *ptr, size_t len) + * @brief Initialize a buffer descriptor for writing to user memory. + * + * The new buffer descriptor may be used to copy data to user + * memory. This routine should be used in pair with + * xnbufd_unmap_uwrite(). + * + * @param bufd The address of the buffer descriptor which will map a + * @a len bytes user memory area, starting from @a ptr. @a ptr is + * never dereferenced directly, since it may refer to a buffer that + * lives in another address space. + * + * @param ptr The start of the user buffer to map. + * + * @param len The length of the user buffer starting at @a ptr. + * + * @coretags{task-unrestricted} + */ + +void xnbufd_map_umem(struct xnbufd *bufd, void __user *ptr, size_t len) +{ + bufd->b_ptr = ptr; + bufd->b_len = len; + bufd->b_mm = current->mm; + bufd->b_off = 0; + bufd->b_carry = NULL; +} +EXPORT_SYMBOL_GPL(xnbufd_map_umem); + +/** + * @fn ssize_t xnbufd_copy_to_kmem(void *to, struct xnbufd *bufd, size_t len) + * @brief Copy memory covered by a buffer descriptor to kernel memory. + * + * This routine copies @a len bytes from the area referred to by the + * buffer descriptor @a bufd to the kernel memory area @a to. + * xnbufd_copy_to_kmem() tracks the read offset within the source + * memory internally, so that it may be called several times in a + * loop, until the entire memory area is loaded. + * + * The source address space is dealt with, according to the following + * rules: + * + * - if @a bufd refers to readable kernel area (i.e. see + * xnbufd_map_kread()), the copy is immediately and fully performed + * with no restriction. + * + * - if @a bufd refers to a readable user area (i.e. see + * xnbufd_map_uread()), the copy is performed only if that area + * lives in the currently active address space, and only if the + * caller may sleep Linux-wise to process any potential page fault + * which may arise while reading from that memory. + * + * - any attempt to read from @a bufd from a non-suitable context is + * considered as a bug, and will raise a panic assertion when the + * nucleus is compiled in debug mode. + * + * @param to The start address of the kernel memory to copy to. + * + * @param bufd The address of the buffer descriptor covering the user + * memory to copy data from. + * + * @param len The length of the user memory to copy from @a bufd. + * + * @return The number of bytes read so far from the memory area + * covered by @a ubufd. 
Otherwise:
+ *
+ * - -EINVAL is returned upon attempt to read from the user area from
+ *   an invalid context. This error is only returned when the debug
+ *   mode is disabled; otherwise a panic assertion is raised.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ *
+ * This routine may switch the caller to secondary mode if a page
+ * fault occurs while reading from the user area. For that reason,
+ * xnbufd_copy_to_kmem() may only be called from a preemptible section
+ * (Linux-wise).
+ */
+ssize_t xnbufd_copy_to_kmem(void *to, struct xnbufd *bufd, size_t len)
+{
+	caddr_t from;
+
+	thread_only();
+
+	if (len == 0)
+		goto out;
+
+	from = bufd->b_ptr + bufd->b_off;
+
+	/*
+	 * If the descriptor covers a source buffer living in the
+	 * kernel address space, we may read from it directly.
+	 */
+	if (bufd->b_mm == NULL) {
+		memcpy(to, from, len);
+		goto advance_offset;
+	}
+
+	/*
+	 * We want to read data from user-space, check whether:
+	 * 1) the source buffer lies in the current address space,
+	 * 2) we may fault while reading from the buffer directly.
+	 *
+	 * If we can't reach the buffer, or the current context may
+	 * not fault while reading data from it, copy_from_user() is
+	 * not an option and we have a bug somewhere, since there is
+	 * no way we could fetch the data to kernel space immediately.
+	 *
+	 * Note that we don't check for non-preemptible Linux context
+	 * here, since the source buffer would live in kernel space in
+	 * such a case.
+	 */
+	if (current->mm == bufd->b_mm) {
+		preemptible_only();
+		if (cobalt_copy_from_user(to, (void __user *)from, len))
+			return -EFAULT;
+		goto advance_offset;
+	}
+
+	XENO_BUG(COBALT);
+
+	return -EINVAL;
+
+advance_offset:
+	bufd->b_off += len;
+out:
+	return (ssize_t)bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_copy_to_kmem);
+
+/**
+ * @fn ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, void *from, size_t len)
+ * @brief Copy kernel memory to the area covered by a buffer descriptor.
+ *
+ * This routine copies @a len bytes from the kernel memory starting at
+ * @a from to the area referred to by the buffer descriptor @a
+ * bufd. xnbufd_copy_from_kmem() tracks the write offset within the
+ * destination memory internally, so that it may be called several
+ * times in a loop, until the entire memory area is stored.
+ *
+ * The destination address space is dealt with according to the
+ * following rules:
+ *
+ * - if @a bufd refers to a writable kernel area (i.e. see
+ *   xnbufd_map_kwrite()), the copy is immediately and fully performed
+ *   with no restriction.
+ *
+ * - if @a bufd refers to a writable user area (i.e. see
+ *   xnbufd_map_uwrite()), the copy is performed only if that area
+ *   lives in the currently active address space, and only if the
+ *   caller may sleep Linux-wise to process any potential page fault
+ *   which may arise while writing to that memory.
+ *
+ * - if @a bufd refers to a user area which may not be immediately
+ *   written to from the current context, the copy is postponed until
+ *   xnbufd_unmap_uwrite() is invoked for @a ubufd, at which point the
+ *   copy will take place. In such a case, the source memory is
+ *   transferred to a carry over buffer allocated internally; this
+ *   operation may lead to requesting dynamic memory from the nucleus
+ *   heap if @a len is greater than 64 bytes.
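+ *
+ * Since the write offset is tracked internally, a multi-part store
+ * may simply chain calls; in this sketch, @e hdr and @e payload are
+ * hypothetical kernel buffers:
+ *
+ * @code
+ * xnbufd_copy_from_kmem(bufd, &hdr, sizeof(hdr));
+ * xnbufd_copy_from_kmem(bufd, payload, payload_len);
+ * @endcode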
+ *
+ * @param bufd The address of the buffer descriptor covering the user
+ * memory to copy data to.
+ *
+ * @param from The start address of the kernel memory to copy from.
+ *
+ * @param len The length of the kernel memory to copy to @a bufd.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a ubufd. Otherwise,
+ *
+ * - -ENOMEM is returned when no memory is available from the nucleus
+ *   heap to allocate the carry over buffer.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ *
+ * This routine may switch the caller to secondary mode if a page
+ * fault occurs while writing to the user area. For that reason,
+ * xnbufd_copy_from_kmem() may only be called from a preemptible
+ * section (Linux-wise).
+ */
+ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, void *from, size_t len)
+{
+	caddr_t to;
+
+	thread_only();
+
+	if (len == 0)
+		goto out;
+
+	to = bufd->b_ptr + bufd->b_off;
+
+	/*
+	 * If the descriptor covers a destination buffer living in the
+	 * kernel address space, we may copy to it directly.
+	 */
+	if (bufd->b_mm == NULL)
+		goto direct_copy;
+
+	/*
+	 * We want to pass data to user-space, check whether:
+	 * 1) the destination buffer lies in the current address space,
+	 * 2) we may fault while writing to the buffer directly.
+	 *
+	 * If we can't reach the buffer, or the current context may
+	 * not fault while copying data to it, copy_to_user() is not
+	 * an option and we have to convey the data from kernel memory
+	 * through the carry over buffer.
+	 *
+	 * Note that we don't check for non-preemptible Linux context
+	 * here: feeding a RT activity with data from a non-RT context
+	 * is wrong in the first place, so never mind.
+	 */
+	if (current->mm == bufd->b_mm) {
+		preemptible_only();
+		if (cobalt_copy_to_user((void __user *)to, from, len))
+			return -EFAULT;
+		goto advance_offset;
+	}
+
+	/*
+	 * We need a carry over buffer to convey the data to
+	 * user-space. xnbufd_unmap_uwrite() should be called on the
+	 * way back to user-space to update the destination buffer
+	 * from the carry over area.
+	 */
+	if (bufd->b_carry == NULL) {
+		/*
+		 * Try to use the fast carry over area available
+		 * directly from the descriptor for short messages, to
+		 * save a dynamic allocation request.
+		 */
+		if (bufd->b_len <= sizeof(bufd->b_buf))
+			bufd->b_carry = bufd->b_buf;
+		else {
+			bufd->b_carry = xnmalloc(bufd->b_len);
+			if (bufd->b_carry == NULL)
+				return -ENOMEM;
+		}
+		to = bufd->b_carry;
+	} else
+		to = bufd->b_carry + bufd->b_off;
+
+direct_copy:
+	memcpy(to, from, len);
+
+advance_offset:
+	bufd->b_off += len;
+out:
+	return (ssize_t)bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_copy_from_kmem);
+
+/**
+ * @fn void xnbufd_unmap_uread(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_uread().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_uread(), to read data from a user area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a ubufd.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
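+ *
+ * A typical epilogue merely collects the byte count; in this sketch,
+ * @e bufd is assumed to have been set up by xnbufd_map_uread() and
+ * handle_short_input() is a hypothetical helper:
+ *
+ * @code
+ * ssize_t done = xnbufd_unmap_uread(&bufd);
+ * if (done < (ssize_t)expected_len)
+ *         handle_short_input(done);
+ * @endcode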
+ */ +ssize_t xnbufd_unmap_uread(struct xnbufd *bufd) +{ + preemptible_only(); + +#ifdef CONFIG_XENO_OPT_DEBUG_COBALT + bufd->b_ptr = (caddr_t)-1; +#endif + return bufd->b_off; +} +EXPORT_SYMBOL_GPL(xnbufd_unmap_uread); + +/** + * @fn void xnbufd_unmap_uwrite(struct xnbufd *bufd) + * @brief Finalize a buffer descriptor obtained from xnbufd_map_uwrite(). + * + * This routine finalizes a buffer descriptor previously initialized + * by a call to xnbufd_map_uwrite(), to write data to a user area. + * + * The main action taken is to write the contents of the kernel memory + * area passed to xnbufd_copy_from_kmem() whenever the copy operation + * was postponed at that time; the carry over buffer is eventually + * released as needed. If xnbufd_copy_from_kmem() was allowed to copy + * to the destination user memory at once, then xnbufd_unmap_uwrite() + * leads to a no-op. + * + * @param bufd The address of the buffer descriptor to finalize. + * + * @return The number of bytes written so far to the memory area + * covered by @a ubufd. + * + * @coretags{task-unrestricted} + * + * @note Calling this routine while holding the nklock and/or running + * with interrupts disabled is invalid, and doing so will trigger a + * debug assertion. + */ +ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd) +{ + ssize_t ret = 0; + void __user *to; + void *from; + size_t len; + + preemptible_only(); + + len = bufd->b_off; + + if (bufd->b_carry == NULL) + /* Copy took place directly. Fine. */ + goto done; + + /* + * Something was written to the carry over area, copy the + * contents to user-space, then release the area if needed. + */ + to = (void __user *)bufd->b_ptr; + from = bufd->b_carry; + ret = cobalt_copy_to_user(to, from, len); + + if (bufd->b_len > sizeof(bufd->b_buf)) + xnfree(bufd->b_carry); +done: +#ifdef CONFIG_XENO_OPT_DEBUG_COBALT + bufd->b_ptr = (caddr_t)-1; +#endif + return ret ?: (ssize_t)len; +} +EXPORT_SYMBOL_GPL(xnbufd_unmap_uwrite); + +/** + * @fn void xnbufd_reset(struct xnbufd *bufd) + * @brief Reset a buffer descriptor. + * + * The buffer descriptor is reset, so that all data already copied is + * forgotten. Any carry over buffer allocated is kept, though. + * + * @param bufd The address of the buffer descriptor to reset. + * + * @coretags{unrestricted} + */ + +/** + * @fn void xnbufd_invalidate(struct xnbufd *bufd) + * @brief Invalidate a buffer descriptor. + * + * The buffer descriptor is invalidated, making it unusable for + * further copy operations. If an outstanding carry over buffer was + * allocated by a previous call to xnbufd_copy_from_kmem(), it is + * immediately freed so that no data transfer will happen when the + * descriptor is finalized. + * + * The only action that may subsequently be performed on an + * invalidated descriptor is calling the relevant unmapping routine + * for it. For that reason, xnbufd_invalidate() should be invoked on + * the error path when data may have been transferred to the carry + * over buffer. + * + * @param bufd The address of the buffer descriptor to invalidate. + * + * @coretags{unrestricted} + */ +void xnbufd_invalidate(struct xnbufd *bufd) +{ +#ifdef CONFIG_XENO_OPT_DEBUG_COBALT + bufd->b_ptr = (caddr_t)-1; +#endif + if (bufd->b_carry) { + if (bufd->b_len > sizeof(bufd->b_buf)) + xnfree(bufd->b_carry); + bufd->b_carry = NULL; + } + bufd->b_off = 0; +} +EXPORT_SYMBOL_GPL(xnbufd_invalidate); + +/** + * @fn void xnbufd_unmap_kread(struct xnbufd *bufd) + * @brief Finalize a buffer descriptor obtained from xnbufd_map_kread(). 
+ * + * This routine finalizes a buffer descriptor previously initialized + * by a call to xnbufd_map_kread(), to read data from a kernel area. + * + * @param bufd The address of the buffer descriptor to finalize. + * + * @return The number of bytes read so far from the memory area + * covered by @a ubufd. + * + * @coretags{task-unrestricted} + */ +ssize_t xnbufd_unmap_kread(struct xnbufd *bufd) +{ +#ifdef CONFIG_XENO_OPT_DEBUG_COBALT + bufd->b_ptr = (caddr_t)-1; +#endif + return bufd->b_off; +} +EXPORT_SYMBOL_GPL(xnbufd_unmap_kread); + +/** + * @fn void xnbufd_unmap_kwrite(struct xnbufd *bufd) + * @brief Finalize a buffer descriptor obtained from xnbufd_map_kwrite(). + * + * This routine finalizes a buffer descriptor previously initialized + * by a call to xnbufd_map_kwrite(), to write data to a kernel area. + * + * @param bufd The address of the buffer descriptor to finalize. + * + * @return The number of bytes written so far to the memory area + * covered by @a ubufd. + * + * @coretags{task-unrestricted} + */ +ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd) +{ +#ifdef CONFIG_XENO_OPT_DEBUG_COBALT + bufd->b_ptr = (caddr_t)-1; +#endif + return bufd->b_off; +} +EXPORT_SYMBOL_GPL(xnbufd_unmap_kwrite); + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/clock.c b/kernel/xenomai-v3.2.4/kernel/cobalt/clock.c new file mode 100644 index 0000000..2b9efad --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/clock.c @@ -0,0 +1,830 @@ +/* + * Copyright (C) 2006-2011 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#include <linux/percpu.h> +#include <linux/errno.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/timer.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/arith.h> +#include <cobalt/kernel/vdso.h> +#include <cobalt/uapi/time.h> +#include <asm/xenomai/calibration.h> +#include <trace/events/cobalt-core.h> +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_clock Clock services + * + * @{ + */ + +#ifdef XNARCH_HAVE_NODIV_LLIMD + +static struct xnarch_u32frac bln_frac; + +unsigned long long xnclock_divrem_billion(unsigned long long value, + unsigned long *rem) +{ + unsigned long long q; + unsigned r; + + q = xnarch_nodiv_ullimd(value, bln_frac.frac, bln_frac.integ); + r = value - q * 1000000000; + if (r >= 1000000000) { + ++q; + r -= 1000000000; + } + *rem = r; + return q; +} + +#else + +unsigned long long xnclock_divrem_billion(unsigned long long value, + unsigned long *rem) +{ + return xnarch_ulldiv(value, 1000000000, rem); + +} + +#endif /* !XNARCH_HAVE_NODIV_LLIMD */ + +EXPORT_SYMBOL_GPL(xnclock_divrem_billion); + +DEFINE_PRIVATE_XNLOCK(ratelimit_lock); + +int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func) +{ + spl_t s; + int ret; + + if (!rs->interval) + return 1; + + xnlock_get_irqsave(&ratelimit_lock, s); + + if (!rs->begin) + rs->begin = xnclock_read_realtime(&nkclock); + if (xnclock_read_realtime(&nkclock) >= rs->begin + rs->interval) { + if (rs->missed) + printk(KERN_WARNING "%s: %d callbacks suppressed\n", + func, rs->missed); + rs->begin = 0; + rs->printed = 0; + rs->missed = 0; + } + if (rs->burst && rs->burst > rs->printed) { + rs->printed++; + ret = 1; + } else { + rs->missed++; + ret = 0; + } + xnlock_put_irqrestore(&ratelimit_lock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(__xnclock_ratelimit); + +void xnclock_core_local_shot(struct xnsched *sched) +{ + struct xntimerdata *tmd; + struct xntimer *timer; + xnsticks_t delay; + xntimerh_t *h; + + /* + * Do not reprogram locally when inside the tick handler - + * will be done on exit anyway. Also exit if there is no + * pending timer. + */ + if (sched->status & XNINTCK) + return; + + /* + * Assume the core clock device always has percpu semantics in + * SMP. + */ + tmd = xnclock_this_timerdata(&nkclock); + h = xntimerq_head(&tmd->q); + if (h == NULL) { + sched->lflags |= XNIDLE; + return; + } + + /* + * Here we try to defer the host tick heading the timer queue, + * so that it does not preempt a real-time activity uselessly, + * in two cases: + * + * 1) a rescheduling is pending for the current CPU. We may + * assume that a real-time thread is about to resume, so we + * want to move the host tick out of the way until the host + * kernel resumes, unless there is no other outstanding + * timers. + * + * 2) the current thread is running in primary mode, in which + * case we may also defer the host tick until the host kernel + * resumes. + * + * The host tick deferral is cleared whenever Xenomai is about + * to yield control to the host kernel (see ___xnsched_run()), + * or a timer with an earlier timeout date is scheduled, + * whichever comes first. 
+ */ + sched->lflags &= ~(XNHDEFER|XNIDLE|XNTSTOP); + timer = container_of(h, struct xntimer, aplink); + if (unlikely(timer == &sched->htimer)) { + if (xnsched_resched_p(sched) || + !xnthread_test_state(sched->curr, XNROOT)) { + h = xntimerq_second(&tmd->q, h); + if (h) { + sched->lflags |= XNHDEFER; + timer = container_of(h, struct xntimer, aplink); + } + } + } + + delay = xntimerh_date(&timer->aplink) - xnclock_core_read_raw(); + if (delay < 0) + delay = 0; + else if (delay > ULONG_MAX) + delay = ULONG_MAX; + + xntrace_tick((unsigned)delay); + + pipeline_set_timer_shot(delay); +} + +#ifdef CONFIG_SMP +void xnclock_core_remote_shot(struct xnsched *sched) +{ + pipeline_send_timer_ipi(cpumask_of(xnsched_cpu(sched))); +} +#endif + +static void adjust_timer(struct xntimer *timer, xntimerq_t *q, + xnsticks_t delta) +{ + struct xnclock *clock = xntimer_clock(timer); + xnticks_t period, div; + xnsticks_t diff; + + xntimerh_date(&timer->aplink) -= delta; + + if (xntimer_periodic_p(timer) == 0) + goto enqueue; + + timer->start_date -= delta; + period = xntimer_interval(timer); + diff = xnclock_ticks_to_ns(clock, + xnclock_read_raw(clock) - xntimer_expiry(timer)); + + if ((xnsticks_t)(diff - period) >= 0) { + /* + * Timer should tick several times before now, instead + * of calling timer->handler several times, we change + * the timer date without changing its pexpect, so + * that timer will tick only once and the lost ticks + * will be counted as overruns. + */ + div = xnarch_div64(diff, period); + timer->periodic_ticks += div; + xntimer_update_date(timer); + } else if (delta < 0 + && (timer->status & XNTIMER_FIRED) + && (xnsticks_t) (diff + period) <= 0) { + /* + * Timer is periodic and NOT waiting for its first + * shot, so we make it tick sooner than its original + * date in order to avoid the case where by adjusting + * time to a sooner date, real-time periodic timers do + * not tick until the original date has passed. + */ + div = xnarch_div64(-diff, period); + timer->periodic_ticks -= div; + timer->pexpect_ticks -= div; + xntimer_update_date(timer); + } + +enqueue: + xntimer_enqueue(timer, q); +} + +void xnclock_apply_offset(struct xnclock *clock, xnsticks_t delta_ns) +{ + struct xntimer *timer, *tmp; + struct list_head adjq; + struct xnsched *sched; + xnsticks_t delta; + xntimerq_it_t it; + unsigned int cpu; + xntimerh_t *h; + xntimerq_t *q; + + atomic_only(); + + /* + * The (real-time) epoch just changed for the clock. Since + * timeout dates of timers are expressed as monotonic ticks + * internally, we need to apply the new offset to the + * monotonic clock to all outstanding timers based on the + * affected clock. 
+ */ + INIT_LIST_HEAD(&adjq); + delta = xnclock_ns_to_ticks(clock, delta_ns); + + for_each_online_cpu(cpu) { + sched = xnsched_struct(cpu); + q = &xnclock_percpu_timerdata(clock, cpu)->q; + + for (h = xntimerq_it_begin(q, &it); h; + h = xntimerq_it_next(q, &it, h)) { + timer = container_of(h, struct xntimer, aplink); + if (timer->status & XNTIMER_REALTIME) + list_add_tail(&timer->adjlink, &adjq); + } + + if (list_empty(&adjq)) + continue; + + list_for_each_entry_safe(timer, tmp, &adjq, adjlink) { + list_del(&timer->adjlink); + xntimer_dequeue(timer, q); + adjust_timer(timer, q, delta); + } + + if (sched != xnsched_current()) + xnclock_remote_shot(clock, sched); + else + xnclock_program_shot(clock, sched); + } +} +EXPORT_SYMBOL_GPL(xnclock_apply_offset); + +void xnclock_set_wallclock(xnticks_t epoch_ns) +{ + xnsticks_t old_offset_ns, offset_ns; + spl_t s; + + /* + * The epoch of CLOCK_REALTIME just changed. Since timeouts + * are expressed as monotonic ticks, we need to apply the + * wallclock-to-monotonic offset to all outstanding timers + * based on this clock. + */ + xnlock_get_irqsave(&nklock, s); + old_offset_ns = nkclock.wallclock_offset; + offset_ns = (xnsticks_t)(epoch_ns - xnclock_core_read_monotonic()); + nkclock.wallclock_offset = offset_ns; + nkvdso->wallclock_offset = offset_ns; + xnclock_apply_offset(&nkclock, offset_ns - old_offset_ns); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xnclock_set_wallclock); + +xnticks_t xnclock_core_read_monotonic(void) +{ + return xnclock_core_ticks_to_ns(xnclock_core_read_raw()); +} +EXPORT_SYMBOL_GPL(xnclock_core_read_monotonic); + +#ifdef CONFIG_XENO_OPT_STATS + +static struct xnvfile_directory timerlist_vfroot; + +static struct xnvfile_snapshot_ops timerlist_ops; + +struct vfile_clock_priv { + struct xntimer *curr; +}; + +struct vfile_clock_data { + int cpu; + unsigned int scheduled; + unsigned int fired; + xnticks_t timeout; + xnticks_t interval; + unsigned long status; + char name[XNOBJECT_NAME_LEN]; +}; + +static int timerlist_rewind(struct xnvfile_snapshot_iterator *it) +{ + struct vfile_clock_priv *priv = xnvfile_iterator_priv(it); + struct xnclock *clock = xnvfile_priv(it->vfile); + + if (list_empty(&clock->timerq)) + return -ESRCH; + + priv->curr = list_first_entry(&clock->timerq, struct xntimer, next_stat); + + return clock->nrtimers; +} + +static int timerlist_next(struct xnvfile_snapshot_iterator *it, void *data) +{ + struct vfile_clock_priv *priv = xnvfile_iterator_priv(it); + struct xnclock *clock = xnvfile_priv(it->vfile); + struct vfile_clock_data *p = data; + struct xntimer *timer; + + if (priv->curr == NULL) + return 0; + + timer = priv->curr; + if (list_is_last(&timer->next_stat, &clock->timerq)) + priv->curr = NULL; + else + priv->curr = list_entry(timer->next_stat.next, + struct xntimer, next_stat); + + if (clock == &nkclock && xnstat_counter_get(&timer->scheduled) == 0) + return VFILE_SEQ_SKIP; + + p->cpu = xnsched_cpu(xntimer_sched(timer)); + p->scheduled = xnstat_counter_get(&timer->scheduled); + p->fired = xnstat_counter_get(&timer->fired); + p->timeout = xntimer_get_timeout(timer); + p->interval = xntimer_interval(timer); + p->status = timer->status; + knamecpy(p->name, timer->name); + + return 1; +} + +static int timerlist_show(struct xnvfile_snapshot_iterator *it, void *data) +{ + struct vfile_clock_data *p = data; + char timeout_buf[] = "- "; + char interval_buf[] = "- "; + char hit_buf[32]; + + if (p == NULL) + xnvfile_printf(it, + "%-3s %-20s %-10s %-10s %s\n", + "CPU", "SCHED/SHOT", "TIMEOUT", + 
"INTERVAL", "NAME"); + else { + if (p->status & XNTIMER_RUNNING) + xntimer_format_time(p->timeout, timeout_buf, + sizeof(timeout_buf)); + if (p->status & XNTIMER_PERIODIC) + xntimer_format_time(p->interval, interval_buf, + sizeof(interval_buf)); + ksformat(hit_buf, sizeof(hit_buf), "%u/%u", + p->scheduled, p->fired); + xnvfile_printf(it, + "%-3u %-20s %-10s %-10s %s\n", + p->cpu, hit_buf, timeout_buf, + interval_buf, p->name); + } + + return 0; +} + +static struct xnvfile_snapshot_ops timerlist_ops = { + .rewind = timerlist_rewind, + .next = timerlist_next, + .show = timerlist_show, +}; + +static void init_timerlist_proc(struct xnclock *clock) +{ + memset(&clock->timer_vfile, 0, sizeof(clock->timer_vfile)); + clock->timer_vfile.privsz = sizeof(struct vfile_clock_priv); + clock->timer_vfile.datasz = sizeof(struct vfile_clock_data); + clock->timer_vfile.tag = &clock->timer_revtag; + clock->timer_vfile.ops = &timerlist_ops; + + xnvfile_init_snapshot(clock->name, &clock->timer_vfile, &timerlist_vfroot); + xnvfile_priv(&clock->timer_vfile) = clock; +} + +static void cleanup_timerlist_proc(struct xnclock *clock) +{ + xnvfile_destroy_snapshot(&clock->timer_vfile); +} + +void init_timerlist_root(void) +{ + xnvfile_init_dir("timer", &timerlist_vfroot, &cobalt_vfroot); +} + +void cleanup_timerlist_root(void) +{ + xnvfile_destroy_dir(&timerlist_vfroot); +} + +#else /* !CONFIG_XENO_OPT_STATS */ + +static inline void init_timerlist_root(void) { } + +static inline void cleanup_timerlist_root(void) { } + +static inline void init_timerlist_proc(struct xnclock *clock) { } + +static inline void cleanup_timerlist_proc(struct xnclock *clock) { } + +#endif /* !CONFIG_XENO_OPT_STATS */ + +#ifdef CONFIG_XENO_OPT_VFILE + +static struct xnvfile_directory clock_vfroot; + +void print_core_clock_status(struct xnclock *clock, + struct xnvfile_regular_iterator *it) +{ + const char *wd_status = "off"; + +#ifdef CONFIG_XENO_OPT_WATCHDOG + wd_status = "on"; +#endif /* CONFIG_XENO_OPT_WATCHDOG */ + + xnvfile_printf(it, "%8s: timer=%s, clock=%s\n", + "devices", pipeline_timer_name(), pipeline_clock_name()); + xnvfile_printf(it, "%8s: %s\n", "watchdog", wd_status); +} + +static int clock_show(struct xnvfile_regular_iterator *it, void *data) +{ + struct xnclock *clock = xnvfile_priv(it->vfile); + xnticks_t now = xnclock_read_raw(clock); + + if (clock->id >= 0) /* External clock, print id. 
*/ + xnvfile_printf(it, "%7s: %d\n", "id", __COBALT_CLOCK_EXT(clock->id)); + + xnvfile_printf(it, "%7s: irq=%Ld kernel=%Ld user=%Ld\n", "gravity", + xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, irq)), + xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, kernel)), + xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, user))); + + xnclock_print_status(clock, it); + + xnvfile_printf(it, "%7s: %Lu (%.4Lx %.4x)\n", "ticks", + now, now >> 32, (u32)(now & -1U)); + + return 0; +} + +static ssize_t clock_store(struct xnvfile_input *input) +{ + char buf[128], *args = buf, *p; + struct xnclock_gravity gravity; + struct xnvfile_regular *vfile; + unsigned long ns, ticks; + struct xnclock *clock; + ssize_t nbytes; + int ret; + + nbytes = xnvfile_get_string(input, buf, sizeof(buf)); + if (nbytes < 0) + return nbytes; + + vfile = container_of(input->vfile, struct xnvfile_regular, entry); + clock = xnvfile_priv(vfile); + gravity = clock->gravity; + + while ((p = strsep(&args, " \t:/,")) != NULL) { + if (*p == '\0') + continue; + ns = simple_strtol(p, &p, 10); + ticks = xnclock_ns_to_ticks(clock, ns); + switch (*p) { + case 'i': + gravity.irq = ticks; + break; + case 'k': + gravity.kernel = ticks; + break; + case 'u': + case '\0': + gravity.user = ticks; + break; + default: + return -EINVAL; + } + ret = xnclock_set_gravity(clock, &gravity); + if (ret) + return ret; + } + + return nbytes; +} + +static struct xnvfile_regular_ops clock_ops = { + .show = clock_show, + .store = clock_store, +}; + +static void init_clock_proc(struct xnclock *clock) +{ + memset(&clock->vfile, 0, sizeof(clock->vfile)); + clock->vfile.ops = &clock_ops; + xnvfile_init_regular(clock->name, &clock->vfile, &clock_vfroot); + xnvfile_priv(&clock->vfile) = clock; + init_timerlist_proc(clock); +} + +static void cleanup_clock_proc(struct xnclock *clock) +{ + cleanup_timerlist_proc(clock); + xnvfile_destroy_regular(&clock->vfile); +} + +void xnclock_init_proc(void) +{ + xnvfile_init_dir("clock", &clock_vfroot, &cobalt_vfroot); + init_timerlist_root(); +} + +void xnclock_cleanup_proc(void) +{ + xnvfile_destroy_dir(&clock_vfroot); + cleanup_timerlist_root(); +} + +#else /* !CONFIG_XENO_OPT_VFILE */ + +static inline void init_clock_proc(struct xnclock *clock) { } + +static inline void cleanup_clock_proc(struct xnclock *clock) { } + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +/** + * @brief Register a Xenomai clock. + * + * This service installs a new clock which may be used to drive + * Xenomai timers. + * + * @param clock The new clock to register. + * + * @param affinity The set of CPUs we may expect the backing clock + * device to tick on. As a special case, passing a NULL affinity mask + * means that timer IRQs cannot be seen as percpu events, in which + * case all outstanding timers will be maintained into a single global + * queue instead of percpu timer queues. + * + * @coretags{secondary-only} + */ +int xnclock_register(struct xnclock *clock, const cpumask_t *affinity) +{ + struct xntimerdata *tmd; + int cpu; + + secondary_mode_only(); + +#ifdef CONFIG_SMP + /* + * A CPU affinity set may be defined for each clock, + * enumerating the CPUs which can receive ticks from the + * backing clock device. When given, this set must be a + * subset of the real-time CPU set. + */ + if (affinity) { + cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus); + if (cpumask_empty(&clock->affinity)) + return -EINVAL; + } else /* Device is global without particular IRQ affinity. 
*/ + cpumask_clear(&clock->affinity); +#endif + + /* Allocate the percpu timer queue slot. */ + clock->timerdata = alloc_percpu(struct xntimerdata); + if (clock->timerdata == NULL) + return -ENOMEM; + + /* + * POLA: init all timer slots for the new clock, although some + * of them might remain unused depending on the CPU affinity + * of the event source(s). If the clock device is global + * without any particular IRQ affinity, all timers will be + * queued to CPU0. + */ + for_each_online_cpu(cpu) { + tmd = xnclock_percpu_timerdata(clock, cpu); + xntimerq_init(&tmd->q); + } + +#ifdef CONFIG_XENO_OPT_STATS + INIT_LIST_HEAD(&clock->timerq); +#endif /* CONFIG_XENO_OPT_STATS */ + + init_clock_proc(clock); + + return 0; +} +EXPORT_SYMBOL_GPL(xnclock_register); + +/** + * @fn void xnclock_deregister(struct xnclock *clock) + * @brief Deregister a Xenomai clock. + * + * This service uninstalls a Xenomai clock previously registered with + * xnclock_register(). + * + * This service may be called once all timers driven by @a clock have + * been stopped. + * + * @param clock The clock to deregister. + * + * @coretags{secondary-only} + */ +void xnclock_deregister(struct xnclock *clock) +{ + struct xntimerdata *tmd; + int cpu; + + secondary_mode_only(); + + cleanup_clock_proc(clock); + + for_each_online_cpu(cpu) { + tmd = xnclock_percpu_timerdata(clock, cpu); + XENO_BUG_ON(COBALT, !xntimerq_empty(&tmd->q)); + xntimerq_destroy(&tmd->q); + } + + free_percpu(clock->timerdata); +} +EXPORT_SYMBOL_GPL(xnclock_deregister); + +/** + * @fn void xnclock_tick(struct xnclock *clock) + * @brief Process a clock tick. + * + * This routine processes an incoming @a clock event, firing elapsed + * timers as appropriate. + * + * @param clock The clock for which a new event was received. + * + * @coretags{coreirq-only, atomic-entry} + * + * @note The current CPU must be part of the real-time affinity set + * unless the clock device has no particular IRQ affinity, otherwise + * weird things may happen. + */ +void xnclock_tick(struct xnclock *clock) +{ + struct xnsched *sched = xnsched_current(); + struct xntimer *timer; + xnsticks_t delta; + xntimerq_t *tmq; + xnticks_t now; + xntimerh_t *h; + + atomic_only(); + +#ifdef CONFIG_SMP + /* + * Some external clock devices may be global without any + * particular IRQ affinity, in which case the associated + * timers will be queued to CPU0. + */ + if (IS_ENABLED(CONFIG_XENO_OPT_EXTCLOCK) && + clock != &nkclock && + !cpumask_test_cpu(xnsched_cpu(sched), &clock->affinity)) + tmq = &xnclock_percpu_timerdata(clock, 0)->q; + else +#endif + tmq = &xnclock_this_timerdata(clock)->q; + + /* + * Optimisation: any local timer reprogramming triggered by + * invoked timer handlers can wait until we leave the tick + * handler. Use this status flag as hint to xntimer_start(). + */ + sched->status |= XNINTCK; + + now = xnclock_read_raw(clock); + while ((h = xntimerq_head(tmq)) != NULL) { + timer = container_of(h, struct xntimer, aplink); + delta = (xnsticks_t)(xntimerh_date(&timer->aplink) - now); + if (delta > 0) + break; + + trace_cobalt_timer_expire(timer); + + xntimer_dequeue(timer, tmq); + xntimer_account_fired(timer); + + /* + * By postponing the propagation of the low-priority + * host tick to the interrupt epilogue (see + * xnintr_irq_handler()), we save some I-cache, which + * translates into precious microsecs on low-end hw. 
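+ * (With the Dovetail pipeline added below in dovetail/intr.c,
+ * the deferred XNHTICK flag is picked up on return from
+ * xnintr_core_clock_handler(), which propagates the host tick
+ * whenever the tick interrupt preempted the root thread.)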
+ */ + if (unlikely(timer == &sched->htimer)) { + sched->lflags |= XNHTICK; + sched->lflags &= ~XNHDEFER; + if (timer->status & XNTIMER_PERIODIC) + goto advance; + continue; + } + + timer->handler(timer); + now = xnclock_read_raw(clock); + timer->status |= XNTIMER_FIRED; + /* + * Only requeue periodic timers which have not been + * requeued, stopped or killed. + */ + if ((timer->status & + (XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_KILLED|XNTIMER_RUNNING)) != + (XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_RUNNING)) + continue; + advance: + do { + timer->periodic_ticks++; + xntimer_update_date(timer); + } while (xntimerh_date(&timer->aplink) < now); + +#ifdef CONFIG_SMP + /* + * If the timer was migrated over its timeout handler, + * xntimer_migrate() re-queued it already. + */ + if (unlikely(timer->sched != sched)) + continue; +#endif + xntimer_enqueue(timer, tmq); + } + + sched->status &= ~XNINTCK; + + xnclock_program_shot(clock, sched); +} +EXPORT_SYMBOL_GPL(xnclock_tick); + +static int set_core_clock_gravity(struct xnclock *clock, + const struct xnclock_gravity *p) +{ + nkclock.gravity = *p; + + return 0; +} + +static void reset_core_clock_gravity(struct xnclock *clock) +{ + struct xnclock_gravity gravity; + + xnarch_get_latencies(&gravity); + if (gravity.kernel == 0) + gravity.kernel = gravity.user; + set_core_clock_gravity(clock, &gravity); +} + +struct xnclock nkclock = { + .name = "coreclk", + .resolution = 1, /* nanosecond. */ + .ops = { + .set_gravity = set_core_clock_gravity, + .reset_gravity = reset_core_clock_gravity, +#ifdef CONFIG_XENO_OPT_VFILE + .print_status = print_core_clock_status, +#endif + }, + .id = -1, +}; +EXPORT_SYMBOL_GPL(nkclock); + +void xnclock_cleanup(void) +{ + xnclock_deregister(&nkclock); +} + +int __init xnclock_init() +{ + spl_t s; + +#ifdef XNARCH_HAVE_NODIV_LLIMD + xnarch_init_u32frac(&bln_frac, 1, 1000000000); +#endif + pipeline_init_clock(); + xnclock_reset_gravity(&nkclock); + xnlock_get_irqsave(&nklock, s); + nkclock.wallclock_offset = pipeline_read_wallclock() - + xnclock_core_read_monotonic(); + xnlock_put_irqrestore(&nklock, s); + xnclock_register(&nkclock, &xnsched_realtime_cpus); + + return 0; +} + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/debug.c b/kernel/xenomai-v3.2.4/kernel/cobalt/debug.c new file mode 100644 index 0000000..db0ecf0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/debug.c @@ -0,0 +1,657 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#include <linux/types.h> +#include <linux/limits.h> +#include <linux/ctype.h> +#include <linux/jhash.h> +#include <linux/mm.h> +#include <linux/signal.h> +#include <linux/vmalloc.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/ppd.h> +#include <cobalt/uapi/signal.h> +#include <asm/xenomai/syscall.h> +#include "posix/process.h" +#include "debug.h" + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_debug Debugging services + * @{ + */ +struct xnvfile_directory cobalt_debug_vfroot; +EXPORT_SYMBOL_GPL(cobalt_debug_vfroot); + +#ifdef CONFIG_XENO_OPT_DEBUG_TRACE_RELAX + +#define SYMBOL_HSLOTS (1 << 8) + +struct hashed_symbol { + struct hashed_symbol *next; + char symbol[0]; +}; + +static struct hashed_symbol *symbol_jhash[SYMBOL_HSLOTS]; + +static struct xnheap memory_pool; + +/* + * This is a permanent storage for ASCII strings which comes handy to + * get a unique and constant reference to a symbol while preserving + * storage space. Hashed symbols have infinite lifetime and are never + * flushed. + */ +DEFINE_PRIVATE_XNLOCK(symbol_lock); + +static const char *hash_symbol(const char *symbol) +{ + struct hashed_symbol *p, **h; + const char *str; + size_t len; + u32 hash; + spl_t s; + + len = strlen(symbol); + hash = jhash(symbol, len, 0); + + xnlock_get_irqsave(&symbol_lock, s); + + h = &symbol_jhash[hash & (SYMBOL_HSLOTS - 1)]; + p = *h; + while (p && + (*p->symbol != *symbol || + strcmp(p->symbol + 1, symbol + 1))) + p = p->next; + + if (p) + goto done; + + p = xnheap_alloc(&memory_pool, sizeof(*p) + len + 1); + if (p == NULL) { + str = NULL; + goto out; + } + + strcpy(p->symbol, symbol); + p->next = *h; + *h = p; +done: + str = p->symbol; +out: + xnlock_put_irqrestore(&symbol_lock, s); + + return str; +} + +/* + * We define a static limit (RELAX_SPOTNR) for spot records to limit + * the memory consumption (we pull record memory from the system + * heap). The current value should be reasonable enough unless the + * application is extremely unsane, given that we only keep unique + * spots. Said differently, if the application has more than + * RELAX_SPOTNR distinct code locations doing spurious relaxes, then + * the first issue to address is likely PEBKAC. + */ +#define RELAX_SPOTNR 128 +#define RELAX_HSLOTS (1 << 8) + +struct relax_record { + /* Number of hits for this location */ + u32 hits; + struct relax_spot { + /* Faulty thread name. */ + char thread[XNOBJECT_NAME_LEN]; + /* call stack the relax originates from. */ + int depth; + struct backtrace { + unsigned long pc; + const char *mapname; + } backtrace[SIGSHADOW_BACKTRACE_DEPTH]; + /* Program hash value of the caller. */ + u32 proghash; + /* Pid of the caller. */ + pid_t pid; + /* Reason for relaxing. */ + int reason; + } spot; + struct relax_record *r_next; + struct relax_record *h_next; + const char *exe_path; +}; + +static struct relax_record *relax_jhash[RELAX_HSLOTS]; + +static struct relax_record *relax_record_list; + +static int relax_overall, relax_queued; + +DEFINE_PRIVATE_XNLOCK(relax_lock); + +/* + * The motivation to centralize tracing information about relaxes + * directly into kernel space is fourfold: + * + * - this allows to gather all the trace data into a single location + * and keep it safe there, with no external log file involved. + * + * - enabling the tracing does not impose any requirement on the + * application (aside of being compiled with debug symbols for best + * interpreting that information). 
We only need a kernel config switch + * for this (i.e. CONFIG_XENO_OPT_DEBUG_TRACE_RELAX). + * + * - the data is collected and can be made available exactly the same + * way regardless of the application emitting the relax requests, or + * whether it is still alive when the trace data are displayed. + * + * - the kernel is able to provide accurate and detailed trace + * information, such as the relative offset of instructions causing + * relax requests within dynamic shared objects, without having to + * guess it roughly from /proc/pid/maps, or relying on ldd's + * --function-relocs feature, which both require to run on the target + * system to get the needed information. Instead, we allow a build + * host to use a cross-compilation toolchain later to extract the + * source location, from the raw data the kernel has provided on the + * target system. + * + * However, collecting the call frames within the application to + * determine the full context of a relax spot is not something we can + * do purely from kernel space, notably because it depends on build + * options we just don't know about (e.g. frame pointers availability + * for the app, or other nitty-gritty details depending on the + * toolchain). To solve this, we ask the application to send us a + * complete backtrace taken from the context of a specific signal + * handler, which we know is stacked over the relax spot. That + * information is then stored by the kernel after some + * post-processing, along with other data identifying the caller, and + * made available through the /proc/xenomai/debug/relax vfile. + * + * Implementation-wise, xndebug_notify_relax and xndebug_trace_relax + * routines are paired: first, xndebug_notify_relax sends a SIGSHADOW + * request to userland when a relax spot is detected from + * xnthread_relax, which should then trigger a call back to + * xndebug_trace_relax with the complete backtrace information, as + * seen from userland (via the internal sc_cobalt_backtrace + * syscall). All this runs on behalf of the relaxing thread, so we can + * make a number of convenient assumptions (such as being able to scan + * the current vma list to get detailed information about the + * executable mappings that could be involved). + */ + +void xndebug_notify_relax(struct xnthread *thread, int reason) +{ + xnthread_signal(thread, SIGSHADOW, + sigshadow_int(SIGSHADOW_ACTION_BACKTRACE, reason)); +} + +void xndebug_trace_relax(int nr, unsigned long *backtrace, + int reason) +{ + struct relax_record *p, **h; + struct vm_area_struct *vma; + struct xnthread *thread; + struct relax_spot spot; + struct mm_struct *mm; + struct file *file; + unsigned long pc; + char *mapname; + int n, depth; + char *tmp; + u32 hash; + spl_t s; + + thread = xnthread_current(); + if (thread == NULL) + return; /* Can't be, right? What a mess. */ + + /* + * We compute PC values relative to the base of the shared + * executable mappings we find in the backtrace, which makes + * it possible for the slackspot utility to match the + * corresponding source code locations from unrelocated file + * offsets. + */ + + tmp = (char *)__get_free_page(GFP_KERNEL); + if (tmp == NULL) + /* + * The situation looks really bad, but we can't do + * anything about it. Just bail out. 
+ */ + return; + + memset(&spot, 0, sizeof(spot)); + mm = get_task_mm(current); + mmap_read_lock(mm); + + for (n = 0, depth = 0; n < nr; n++) { + pc = backtrace[n]; + + vma = find_vma(mm, pc); + if (vma == NULL) + continue; + + /* + * Interpreter-generated executable mappings are not + * file-backed. Use this to determine when $pc should be fixed + * up by subtracting the mapping base address in the DSO case. + */ + file = vma->vm_file; + if (file != NULL) + pc -= vma->vm_start; + + spot.backtrace[depth].pc = pc; + + /* + * Even in case we can't fetch the map name, we still + * record the PC value, which may still give some hint + * downstream. + */ + if (file == NULL) + goto next_frame; + + mapname = d_path(&file->f_path, tmp, PAGE_SIZE); + if (IS_ERR(mapname)) + goto next_frame; + + spot.backtrace[depth].mapname = hash_symbol(mapname); + next_frame: + depth++; + } + + mmap_read_unlock(mm); + mmput(mm); + free_page((unsigned long)tmp); + + /* + * Most of the time we will be sent duplicates, since the odds + * of seeing the same thread running the same code doing the + * same mistake all over again are high. So we probe the hash + * table for an identical spot first, before going for a + * complete record allocation from the system heap if no match + * was found. Otherwise, we just take the fast exit path. + */ + spot.depth = depth; + spot.proghash = thread->proghash; + spot.pid = xnthread_host_pid(thread); + spot.reason = reason; + strcpy(spot.thread, thread->name); + hash = jhash2((u32 *)&spot, sizeof(spot) / sizeof(u32), 0); + + xnlock_get_irqsave(&relax_lock, s); + + h = &relax_jhash[hash & (RELAX_HSLOTS - 1)]; + p = *h; + while (p && + /* Try quick guesses first, then memcmp */ + (p->spot.depth != spot.depth || + p->spot.pid != spot.pid || + memcmp(&p->spot, &spot, sizeof(spot)))) + p = p->h_next; + + if (p) { + p->hits++; + goto out; /* Spot already recorded. */ + } + + if (relax_queued >= RELAX_SPOTNR) + goto out; /* No more space -- ignore. */ + /* + * We can only compete with other shadows which have just + * switched to secondary mode like us. So holding the + * relax_lock a bit more without disabling interrupts is not + * an issue. This allows us to postpone the record memory + * allocation while probing and updating the hash table in a + * single move. + */ + p = xnheap_alloc(&memory_pool, sizeof(*p)); + if (p == NULL) + goto out; /* Something is about to go wrong... */ + + memcpy(&p->spot, &spot, sizeof(p->spot)); + p->exe_path = hash_symbol(thread->exe_path); + p->hits = 1; + p->h_next = *h; + *h = p; + p->r_next = relax_record_list; + relax_record_list = p; + relax_queued++; +out: + relax_overall++; + + xnlock_put_irqrestore(&relax_lock, s); +} + +static DEFINE_VFILE_HOSTLOCK(relax_mutex); + +struct relax_vfile_priv { + int queued; + int overall; + int ncurr; + struct relax_record *head; + struct relax_record *curr; +}; + +static void *relax_vfile_begin(struct xnvfile_regular_iterator *it) +{ + struct relax_vfile_priv *priv = xnvfile_iterator_priv(it); + struct relax_record *p; + spl_t s; + int n; + + /* + * Snapshot the counters under lock, to make sure they remain + * mutually consistent despite we dump the record list in a + * lock-less manner. Additionally, the vfile layer already + * holds the relax_mutex lock for us, so that we can't race + * with ->store(). 
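+ * Records are only ever prepended to relax_record_list and are
+ * freed from ->store() exclusively, which relax_mutex serializes
+ * against us, so walking the snapshot list without holding
+ * relax_lock is safe.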
+ */ + xnlock_get_irqsave(&relax_lock, s); + + if (relax_queued == 0 || it->pos > relax_queued) { + xnlock_put_irqrestore(&relax_lock, s); + return NULL; + } + priv->overall = relax_overall; + priv->queued = relax_queued; + priv->head = relax_record_list; + + xnlock_put_irqrestore(&relax_lock, s); + + if (it->pos == 0) { + priv->curr = NULL; + priv->ncurr = -1; + return VFILE_SEQ_START; + } + + for (n = 1, p = priv->head; n < it->pos; n++) + p = p->r_next; + + priv->curr = p; + priv->ncurr = n; + + return p; +} + +static void *relax_vfile_next(struct xnvfile_regular_iterator *it) +{ + struct relax_vfile_priv *priv = xnvfile_iterator_priv(it); + struct relax_record *p; + int n; + + if (it->pos > priv->queued) + return NULL; + + if (it->pos == priv->ncurr + 1) + p = priv->curr->r_next; + else { + for (n = 1, p = priv->head; n < it->pos; n++) + p = p->r_next; + } + + priv->curr = p; + priv->ncurr = it->pos; + + return p; +} + +static const char *reason_str[] = { + [SIGDEBUG_UNDEFINED] = "undefined", + [SIGDEBUG_MIGRATE_SIGNAL] = "signal", + [SIGDEBUG_MIGRATE_SYSCALL] = "syscall", + [SIGDEBUG_MIGRATE_FAULT] = "fault", + [SIGDEBUG_MIGRATE_PRIOINV] = "pi-error", + [SIGDEBUG_NOMLOCK] = "mlock-check", + [SIGDEBUG_WATCHDOG] = "runaway-break", + [SIGDEBUG_RESCNT_IMBALANCE] = "resource-count-imbalance", + [SIGDEBUG_MUTEX_SLEEP] = "sleep-holding-mutex", + [SIGDEBUG_LOCK_BREAK] = "scheduler-lock-break", +}; + +static int relax_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + struct relax_vfile_priv *priv = xnvfile_iterator_priv(it); + struct relax_record *p = data; + int n; + + /* + * No need to grab any lock to read a record from a previously + * validated index: the data must be there and won't be + * touched anymore. + */ + if (p == NULL) { + xnvfile_printf(it, "%d\n", priv->overall); + return 0; + } + + xnvfile_printf(it, "%s\n", p->exe_path ?: "?"); + xnvfile_printf(it, "%d %d %s %s\n", p->spot.pid, p->hits, + reason_str[p->spot.reason], p->spot.thread); + + for (n = 0; n < p->spot.depth; n++) + xnvfile_printf(it, "0x%lx %s\n", + p->spot.backtrace[n].pc, + p->spot.backtrace[n].mapname ?: "?"); + + xnvfile_printf(it, ".\n"); + + return 0; +} + +static ssize_t relax_vfile_store(struct xnvfile_input *input) +{ + struct relax_record *p, *np; + spl_t s; + + /* + * Flush out all records. Races with ->show() are prevented + * using the relax_mutex lock. The vfile layer takes care of + * this internally. 
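+ * From userland, any write flushes the log, e.g. (assuming the
+ * vfile sits at its documented location):
+ *
+ *	echo flush > /proc/xenomai/debug/relax
+ *
+ * after which relax_overall and relax_queued restart from zero.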
+ */ + xnlock_get_irqsave(&relax_lock, s); + p = relax_record_list; + relax_record_list = NULL; + relax_overall = 0; + relax_queued = 0; + memset(relax_jhash, 0, sizeof(relax_jhash)); + xnlock_put_irqrestore(&relax_lock, s); + + while (p) { + np = p->r_next; + xnheap_free(&memory_pool, p); + p = np; + } + + return input->size; +} + +static struct xnvfile_regular_ops relax_vfile_ops = { + .begin = relax_vfile_begin, + .next = relax_vfile_next, + .show = relax_vfile_show, + .store = relax_vfile_store, +}; + +static struct xnvfile_regular relax_vfile = { + .privsz = sizeof(struct relax_vfile_priv), + .ops = &relax_vfile_ops, + .entry = { .lockops = &relax_mutex.ops }, +}; + +static inline int init_trace_relax(void) +{ + u32 size = CONFIG_XENO_OPT_DEBUG_TRACE_LOGSZ * 1024; + void *p; + int ret; + + p = vmalloc(size); + if (p == NULL) + return -ENOMEM; + + ret = xnheap_init(&memory_pool, p, size); + if (ret) + return ret; + + xnheap_set_name(&memory_pool, "debug log"); + + ret = xnvfile_init_regular("relax", &relax_vfile, &cobalt_debug_vfroot); + if (ret) { + xnheap_destroy(&memory_pool); + vfree(p); + } + + return ret; +} + +static inline void cleanup_trace_relax(void) +{ + void *p; + + xnvfile_destroy_regular(&relax_vfile); + p = xnheap_get_membase(&memory_pool); + xnheap_destroy(&memory_pool); + vfree(p); +} + +#else /* !CONFIG_XENO_OPT_DEBUG_TRACE_RELAX */ + +static inline int init_trace_relax(void) +{ + return 0; +} + +static inline void cleanup_trace_relax(void) +{ +} + +static inline void init_thread_relax_trace(struct xnthread *thread) +{ +} + +#endif /* !XENO_OPT_DEBUG_TRACE_RELAX */ + +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING + +void xnlock_dbg_prepare_acquire(unsigned long long *start) +{ + *start = xnclock_read_raw(&nkclock); +} +EXPORT_SYMBOL_GPL(xnlock_dbg_prepare_acquire); + +void xnlock_dbg_acquired(struct xnlock *lock, int cpu, unsigned long long *start, + const char *file, int line, const char *function) +{ + lock->lock_date = *start; + lock->spin_time = xnclock_read_raw(&nkclock) - *start; + lock->file = file; + lock->function = function; + lock->line = line; + lock->cpu = cpu; +} +EXPORT_SYMBOL_GPL(xnlock_dbg_acquired); + +int xnlock_dbg_release(struct xnlock *lock, + const char *file, int line, const char *function) +{ + unsigned long long lock_time; + struct xnlockinfo *stats; + int cpu; + + lock_time = xnclock_read_raw(&nkclock) - lock->lock_date; + cpu = raw_smp_processor_id(); + stats = &per_cpu(xnlock_stats, cpu); + + if (lock->file == NULL) { + lock->file = "??"; + lock->line = 0; + lock->function = "invalid"; + } + + if (unlikely(lock->owner != cpu)) { + pipeline_prepare_panic(); + printk(XENO_ERR "lock %p already unlocked on CPU #%d\n" + " last owner = %s:%u (%s(), CPU #%d)\n", + lock, cpu, lock->file, lock->line, lock->function, + lock->cpu); + dump_stack(); + return 1; + } + + /* File that we released it. */ + lock->cpu = -lock->cpu; + lock->file = file; + lock->line = line; + lock->function = function; + + if (lock_time > stats->lock_time) { + stats->lock_time = lock_time; + stats->spin_time = lock->spin_time; + stats->file = lock->file; + stats->function = lock->function; + stats->line = lock->line; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xnlock_dbg_release); + +#endif /* CONFIG_XENO_OPT_DEBUG_LOCKING */ + +void xndebug_shadow_init(struct xnthread *thread) +{ + struct cobalt_ppd *sys_ppd; + size_t len; + + sys_ppd = cobalt_ppd_get(0); + /* + * The caller is current, so we know for sure that sys_ppd + * will still be valid after we dropped the lock. 
+ * + * NOTE: Kernel shadows all share the system global ppd + * descriptor with no refcounting. + */ + thread->exe_path = sys_ppd->exe_path ?: "(unknown)"; + /* + * The program hash value is a unique token debug features may + * use to identify all threads which belong to a given + * executable file. Using this value for quick probes is often + * handier and more efficient than testing the whole exe_path. + */ + len = strlen(thread->exe_path); + thread->proghash = jhash(thread->exe_path, len, 0); +} + +int xndebug_init(void) +{ + int ret; + + ret = init_trace_relax(); + if (ret) + return ret; + + return 0; +} + +void xndebug_cleanup(void) +{ + cleanup_trace_relax(); +} + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/debug.h b/kernel/xenomai-v3.2.4/kernel/cobalt/debug.h new file mode 100644 index 0000000..24dc354 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/debug.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ + +#ifndef _KERNEL_COBALT_DEBUG_H +#define _KERNEL_COBALT_DEBUG_H + +#include <cobalt/kernel/assert.h> + +struct xnthread; + +#ifdef CONFIG_XENO_OPT_DEBUG + +int xndebug_init(void); + +void xndebug_cleanup(void); + +void xndebug_shadow_init(struct xnthread *thread); + +extern struct xnvfile_directory cobalt_debug_vfroot; + +#else /* !XENO_OPT_DEBUG */ + +static inline int xndebug_init(void) +{ + return 0; +} + +static inline void xndebug_cleanup(void) +{ +} + +static inline void xndebug_shadow_init(struct xnthread *thread) +{ +} + +#endif /* !XENO_OPT_DEBUG */ + +#ifdef CONFIG_XENO_OPT_DEBUG_TRACE_RELAX +void xndebug_notify_relax(struct xnthread *thread, + int reason); +void xndebug_trace_relax(int nr, unsigned long *backtrace, + int reason); +#else +static inline +void xndebug_notify_relax(struct xnthread *thread, int reason) +{ +} +static inline +void xndebug_trace_relax(int nr, unsigned long *backtrace, + int reason) +{ + /* Simply ignore. 
*/ +} +#endif + +#endif /* !_KERNEL_COBALT_DEBUG_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile new file mode 100644 index 0000000..f49d3a0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile @@ -0,0 +1,5 @@ +ccflags-y += -I$(srctree)/kernel + +obj-y += pipeline.o + +pipeline-y := init.o kevents.o sched.o tick.o syscall.o intr.o diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c new file mode 100644 index 0000000..bc891b4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum <rpm@xenomai.org> + */ + +#include <linux/init.h> +#include <pipeline/machine.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/assert.h> + +int __init pipeline_init(void) +{ + int ret; + + if (cobalt_machine.init) { + ret = cobalt_machine.init(); + if (ret) + return ret; + } + + /* Enable the Xenomai out-of-band stage */ + enable_oob_stage("Xenomai"); + + ret = xnclock_init(); + if (ret) + goto fail_clock; + + return 0; + +fail_clock: + if (cobalt_machine.cleanup) + cobalt_machine.cleanup(); + + return ret; +} + +int __init pipeline_late_init(void) +{ + if (cobalt_machine.late_init) + return cobalt_machine.late_init(); + + return 0; +} + +__init void pipeline_cleanup(void) +{ + /* Disable the Xenomai stage */ + disable_oob_stage(); + + xnclock_cleanup(); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c new file mode 100644 index 0000000..88116c7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c @@ -0,0 +1,146 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#include <linux/interrupt.h> +#include <linux/irq_pipeline.h> +#include <linux/tick.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/lock.h> +#include <cobalt/kernel/intr.h> + +void xnintr_host_tick(struct xnsched *sched) /* hard irqs off */ +{ + sched->lflags &= ~XNHTICK; + tick_notify_proxy(); +} + +/* + * Low-level core clock irq handler. This one forwards ticks from the + * Xenomai platform timer to nkclock exclusively. + */ +void xnintr_core_clock_handler(void) +{ + struct xnsched *sched; + + xnlock_get(&nklock); + xnclock_tick(&nkclock); + xnlock_put(&nklock); + + /* + * If the core clock interrupt preempted a real-time thread, + * any transition to the root thread has already triggered a + * host tick propagation from xnsched_run(), so at this point, + * we only need to propagate the host tick in case the + * interrupt preempted the root thread. + */ + sched = xnsched_current(); + if ((sched->lflags & XNHTICK) && + xnthread_test_state(sched->curr, XNROOT)) + xnintr_host_tick(sched); +} + +static irqreturn_t xnintr_irq_handler(int irq, void *dev_id) +{ + struct xnintr *intr = dev_id; + int ret; + + ret = intr->isr(intr); + XENO_WARN_ON_ONCE(USER, (ret & XN_IRQ_STATMASK) == 0); + + if (ret & XN_IRQ_DISABLE) + disable_irq(irq); + else if (ret & XN_IRQ_PROPAGATE) + irq_post_inband(irq); + + return ret & XN_IRQ_NONE ? IRQ_NONE : IRQ_HANDLED; +} + +int xnintr_init(struct xnintr *intr, const char *name, + unsigned int irq, xnisr_t isr, xniack_t iack, + int flags) +{ + secondary_mode_only(); + + intr->irq = irq; + intr->isr = isr; + intr->iack = NULL; /* unused */ + intr->cookie = NULL; + intr->name = name ? 
: "<unknown>"; + intr->flags = flags; + intr->status = 0; + intr->unhandled = 0; /* unused */ + raw_spin_lock_init(&intr->lock); /* unused */ + + return 0; +} +EXPORT_SYMBOL_GPL(xnintr_init); + +void xnintr_destroy(struct xnintr *intr) +{ + secondary_mode_only(); + xnintr_detach(intr); +} +EXPORT_SYMBOL_GPL(xnintr_destroy); + +int xnintr_attach(struct xnintr *intr, void *cookie, const cpumask_t *cpumask) +{ + cpumask_t tmp_mask, *effective_mask; + int ret; + + secondary_mode_only(); + + intr->cookie = cookie; + + if (!cpumask) { + effective_mask = &xnsched_realtime_cpus; + } else { + effective_mask = &tmp_mask; + cpumask_and(effective_mask, &xnsched_realtime_cpus, cpumask); + if (cpumask_empty(effective_mask)) + return -EINVAL; + } +#ifdef CONFIG_SMP + ret = irq_set_affinity_hint(intr->irq, effective_mask); + if (ret) + return ret; +#endif + + return request_irq(intr->irq, xnintr_irq_handler, IRQF_OOB, + intr->name, intr); +} +EXPORT_SYMBOL_GPL(xnintr_attach); + +void xnintr_detach(struct xnintr *intr) +{ + secondary_mode_only(); +#ifdef CONFIG_SMP + irq_set_affinity_hint(intr->irq, NULL); +#endif + free_irq(intr->irq, intr); +} +EXPORT_SYMBOL_GPL(xnintr_detach); + +void xnintr_enable(struct xnintr *intr) +{ +} +EXPORT_SYMBOL_GPL(xnintr_enable); + +void xnintr_disable(struct xnintr *intr) +{ +} +EXPORT_SYMBOL_GPL(xnintr_disable); + +int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask) +{ + cpumask_t effective_mask; + + secondary_mode_only(); + + cpumask_and(&effective_mask, &xnsched_realtime_cpus, cpumask); + if (cpumask_empty(&effective_mask)) + return -EINVAL; + + return irq_set_affinity_hint(intr->irq, &effective_mask); +} +EXPORT_SYMBOL_GPL(xnintr_affinity); diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c new file mode 100644 index 0000000..4da4f51 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c @@ -0,0 +1,351 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org> + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * + * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org> + * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org> + */ + +#include <linux/ptrace.h> +#include <pipeline/kevents.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/vdso.h> +#include <cobalt/kernel/init.h> +#include <rtdm/driver.h> +#include <trace/events/cobalt-core.h> +#include "../posix/process.h" +#include "../posix/thread.h" +#include "../posix/memory.h" + +void arch_inband_task_init(struct task_struct *tsk) +{ + struct cobalt_threadinfo *p = dovetail_task_state(tsk); + + p->thread = NULL; + p->process = NULL; +} + +void handle_oob_trap_entry(unsigned int trapnr, struct pt_regs *regs) +{ + struct xnthread *thread; + struct xnsched *sched; + spl_t s; + + sched = xnsched_current(); + thread = sched->curr; + + /* + * Enable back tracing. 
+ */ + trace_cobalt_thread_fault(xnarch_fault_pc(regs), trapnr); + + if (xnthread_test_state(thread, XNROOT)) + return; + + if (xnarch_fault_bp_p(trapnr) && user_mode(regs)) { + XENO_WARN_ON(CORE, xnthread_test_state(thread, XNRELAX)); + xnlock_get_irqsave(&nklock, s); + xnthread_set_info(thread, XNCONTHI); + dovetail_request_ucall(current); + cobalt_stop_debugged_process(thread); + xnlock_put_irqrestore(&nklock, s); + xnsched_run(); + } + + /* + * If we experienced a trap on behalf of a shadow thread + * running in primary mode, move it to the Linux domain, + * leaving the kernel process the exception. + */ +#if defined(CONFIG_XENO_OPT_DEBUG_COBALT) || defined(CONFIG_XENO_OPT_DEBUG_USER) + if (!user_mode(regs)) { + printk(XENO_WARNING + "switching %s to secondary mode after exception #%u in " + "kernel-space at 0x%lx (pid %d)\n", thread->name, + trapnr, + xnarch_fault_pc(regs), + xnthread_host_pid(thread)); + } else if (xnarch_fault_notify(trapnr)) /* Don't report debug traps */ + printk(XENO_WARNING + "switching %s to secondary mode after exception #%u from " + "user-space at 0x%lx (pid %d)\n", thread->name, + trapnr, + xnarch_fault_pc(regs), + xnthread_host_pid(thread)); +#endif + + if (xnarch_fault_pf_p(trapnr)) + /* + * The page fault counter is not SMP-safe, but it's a + * simple indicator that something went wrong wrt + * memory locking anyway. + */ + xnstat_counter_inc(&thread->stat.pf); + + xnthread_relax(xnarch_fault_notify(trapnr), SIGDEBUG_MIGRATE_FAULT); +} + +static inline int handle_setaffinity_event(struct dovetail_migration_data *d) +{ + return cobalt_handle_setaffinity_event(d->task); +} + +static inline int handle_taskexit_event(struct task_struct *p) +{ + return cobalt_handle_taskexit_event(p); +} + +static inline int handle_user_return(struct task_struct *task) +{ + return cobalt_handle_user_return(task); +} + +void handle_oob_mayday(struct pt_regs *regs) +{ + XENO_BUG_ON(COBALT, !xnthread_test_state(xnthread_current(), XNUSER)); + + xnthread_relax(0, 0); +} + +static int handle_sigwake_event(struct task_struct *p) +{ + struct xnthread *thread; + sigset_t pending; + spl_t s; + + thread = xnthread_from_task(p); + if (thread == NULL) + return KEVENT_PROPAGATE; + + xnlock_get_irqsave(&nklock, s); + + /* + * CAUTION: __TASK_TRACED is not set in p->state yet. This + * state bit will be set right after we return, when the task + * is woken up. + */ + if ((p->ptrace & PT_PTRACED) && !xnthread_test_state(thread, XNSSTEP)) { + /* We already own the siglock. */ + sigorsets(&pending, + &p->pending.signal, + &p->signal->shared_pending.signal); + + if (sigismember(&pending, SIGTRAP) || + sigismember(&pending, SIGSTOP) + || sigismember(&pending, SIGINT)) + cobalt_register_debugged_thread(thread); + } + + if (xnthread_test_state(thread, XNRELAX)) + goto out; + + /* + * Allow a thread stopped for debugging to resume briefly in order to + * migrate to secondary mode. xnthread_relax will reapply XNDBGSTOP. 
+ */ + if (xnthread_test_state(thread, XNDBGSTOP)) + xnthread_resume(thread, XNDBGSTOP); + + __xnthread_kick(thread); +out: + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); + + return KEVENT_PROPAGATE; +} + +static inline int handle_cleanup_event(struct mm_struct *mm) +{ + return cobalt_handle_cleanup_event(mm); +} + +void pipeline_cleanup_process(void) +{ + dovetail_stop_altsched(); +} + +int handle_ptrace_resume(struct task_struct *tracee) +{ + struct xnthread *thread; + spl_t s; + + thread = xnthread_from_task(tracee); + if (thread == NULL) + return KEVENT_PROPAGATE; + + if (xnthread_test_state(thread, XNSSTEP)) { + xnlock_get_irqsave(&nklock, s); + + xnthread_resume(thread, XNDBGSTOP); + cobalt_unregister_debugged_thread(thread); + + xnlock_put_irqrestore(&nklock, s); + } + + return KEVENT_PROPAGATE; +} + +static void handle_ptrace_cont(void) +{ + struct xnthread *curr = xnthread_current(); + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_state(curr, XNSSTEP)) { + if (!xnthread_test_info(curr, XNCONTHI)) + cobalt_unregister_debugged_thread(curr); + + xnthread_set_localinfo(curr, XNHICCUP); + + dovetail_request_ucall(current); + } + + xnlock_put_irqrestore(&nklock, s); +} + +void handle_inband_event(enum inband_event_type event, void *data) +{ + switch (event) { + case INBAND_TASK_SIGNAL: + handle_sigwake_event(data); + break; + case INBAND_TASK_MIGRATION: + handle_setaffinity_event(data); + break; + case INBAND_TASK_EXIT: + if (xnthread_current()) + handle_taskexit_event(current); + break; + case INBAND_TASK_RETUSER: + handle_user_return(data); + break; + case INBAND_TASK_PTSTEP: + handle_ptrace_resume(data); + break; + case INBAND_TASK_PTCONT: + handle_ptrace_cont(); + break; + case INBAND_TASK_PTSTOP: + break; + case INBAND_PROCESS_CLEANUP: + handle_cleanup_event(data); + break; + } +} + +/* + * Called by the in-band kernel when the CLOCK_REALTIME epoch changes. + */ +void inband_clock_was_set(void) +{ + if (realtime_core_enabled()) + xnclock_set_wallclock(ktime_get_real_fast_ns()); +} + +#ifdef CONFIG_MMU + +int pipeline_prepare_current(void) +{ + struct task_struct *p = current; + kernel_siginfo_t si; + + if ((p->mm->def_flags & VM_LOCKED) == 0) { + memset(&si, 0, sizeof(si)); + si.si_signo = SIGDEBUG; + si.si_code = SI_QUEUE; + si.si_int = SIGDEBUG_NOMLOCK | sigdebug_marker; + send_sig_info(SIGDEBUG, &si, p); + } + + return 0; +} + +static inline int get_mayday_prot(void) +{ + return PROT_READ|PROT_EXEC; +} + +#else /* !CONFIG_MMU */ + +int pipeline_prepare_current(void) +{ + return 0; +} + +static inline int get_mayday_prot(void) +{ + /* + * Until we stop backing /dev/mem with the mayday page, we + * can't ask for PROT_EXEC since the former does not define + * mmap capabilities, and default ones won't allow an + * executable mapping with MAP_SHARED. In the NOMMU case, this + * is (currently) not an issue. + */ + return PROT_READ; +} + +#endif /* !CONFIG_MMU */ + +void resume_oob_task(struct task_struct *p) /* inband, oob stage stalled */ +{ + struct xnthread *thread = xnthread_from_task(p); + + xnlock_get(&nklock); + + /* + * We fire the handler before the thread is migrated, so that + * thread->sched does not change between paired invocations of + * relax_thread/harden_thread handlers. 
+ */ + xnthread_run_handler_stack(thread, harden_thread); + + cobalt_adjust_affinity(p); + + xnthread_resume(thread, XNRELAX); + + /* + * In case we migrated independently of the user return notifier, clear + * XNCONTHI here and also disable the notifier - we are already done. + */ + if (unlikely(xnthread_test_info(thread, XNCONTHI))) { + xnthread_clear_info(thread, XNCONTHI); + dovetail_clear_ucall(); + } + + /* Unregister as debugged thread in case we postponed this. */ + if (unlikely(xnthread_test_state(thread, XNSSTEP))) + cobalt_unregister_debugged_thread(thread); + + xnlock_put(&nklock); + + xnsched_run(); + +} + +void pipeline_attach_current(struct xnthread *thread) +{ + struct cobalt_threadinfo *p; + + p = pipeline_current(); + p->thread = thread; + p->process = cobalt_search_process(current->mm); + dovetail_init_altsched(&xnthread_archtcb(thread)->altsched); +} + +int pipeline_trap_kevents(void) +{ + dovetail_start(); + return 0; +} + +void pipeline_enable_kevents(void) +{ + dovetail_start_altsched(); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c new file mode 100644 index 0000000..01ea442 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2001-2020 Philippe Gerum <rpm@xenomai.org>. + */ + +#include <linux/cpuidle.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/kernel/sched.h> +#include <pipeline/sched.h> +#include <trace/events/cobalt-core.h> + +/* in-band stage, hard_irqs_disabled() */ +bool irq_cpuidle_control(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + /* + * Deny entering sleep state if this entails stopping the + * timer (i.e. C3STOP misfeature). + */ + if (state && (state->flags & CPUIDLE_FLAG_TIMER_STOP)) + return false; + + return true; +} + +bool pipeline_switch_to(struct xnthread *prev, struct xnthread *next, + bool leaving_inband) +{ + return dovetail_context_switch(&xnthread_archtcb(prev)->altsched, + &xnthread_archtcb(next)->altsched, leaving_inband); +} + +void pipeline_init_shadow_tcb(struct xnthread *thread) +{ + /* + * Initialize the alternate scheduling control block. + */ + dovetail_init_altsched(&xnthread_archtcb(thread)->altsched); + + trace_cobalt_shadow_map(thread); +} + +void pipeline_init_root_tcb(struct xnthread *thread) +{ + /* + * Initialize the alternate scheduling control block. + */ + dovetail_init_altsched(&xnthread_archtcb(thread)->altsched); +} + +int pipeline_leave_inband(void) +{ + return dovetail_leave_inband(); +} + +int pipeline_leave_oob_prepare(void) +{ + int suspmask = XNRELAX; + struct xnthread *curr = xnthread_current(); + + dovetail_leave_oob(); + /* + * If current is being debugged, record that it should migrate + * back in case it resumes in userspace. If it resumes in + * kernel space, i.e. over a restarting syscall, the + * associated hardening will clear XNCONTHI. + */ + if (xnthread_test_state(curr, XNSSTEP)) { + xnthread_set_info(curr, XNCONTHI); + dovetail_request_ucall(current); + suspmask |= XNDBGSTOP; + } + return suspmask; +} + +void pipeline_leave_oob_finish(void) +{ + dovetail_resume_inband(); +} + +void pipeline_raise_mayday(struct task_struct *tsk) +{ + dovetail_send_mayday(tsk); +} + +void pipeline_clear_mayday(void) /* May solely affect current. 
*/ +{ + clear_thread_flag(TIF_MAYDAY); +} + +irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id) +{ + trace_cobalt_schedule_remote(xnsched_current()); + + /* Will reschedule from irq_exit_pipeline(). */ + + return IRQ_HANDLED; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c new file mode 100644 index 0000000..440c069 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org> + * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + */ + +#include <linux/irqstage.h> +#include <pipeline/pipeline.h> +#include <pipeline/kevents.h> +#include <cobalt/kernel/assert.h> +#include <xenomai/posix/syscall.h> + +int handle_pipelined_syscall(struct irq_stage *stage, struct pt_regs *regs) +{ + if (unlikely(running_inband())) + return handle_root_syscall(regs); + + return handle_head_syscall(stage == &inband_stage, regs); +} + +int handle_oob_syscall(struct pt_regs *regs) +{ + return handle_head_syscall(false, regs); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c new file mode 100644 index 0000000..873b624 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c @@ -0,0 +1,190 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + */ + +#include <linux/tick.h> +#include <linux/clockchips.h> +#include <cobalt/kernel/intr.h> +#include <pipeline/tick.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/timer.h> + +static DEFINE_PER_CPU(struct clock_proxy_device *, proxy_device); + +const char *pipeline_timer_name(void) +{ + struct clock_proxy_device *dev = per_cpu(proxy_device, 0); + struct clock_event_device *real_dev = dev->real_device; + + /* + * Return the name of the current clock event chip, which is + * the real device controlled by the proxy tick device. + */ + return real_dev->name; +} + +void pipeline_set_timer_shot(unsigned long delay) /* ns */ +{ + struct clock_proxy_device *dev = __this_cpu_read(proxy_device); + struct clock_event_device *real_dev = dev->real_device; + u64 cycles; + ktime_t t; + int ret; + + if (real_dev->features & CLOCK_EVT_FEAT_KTIME) { + t = ktime_add(delay, xnclock_core_read_raw()); + real_dev->set_next_ktime(t, real_dev); + } else { + if (delay <= 0) { + delay = real_dev->min_delta_ns; + } else { + delay = min_t(int64_t, delay, + real_dev->max_delta_ns); + delay = max_t(int64_t, delay, + real_dev->min_delta_ns); + } + cycles = ((u64)delay * real_dev->mult) >> real_dev->shift; + ret = real_dev->set_next_event(cycles, real_dev); + if (ret) + real_dev->set_next_event(real_dev->min_delta_ticks, + real_dev); + } +} + +static int proxy_set_next_ktime(ktime_t expires, + struct clock_event_device *proxy_dev) /* hard irqs on/off */ +{ + struct xnsched *sched; + unsigned long flags; + ktime_t delta; + int ret; + + /* + * Expiration dates of in-band timers are based on the common + * monotonic time base. If the timeout date has already + * elapsed, make sure xntimer_start() does not fail with + * -ETIMEDOUT but programs the hardware for ticking + * immediately instead. 
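+ * For instance, an expiry date 500 ns in the past yields a
+ * negative delta, which is clamped to zero below so that the
+ * host timer is armed with a null relative timeout and fires
+ * right away.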
+ */ + delta = ktime_sub(expires, ktime_get()); + if (delta < 0) + delta = 0; + + xnlock_get_irqsave(&nklock, flags); + sched = xnsched_current(); + ret = xntimer_start(&sched->htimer, delta, XN_INFINITE, XN_RELATIVE); + xnlock_put_irqrestore(&nklock, flags); + + return ret ? -ETIME : 0; +} + +bool pipeline_must_force_program_tick(struct xnsched *sched) +{ + return sched->lflags & XNTSTOP; +} + +static int proxy_set_oneshot_stopped(struct clock_event_device *proxy_dev) +{ + struct clock_event_device *real_dev; + struct clock_proxy_device *dev; + struct xnsched *sched; + spl_t s; + + dev = container_of(proxy_dev, struct clock_proxy_device, proxy_device); + + /* + * In-band wants to disable the clock hardware on entering a + * tickless state, so we have to stop our in-band tick + * emulation. Propagate the request for shutting down the + * hardware to the real device only if we have no outstanding + * OOB timers. CAUTION: the in-band timer is counted when + * assessing the RQ_IDLE condition, so we need to stop it + * prior to testing the latter. + */ + xnlock_get_irqsave(&nklock, s); + sched = xnsched_current(); + xntimer_stop(&sched->htimer); + sched->lflags |= XNTSTOP; + + if (sched->lflags & XNIDLE) { + real_dev = dev->real_device; + real_dev->set_state_oneshot_stopped(real_dev); + } + + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static void setup_proxy(struct clock_proxy_device *dev) +{ + struct clock_event_device *proxy_dev = &dev->proxy_device; + + dev->handle_oob_event = (typeof(dev->handle_oob_event)) + xnintr_core_clock_handler; + proxy_dev->features |= CLOCK_EVT_FEAT_KTIME; + proxy_dev->set_next_ktime = proxy_set_next_ktime; + if (proxy_dev->set_state_oneshot_stopped) + proxy_dev->set_state_oneshot_stopped = proxy_set_oneshot_stopped; + __this_cpu_write(proxy_device, dev); +} + +#ifdef CONFIG_SMP +static irqreturn_t tick_ipi_handler(int irq, void *dev_id) +{ + xnintr_core_clock_handler(); + + return IRQ_HANDLED; +} +#endif + +int pipeline_install_tick_proxy(void) +{ + int ret; + +#ifdef CONFIG_SMP + /* + * We may be running a SMP kernel on a uniprocessor machine + * whose interrupt controller provides no IPI: attempt to hook + * the timer IPI only if the hardware can support multiple + * CPUs. + */ + if (num_possible_cpus() > 1) { + ret = __request_percpu_irq(TIMER_OOB_IPI, + tick_ipi_handler, + IRQF_OOB, "Xenomai timer IPI", + &cobalt_machine_cpudata); + if (ret) + return ret; + } +#endif + + /* Install the proxy tick device */ + ret = tick_install_proxy(setup_proxy, &xnsched_realtime_cpus); + if (ret) + goto fail_proxy; + + return 0; + +fail_proxy: +#ifdef CONFIG_SMP + if (num_possible_cpus() > 1) + free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata); +#endif + + return ret; +} + +void pipeline_uninstall_tick_proxy(void) +{ + /* Uninstall the proxy tick device. */ + tick_uninstall_proxy(&xnsched_realtime_cpus); + +#ifdef CONFIG_SMP + if (num_possible_cpus() > 1) + free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata); +#endif +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/heap.c b/kernel/xenomai-v3.2.4/kernel/cobalt/heap.c new file mode 100644 index 0000000..f106d5d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/heap.c @@ -0,0 +1,863 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>. 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/stdarg.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/bitops.h> +#include <linux/mm.h> +#include <asm/pgtable.h> +#include <cobalt/kernel/assert.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/vfile.h> +#include <cobalt/kernel/ancillaries.h> +#include <asm/xenomai/wrappers.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_heap Dynamic memory allocation services + * + * This code implements a variant of the allocator described in + * "Design of a General Purpose Memory Allocator for the 4.3BSD Unix + * Kernel" by Marshall K. McKusick and Michael J. Karels (USENIX + * 1988), see http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf. + * The free page list is maintained in rbtrees for fast lookups of + * multi-page memory ranges, and pages holding bucketed memory have a + * fast allocation bitmap to manage their blocks internally. + *@{ + */ +struct xnheap cobalt_heap; /* System heap */ +EXPORT_SYMBOL_GPL(cobalt_heap); + +static LIST_HEAD(heapq); /* Heap list for v-file dump */ + +static int nrheaps; + +#ifdef CONFIG_XENO_OPT_VFILE + +static struct xnvfile_rev_tag vfile_tag; + +static struct xnvfile_snapshot_ops vfile_ops; + +struct vfile_priv { + struct xnheap *curr; +}; + +struct vfile_data { + size_t all_mem; + size_t free_mem; + char name[XNOBJECT_NAME_LEN]; +}; + +static struct xnvfile_snapshot vfile = { + .privsz = sizeof(struct vfile_priv), + .datasz = sizeof(struct vfile_data), + .tag = &vfile_tag, + .ops = &vfile_ops, +}; + +static int vfile_rewind(struct xnvfile_snapshot_iterator *it) +{ + struct vfile_priv *priv = xnvfile_iterator_priv(it); + + if (list_empty(&heapq)) { + priv->curr = NULL; + return 0; + } + + priv->curr = list_first_entry(&heapq, struct xnheap, next); + + return nrheaps; +} + +static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data) +{ + struct vfile_priv *priv = xnvfile_iterator_priv(it); + struct vfile_data *p = data; + struct xnheap *heap; + + if (priv->curr == NULL) + return 0; /* We are done. 
*/ + + heap = priv->curr; + if (list_is_last(&heap->next, &heapq)) + priv->curr = NULL; + else + priv->curr = list_entry(heap->next.next, + struct xnheap, next); + + p->all_mem = xnheap_get_size(heap); + p->free_mem = xnheap_get_free(heap); + knamecpy(p->name, heap->name); + + return 1; +} + +static int vfile_show(struct xnvfile_snapshot_iterator *it, void *data) +{ + struct vfile_data *p = data; + + if (p == NULL) + xnvfile_printf(it, "%9s %9s %s\n", + "TOTAL", "FREE", "NAME"); + else + xnvfile_printf(it, "%9zu %9zu %s\n", + p->all_mem, + p->free_mem, + p->name); + return 0; +} + +static struct xnvfile_snapshot_ops vfile_ops = { + .rewind = vfile_rewind, + .next = vfile_next, + .show = vfile_show, +}; + +void xnheap_init_proc(void) +{ + xnvfile_init_snapshot("heap", &vfile, &cobalt_vfroot); +} + +void xnheap_cleanup_proc(void) +{ + xnvfile_destroy_snapshot(&vfile); +} + +#endif /* CONFIG_XENO_OPT_VFILE */ + +enum xnheap_pgtype { + page_free =0, + page_cont =1, + page_list =2 +}; + +static inline u32 __always_inline +gen_block_mask(int log2size) +{ + return -1U >> (32 - (XNHEAP_PAGE_SIZE >> log2size)); +} + +static inline __always_inline +int addr_to_pagenr(struct xnheap *heap, void *p) +{ + return ((void *)p - heap->membase) >> XNHEAP_PAGE_SHIFT; +} + +static inline __always_inline +void *pagenr_to_addr(struct xnheap *heap, int pg) +{ + return heap->membase + (pg << XNHEAP_PAGE_SHIFT); +} + +#ifdef CONFIG_XENO_OPT_DEBUG_MEMORY +/* + * Setting page_cont/page_free in the page map is only required for + * enabling full checking of the block address in free requests, which + * may be extremely time-consuming when deallocating huge blocks + * spanning thousands of pages. We only do such marking when running + * in memory debug mode. + */ +static inline bool +page_is_valid(struct xnheap *heap, int pg) +{ + switch (heap->pagemap[pg].type) { + case page_free: + case page_cont: + return false; + case page_list: + default: + return true; + } +} + +static void mark_pages(struct xnheap *heap, + int pg, int nrpages, + enum xnheap_pgtype type) +{ + while (nrpages-- > 0) + heap->pagemap[pg].type = type; +} + +#else + +static inline bool +page_is_valid(struct xnheap *heap, int pg) +{ + return true; +} + +static void mark_pages(struct xnheap *heap, + int pg, int nrpages, + enum xnheap_pgtype type) +{ } + +#endif + +static struct xnheap_range * +search_size_ge(struct rb_root *t, size_t size) +{ + struct rb_node *rb, *deepest = NULL; + struct xnheap_range *r; + + /* + * We first try to find an exact match. If that fails, we walk + * the tree in logical order by increasing size value from the + * deepest node traversed until we find the first successor to + * that node, or nothing beyond it, whichever comes first. 
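+ * For example, with free ranges of 3, 5 and 9 pages indexed in
+ * the size tree, a request for 4 pages makes the exact-match
+ * descent fall off the tree below the 3-page node; walking
+ * rb_next() from there yields the 5-page range, i.e. the
+ * smallest range large enough to satisfy the request.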
+ */ + rb = t->rb_node; + while (rb) { + deepest = rb; + r = rb_entry(rb, struct xnheap_range, size_node); + if (size < r->size) { + rb = rb->rb_left; + continue; + } + if (size > r->size) { + rb = rb->rb_right; + continue; + } + return r; + } + + rb = deepest; + while (rb) { + r = rb_entry(rb, struct xnheap_range, size_node); + if (size <= r->size) + return r; + rb = rb_next(rb); + } + + return NULL; +} + +static struct xnheap_range * +search_left_mergeable(struct xnheap *heap, struct xnheap_range *r) +{ + struct rb_node *node = heap->addr_tree.rb_node; + struct xnheap_range *p; + + while (node) { + p = rb_entry(node, struct xnheap_range, addr_node); + if ((void *)p + p->size == (void *)r) + return p; + if (&r->addr_node < node) + node = node->rb_left; + else + node = node->rb_right; + } + + return NULL; +} + +static struct xnheap_range * +search_right_mergeable(struct xnheap *heap, struct xnheap_range *r) +{ + struct rb_node *node = heap->addr_tree.rb_node; + struct xnheap_range *p; + + while (node) { + p = rb_entry(node, struct xnheap_range, addr_node); + if ((void *)r + r->size == (void *)p) + return p; + if (&r->addr_node < node) + node = node->rb_left; + else + node = node->rb_right; + } + + return NULL; +} + +static void insert_range_bysize(struct xnheap *heap, struct xnheap_range *r) +{ + struct rb_node **new = &heap->size_tree.rb_node, *parent = NULL; + struct xnheap_range *p; + + while (*new) { + p = container_of(*new, struct xnheap_range, size_node); + parent = *new; + if (r->size <= p->size) + new = &((*new)->rb_left); + else + new = &((*new)->rb_right); + } + + rb_link_node(&r->size_node, parent, new); + rb_insert_color(&r->size_node, &heap->size_tree); +} + +static void insert_range_byaddr(struct xnheap *heap, struct xnheap_range *r) +{ + struct rb_node **new = &heap->addr_tree.rb_node, *parent = NULL; + struct xnheap_range *p; + + while (*new) { + p = container_of(*new, struct xnheap_range, addr_node); + parent = *new; + if (r < p) + new = &((*new)->rb_left); + else + new = &((*new)->rb_right); + } + + rb_link_node(&r->addr_node, parent, new); + rb_insert_color(&r->addr_node, &heap->addr_tree); +} + +static int reserve_page_range(struct xnheap *heap, size_t size) +{ + struct xnheap_range *new, *splitr; + + /* Find a suitable range of pages covering 'size'. */ + new = search_size_ge(&heap->size_tree, size); + if (new == NULL) + return -1; + + rb_erase(&new->size_node, &heap->size_tree); + if (new->size == size) { + rb_erase(&new->addr_node, &heap->addr_tree); + return addr_to_pagenr(heap, new); + } + + /* + * The free range fetched is larger than what we need: split + * it in two, the upper part is returned to the caller, the + * lower part is sent back to the free list, which makes + * reindexing by address pointless. 
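+ * E.g. carving a two-page block out of a five-page free range
+ * hands the upper two pages to the caller, while the lower three
+ * pages keep their base address and only need reinserting into
+ * the size tree.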
+ */ + splitr = new; + splitr->size -= size; + new = (struct xnheap_range *)((void *)new + splitr->size); + insert_range_bysize(heap, splitr); + + return addr_to_pagenr(heap, new); +} + +static void release_page_range(struct xnheap *heap, + void *page, size_t size) +{ + struct xnheap_range *freed = page, *left, *right; + bool addr_linked = false; + + freed->size = size; + + left = search_left_mergeable(heap, freed); + if (left) { + rb_erase(&left->size_node, &heap->size_tree); + left->size += freed->size; + freed = left; + addr_linked = true; + } + + right = search_right_mergeable(heap, freed); + if (right) { + rb_erase(&right->size_node, &heap->size_tree); + freed->size += right->size; + if (addr_linked) + rb_erase(&right->addr_node, &heap->addr_tree); + else + rb_replace_node(&right->addr_node, &freed->addr_node, + &heap->addr_tree); + } else if (!addr_linked) + insert_range_byaddr(heap, freed); + + insert_range_bysize(heap, freed); + mark_pages(heap, addr_to_pagenr(heap, page), + size >> XNHEAP_PAGE_SHIFT, page_free); +} + +static void add_page_front(struct xnheap *heap, + int pg, int log2size) +{ + struct xnheap_pgentry *new, *head, *next; + int ilog; + + /* Insert page at front of the per-bucket page list. */ + + ilog = log2size - XNHEAP_MIN_LOG2; + new = &heap->pagemap[pg]; + if (heap->buckets[ilog] == -1U) { + heap->buckets[ilog] = pg; + new->prev = new->next = pg; + } else { + head = &heap->pagemap[heap->buckets[ilog]]; + new->prev = heap->buckets[ilog]; + new->next = head->next; + next = &heap->pagemap[new->next]; + next->prev = pg; + head->next = pg; + heap->buckets[ilog] = pg; + } +} + +static void remove_page(struct xnheap *heap, + int pg, int log2size) +{ + struct xnheap_pgentry *old, *prev, *next; + int ilog = log2size - XNHEAP_MIN_LOG2; + + /* Remove page from the per-bucket page list. */ + + old = &heap->pagemap[pg]; + if (pg == old->next) + heap->buckets[ilog] = -1U; + else { + if (pg == heap->buckets[ilog]) + heap->buckets[ilog] = old->next; + prev = &heap->pagemap[old->prev]; + prev->next = old->next; + next = &heap->pagemap[old->next]; + next->prev = old->prev; + } +} + +static void move_page_front(struct xnheap *heap, + int pg, int log2size) +{ + int ilog = log2size - XNHEAP_MIN_LOG2; + + /* Move page at front of the per-bucket page list. */ + + if (heap->buckets[ilog] == pg) + return; /* Already at front, no move. */ + + remove_page(heap, pg, log2size); + add_page_front(heap, pg, log2size); +} + +static void move_page_back(struct xnheap *heap, + int pg, int log2size) +{ + struct xnheap_pgentry *old, *last, *head, *next; + int ilog; + + /* Move page at end of the per-bucket page list. */ + + old = &heap->pagemap[pg]; + if (pg == old->next) /* Singleton, no move. */ + return; + + remove_page(heap, pg, log2size); + + ilog = log2size - XNHEAP_MIN_LOG2; + head = &heap->pagemap[heap->buckets[ilog]]; + last = &heap->pagemap[head->prev]; + old->prev = head->prev; + old->next = last->next; + next = &heap->pagemap[old->next]; + next->prev = pg; + last->next = pg; +} + +static void *add_free_range(struct xnheap *heap, + size_t bsize, int log2size) +{ + int pg; + + pg = reserve_page_range(heap, ALIGN(bsize, XNHEAP_PAGE_SIZE)); + if (pg < 0) + return NULL; + + /* + * Update the page entry. If @log2size is non-zero + * (i.e. bsize < XNHEAP_PAGE_SIZE), bsize is (1 << log2Size) + * between 2^XNHEAP_MIN_LOG2 and 2^(XNHEAP_PAGE_SHIFT - 1). + * Save the log2 power into entry.type, then update the + * per-page allocation bitmap to reserve the first block. 
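+ * (For instance, assuming XNHEAP_PAGE_SIZE is 512 bytes, a
+ * log2size of 6 gives eight 64-byte blocks per page:
+ * gen_block_mask() returns 0xff, and the map is primed with
+ * ~0xff | 1, i.e. block #0 busy and the upper 24 bits
+ * permanently set since they map no storage.)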
+ * + * Otherwise, we have a larger block which may span multiple + * pages: set entry.type to page_list, indicating the start of + * the page range, and entry.bsize to the overall block size. + */ + if (log2size) { + heap->pagemap[pg].type = log2size; + /* + * Mark the first object slot (#0) as busy, along with + * the leftmost bits we won't use for this log2 size. + */ + heap->pagemap[pg].map = ~gen_block_mask(log2size) | 1; + /* + * Insert the new page at front of the per-bucket page + * list, enforcing the assumption that pages with free + * space live close to the head of this list. + */ + add_page_front(heap, pg, log2size); + } else { + heap->pagemap[pg].type = page_list; + heap->pagemap[pg].bsize = (u32)bsize; + mark_pages(heap, pg + 1, + (bsize >> XNHEAP_PAGE_SHIFT) - 1, page_cont); + } + + heap->used_size += bsize; + + return pagenr_to_addr(heap, pg); +} + +/** + * @fn void *xnheap_alloc(struct xnheap *heap, size_t size) + * @brief Allocate a memory block from a memory heap. + * + * Allocates a contiguous region of memory from an active memory heap. + * Such allocation is guaranteed to be time-bounded. + * + * @param heap The descriptor address of the heap to get memory from. + * + * @param size The size in bytes of the requested block. + * + * @return The address of the allocated region upon success, or NULL + * if no memory is available from the specified heap. + * + * @coretags{unrestricted} + */ +void *xnheap_alloc(struct xnheap *heap, size_t size) +{ + int log2size, ilog, pg, b = -1; + size_t bsize; + void *block; + spl_t s; + + if (size == 0) + return NULL; + + if (size < XNHEAP_MIN_ALIGN) { + bsize = size = XNHEAP_MIN_ALIGN; + log2size = XNHEAP_MIN_LOG2; + } else { + log2size = ilog2(size); + if (log2size < XNHEAP_PAGE_SHIFT) { + if (size & (size - 1)) + log2size++; + bsize = 1 << log2size; + } else + bsize = ALIGN(size, XNHEAP_PAGE_SIZE); + } + + /* + * Allocate entire pages directly from the pool whenever the + * block is larger or equal to XNHEAP_PAGE_SIZE. Otherwise, + * use bucketed memory. + * + * NOTE: Fully busy pages from bucketed memory are moved back + * at the end of the per-bucket page list, so that we may + * always assume that either the heading page has some room + * available, or no room is available from any page linked to + * this list, in which case we should immediately add a fresh + * page. + */ + xnlock_get_irqsave(&heap->lock, s); + + if (bsize >= XNHEAP_PAGE_SIZE) + /* Add a range of contiguous free pages. */ + block = add_free_range(heap, bsize, 0); + else { + ilog = log2size - XNHEAP_MIN_LOG2; + XENO_WARN_ON(MEMORY, ilog < 0 || ilog >= XNHEAP_MAX_BUCKETS); + pg = heap->buckets[ilog]; + /* + * Find a block in the heading page if any. If there + * is none, there won't be any down the list: add a + * new page right away. + */ + if (pg < 0 || heap->pagemap[pg].map == -1U) + block = add_free_range(heap, bsize, log2size); + else { + b = ffs(~heap->pagemap[pg].map) - 1; + /* + * Got one block from the heading per-bucket + * page, tag it as busy in the per-page + * allocation map. + */ + heap->pagemap[pg].map |= (1U << b); + heap->used_size += bsize; + block = heap->membase + + (pg << XNHEAP_PAGE_SHIFT) + + (b << log2size); + if (heap->pagemap[pg].map == -1U) + move_page_back(heap, pg, log2size); + } + } + + xnlock_put_irqrestore(&heap->lock, s); + + return block; +} +EXPORT_SYMBOL_GPL(xnheap_alloc); + +/** + * @fn void xnheap_free(struct xnheap *heap, void *block) + * @brief Release a block to a memory heap. + * + * Releases a memory block to a heap. 
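+ *
+ * Editor's sketch of a typical allocation/release pair (illustrative
+ * only, not part of the patch):
+ *
+ *	void *p = xnheap_alloc(&cobalt_heap, 128);
+ *	if (p == NULL)
+ *		return -ENOMEM;
+ *	...
+ *	xnheap_free(&cobalt_heap, p);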
+ *
+ * @param heap The heap descriptor.
+ *
+ * @param block The block to be returned to the heap.
+ *
+ * @coretags{unrestricted}
+ */
+void xnheap_free(struct xnheap *heap, void *block)
+{
+	unsigned long pgoff, boff;
+	int log2size, pg, n;
+	size_t bsize;
+	u32 oldmap;
+	spl_t s;
+
+	xnlock_get_irqsave(&heap->lock, s);
+
+	/* Compute the heading page number in the page map. */
+	pgoff = block - heap->membase;
+	pg = pgoff >> XNHEAP_PAGE_SHIFT;
+
+	if (!page_is_valid(heap, pg))
+		goto bad;
+
+	switch (heap->pagemap[pg].type) {
+	case page_list:
+		bsize = heap->pagemap[pg].bsize;
+		XENO_WARN_ON(MEMORY, (bsize & (XNHEAP_PAGE_SIZE - 1)) != 0);
+		release_page_range(heap, pagenr_to_addr(heap, pg), bsize);
+		break;
+
+	default:
+		log2size = heap->pagemap[pg].type;
+		bsize = (1 << log2size);
+		XENO_WARN_ON(MEMORY, bsize >= XNHEAP_PAGE_SIZE);
+		boff = pgoff & ~XNHEAP_PAGE_MASK;
+		if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+			goto bad;
+
+		n = boff >> log2size; /* Block position in page. */
+		oldmap = heap->pagemap[pg].map;
+		heap->pagemap[pg].map &= ~(1U << n);
+
+		/*
+		 * If the page the block was sitting on is fully idle,
+		 * return it to the pool. Otherwise, check whether
+		 * that page is transitioning from fully busy to
+		 * partially busy state, in which case it should move
+		 * toward the front of the per-bucket page list.
+		 */
+		if (heap->pagemap[pg].map == ~gen_block_mask(log2size)) {
+			remove_page(heap, pg, log2size);
+			release_page_range(heap, pagenr_to_addr(heap, pg),
+					   XNHEAP_PAGE_SIZE);
+		} else if (oldmap == -1U)
+			move_page_front(heap, pg, log2size);
+	}
+
+	heap->used_size -= bsize;
+
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return;
+bad:
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	XENO_WARN(MEMORY, 1, "invalid block %p in heap %s",
+		  block, heap->name);
+}
+EXPORT_SYMBOL_GPL(xnheap_free);
+
+ssize_t xnheap_check_block(struct xnheap *heap, void *block)
+{
+	unsigned long pg, pgoff, boff;
+	ssize_t ret = -EINVAL;
+	size_t bsize;
+	spl_t s;
+
+	xnlock_get_irqsave(&heap->lock, s);
+
+	/* Calculate the page number from the block address. */
+	pgoff = block - heap->membase;
+	pg = pgoff >> XNHEAP_PAGE_SHIFT;
+	if (page_is_valid(heap, pg)) {
+		if (heap->pagemap[pg].type == page_list)
+			bsize = heap->pagemap[pg].bsize;
+		else {
+			bsize = (1 << heap->pagemap[pg].type);
+			boff = pgoff & ~XNHEAP_PAGE_MASK;
+			if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+				goto out;
+		}
+		ret = (ssize_t)bsize;
+	}
+out:
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnheap_check_block);
+
+/**
+ * @fn xnheap_init(struct xnheap *heap, void *membase, size_t size)
+ * @brief Initialize a memory heap.
+ *
+ * Initializes a memory heap suitable for time-bounded allocation
+ * requests of dynamic memory.
+ *
+ * @param heap The address of a heap descriptor to initialize.
+ *
+ * @param membase The address of the storage area.
+ *
+ * @param size The size in bytes of the storage area. @a size must be
+ * a multiple of PAGE_SIZE and smaller than (4Gb - PAGE_SIZE) in the
+ * current implementation.
+ *
+ * @return 0 is returned upon success, or:
+ *
+ * - -EINVAL is returned if @a size is either greater than
+ * XNHEAP_MAX_HEAPSZ, or not aligned on PAGE_SIZE.
+ *
+ * - -ENOMEM is returned upon failure of allocating the meta-data area
+ * used internally to maintain the heap.
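+ *
+ * Editor's sketch of the intended setup sequence (illustrative
+ * pairing with xnheap_vmalloc(); 'myheap' is a made-up name):
+ *
+ *	static struct xnheap myheap;
+ *	void *mem = xnheap_vmalloc(64 * 1024);
+ *
+ *	if (mem == NULL || xnheap_init(&myheap, mem, 64 * 1024))
+ *		goto fail;
+ *	xnheap_set_name(&myheap, "my heap");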
+ * + * @coretags{secondary-only} + */ +int xnheap_init(struct xnheap *heap, void *membase, size_t size) +{ + int n, nrpages; + spl_t s; + + secondary_mode_only(); + + if (size > XNHEAP_MAX_HEAPSZ || !PAGE_ALIGNED(size)) + return -EINVAL; + + /* Reset bucket page lists, all empty. */ + for (n = 0; n < XNHEAP_MAX_BUCKETS; n++) + heap->buckets[n] = -1U; + + xnlock_init(&heap->lock); + + nrpages = size >> XNHEAP_PAGE_SHIFT; + heap->pagemap = vzalloc(sizeof(struct xnheap_pgentry) * nrpages); + if (heap->pagemap == NULL) + return -ENOMEM; + + heap->membase = membase; + heap->usable_size = size; + heap->used_size = 0; + + /* + * The free page pool is maintained as a set of ranges of + * contiguous pages indexed by address and size in rbtrees. + * Initially, we have a single range in those trees covering + * the whole memory we have been given for the heap. Over + * time, that range will be split then possibly re-merged back + * as allocations and deallocations take place. + */ + heap->size_tree = RB_ROOT; + heap->addr_tree = RB_ROOT; + release_page_range(heap, membase, size); + + /* Default name, override with xnheap_set_name() */ + ksformat(heap->name, sizeof(heap->name), "(%p)", heap); + + xnlock_get_irqsave(&nklock, s); + list_add_tail(&heap->next, &heapq); + nrheaps++; + xnvfile_touch_tag(&vfile_tag); + xnlock_put_irqrestore(&nklock, s); + + return 0; +} +EXPORT_SYMBOL_GPL(xnheap_init); + +/** + * @fn void xnheap_destroy(struct xnheap *heap) + * @brief Destroys a memory heap. + * + * Destroys a memory heap. + * + * @param heap The heap descriptor. + * + * @coretags{secondary-only} + */ +void xnheap_destroy(struct xnheap *heap) +{ + spl_t s; + + secondary_mode_only(); + + xnlock_get_irqsave(&nklock, s); + list_del(&heap->next); + nrheaps--; + xnvfile_touch_tag(&vfile_tag); + xnlock_put_irqrestore(&nklock, s); + vfree(heap->pagemap); +} +EXPORT_SYMBOL_GPL(xnheap_destroy); + +/** + * @fn xnheap_set_name(struct xnheap *heap,const char *name,...) + * @brief Set the heap's name string. + * + * Set the heap name that will be used in statistic outputs. + * + * @param heap The address of a heap descriptor. + * + * @param name Name displayed in statistic outputs. This parameter can + * be a printk()-like format argument list. + * + * @coretags{task-unrestricted} + */ +void xnheap_set_name(struct xnheap *heap, const char *name, ...) +{ + va_list args; + + va_start(args, name); + kvsformat(heap->name, sizeof(heap->name), name, args); + va_end(args); +} +EXPORT_SYMBOL_GPL(xnheap_set_name); + +void *xnheap_vmalloc(size_t size) +{ + /* + * We want memory used in real-time context to be pulled from + * ZONE_NORMAL, however we don't need it to be physically + * contiguous. + * + * 32bit systems which would need HIGHMEM for running a Cobalt + * configuration would also be required to support PTE + * pinning, which not all architectures provide. Moreover, + * pinning PTEs eagerly for a potentially (very) large amount + * of memory may quickly degrade performance. + * + * If using a different kernel/user memory split cannot be the + * answer for those configs, it's likely that basing such + * software on a 32bit system had to be wrong in the first + * place anyway. 
+ */ + return vmalloc_kernel(size, 0); +} +EXPORT_SYMBOL_GPL(xnheap_vmalloc); + +void xnheap_vfree(void *p) +{ + vfree(p); +} +EXPORT_SYMBOL_GPL(xnheap_vfree); + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h new file mode 100644 index 0000000..8939e45 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H +#define _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H + +#include <linux/dovetail.h> + +struct xnarchtcb { + struct dovetail_altsched_context altsched; +}; + +static inline +struct task_struct *xnarch_host_task(struct xnarchtcb *tcb) +{ + return tcb->altsched.task; +} + +#endif /* !_COBALT_ASM_GENERIC_DOVETAIL_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h new file mode 100644 index 0000000..fcd7275 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ASM_GENERIC_IPIPE_THREAD_H +#define _COBALT_ASM_GENERIC_IPIPE_THREAD_H + +#include <asm/ptrace.h> +#include <asm/processor.h> + +struct task_struct; + +struct xntcb { + struct task_struct *host_task; + struct thread_struct *tsp; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct thread_struct ts; + struct thread_info *tip; +#ifdef CONFIG_XENO_ARCH_FPU + struct task_struct *user_fpu_owner; +#endif +}; + +#endif /* !_COBALT_ASM_GENERIC_IPIPE_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h new file mode 100644 index 0000000..f45e523 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h @@ -0,0 +1,28 @@ +/** + * Copyright © 2012 Philippe Gerum. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_ASM_GENERIC_MACHINE_H +#define _COBALT_ASM_GENERIC_MACHINE_H + +#include <pipeline/machine.h> + +#ifndef xnarch_cache_aliasing +#define xnarch_cache_aliasing() 0 +#endif + +#endif /* !_COBALT_ASM_GENERIC_MACHINE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h new file mode 100644 index 0000000..1a6c308 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2009 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ASM_GENERIC_PCI_IDS_H +#define _COBALT_ASM_GENERIC_PCI_IDS_H + +#include <linux/pci_ids.h> + +/* SMI */ +#ifndef PCI_DEVICE_ID_INTEL_ESB2_0 +#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH7_0 +#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH7_1 +#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH8_4 +#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH9_1 +#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH9_5 +#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH10_1 +#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 +#endif +#ifndef PCI_DEVICE_ID_INTEL_PCH_LPC_MIN +#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00 +#endif + +/* RTCAN */ +#ifndef PCI_VENDOR_ID_ESDGMBH +#define PCI_VENDOR_ID_ESDGMBH 0x12fe +#endif +#ifndef PCI_DEVICE_ID_PLX_9030 +#define PCI_DEVICE_ID_PLX_9030 0x9030 +#endif +#ifndef PCI_DEVICE_ID_PLX_9056 +#define PCI_DEVICE_ID_PLX_9056 0x9056 +#endif + +#endif /* _COBALT_ASM_GENERIC_PCI_IDS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h new file mode 100644 index 0000000..117bb3f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>. 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ASM_GENERIC_SYSCALL_H +#define _COBALT_ASM_GENERIC_SYSCALL_H + +#include <linux/types.h> +#include <linux/version.h> +#include <linux/uaccess.h> +#include <asm/xenomai/features.h> +#include <asm/xenomai/wrappers.h> +#include <asm/xenomai/machine.h> +#include <cobalt/uapi/asm-generic/syscall.h> +#include <cobalt/uapi/kernel/types.h> + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#define access_rok(addr, size) access_ok((addr), (size)) +#define access_wok(addr, size) access_ok((addr), (size)) +#else +#define access_rok(addr, size) access_ok(VERIFY_READ, (addr), (size)) +#define access_wok(addr, size) access_ok(VERIFY_WRITE, (addr), (size)) +#endif + +#define __xn_copy_from_user(dstP, srcP, n) raw_copy_from_user(dstP, srcP, n) +#define __xn_copy_to_user(dstP, srcP, n) raw_copy_to_user(dstP, srcP, n) +#define __xn_put_user(src, dstP) __put_user(src, dstP) +#define __xn_get_user(dst, srcP) __get_user(dst, srcP) +#define __xn_strncpy_from_user(dstP, srcP, n) strncpy_from_user(dstP, srcP, n) + +static inline int cobalt_copy_from_user(void *dst, const void __user *src, + size_t size) +{ + size_t remaining = size; + + if (likely(access_rok(src, size))) + remaining = __xn_copy_from_user(dst, src, size); + + if (unlikely(remaining > 0)) { + memset(dst + (size - remaining), 0, remaining); + return -EFAULT; + } + return 0; +} + +static inline int cobalt_copy_to_user(void __user *dst, const void *src, + size_t size) +{ + if (unlikely(!access_wok(dst, size) || + __xn_copy_to_user(dst, src, size))) + return -EFAULT; + return 0; +} + +static inline int cobalt_strncpy_from_user(char *dst, const char __user *src, + size_t count) +{ + if (unlikely(!access_rok(src, 1))) + return -EFAULT; + + return __xn_strncpy_from_user(dst, src, count); +} + + +/* + * NOTE: those copy helpers won't work in compat mode: use + * sys32_get_*(), sys32_put_*() instead. 
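+ *
+ * Editor's sketch of the expected calling pattern (hypothetical
+ * request structure, not part of the patch):
+ *
+ *	struct my_request req;
+ *
+ *	if (cobalt_copy_from_user(&req, u_req, sizeof(req)))
+ *		return -EFAULT;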
+ */ + +static inline int cobalt_get_u_timespec(struct timespec64 *dst, + const struct __user_old_timespec __user *src) +{ + struct __user_old_timespec u_ts; + int ret; + + ret = cobalt_copy_from_user(&u_ts, src, sizeof(u_ts)); + if (ret) + return ret; + + dst->tv_sec = u_ts.tv_sec; + dst->tv_nsec = u_ts.tv_nsec; + + return 0; +} + +static inline int cobalt_put_u_timespec( + struct __user_old_timespec __user *dst, + const struct timespec64 *src) +{ + struct __user_old_timespec u_ts; + int ret; + + u_ts.tv_sec = src->tv_sec; + u_ts.tv_nsec = src->tv_nsec; + + ret = cobalt_copy_to_user(dst, &u_ts, sizeof(*dst)); + if (ret) + return ret; + + return 0; +} + +static inline int cobalt_get_u_itimerspec(struct itimerspec64 *dst, + const struct __user_old_itimerspec __user *src) +{ + struct __user_old_itimerspec u_its; + int ret; + + ret = cobalt_copy_from_user(&u_its, src, sizeof(u_its)); + if (ret) + return ret; + + dst->it_interval.tv_sec = u_its.it_interval.tv_sec; + dst->it_interval.tv_nsec = u_its.it_interval.tv_nsec; + dst->it_value.tv_sec = u_its.it_value.tv_sec; + dst->it_value.tv_nsec = u_its.it_value.tv_nsec; + + return 0; +} + +static inline int cobalt_put_u_itimerspec( + struct __user_old_itimerspec __user *dst, + const struct itimerspec64 *src) +{ + struct __user_old_itimerspec u_its; + + u_its.it_interval.tv_sec = src->it_interval.tv_sec; + u_its.it_interval.tv_nsec = src->it_interval.tv_nsec; + u_its.it_value.tv_sec = src->it_value.tv_sec; + u_its.it_value.tv_nsec = src->it_value.tv_nsec; + + return cobalt_copy_to_user(dst, &u_its, sizeof(*dst)); +} + +/* 32bit syscall emulation */ +#define __COBALT_COMPAT_BIT 0x1 +/* 32bit syscall emulation - extended form */ +#define __COBALT_COMPATX_BIT 0x2 + +#endif /* !_COBALT_ASM_GENERIC_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h new file mode 100644 index 0000000..b0c6f4a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */
+#ifndef _COBALT_ASM_GENERIC_SYSCALL32_H
+#define _COBALT_ASM_GENERIC_SYSCALL32_H
+
+#define __COBALT_CALL32_INITHAND(__handler)
+
+#define __COBALT_CALL32_INITMODE(__mode)
+
+#define __COBALT_CALL32_ENTRY(__name, __handler)
+
+#define __COBALT_CALL_COMPAT(__reg)	0
+
+#endif /* !_COBALT_ASM_GENERIC_SYSCALL32_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h
new file mode 100644
index 0000000..7654047
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2005-2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_WRAPPERS_H
+#define _COBALT_ASM_GENERIC_WRAPPERS_H
+
+#include <linux/xenomai/wrappers.h>
+
+#define COBALT_BACKPORT(__sym) __cobalt_backport_ ##__sym
+
+/*
+ * To keep the #ifdefery as readable as possible, please:
+ *
+ * - keep the conditional structure flat, no nesting (e.g. do not fold
+ *   the pre-3.11 conditions into the pre-3.14 ones).
+ * - group all wrappers for a single kernel revision.
+ * - list conditional blocks in order of kernel release, latest first
+ * - identify the first kernel release for which the wrapper should
+ *   be defined, instead of testing the existence of a preprocessor
+ *   symbol, so that obsolete wrappers can be spotted.
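+ *
+ * Editor's illustration of a block following these rules (the
+ * wrapped symbol is made up):
+ *
+ *	#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
+ *	#define some_new_helper(__arg)	some_old_helper(__arg)
+ *	#endif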
+ */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) +#define raw_copy_to_user(__to, __from, __n) __copy_to_user_inatomic(__to, __from, __n) +#define raw_copy_from_user(__to, __from, __n) __copy_from_user_inatomic(__to, __from, __n) +#define raw_put_user(__from, __to) __put_user_inatomic(__from, __to) +#define raw_get_user(__to, __from) __get_user_inatomic(__to, __from) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) +#define in_ia32_syscall() (current_thread_info()->status & TS_COMPAT) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) +#define cobalt_gpiochip_dev(__gc) ((__gc)->dev) +#else +#define cobalt_gpiochip_dev(__gc) ((__gc)->parent) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) +#define cobalt_get_restart_block(p) (&task_thread_info(p)->restart_block) +#else +#define cobalt_get_restart_block(p) (&(p)->restart_block) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) +#define user_msghdr msghdr +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) +#include <linux/netdevice.h> + +#undef alloc_netdev +#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ + alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) + +#include <linux/trace_seq.h> + +static inline unsigned char * +trace_seq_buffer_ptr(struct trace_seq *s) +{ + return s->buffer + s->len; +} + +#endif /* < 3.17 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif /* < 3.16 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) +#define raw_cpu_ptr(v) __this_cpu_ptr(v) +#endif /* < 3.15 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) +#include <linux/pci.h> + +#ifdef CONFIG_PCI +#define pci_enable_msix_range COBALT_BACKPORT(pci_enable_msix_range) +#ifdef CONFIG_PCI_MSI +int pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, + int minvec, int maxvec); +#else /* !CONFIG_PCI_MSI */ +static inline +int pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, + int minvec, int maxvec) +{ + return -ENOSYS; +} +#endif /* !CONFIG_PCI_MSI */ +#endif /* CONFIG_PCI */ +#endif /* < 3.14 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) +#include <linux/dma-mapping.h> +#include <linux/hwmon.h> + +#define dma_set_mask_and_coherent COBALT_BACKPORT(dma_set_mask_and_coherent) +static inline +int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} + +#ifdef CONFIG_HWMON +#define hwmon_device_register_with_groups \ + COBALT_BACKPORT(hwmon_device_register_with_groups) +struct device * +hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups); + +#define devm_hwmon_device_register_with_groups \ + COBALT_BACKPORT(devm_hwmon_device_register_with_groups) +struct device * +devm_hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups); +#endif /* !CONFIG_HWMON */ + +#define reinit_completion(__x) INIT_COMPLETION(*(__x)) + +#endif /* < 3.13 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) +#define DEVICE_ATTR_RW(_name) __ATTR_RW(_name) +#define DEVICE_ATTR_RO(_name) __ATTR_RO(_name) +#define DEVICE_ATTR_WO(_name) __ATTR_WO(_name) +#endif /* < 3.11 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) +#error "Xenomai/cobalt requires Linux kernel 3.10 or above" +#endif /* < 3.10 */ + +#if LINUX_VERSION_CODE 
< KERNEL_VERSION(5,0,0)
+#define __kernel_timex timex
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)
+#define old_timex32 compat_timex
+#define SO_RCVTIMEO_OLD SO_RCVTIMEO
+#define SO_SNDTIMEO_OLD SO_SNDTIMEO
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+#define mmiowb() do { } while (0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#define __kernel_old_timeval timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,208) || \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0) && \
+	 LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0))
+#define mmap_read_lock(__mm)	down_read(&(__mm)->mmap_sem)
+#define mmap_read_unlock(__mm)	up_read(&(__mm)->mmap_sem)
+#define mmap_write_lock(__mm)	down_write(&(__mm)->mmap_sem)
+#define mmap_write_unlock(__mm)	up_write(&(__mm)->mmap_sem)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write) \
+	struct file_operations __name = { \
+		.open = (__open), \
+		.release = (__release), \
+		.read = (__read), \
+		.write = (__write), \
+		.llseek = seq_lseek, \
+}
+#else
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write) \
+	struct proc_ops __name = { \
+		.proc_open = (__open), \
+		.proc_release = (__release), \
+		.proc_read = (__read), \
+		.proc_write = (__write), \
+		.proc_lseek = seq_lseek, \
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)
+#define vmalloc_kernel(__size, __flags) __vmalloc(__size, GFP_KERNEL|__flags, PAGE_KERNEL)
+#else
+#define vmalloc_kernel(__size, __flags) __vmalloc(__size, GFP_KERNEL|__flags)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)
+#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status
+#define old_timespec32 compat_timespec
+#define old_itimerspec32 compat_itimerspec
+#define old_timeval32 compat_timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	({ \
+		loff_t ___file_size; \
+		int __ret; \
+		__ret = kernel_read_file(__file, __buf, &___file_size, \
+					 __buf_size, __id); \
+		(*__file_size) = ___file_size; \
+		__ret; \
+	})
+#else
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	kernel_read_file(__file, 0, __buf, __buf_size, __file_size, __id)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#if __has_attribute(__fallthrough__)
+# define fallthrough	__attribute__((__fallthrough__))
+#else
+# define fallthrough	do {} while (0)	/* fallthrough */
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+#define IRQ_WORK_INIT(_func) (struct irq_work) {	\
+	.flags = ATOMIC_INIT(0),			\
+	.func = (_func),				\
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0)
+#define close_fd(__ufd)	__close_fd(current->files, __ufd)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0) || \
+	 LINUX_VERSION_CODE < KERNEL_VERSION(5,10,188)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0) || \
+	 LINUX_VERSION_CODE < KERNEL_VERSION(5,4,251)) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(4,20,0) || \
+	 LINUX_VERSION_CODE < KERNEL_VERSION(4,19,291))
+#define dev_addr_set(dev, addr)		memcpy((dev)->dev_addr, addr, MAX_ADDR_LEN)
+#define eth_hw_addr_set(dev, addr)	\
memcpy((dev)->dev_addr, addr, ETH_ALEN) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0) +#define pde_data(i) PDE_DATA(i) +#endif + +#endif /* _COBALT_ASM_GENERIC_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h new file mode 100644 index 0000000..66d020f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org> + */ + +#ifndef _COBALT_DOVETAIL_IRQ_H +#define _COBALT_DOVETAIL_IRQ_H + +#ifdef CONFIG_XENOMAI + +#include <cobalt/kernel/sched.h> + +/* hard irqs off. */ +static inline void irq_enter_pipeline(void) +{ + struct xnsched *sched = xnsched_current(); + + sched->lflags |= XNINIRQ; +} + +/* hard irqs off. */ +static inline void irq_exit_pipeline(void) +{ + struct xnsched *sched = xnsched_current(); + + sched->lflags &= ~XNINIRQ; + + /* + * CAUTION: Switching stages as a result of rescheduling may + * re-enable irqs, shut them off before returning if so. + */ + if ((sched->status|sched->lflags) & XNRESCHED) { + xnsched_run(); + if (!hard_irqs_disabled()) + hard_local_irq_disable(); + } +} + +#else /* !CONFIG_XENOMAI */ + +static inline void irq_enter_pipeline(void) +{ +} + +static inline void irq_exit_pipeline(void) +{ +} + +#endif /* !CONFIG_XENOMAI */ + +#endif /* !_COBALT_DOVETAIL_IRQ_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h new file mode 100644 index 0000000..69b89de --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h @@ -0,0 +1,33 @@ +/** + * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>. + * Copyright (c) Siemens AG, 2020 + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_DOVETAIL_THREAD_INFO_H +#define _COBALT_DOVETAIL_THREAD_INFO_H + +struct xnthread; +struct cobalt_process; + +struct oob_thread_state { + /* Core thread backlink. */ + struct xnthread *thread; + /* User process backlink. NULL for core threads. */ + struct cobalt_process *process; +}; + +#endif /* !_COBALT_DOVETAIL_THREAD_INFO_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/ipipe/thread_info.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/ipipe/thread_info.h new file mode 100644 index 0000000..3fc467a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/ipipe/thread_info.h @@ -0,0 +1,38 @@ +/** + * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>. 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_IPIPE_THREAD_INFO_H +#define _COBALT_IPIPE_THREAD_INFO_H + +struct xnthread; +struct cobalt_process; + +struct ipipe_threadinfo { + /* Core thread backlink. */ + struct xnthread *thread; + /* User process backlink. NULL for core threads. */ + struct cobalt_process *process; +}; + +static inline void __ipipe_init_threadinfo(struct ipipe_threadinfo *p) +{ + p->thread = NULL; + p->process = NULL; +} + +#endif /* !_COBALT_IPIPE_THREAD_INFO_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h new file mode 100644 index 0000000..38ade6d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h @@ -0,0 +1 @@ +#include <stdarg.h> diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h new file mode 100644 index 0000000..349123c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_LINUX_WRAPPERS_H +#define _COBALT_LINUX_WRAPPERS_H + +#include <linux/version.h> + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) +#include <linux/signal.h> +typedef siginfo_t kernel_siginfo_t; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) +#include <linux/sched.h> +#include <linux/sched/rt.h> +#else +#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/sched/rt.h> +#include <linux/sched/mm.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> +#include <uapi/linux/sched/types.h> +#endif + +#include <pipeline/wrappers.h> + +#endif /* !_COBALT_LINUX_WRAPPERS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/init.c b/kernel/xenomai-v3.2.4/kernel/cobalt/init.c new file mode 100644 index 0000000..5168b56 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/init.c @@ -0,0 +1,325 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. 
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <xenomai/version.h>
+#include <pipeline/machine.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/pipe.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/vdso.h>
+#include <rtdm/fd.h>
+#include "rtdm/internal.h"
+#include "posix/internal.h"
+#include "procfs.h"
+
+/**
+ * @defgroup cobalt Cobalt
+ *
+ * Cobalt supplements the native Linux kernel in dual kernel
+ * configurations. It deals with all time-critical activities, such as
+ * handling interrupts, and scheduling real-time threads. The Cobalt
+ * kernel takes priority over all native kernel activities.
+ *
+ * Cobalt provides an implementation of the POSIX and RTDM interfaces
+ * based on a set of generic RTOS building blocks.
+ */
+
+#ifdef CONFIG_SMP
+static unsigned long supported_cpus_arg = -1;
+module_param_named(supported_cpus, supported_cpus_arg, ulong, 0444);
+#endif /* CONFIG_SMP */
+
+static unsigned long sysheap_size_arg;
+module_param_named(sysheap_size, sysheap_size_arg, ulong, 0444);
+
+static char init_state_arg[16] = "enabled";
+module_param_string(state, init_state_arg, sizeof(init_state_arg), 0444);
+
+static BLOCKING_NOTIFIER_HEAD(state_notifier_list);
+
+struct cobalt_pipeline cobalt_pipeline;
+EXPORT_SYMBOL_GPL(cobalt_pipeline);
+
+DEFINE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
+EXPORT_PER_CPU_SYMBOL_GPL(cobalt_machine_cpudata);
+
+atomic_t cobalt_runstate = ATOMIC_INIT(COBALT_STATE_WARMUP);
+EXPORT_SYMBOL_GPL(cobalt_runstate);
+
+struct cobalt_ppd cobalt_kernel_ppd = {
+	.exe_path = "vmlinux",
+};
+EXPORT_SYMBOL_GPL(cobalt_kernel_ppd);
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+#define boot_debug_notice "[DEBUG]"
+#else
+#define boot_debug_notice ""
+#endif
+
+#ifdef CONFIG_IPIPE_TRACE
+#define boot_lat_trace_notice "[LTRACE]"
+#else
+#define boot_lat_trace_notice ""
+#endif
+
+#ifdef CONFIG_ENABLE_DEFAULT_TRACERS
+#define boot_evt_trace_notice "[ETRACE]"
+#else
+#define boot_evt_trace_notice ""
+#endif
+
+#define boot_state_notice						\
+	({								\
+		realtime_core_state() == COBALT_STATE_STOPPED ?
\ + "[STOPPED]" : ""; \ + }) + +void cobalt_add_state_chain(struct notifier_block *nb) +{ + blocking_notifier_chain_register(&state_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(cobalt_add_state_chain); + +void cobalt_remove_state_chain(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&state_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(cobalt_remove_state_chain); + +void cobalt_call_state_chain(enum cobalt_run_states newstate) +{ + blocking_notifier_call_chain(&state_notifier_list, newstate, NULL); +} +EXPORT_SYMBOL_GPL(cobalt_call_state_chain); + +static void sys_shutdown(void) +{ + void *membase; + + pipeline_uninstall_tick_proxy(); + xnsched_destroy_all(); + xnregistry_cleanup(); + membase = xnheap_get_membase(&cobalt_heap); + xnheap_destroy(&cobalt_heap); + xnheap_vfree(membase); +} + +static struct { + const char *label; + enum cobalt_run_states state; +} init_states[] __initdata = { + { "disabled", COBALT_STATE_DISABLED }, + { "stopped", COBALT_STATE_STOPPED }, + { "enabled", COBALT_STATE_WARMUP }, +}; + +static void __init setup_init_state(void) +{ + static char warn_bad_state[] __initdata = + XENO_WARNING "invalid init state '%s'\n"; + int n; + + for (n = 0; n < ARRAY_SIZE(init_states); n++) + if (strcmp(init_states[n].label, init_state_arg) == 0) { + set_realtime_core_state(init_states[n].state); + return; + } + + printk(warn_bad_state, init_state_arg); +} + +static __init int sys_init(void) +{ + void *heapaddr; + int ret; + + if (sysheap_size_arg == 0) + sysheap_size_arg = CONFIG_XENO_OPT_SYS_HEAPSZ; + + heapaddr = xnheap_vmalloc(sysheap_size_arg * 1024); + if (heapaddr == NULL || + xnheap_init(&cobalt_heap, heapaddr, sysheap_size_arg * 1024)) { + return -ENOMEM; + } + xnheap_set_name(&cobalt_heap, "system heap"); + + xnsched_init_all(); + + xnregistry_init(); + + /* + * If starting in stopped mode, do all initializations, but do + * not enable the core timer. 
+ */ + if (realtime_core_state() == COBALT_STATE_WARMUP) { + ret = pipeline_install_tick_proxy(); + if (ret) { + sys_shutdown(); + return ret; + } + set_realtime_core_state(COBALT_STATE_RUNNING); + } + + return 0; +} + +static int __init xenomai_init(void) +{ + int ret, __maybe_unused cpu; + + setup_init_state(); + + if (!realtime_core_enabled()) { + printk(XENO_WARNING "disabled on kernel command line\n"); + return 0; + } + +#ifdef CONFIG_SMP + cpumask_clear(&xnsched_realtime_cpus); + for_each_online_cpu(cpu) { + if (supported_cpus_arg & (1UL << cpu)) + cpumask_set_cpu(cpu, &xnsched_realtime_cpus); + } + if (cpumask_empty(&xnsched_realtime_cpus)) { + printk(XENO_WARNING "disabled via empty real-time CPU mask\n"); + set_realtime_core_state(COBALT_STATE_DISABLED); + return 0; + } + if (!cpumask_test_cpu(0, &xnsched_realtime_cpus)) { + printk(XENO_ERR "CPU 0 is missing in real-time CPU mask\n"); + set_realtime_core_state(COBALT_STATE_DISABLED); + return -EINVAL; + } + cobalt_cpu_affinity = xnsched_realtime_cpus; +#endif /* CONFIG_SMP */ + + xnsched_register_classes(); + + ret = xnprocfs_init_tree(); + if (ret) + goto fail; + + ret = pipeline_init(); + if (ret) + goto cleanup_proc; + + xnintr_mount(); + + ret = xnpipe_mount(); + if (ret) + goto cleanup_mach; + + ret = xnselect_mount(); + if (ret) + goto cleanup_pipe; + + ret = sys_init(); + if (ret) + goto cleanup_select; + + ret = pipeline_late_init(); + if (ret) + goto cleanup_sys; + + ret = rtdm_init(); + if (ret) + goto cleanup_sys; + + ret = cobalt_init(); + if (ret) + goto cleanup_rtdm; + + rtdm_fd_init(); + + printk(XENO_INFO "Cobalt v%s %s%s%s%s\n", + XENO_VERSION_STRING, + boot_debug_notice, + boot_lat_trace_notice, + boot_evt_trace_notice, + boot_state_notice); + + return 0; + +cleanup_rtdm: + rtdm_cleanup(); +cleanup_sys: + sys_shutdown(); +cleanup_select: + xnselect_umount(); +cleanup_pipe: + xnpipe_umount(); +cleanup_mach: + pipeline_cleanup(); +cleanup_proc: + xnprocfs_cleanup_tree(); +fail: + set_realtime_core_state(COBALT_STATE_DISABLED); + printk(XENO_ERR "init failed, code %d\n", ret); + + return ret; +} +device_initcall(xenomai_init); + +/** + * @ingroup cobalt + * @defgroup cobalt_core Cobalt kernel + * + * The Cobalt core is a co-kernel which supplements the Linux kernel + * for delivering real-time services with very low latency. It + * implements a set of generic RTOS building blocks, which the + * Cobalt/POSIX and Cobalt/RTDM APIs are based on. Cobalt has higher + * priority over the Linux kernel activities. + * + * @{ + * + * @page cobalt-core-tags Dual kernel service tags + * + * The Cobalt kernel services may be restricted to particular calling + * contexts, or entail specific side-effects. To describe this + * information, each service documented by this section bears a set of + * tags when applicable. + * + * The table below matches the tags used throughout the documentation + * with the description of their meaning for the caller. 
+ * + * @par + * <b>Context tags</b> + * <TABLE> + * <TR><TH>Tag</TH> <TH>Context on entry</TH></TR> + * <TR><TD>primary-only</TD> <TD>Must be called from a Cobalt task in primary mode</TD></TR> + * <TR><TD>primary-timed</TD> <TD>Requires a Cobalt task in primary mode if timed</TD></TR> + * <TR><TD>coreirq-only</TD> <TD>Must be called from a Cobalt IRQ handler</TD></TR> + * <TR><TD>secondary-only</TD> <TD>Must be called from a Cobalt task in secondary mode or regular Linux task</TD></TR> + * <TR><TD>rtdm-task</TD> <TD>Must be called from a RTDM driver task</TD></TR> + * <TR><TD>mode-unrestricted</TD> <TD>May be called from a Cobalt task in either primary or secondary mode</TD></TR> + * <TR><TD>task-unrestricted</TD> <TD>May be called from a Cobalt or regular Linux task indifferently</TD></TR> + * <TR><TD>unrestricted</TD> <TD>May be called from any context previously described</TD></TR> + * <TR><TD>atomic-entry</TD> <TD>Caller must currently hold the big Cobalt kernel lock (nklock)</TD></TR> + * </TABLE> + * + * @par + * <b>Possible side-effects</b> + * <TABLE> + * <TR><TH>Tag</TH> <TH>Description</TH></TR> + * <TR><TD>might-switch</TD> <TD>The Cobalt kernel may switch context</TD></TR> + * </TABLE> + * + * @} + */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/Makefile new file mode 100644 index 0000000..f2b877d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/Makefile @@ -0,0 +1,5 @@ +ccflags-y += -I$(srctree)/kernel + +obj-y += pipeline.o + +pipeline-y := init.o intr.o kevents.o tick.o syscall.o sched.o clock.o diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/clock.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/clock.c new file mode 100644 index 0000000..1c04eed --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/clock.c @@ -0,0 +1,158 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. 
+ */ + +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/vdso.h> +#include <cobalt/kernel/arith.h> +#include <cobalt/kernel/timer.h> +#include <xenomai/posix/clock.h> +#include <pipeline/machine.h> + +static unsigned long long clockfreq; + +#ifdef XNARCH_HAVE_LLMULSHFT + +static unsigned int tsc_scale, tsc_shift; + +#ifdef XNARCH_HAVE_NODIV_LLIMD + +static struct xnarch_u32frac tsc_frac; + +long long xnclock_core_ns_to_ticks(long long ns) +{ + return xnarch_nodiv_llimd(ns, tsc_frac.frac, tsc_frac.integ); +} + +#else /* !XNARCH_HAVE_NODIV_LLIMD */ + +long long xnclock_core_ns_to_ticks(long long ns) +{ + return xnarch_llimd(ns, 1 << tsc_shift, tsc_scale); +} + +#endif /* !XNARCH_HAVE_NODIV_LLIMD */ + +xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks) +{ + return xnarch_llmulshft(ticks, tsc_scale, tsc_shift); +} + +xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks) +{ + unsigned int shift = tsc_shift - 1; + return (xnarch_llmulshft(ticks, tsc_scale, shift) + 1) / 2; +} + +#else /* !XNARCH_HAVE_LLMULSHFT */ + +xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks) +{ + return xnarch_llimd(ticks, 1000000000, clockfreq); +} + +xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks) +{ + return (xnarch_llimd(ticks, 1000000000, clockfreq/2) + 1) / 2; +} + +xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns) +{ + return xnarch_llimd(ns, clockfreq, 1000000000); +} + +#endif /* !XNARCH_HAVE_LLMULSHFT */ + +EXPORT_SYMBOL_GPL(xnclock_core_ticks_to_ns); +EXPORT_SYMBOL_GPL(xnclock_core_ticks_to_ns_rounded); +EXPORT_SYMBOL_GPL(xnclock_core_ns_to_ticks); + +int pipeline_get_host_time(struct timespec64 *tp) +{ +#ifdef CONFIG_IPIPE_HAVE_HOSTRT + struct xnvdso_hostrt_data *hostrt_data; + u64 now, base, mask, cycle_delta; + __u32 mult, shift; + unsigned long rem; + urwstate_t tmp; + __u64 nsec; + + hostrt_data = get_hostrt_data(); + BUG_ON(!hostrt_data); + + if (unlikely(!hostrt_data->live)) + return -1; + + /* + * Note: Disabling HW interrupts around writes to hostrt_data + * ensures that a reader (on the Xenomai side) cannot + * interrupt a writer (on the Linux kernel side) on the same + * CPU. The urw block is required when a reader is + * interleaved by a writer on a different CPU. This follows + * the approach from userland, where taking the spinlock is + * not possible. + */ + unsynced_read_block(&tmp, &hostrt_data->lock) { + now = xnclock_read_raw(&nkclock); + base = hostrt_data->cycle_last; + mask = hostrt_data->mask; + mult = hostrt_data->mult; + shift = hostrt_data->shift; + tp->tv_sec = hostrt_data->wall_sec; + nsec = hostrt_data->wall_nsec; + } + + /* + * At this point, we have a consistent copy of the fundamental + * data structure - calculate the interval between the current + * and base time stamp cycles, and convert the difference + * to nanoseconds. 
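+	 *
+	 * Editor's worked example (figures assumed for illustration):
+	 * with mult = 4194304 and shift = 22, each cycle is worth
+	 * exactly 1 ns, so cycle_delta = 3000 contributes
+	 * (3000 * 4194304) >> 22 = 3000 ns on top of the base wall
+	 * time -- the same mult/shift scaling the Linux clocksource
+	 * layer uses.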
+ */ + cycle_delta = (now - base) & mask; + nsec += (cycle_delta * mult) >> shift; + + /* Convert to the desired sec, usec representation */ + tp->tv_sec += xnclock_divrem_billion(nsec, &rem); + tp->tv_nsec = rem; + + return 0; +#else + return -EINVAL; +#endif +} + +xnticks_t pipeline_read_wallclock(void) +{ + return xnclock_read_monotonic(&nkclock) + xnclock_get_offset(&nkclock); +} +EXPORT_SYMBOL_GPL(pipeline_read_wallclock); + +int pipeline_set_wallclock(xnticks_t epoch_ns) +{ + xnclock_set_wallclock(epoch_ns); + + return 0; +} + +void pipeline_update_clock_freq(unsigned long long freq) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + clockfreq = freq; +#ifdef XNARCH_HAVE_LLMULSHFT + xnarch_init_llmulshft(1000000000, freq, &tsc_scale, &tsc_shift); +#ifdef XNARCH_HAVE_NODIV_LLIMD + xnarch_init_u32frac(&tsc_frac, 1 << tsc_shift, tsc_scale); +#endif +#endif + cobalt_pipeline.clock_freq = freq; + xnlock_put_irqrestore(&nklock, s); +} + +void pipeline_init_clock(void) +{ + pipeline_update_clock_freq(cobalt_pipeline.clock_freq); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/init.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/init.c new file mode 100644 index 0000000..c199f00 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/init.c @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum <rpm@xenomai.org> + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <pipeline/machine.h> +#include <linux/ipipe_tickdev.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/clock.h> + +int __init pipeline_init(void) +{ + struct ipipe_sysinfo sysinfo; + int ret, virq; + + ret = ipipe_select_timers(&xnsched_realtime_cpus); + if (ret < 0) + return ret; + + ipipe_get_sysinfo(&sysinfo); + + cobalt_pipeline.clock_freq = sysinfo.sys_hrclock_freq; + + if (cobalt_machine.init) { + ret = cobalt_machine.init(); + if (ret) + return ret; + } + + ipipe_register_head(&xnsched_primary_domain, "Xenomai"); + + virq = ipipe_alloc_virq(); + if (virq == 0) + goto fail_escalate; + + cobalt_pipeline.escalate_virq = virq; + + ipipe_request_irq(&xnsched_primary_domain, + cobalt_pipeline.escalate_virq, + (ipipe_irq_handler_t)__xnsched_run_handler, + NULL, NULL); + + ret = xnclock_init(); + if (ret) + goto fail_clock; + + return 0; + +fail_clock: + ipipe_free_irq(&xnsched_primary_domain, + cobalt_pipeline.escalate_virq); + ipipe_free_virq(cobalt_pipeline.escalate_virq); +fail_escalate: + ipipe_unregister_head(&xnsched_primary_domain); + + if (cobalt_machine.cleanup) + cobalt_machine.cleanup(); + + return ret; +} + +int __init pipeline_late_init(void) +{ + if (cobalt_machine.late_init) + return cobalt_machine.late_init(); + + return 0; +} + +__init void pipeline_cleanup(void) +{ + ipipe_unregister_head(&xnsched_primary_domain); + ipipe_free_irq(&xnsched_primary_domain, + cobalt_pipeline.escalate_virq); + ipipe_free_virq(cobalt_pipeline.escalate_virq); + ipipe_timers_release(); + xnclock_cleanup(); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/intr.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/intr.c new file mode 100644 index 0000000..cb15597 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/intr.c @@ -0,0 +1,1230 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2005,2006 Dmitry Adamushko <dmitry.adamushko@gmail.com>. + * Copyright (C) 2007 Jan Kiszka <jan.kiszka@web.de>. 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. +*/ +#include <linux/mutex.h> +#include <linux/ipipe.h> +#include <linux/ipipe_tickdev.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/intr.h> +#include <cobalt/kernel/stat.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/assert.h> +#include <trace/events/cobalt-core.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_irq Interrupt management + * @{ + */ +#define XNINTR_MAX_UNHANDLED 1000 + +static DEFINE_MUTEX(intrlock); + +#ifdef CONFIG_XENO_OPT_STATS_IRQS +struct xnintr nktimer; /* Only for statistics */ +static int xnintr_count = 1; /* Number of attached xnintr objects + nktimer */ +static int xnintr_list_rev; /* Modification counter of xnintr list */ + +/* Both functions update xnintr_list_rev at the very end. + * This guarantees that module.c::stat_seq_open() won't get + * an up-to-date xnintr_list_rev and old xnintr_count. */ + +static inline void stat_counter_inc(void) +{ + xnintr_count++; + smp_mb(); + xnintr_list_rev++; +} + +static inline void stat_counter_dec(void) +{ + xnintr_count--; + smp_mb(); + xnintr_list_rev++; +} + +static inline void sync_stat_references(struct xnintr *intr) +{ + struct xnirqstat *statp; + struct xnsched *sched; + int cpu; + + for_each_realtime_cpu(cpu) { + sched = xnsched_struct(cpu); + statp = per_cpu_ptr(intr->stats, cpu); + /* Synchronize on all dangling references to go away. 
*/ + while (sched->current_account == &statp->account) + cpu_relax(); + } +} + +static void clear_irqstats(struct xnintr *intr) +{ + struct xnirqstat *p; + int cpu; + + for_each_realtime_cpu(cpu) { + p = per_cpu_ptr(intr->stats, cpu); + memset(p, 0, sizeof(*p)); + } +} + +static inline void alloc_irqstats(struct xnintr *intr) +{ + intr->stats = alloc_percpu(struct xnirqstat); + clear_irqstats(intr); +} + +static inline void free_irqstats(struct xnintr *intr) +{ + free_percpu(intr->stats); +} + +static inline void query_irqstats(struct xnintr *intr, int cpu, + struct xnintr_iterator *iterator) +{ + struct xnirqstat *statp; + xnticks_t last_switch; + + statp = per_cpu_ptr(intr->stats, cpu); + iterator->hits = xnstat_counter_get(&statp->hits); + last_switch = xnsched_struct(cpu)->last_account_switch; + iterator->exectime_period = statp->account.total; + iterator->account_period = last_switch - statp->account.start; + statp->sum.total += iterator->exectime_period; + iterator->exectime_total = statp->sum.total; + statp->account.total = 0; + statp->account.start = last_switch; +} + +static void inc_irqstats(struct xnintr *intr, struct xnsched *sched, xnticks_t start) +{ + struct xnirqstat *statp; + + statp = raw_cpu_ptr(intr->stats); + xnstat_counter_inc(&statp->hits); + xnstat_exectime_lazy_switch(sched, &statp->account, start); +} + +static inline void switch_to_irqstats(struct xnintr *intr, + struct xnsched *sched) +{ + struct xnirqstat *statp; + + statp = raw_cpu_ptr(intr->stats); + xnstat_exectime_switch(sched, &statp->account); +} + +static inline void switch_from_irqstats(struct xnsched *sched, + xnstat_exectime_t *prev) +{ + xnstat_exectime_switch(sched, prev); +} + +static inline xnstat_exectime_t *switch_core_irqstats(struct xnsched *sched) +{ + struct xnirqstat *statp; + xnstat_exectime_t *prev; + + statp = raw_cpu_ptr(nktimer.stats); + prev = xnstat_exectime_switch(sched, &statp->account); + xnstat_counter_inc(&statp->hits); + + return prev; +} + +#else /* !CONFIG_XENO_OPT_STATS_IRQS */ + +static inline void stat_counter_inc(void) {} + +static inline void stat_counter_dec(void) {} + +static inline void sync_stat_references(struct xnintr *intr) {} + +static inline void alloc_irqstats(struct xnintr *intr) {} + +static inline void free_irqstats(struct xnintr *intr) {} + +static inline void clear_irqstats(struct xnintr *intr) {} + +static inline void query_irqstats(struct xnintr *intr, int cpu, + struct xnintr_iterator *iterator) {} + +static inline void inc_irqstats(struct xnintr *intr, struct xnsched *sched, xnticks_t start) {} + +static inline void switch_to_irqstats(struct xnintr *intr, + struct xnsched *sched) {} + +static inline void switch_from_irqstats(struct xnsched *sched, + xnstat_exectime_t *prev) {} + +static inline xnstat_exectime_t *switch_core_irqstats(struct xnsched *sched) +{ + return NULL; +} + +#endif /* !CONFIG_XENO_OPT_STATS_IRQS */ + +static void xnintr_irq_handler(unsigned int irq, void *cookie); + +void xnintr_host_tick(struct xnsched *sched) /* Interrupts off. */ +{ + sched->lflags &= ~XNHTICK; +#ifdef XNARCH_HOST_TICK_IRQ + ipipe_post_irq_root(XNARCH_HOST_TICK_IRQ); +#endif +} + +/* + * Low-level core clock irq handler. This one forwards ticks from the + * Xenomai platform timer to nkclock exclusively. 
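+ *
+ * Editor's note: the inesting counter and the XNINIRQ flag
+ * manipulated below let xnsched_run() detect that it is being called
+ * from interrupt context, so the actual rescheduling is deferred
+ * until the outermost interrupt frame unwinds.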
+ */ +void xnintr_core_clock_handler(void) +{ + struct xnsched *sched = xnsched_current(); + int cpu __maybe_unused = xnsched_cpu(sched); + xnstat_exectime_t *prev; + + if (!xnsched_supported_cpu(cpu)) { +#ifdef XNARCH_HOST_TICK_IRQ + ipipe_post_irq_root(XNARCH_HOST_TICK_IRQ); +#endif + return; + } + + prev = switch_core_irqstats(sched); + + trace_cobalt_clock_entry(per_cpu(ipipe_percpu.hrtimer_irq, cpu)); + + ++sched->inesting; + sched->lflags |= XNINIRQ; + + xnlock_get(&nklock); + xnclock_tick(&nkclock); + xnlock_put(&nklock); + + trace_cobalt_clock_exit(per_cpu(ipipe_percpu.hrtimer_irq, cpu)); + switch_from_irqstats(sched, prev); + + if (--sched->inesting == 0) { + sched->lflags &= ~XNINIRQ; + xnsched_run(); + sched = xnsched_current(); + } + /* + * If the core clock interrupt preempted a real-time thread, + * any transition to the root thread has already triggered a + * host tick propagation from xnsched_run(), so at this point, + * we only need to propagate the host tick in case the + * interrupt preempted the root thread. + */ + if ((sched->lflags & XNHTICK) && + xnthread_test_state(sched->curr, XNROOT)) + xnintr_host_tick(sched); +} + +struct irqdisable_work { + struct ipipe_work_header work; /* Must be first. */ + int irq; +}; + +static void lostage_irqdisable_line(struct ipipe_work_header *work) +{ + struct irqdisable_work *rq; + + rq = container_of(work, struct irqdisable_work, work); + ipipe_disable_irq(rq->irq); +} + +static void disable_irq_line(int irq) +{ + struct irqdisable_work diswork = { + .work = { + .size = sizeof(diswork), + .handler = lostage_irqdisable_line, + }, + .irq = irq, + }; + + ipipe_post_work_root(&diswork, work); +} + +/* Optional support for shared interrupts. */ + +#ifdef CONFIG_XENO_OPT_SHIRQ + +struct xnintr_vector { + DECLARE_XNLOCK(lock); + struct xnintr *handlers; + int unhandled; +} ____cacheline_aligned_in_smp; + +static struct xnintr_vector vectors[IPIPE_NR_IRQS]; + +static inline struct xnintr *xnintr_vec_first(unsigned int irq) +{ + return vectors[irq].handlers; +} + +static inline struct xnintr *xnintr_vec_next(struct xnintr *prev) +{ + return prev->next; +} + +static void disable_shared_irq_line(struct xnintr_vector *vec) +{ + int irq = vec - vectors; + struct xnintr *intr; + + xnlock_get(&vec->lock); + intr = vec->handlers; + while (intr) { + set_bit(XN_IRQSTAT_DISABLED, &intr->status); + intr = intr->next; + } + xnlock_put(&vec->lock); + disable_irq_line(irq); +} + +/* + * Low-level interrupt handler dispatching the user-defined ISRs for + * shared interrupts -- Called with interrupts off. + */ +static void xnintr_vec_handler(unsigned int irq, void *cookie) +{ + struct xnsched *sched = xnsched_current(); + struct xnintr_vector *vec = vectors + irq; + xnstat_exectime_t *prev; + struct xnintr *intr; + xnticks_t start; + int s = 0, ret; + + prev = xnstat_exectime_get_current(sched); + start = xnstat_exectime_now(); + trace_cobalt_irq_entry(irq); + + ++sched->inesting; + sched->lflags |= XNINIRQ; + + xnlock_get(&vec->lock); + intr = vec->handlers; + if (unlikely(test_bit(XN_IRQSTAT_DISABLED, &intr->status))) { + /* irqdisable_work is on its way, ignore. */ + xnlock_put(&vec->lock); + goto out; + } + + while (intr) { + /* + * NOTE: We assume that no CPU migration can occur + * while running the interrupt service routine. 
+ */ + ret = intr->isr(intr); + XENO_WARN_ON_ONCE(USER, (ret & XN_IRQ_STATMASK) == 0); + s |= ret; + if (ret & XN_IRQ_HANDLED) { + inc_irqstats(intr, sched, start); + start = xnstat_exectime_now(); + } + intr = intr->next; + } + + xnlock_put(&vec->lock); + + if (unlikely(!(s & XN_IRQ_HANDLED))) { + if (++vec->unhandled == XNINTR_MAX_UNHANDLED) { + printk(XENO_ERR "%s: IRQ%d not handled. Disabling IRQ line\n", + __FUNCTION__, irq); + s |= XN_IRQ_DISABLE; + } + } else + vec->unhandled = 0; + + if (s & XN_IRQ_PROPAGATE) + ipipe_post_irq_root(irq); + else if (s & XN_IRQ_DISABLE) + disable_shared_irq_line(vec); + else + ipipe_end_irq(irq); +out: + switch_from_irqstats(sched, prev); + + trace_cobalt_irq_exit(irq); + + if (--sched->inesting == 0) { + sched->lflags &= ~XNINIRQ; + xnsched_run(); + } +} + +/* + * Low-level interrupt handler dispatching the user-defined ISRs for + * shared edge-triggered interrupts -- Called with interrupts off. + */ +static void xnintr_edge_vec_handler(unsigned int irq, void *cookie) +{ + const int MAX_EDGEIRQ_COUNTER = 128; + struct xnsched *sched = xnsched_current(); + struct xnintr_vector *vec = vectors + irq; + struct xnintr *intr, *end = NULL; + int s = 0, counter = 0, ret; + xnstat_exectime_t *prev; + xnticks_t start; + + prev = xnstat_exectime_get_current(sched); + start = xnstat_exectime_now(); + trace_cobalt_irq_entry(irq); + + ++sched->inesting; + sched->lflags |= XNINIRQ; + + xnlock_get(&vec->lock); + intr = vec->handlers; + if (unlikely(test_bit(XN_IRQSTAT_DISABLED, &intr->status))) { + /* irqdisable_work is on its way, ignore. */ + xnlock_put(&vec->lock); + goto out; + } + + while (intr != end) { + switch_to_irqstats(intr, sched); + /* + * NOTE: We assume that no CPU migration will occur + * while running the interrupt service routine. + */ + ret = intr->isr(intr); + XENO_WARN_ON_ONCE(USER, (ret & XN_IRQ_STATMASK) == 0); + s |= ret; + + if (ret & XN_IRQ_HANDLED) { + end = NULL; + inc_irqstats(intr, sched, start); + start = xnstat_exectime_now(); + } else if (end == NULL) + end = intr; + + if (counter++ > MAX_EDGEIRQ_COUNTER) + break; + + intr = intr->next; + if (intr == NULL) + intr = vec->handlers; + } + + xnlock_put(&vec->lock); + + if (counter > MAX_EDGEIRQ_COUNTER) + printk(XENO_ERR "%s: failed to get the IRQ%d line free\n", + __FUNCTION__, irq); + + if (unlikely(!(s & XN_IRQ_HANDLED))) { + if (++vec->unhandled == XNINTR_MAX_UNHANDLED) { + printk(XENO_ERR "%s: IRQ%d not handled. Disabling IRQ line\n", + __FUNCTION__, irq); + s |= XN_IRQ_DISABLE; + } + } else + vec->unhandled = 0; + + if (s & XN_IRQ_PROPAGATE) + ipipe_post_irq_root(irq); + else if (s & XN_IRQ_DISABLE) + disable_shared_irq_line(vec); + else + ipipe_end_irq(irq); +out: + switch_from_irqstats(sched, prev); + + trace_cobalt_irq_exit(irq); + + if (--sched->inesting == 0) { + sched->lflags &= ~XNINIRQ; + xnsched_run(); + } +} + +static inline bool cobalt_owns_irq(int irq) +{ + ipipe_irq_handler_t h; + + h = __ipipe_irq_handler(&xnsched_primary_domain, irq); + + return h == xnintr_vec_handler || + h == xnintr_edge_vec_handler || + h == xnintr_irq_handler; +} + +static inline int xnintr_irq_attach(struct xnintr *intr) +{ + struct xnintr_vector *vec = vectors + intr->irq; + struct xnintr *prev, **p = &vec->handlers; + int ret; + + prev = *p; + if (prev) { + /* Check on whether the shared mode is allowed. 
*/ + if ((prev->flags & intr->flags & XN_IRQTYPE_SHARED) == 0 || + (prev->iack != intr->iack) + || ((prev->flags & XN_IRQTYPE_EDGE) != + (intr->flags & XN_IRQTYPE_EDGE))) + return -EBUSY; + + /* + * Get a position at the end of the list to insert the + * new element. + */ + while (prev) { + p = &prev->next; + prev = *p; + } + } else { + /* Initialize the corresponding interrupt channel */ + void (*handler) (unsigned, void *) = xnintr_irq_handler; + + if (intr->flags & XN_IRQTYPE_SHARED) { + if (intr->flags & XN_IRQTYPE_EDGE) + handler = xnintr_edge_vec_handler; + else + handler = xnintr_vec_handler; + + } + vec->unhandled = 0; + + ret = ipipe_request_irq(&xnsched_primary_domain, + intr->irq, handler, intr, + (ipipe_irq_ackfn_t)intr->iack); + if (ret) + return ret; + } + + intr->next = NULL; + /* + * Add the given interrupt object. No need to synchronise with + * the IRQ handler, we are only extending the chain. + */ + *p = intr; + + return 0; +} + +static inline void xnintr_irq_detach(struct xnintr *intr) +{ + struct xnintr_vector *vec = vectors + intr->irq; + struct xnintr *e, **p = &vec->handlers; + + while ((e = *p) != NULL) { + if (e == intr) { + /* Remove the given interrupt object from the list. */ + xnlock_get(&vec->lock); + *p = e->next; + xnlock_put(&vec->lock); + + sync_stat_references(intr); + + /* Release the IRQ line if this was the last user */ + if (vec->handlers == NULL) + ipipe_free_irq(&xnsched_primary_domain, intr->irq); + + return; + } + p = &e->next; + } + + printk(XENO_ERR "attempted to detach an unregistered interrupt descriptor\n"); +} + +#else /* !CONFIG_XENO_OPT_SHIRQ */ + +struct xnintr_vector { +#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING) + DECLARE_XNLOCK(lock); +#endif /* CONFIG_SMP || XENO_DEBUG(LOCKING) */ +} ____cacheline_aligned_in_smp; + +static struct xnintr_vector vectors[IPIPE_NR_IRQS]; + +static inline bool cobalt_owns_irq(int irq) +{ + ipipe_irq_handler_t h; + + h = __ipipe_irq_handler(&xnsched_primary_domain, irq); + + return h == xnintr_irq_handler; +} + +static inline struct xnintr *xnintr_vec_first(unsigned int irq) +{ + return cobalt_owns_irq(irq) ? + __ipipe_irq_cookie(&xnsched_primary_domain, irq) : NULL; +} + +static inline struct xnintr *xnintr_vec_next(struct xnintr *prev) +{ + return NULL; +} + +static inline int xnintr_irq_attach(struct xnintr *intr) +{ + return ipipe_request_irq(&xnsched_primary_domain, + intr->irq, xnintr_irq_handler, intr, + (ipipe_irq_ackfn_t)intr->iack); +} + +static inline void xnintr_irq_detach(struct xnintr *intr) +{ + int irq = intr->irq; + + xnlock_get(&vectors[irq].lock); + ipipe_free_irq(&xnsched_primary_domain, irq); + xnlock_put(&vectors[irq].lock); + + sync_stat_references(intr); +} + +#endif /* !CONFIG_XENO_OPT_SHIRQ */ + +/* + * Low-level interrupt handler dispatching non-shared ISRs -- Called + * with interrupts off. + */ +static void xnintr_irq_handler(unsigned int irq, void *cookie) +{ + struct xnintr_vector __maybe_unused *vec = vectors + irq; + struct xnsched *sched = xnsched_current(); + xnstat_exectime_t *prev; + struct xnintr *intr; + xnticks_t start; + int s = 0; + + prev = xnstat_exectime_get_current(sched); + start = xnstat_exectime_now(); + trace_cobalt_irq_entry(irq); + + ++sched->inesting; + sched->lflags |= XNINIRQ; + + xnlock_get(&vec->lock); + +#ifdef CONFIG_SMP + /* + * In SMP case, we have to reload the cookie under the per-IRQ + * lock to avoid racing with xnintr_detach. 
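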
However, we + * assume that no CPU migration will occur while running the + * interrupt service routine, so the scheduler pointer will + * remain valid throughout this function. + */ + intr = __ipipe_irq_cookie(&xnsched_primary_domain, irq); + if (unlikely(intr == NULL)) + goto done; +#else + intr = cookie; +#endif + if (unlikely(test_bit(XN_IRQSTAT_DISABLED, &intr->status))) { + /* irqdisable_work is on its way, ignore. */ + xnlock_put(&vec->lock); + goto out; + } + + s = intr->isr(intr); + XENO_WARN_ON_ONCE(USER, (s & XN_IRQ_STATMASK) == 0); + if (unlikely(!(s & XN_IRQ_HANDLED))) { + if (++intr->unhandled == XNINTR_MAX_UNHANDLED) { + printk(XENO_ERR "%s: IRQ%d not handled. Disabling IRQ line\n", + __FUNCTION__, irq); + s |= XN_IRQ_DISABLE; + } + } else { + inc_irqstats(intr, sched, start); + intr->unhandled = 0; + } + + if (s & XN_IRQ_DISABLE) + set_bit(XN_IRQSTAT_DISABLED, &intr->status); +#ifdef CONFIG_SMP +done: +#endif + xnlock_put(&vec->lock); + + if (s & XN_IRQ_DISABLE) + disable_irq_line(irq); + else if (s & XN_IRQ_PROPAGATE) + ipipe_post_irq_root(irq); + else + ipipe_end_irq(irq); +out: + switch_from_irqstats(sched, prev); + + trace_cobalt_irq_exit(irq); + + if (--sched->inesting == 0) { + sched->lflags &= ~XNINIRQ; + xnsched_run(); + } +} + +int __init xnintr_mount(void) +{ + int i; + for (i = 0; i < IPIPE_NR_IRQS; ++i) + xnlock_init(&vectors[i].lock); + return 0; +} + +/** + * @fn int xnintr_init(struct xnintr *intr,const char *name,unsigned int irq,xnisr_t isr,xniack_t iack,int flags) + * @brief Initialize an interrupt descriptor. + * + * When an interrupt occurs on the given @a irq line, the interrupt + * service routine @a isr is fired in order to deal with the hardware + * event. The interrupt handler may call any non-blocking service from + * the Cobalt core. + * + * Upon receipt of an IRQ, the interrupt handler @a isr is immediately + * called on behalf of the interrupted stack context, the rescheduling + * procedure is locked, and the interrupt line is masked in the system + * interrupt controller chip. Upon return, the status of the + * interrupt handler is checked for the following bits: + * + * - XN_IRQ_HANDLED indicates that the interrupt request was + * successfully handled. + * + * - XN_IRQ_NONE indicates the opposite to XN_IRQ_HANDLED, meaning + * that no interrupt source could be identified for the ongoing + * request by the handler. + * + * In addition, one of the following bits may be present in the + * status: + * + * - XN_IRQ_DISABLE tells the Cobalt core to disable the interrupt + * line before returning from the interrupt context. + * + * - XN_IRQ_PROPAGATE propagates the IRQ event down the interrupt + * pipeline to Linux. Using this flag is strongly discouraged, unless + * you fully understand the implications of such propagation. + * + * @warning The handler should not use these bits if it shares the + * interrupt line with other handlers in the real-time domain. When + * any of these bits is detected, the interrupt line is left masked. + * + * A count of interrupt receipts is tracked into the interrupt + * descriptor, and reset to zero each time such descriptor is + * attached. Since this count could wrap around, it should be used as + * an indication of interrupt activity only. + * + * @param intr The address of a descriptor the Cobalt core will use to + * store the interrupt-specific data. + * + * @param name An ASCII string standing for the symbolic name of the + * interrupt or NULL. 
+ *
+ * @param irq The IRQ line number associated with the interrupt
+ * descriptor. This value is architecture-dependent. An interrupt
+ * descriptor must be attached to the system by a call to
+ * xnintr_attach() before @a irq events can be received.
+ *
+ * @param isr The address of an interrupt handler, which is passed the
+ * address of the interrupt descriptor receiving the IRQ.
+ *
+ * @param iack The address of an optional interrupt acknowledge
+ * routine, aimed at replacing the default one. Only very specific
+ * situations actually require overriding the default setting for
+ * this parameter, like having to acknowledge non-standard PIC
+ * hardware. @a iack should return a non-zero value to indicate that
+ * the interrupt has been properly acknowledged. If @a iack is NULL,
+ * the default routine will be used instead.
+ *
+ * @param flags A set of creation flags affecting the operation. The
+ * valid flags are:
+ *
+ * - XN_IRQTYPE_SHARED enables IRQ-sharing with other interrupt
+ * objects.
+ *
+ * - XN_IRQTYPE_EDGE is an additional flag which needs to be set
+ * together with XN_IRQTYPE_SHARED to enable IRQ-sharing of
+ * edge-triggered interrupts.
+ *
+ * @return 0 is returned on success. Otherwise, -EINVAL is returned if
+ * @a irq is not a valid interrupt number.
+ *
+ * @coretags{secondary-only}
+ */
+int xnintr_init(struct xnintr *intr, const char *name,
+		unsigned int irq, xnisr_t isr, xniack_t iack,
+		int flags)
+{
+	secondary_mode_only();
+
+	if (irq >= IPIPE_NR_IRQS)
+		return -EINVAL;
+
+	intr->irq = irq;
+	intr->isr = isr;
+	intr->iack = iack;
+	intr->cookie = NULL;
+	intr->name = name ? : "<unknown>";
+	intr->flags = flags;
+	intr->status = _XN_IRQSTAT_DISABLED;
+	intr->unhandled = 0;
+	raw_spin_lock_init(&intr->lock);
+#ifdef CONFIG_XENO_OPT_SHIRQ
+	intr->next = NULL;
+#endif
+	alloc_irqstats(intr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnintr_init);
+
+/**
+ * @fn void xnintr_destroy(struct xnintr *intr)
+ * @brief Destroy an interrupt descriptor.
+ *
+ * Destroys an interrupt descriptor previously initialized by
+ * xnintr_init(). The descriptor is automatically detached by a call
+ * to xnintr_detach(). No more IRQs will be received through this
+ * descriptor after this service has returned.
+ *
+ * @param intr The address of the interrupt descriptor to destroy.
+ *
+ * @coretags{secondary-only}
+ */
+void xnintr_destroy(struct xnintr *intr)
+{
+	secondary_mode_only();
+	xnintr_detach(intr);
+	free_irqstats(intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_destroy);
+
+/**
+ * @fn int xnintr_attach(struct xnintr *intr, void *cookie, const cpumask_t *cpumask)
+ * @brief Attach an interrupt descriptor.
+ *
+ * Attach an interrupt descriptor previously initialized by
+ * xnintr_init(). This operation registers the descriptor with the
+ * interrupt pipeline, but does not enable the interrupt line yet. A
+ * call to xnintr_enable() is required to start receiving IRQs from
+ * the interrupt line associated with the descriptor.
+ *
+ * @param intr The address of the interrupt descriptor to attach.
+ *
+ * @param cookie A user-defined opaque value which is stored into the
+ * descriptor for further retrieval by the interrupt handler.
+ *
+ * @param cpumask Initial CPU affinity of the interrupt. If NULL, affinity is
+ * set to all real-time CPUs.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -EINVAL is returned if an error occurred while attaching the
+ * descriptor.
+ *
+ * - -EBUSY is returned if the descriptor was already attached.
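+ *
+ * Editor's note: a minimal end-to-end usage sketch of this API
+ * (hypothetical driver code; MY_IRQ, my_isr and my_intr are
+ * illustrative only):
+ *
+ * @code
+ * static struct xnintr my_intr;
+ *
+ * static int my_isr(struct xnintr *intr)
+ * {
+ *         // acknowledge and handle the device event here
+ *         return XN_IRQ_HANDLED;
+ * }
+ *
+ * static int my_driver_setup(void)
+ * {
+ *         int ret;
+ *
+ *         ret = xnintr_init(&my_intr, "my-dev", MY_IRQ, my_isr, NULL, 0);
+ *         if (ret)
+ *                 return ret;
+ *
+ *         ret = xnintr_attach(&my_intr, NULL, NULL);
+ *         if (ret)
+ *                 return ret;
+ *
+ *         xnintr_enable(&my_intr);
+ *
+ *         return 0;
+ * }
+ * @endcode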
+ * + * @note The caller <b>must not</b> hold nklock when invoking this service, + * this would cause deadlocks. + * + * @coretags{secondary-only} + * + * @note Attaching an interrupt descriptor resets the tracked number + * of IRQ receipts to zero. + */ +int xnintr_attach(struct xnintr *intr, void *cookie, const cpumask_t *cpumask) +{ +#ifdef CONFIG_SMP + cpumask_t tmp_mask, *effective_mask; +#endif + int ret; + + secondary_mode_only(); + trace_cobalt_irq_attach(intr->irq); + + intr->cookie = cookie; + clear_irqstats(intr); + +#ifdef CONFIG_SMP + if (!cpumask) { + effective_mask = &xnsched_realtime_cpus; + } else { + effective_mask = &tmp_mask; + cpumask_and(effective_mask, &xnsched_realtime_cpus, cpumask); + if (cpumask_empty(effective_mask)) + return -EINVAL; + } + ret = ipipe_set_irq_affinity(intr->irq, *effective_mask); + if (ret < 0) + return ret; +#endif /* CONFIG_SMP */ + + raw_spin_lock(&intr->lock); + + if (test_and_set_bit(XN_IRQSTAT_ATTACHED, &intr->status)) { + ret = -EBUSY; + goto out; + } + + ret = xnintr_irq_attach(intr); + if (ret) { + clear_bit(XN_IRQSTAT_ATTACHED, &intr->status); + goto out; + } + + stat_counter_inc(); +out: + raw_spin_unlock(&intr->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(xnintr_attach); + +/** + * @fn int xnintr_detach(struct xnintr *intr) + * @brief Detach an interrupt descriptor. + * + * This call unregisters an interrupt descriptor previously attached + * by xnintr_attach() from the interrupt pipeline. Once detached, the + * associated interrupt line is disabled, but the descriptor remains + * valid. The descriptor can be attached anew by a call to + * xnintr_attach(). + * + * @param intr The address of the interrupt descriptor to detach. + * + * @note The caller <b>must not</b> hold nklock when invoking this + * service, this would cause deadlocks. + * + * @coretags{secondary-only} + */ +void xnintr_detach(struct xnintr *intr) +{ + secondary_mode_only(); + trace_cobalt_irq_detach(intr->irq); + + raw_spin_lock(&intr->lock); + + if (test_and_clear_bit(XN_IRQSTAT_ATTACHED, &intr->status)) { + xnintr_irq_detach(intr); + stat_counter_dec(); + } + + raw_spin_unlock(&intr->lock); +} +EXPORT_SYMBOL_GPL(xnintr_detach); + +/** + * @fn void xnintr_enable(struct xnintr *intr) + * @brief Enable an interrupt line. + * + * Enables the interrupt line associated with an interrupt descriptor. + * + * @param intr The address of the interrupt descriptor. + * + * @coretags{secondary-only} + */ +void xnintr_enable(struct xnintr *intr) +{ + unsigned long flags; + + secondary_mode_only(); + trace_cobalt_irq_enable(intr->irq); + + raw_spin_lock_irqsave(&intr->lock, flags); + + /* + * If disabled on entry, there is no way we could race with + * disable_irq_line(). + */ + if (test_and_clear_bit(XN_IRQSTAT_DISABLED, &intr->status)) + ipipe_enable_irq(intr->irq); + + raw_spin_unlock_irqrestore(&intr->lock, flags); +} +EXPORT_SYMBOL_GPL(xnintr_enable); + +/** + * @fn void xnintr_disable(struct xnintr *intr) + * @brief Disable an interrupt line. + * + * Disables the interrupt line associated with an interrupt + * descriptor. + * + * @param intr The address of the interrupt descriptor. + * + * @coretags{secondary-only} + */ +void xnintr_disable(struct xnintr *intr) +{ + unsigned long flags; + + secondary_mode_only(); + trace_cobalt_irq_disable(intr->irq); + + /* We only need a virtual masking. 
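+	 * (Editor's note: xnintr_enable() and this routine are
+	 * strictly state-based: the XN_IRQSTAT_DISABLED bit makes the
+	 * pair idempotent, e.g. calling xnintr_disable() twice in a
+	 * row only disables the line once, and a subsequent
+	 * xnintr_enable() re-enables it exactly once.)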
*/ + raw_spin_lock_irqsave(&intr->lock, flags); + + /* + * Racing with disable_irq_line() is innocuous, the pipeline + * would serialize calls to ipipe_disable_irq() across CPUs, + * and the descriptor status would still properly match the + * line status in the end. + */ + if (!test_and_set_bit(XN_IRQSTAT_DISABLED, &intr->status)) + ipipe_disable_irq(intr->irq); + + raw_spin_unlock_irqrestore(&intr->lock, flags); +} +EXPORT_SYMBOL_GPL(xnintr_disable); + +/** + * @fn void xnintr_affinity(struct xnintr *intr, cpumask_t cpumask) + * @brief Set processor affinity of interrupt. + * + * Restricts the IRQ line associated with the interrupt descriptor @a + * intr to be received only on processors which bits are set in @a + * cpumask. + * + * @param intr The address of the interrupt descriptor. + * + * @param cpumask The new processor affinity. + * + * @note Depending on architectures, setting more than one bit in @a + * cpumask could be meaningless. + * + * @coretags{secondary-only} + */ +int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask) +{ +#ifdef CONFIG_SMP + cpumask_t effective_mask; + + secondary_mode_only(); + + cpumask_and(&effective_mask, &xnsched_realtime_cpus, cpumask); + if (cpumask_empty(&effective_mask)) + return -EINVAL; + + return ipipe_set_irq_affinity(intr->irq, effective_mask); +#else + secondary_mode_only(); + return 0; +#endif +} +EXPORT_SYMBOL_GPL(xnintr_affinity); + +static inline int xnintr_is_timer_irq(int irq) +{ + int cpu; + + for_each_realtime_cpu(cpu) + if (irq == per_cpu(ipipe_percpu.hrtimer_irq, cpu)) + return 1; + + return 0; +} + +#ifdef CONFIG_XENO_OPT_STATS_IRQS + +int xnintr_get_query_lock(void) +{ + return mutex_lock_interruptible(&intrlock) ? -ERESTARTSYS : 0; +} + +void xnintr_put_query_lock(void) +{ + mutex_unlock(&intrlock); +} + +int xnintr_query_init(struct xnintr_iterator *iterator) +{ + iterator->prev = NULL; + + /* The order is important here: first xnintr_list_rev then + * xnintr_count. On the other hand, xnintr_attach/detach() + * update xnintr_count first and then xnintr_list_rev. This + * should guarantee that we can't get an up-to-date + * xnintr_list_rev and old xnintr_count here. The other way + * around is not a problem as xnintr_query() will notice this + * fact later. Should xnintr_list_rev change later, + * xnintr_query() will trigger an appropriate error below. 
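+	 * Editor's note: the pairing can be pictured as follows
+	 * (simplified, see stat_counter_inc() above):
+	 *
+	 *   attach/detach side:           query side:
+	 *     xnintr_count++;               list_rev = xnintr_list_rev;
+	 *     smp_mb();                     smp_mb();
+	 *     xnintr_list_rev++;            count = xnintr_count;
+	 *
+	 * so an up-to-date list_rev implies an up-to-date count; a
+	 * concurrent update is caught by the revision re-check in
+	 * xnintr_query_next().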
+ */ + iterator->list_rev = xnintr_list_rev; + smp_mb(); + + return xnintr_count; +} + +int xnintr_query_next(int irq, struct xnintr_iterator *iterator, + char *name_buf) +{ + int cpu, nr_cpus = num_present_cpus(); + struct xnintr *intr; + + if (iterator->list_rev != xnintr_list_rev) + return -EAGAIN; + + intr = iterator->prev; + if (intr == NULL) { + if (xnintr_is_timer_irq(irq)) + intr = &nktimer; + else + intr = xnintr_vec_first(irq); + if (intr == NULL) + return -ENODEV; + iterator->prev = intr; + iterator->cpu = -1; + } + + for (;;) { + for (cpu = iterator->cpu + 1; cpu < nr_cpus; ++cpu) { + if (cpu_online(cpu)) { + ksformat(name_buf, XNOBJECT_NAME_LEN, "IRQ%d: %s", + irq, intr->name); + query_irqstats(intr, cpu, iterator); + iterator->cpu = cpu; + return 0; + } + } + + iterator->prev = xnintr_vec_next(intr); + if (iterator->prev == NULL) + return -ENODEV; + + iterator->cpu = -1; + } +} + +#endif /* CONFIG_XENO_OPT_STATS_IRQS */ + +#ifdef CONFIG_XENO_OPT_VFILE + +#include <cobalt/kernel/vfile.h> + +static inline int format_irq_proc(unsigned int irq, + struct xnvfile_regular_iterator *it) +{ + struct xnintr *intr; + struct irq_desc *d; + int cpu; + + for_each_realtime_cpu(cpu) + if (xnintr_is_timer_irq(irq)) { + xnvfile_printf(it, " [timer/%d]", cpu); + return 0; + } + +#ifdef CONFIG_SMP + /* + * IPI numbers on ARM are not compile time constants, so do + * not use switch/case here. + */ + if (irq == IPIPE_HRTIMER_IPI) { + xnvfile_puts(it, " [timer-ipi]"); + return 0; + } + if (irq == IPIPE_RESCHEDULE_IPI) { + xnvfile_puts(it, " [reschedule]"); + return 0; + } + if (irq == IPIPE_CRITICAL_IPI) { + xnvfile_puts(it, " [sync]"); + return 0; + } +#endif /* CONFIG_SMP */ + if (ipipe_virtual_irq_p(irq)) { + xnvfile_puts(it, " [virtual]"); + return 0; + } + + mutex_lock(&intrlock); + + if (!cobalt_owns_irq(irq)) { + xnvfile_puts(it, " "); + d = irq_to_desc(irq); + xnvfile_puts(it, d && d->name ? d->name : "-"); + } else { + intr = xnintr_vec_first(irq); + if (intr) { + xnvfile_puts(it, " "); + + do { + xnvfile_putc(it, ' '); + xnvfile_puts(it, intr->name); + intr = xnintr_vec_next(intr); + } while (intr); + } + } + + mutex_unlock(&intrlock); + + return 0; +} + +static int irq_vfile_show(struct xnvfile_regular_iterator *it, + void *data) +{ + int cpu, irq; + + /* FIXME: We assume the entire output fits in a single page. 
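+	 * Editor's note: the rendered vfile looks like the following
+	 * (values and column spacing illustrative only):
+	 *
+	 *     IRQ         CPU0        CPU1
+	 *     23:         1042           0   dev-a  dev-b
+	 *     67:        66251       66249  [timer/0]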
*/ + + xnvfile_puts(it, " IRQ "); + + for_each_realtime_cpu(cpu) + xnvfile_printf(it, " CPU%d", cpu); + + for (irq = 0; irq < IPIPE_NR_IRQS; irq++) { + if (__ipipe_irq_handler(&xnsched_primary_domain, irq) == NULL) + continue; + + xnvfile_printf(it, "\n%5d:", irq); + + for_each_realtime_cpu(cpu) { + xnvfile_printf(it, "%12lu", + __ipipe_cpudata_irq_hits(&xnsched_primary_domain, cpu, + irq)); + } + + format_irq_proc(irq, it); + } + + xnvfile_putc(it, '\n'); + + return 0; +} + +static struct xnvfile_regular_ops irq_vfile_ops = { + .show = irq_vfile_show, +}; + +static struct xnvfile_regular irq_vfile = { + .ops = &irq_vfile_ops, +}; + +void xnintr_init_proc(void) +{ + xnvfile_init_regular("irq", &irq_vfile, &cobalt_vfroot); +} + +void xnintr_cleanup_proc(void) +{ + xnvfile_destroy_regular(&irq_vfile); +} + +#endif /* CONFIG_XENO_OPT_VFILE */ + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/kevents.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/kevents.c new file mode 100644 index 0000000..43cc192 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/kevents.c @@ -0,0 +1,541 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org> + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * + * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org> + * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org> + */ + +#include <linux/ipipe.h> +#include <linux/ipipe_tickdev.h> +#include <linux/ptrace.h> +#include <linux/kallsyms.h> +#include <pipeline/kevents.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/vdso.h> +#include <rtdm/driver.h> +#include <trace/events/cobalt-core.h> +#include "../posix/process.h" +#include "../posix/thread.h" +#include "../posix/memory.h" + +static inline int handle_exception(struct ipipe_trap_data *d) +{ + struct xnthread *thread; + struct xnsched *sched; + + sched = xnsched_current(); + thread = sched->curr; + + trace_cobalt_thread_fault(xnarch_fault_pc(d), + xnarch_fault_trap(d)); + + if (xnthread_test_state(thread, XNROOT)) + return 0; + + if (xnarch_fault_fpu_p(d)) { +#ifdef CONFIG_XENO_ARCH_FPU + spl_t s; + + /* FPU exception received in primary mode. */ + splhigh(s); + if (xnarch_handle_fpu_fault(sched->fpuholder, thread, d)) { + sched->fpuholder = thread; + splexit(s); + return 1; + } + splexit(s); +#endif /* CONFIG_XENO_ARCH_FPU */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0) + printk("invalid use of FPU in Xenomai context at %pS\n", + (void *)xnarch_fault_pc(d)); +#else + print_symbol("invalid use of FPU in Xenomai context at %s\n", + xnarch_fault_pc(d)); +#endif + } + + if (xnarch_fault_bp_p(d) && user_mode(d->regs)) { + spl_t s; + + XENO_WARN_ON(CORE, xnthread_test_state(thread, XNRELAX)); + xnlock_get_irqsave(&nklock, s); + xnthread_set_info(thread, XNCONTHI); + ipipe_enable_user_intret_notifier(); + cobalt_stop_debugged_process(thread); + xnlock_put_irqrestore(&nklock, s); + xnsched_run(); + } + + /* + * If we experienced a trap on behalf of a shadow thread + * running in primary mode, move it to the Linux domain, + * leaving the kernel process the exception. 
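+	 * Editor's note: concretely, xnthread_relax() at the end of
+	 * this function moves the faulting thread to secondary mode so
+	 * that the regular Linux fault handling can run; when
+	 * xnarch_fault_notify() is true, SIGDEBUG_MIGRATE_FAULT also
+	 * notifies the application of the unwanted migration.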
+ */ +#if defined(CONFIG_XENO_OPT_DEBUG_COBALT) || defined(CONFIG_XENO_OPT_DEBUG_USER) + if (!user_mode(d->regs)) { + xntrace_panic_freeze(); + printk(XENO_WARNING + "switching %s to secondary mode after exception #%u in " + "kernel-space at 0x%lx (pid %d)\n", thread->name, + xnarch_fault_trap(d), + xnarch_fault_pc(d), + xnthread_host_pid(thread)); + xntrace_panic_dump(); + } else if (xnarch_fault_notify(d)) /* Don't report debug traps */ + printk(XENO_WARNING + "switching %s to secondary mode after exception #%u from " + "user-space at 0x%lx (pid %d)\n", thread->name, + xnarch_fault_trap(d), + xnarch_fault_pc(d), + xnthread_host_pid(thread)); +#endif + + if (xnarch_fault_pf_p(d)) + /* + * The page fault counter is not SMP-safe, but it's a + * simple indicator that something went wrong wrt + * memory locking anyway. + */ + xnstat_counter_inc(&thread->stat.pf); + + xnthread_relax(xnarch_fault_notify(d), SIGDEBUG_MIGRATE_FAULT); + + return 0; +} + +static int handle_mayday_event(struct pt_regs *regs) +{ + XENO_BUG_ON(COBALT, !xnthread_test_state(xnthread_current(), XNUSER)); + + xnthread_relax(0, 0); + + return KEVENT_PROPAGATE; +} + +int ipipe_trap_hook(struct ipipe_trap_data *data) +{ + if (data->exception == IPIPE_TRAP_MAYDAY) + return handle_mayday_event(data->regs); + + /* + * No migration is possible on behalf of the head domain, so + * the following access is safe. + */ + raw_cpu_ptr(&cobalt_machine_cpudata)->faults[data->exception]++; + + if (handle_exception(data)) + return KEVENT_STOP; + + /* + * CAUTION: access faults must be propagated downstream + * whichever domain caused them, so that we don't spuriously + * raise a fatal error when some Linux fixup code is available + * to recover from the fault. + */ + return KEVENT_PROPAGATE; +} + +/* + * Legacy idle hook, unconditionally allow entering the idle state. + */ +bool ipipe_enter_idle_hook(void) +{ + return true; +} + +static inline int handle_setaffinity_event(struct ipipe_cpu_migration_data *d) +{ + return cobalt_handle_setaffinity_event(d->task); +} + +static inline int handle_taskexit_event(struct task_struct *p) +{ + return cobalt_handle_taskexit_event(p); +} + +void ipipe_migration_hook(struct task_struct *p) /* hw IRQs off */ +{ + struct xnthread *thread = xnthread_from_task(p); + + xnlock_get(&nklock); + + /* + * We fire the handler before the thread is migrated, so that + * thread->sched does not change between paired invocations of + * relax_thread/harden_thread handlers. + */ + xnthread_run_handler_stack(thread, harden_thread); + + cobalt_adjust_affinity(p); + + xnthread_resume(thread, XNRELAX); + + /* + * In case we migrated independently of the user return notifier, clear + * XNCONTHI here and also disable the notifier - we are already done. + */ + if (unlikely(xnthread_test_info(thread, XNCONTHI))) { + xnthread_clear_info(thread, XNCONTHI); + ipipe_disable_user_intret_notifier(); + } + + /* Unregister as debugged thread in case we postponed this. */ + if (unlikely(xnthread_test_state(thread, XNSSTEP))) + cobalt_unregister_debugged_thread(thread); + + xnlock_put(&nklock); + + xnsched_run(); +} + +#ifdef CONFIG_IPIPE_HAVE_HOSTRT + +static IPIPE_DEFINE_SPINLOCK(__hostrtlock); + +static int handle_hostrt_event(struct ipipe_hostrt_data *hostrt) +{ + unsigned long flags; + urwstate_t tmp; + + /* + * The locking strategy is twofold: + * - The spinlock protects against concurrent updates from within the + * Linux kernel and against preemption by Xenomai + * - The unsynced R/W block is for lockless read-only access. 
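+	 * (Editor's note: the reader side, e.g. the Cobalt VDSO-based
+	 * clock_gettime() path, is expected to use the matching
+	 * lockless retry loop, along the lines of:
+	 *
+	 *   unsynced_read_block(&tmp, &nkvdso->hostrt_data.lock) {
+	 *           // snapshot cycle_last/mult/shift/wall_* here;
+	 *           // the block retries if an update intervened
+	 *   }
+	 *
+	 * so the writer below only needs to wrap its updates in
+	 * unsynced_write_block.)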
+ */ + raw_spin_lock_irqsave(&__hostrtlock, flags); + + unsynced_write_block(&tmp, &nkvdso->hostrt_data.lock) { + nkvdso->hostrt_data.live = 1; + nkvdso->hostrt_data.cycle_last = hostrt->cycle_last; + nkvdso->hostrt_data.mask = hostrt->mask; + nkvdso->hostrt_data.mult = hostrt->mult; + nkvdso->hostrt_data.shift = hostrt->shift; + nkvdso->hostrt_data.wall_sec = hostrt->wall_time_sec; + nkvdso->hostrt_data.wall_nsec = hostrt->wall_time_nsec; + nkvdso->hostrt_data.wtom_sec = hostrt->wall_to_monotonic.tv_sec; + nkvdso->hostrt_data.wtom_nsec = hostrt->wall_to_monotonic.tv_nsec; + } + + raw_spin_unlock_irqrestore(&__hostrtlock, flags); + + return KEVENT_PROPAGATE; +} + +static inline void init_hostrt(void) +{ + unsynced_rw_init(&nkvdso->hostrt_data.lock); + nkvdso->hostrt_data.live = 0; +} + +#else /* !CONFIG_IPIPE_HAVE_HOSTRT */ + +struct ipipe_hostrt_data; + +static inline int handle_hostrt_event(struct ipipe_hostrt_data *hostrt) +{ + return KEVENT_PROPAGATE; +} + +static inline void init_hostrt(void) { } + +#endif /* !CONFIG_IPIPE_HAVE_HOSTRT */ + +static int handle_schedule_event(struct task_struct *next_task) +{ + struct task_struct *prev_task; + struct xnthread *next; + sigset_t pending; + spl_t s; + + cobalt_signal_yield(); + + prev_task = current; + next = xnthread_from_task(next_task); + if (next == NULL) + goto out; + + xnlock_get_irqsave(&nklock, s); + + /* + * Track tasks leaving the ptraced state. Check both SIGSTOP + * (NPTL) and SIGINT (LinuxThreads) to detect ptrace + * continuation. + */ + if (xnthread_test_state(next, XNSSTEP)) { + if (signal_pending(next_task)) { + /* + * Do not grab the sighand lock here: it's + * useless, and we already own the runqueue + * lock, so this would expose us to deadlock + * situations on SMP. + */ + sigorsets(&pending, + &next_task->pending.signal, + &next_task->signal->shared_pending.signal); + if (sigismember(&pending, SIGSTOP) || + sigismember(&pending, SIGINT)) + goto no_ptrace; + } + + /* + * Do not unregister before the thread migrated. + * cobalt_unregister_debugged_thread will then be called by our + * ipipe_migration_hook. + */ + if (!xnthread_test_info(next, XNCONTHI)) + cobalt_unregister_debugged_thread(next); + + xnthread_set_localinfo(next, XNHICCUP); + } + +no_ptrace: + xnlock_put_irqrestore(&nklock, s); + + /* + * Do basic sanity checks on the incoming thread state. + * NOTE: we allow ptraced threads to run shortly in order to + * properly recover from a stopped state. + */ + if (!XENO_WARN(COBALT, !xnthread_test_state(next, XNRELAX), + "hardened thread %s[%d] running in Linux domain?! " + "(status=0x%x, sig=%d, prev=%s[%d])", + next->name, task_pid_nr(next_task), + xnthread_get_state(next), + signal_pending(next_task), + prev_task->comm, task_pid_nr(prev_task))) + XENO_WARN(COBALT, + !(next_task->ptrace & PT_PTRACED) && + !xnthread_test_state(next, XNDORMANT) + && xnthread_test_state(next, XNPEND), + "blocked thread %s[%d] rescheduled?! " + "(status=0x%x, sig=%d, prev=%s[%d])", + next->name, task_pid_nr(next_task), + xnthread_get_state(next), + signal_pending(next_task), prev_task->comm, + task_pid_nr(prev_task)); +out: + return KEVENT_PROPAGATE; +} + +static int handle_sigwake_event(struct task_struct *p) +{ + struct xnthread *thread; + sigset_t pending; + spl_t s; + + thread = xnthread_from_task(p); + if (thread == NULL) + return KEVENT_PROPAGATE; + + xnlock_get_irqsave(&nklock, s); + + /* + * CAUTION: __TASK_TRACED is not set in p->state yet. This + * state bit will be set right after we return, when the task + * is woken up. 
+ */ + if ((p->ptrace & PT_PTRACED) && !xnthread_test_state(thread, XNSSTEP)) { + /* We already own the siglock. */ + sigorsets(&pending, + &p->pending.signal, + &p->signal->shared_pending.signal); + + if (sigismember(&pending, SIGTRAP) || + sigismember(&pending, SIGSTOP) + || sigismember(&pending, SIGINT)) + cobalt_register_debugged_thread(thread); + } + + if (xnthread_test_state(thread, XNRELAX)) + goto out; + + /* + * If kicking a shadow thread in primary mode, make sure Linux + * won't schedule in its mate under our feet as a result of + * running signal_wake_up(). The Xenomai scheduler must remain + * in control for now, until we explicitly relax the shadow + * thread to allow for processing the pending signals. Make + * sure we keep the additional state flags unmodified so that + * we don't break any undergoing ptrace. + */ + if (p->state & (TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE)) + cobalt_set_task_state(p, p->state | TASK_NOWAKEUP); + + /* + * Allow a thread stopped for debugging to resume briefly in order to + * migrate to secondary mode. xnthread_relax will reapply XNDBGSTOP. + */ + if (xnthread_test_state(thread, XNDBGSTOP)) + xnthread_resume(thread, XNDBGSTOP); + + __xnthread_kick(thread); +out: + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); + + return KEVENT_PROPAGATE; +} + +static inline int handle_cleanup_event(struct mm_struct *mm) +{ + return cobalt_handle_cleanup_event(mm); +} + +void pipeline_cleanup_process(void) +{ + ipipe_disable_notifier(current); +} + +static inline int handle_clockfreq_event(unsigned int *p) +{ + unsigned int newfreq = *p; + + pipeline_update_clock_freq(newfreq); + + return KEVENT_PROPAGATE; +} + +static inline int handle_user_return(struct task_struct *task) +{ + ipipe_disable_user_intret_notifier(); + return cobalt_handle_user_return(task); +} + +int handle_ptrace_resume(struct ipipe_ptrace_resume_data *resume) +{ + struct xnthread *thread; + spl_t s; + + thread = xnthread_from_task(resume->task); + if (thread == NULL) + return KEVENT_PROPAGATE; + + if (resume->request == PTRACE_SINGLESTEP && + xnthread_test_state(thread, XNSSTEP)) { + xnlock_get_irqsave(&nklock, s); + + xnthread_resume(thread, XNDBGSTOP); + cobalt_unregister_debugged_thread(thread); + + xnlock_put_irqrestore(&nklock, s); + } + + return KEVENT_PROPAGATE; +} + +int ipipe_kevent_hook(int kevent, void *data) +{ + int ret; + + switch (kevent) { + case IPIPE_KEVT_SCHEDULE: + ret = handle_schedule_event(data); + break; + case IPIPE_KEVT_SIGWAKE: + ret = handle_sigwake_event(data); + break; + case IPIPE_KEVT_EXIT: + ret = handle_taskexit_event(data); + break; + case IPIPE_KEVT_CLEANUP: + ret = handle_cleanup_event(data); + break; + case IPIPE_KEVT_SETAFFINITY: + ret = handle_setaffinity_event(data); + break; +#ifdef CONFIG_IPIPE_HAVE_HOSTRT + case IPIPE_KEVT_HOSTRT: + ret = handle_hostrt_event(data); + break; +#endif + case IPIPE_KEVT_CLOCKFREQ: + ret = handle_clockfreq_event(data); + break; + case IPIPE_KEVT_USERINTRET: + ret = handle_user_return(data); + break; + case IPIPE_KEVT_PTRESUME: + ret = handle_ptrace_resume(data); + break; + default: + ret = KEVENT_PROPAGATE; + } + + return ret; +} + +#ifdef CONFIG_MMU + +int pipeline_prepare_current(void) +{ + struct task_struct *p = current; + kernel_siginfo_t si; + + if ((p->mm->def_flags & VM_LOCKED) == 0) { + memset(&si, 0, sizeof(si)); + si.si_signo = SIGDEBUG; + si.si_code = SI_QUEUE; + si.si_int = SIGDEBUG_NOMLOCK | sigdebug_marker; + send_sig_info(SIGDEBUG, &si, p); + return 0; + } + + return 
__ipipe_disable_ondemand_mappings(p); +} + +static inline int get_mayday_prot(void) +{ + return PROT_READ|PROT_EXEC; +} + +#else /* !CONFIG_MMU */ + +int pipeline_prepare_current(void) +{ + return 0; +} + +static inline int get_mayday_prot(void) +{ + /* + * Until we stop backing /dev/mem with the mayday page, we + * can't ask for PROT_EXEC since the former does not define + * mmap capabilities, and default ones won't allow an + * executable mapping with MAP_SHARED. In the NOMMU case, this + * is (currently) not an issue. + */ + return PROT_READ; +} + +#endif /* !CONFIG_MMU */ + +void pipeline_attach_current(struct xnthread *thread) +{ + struct cobalt_threadinfo *p; + + p = pipeline_current(); + p->thread = thread; + p->process = cobalt_search_process(current->mm); +} + +int pipeline_trap_kevents(void) +{ + init_hostrt(); + ipipe_set_hooks(ipipe_root_domain, IPIPE_SYSCALL|IPIPE_KEVENT); + ipipe_set_hooks(&xnsched_primary_domain, IPIPE_SYSCALL|IPIPE_TRAP); + + return 0; +} + +void pipeline_enable_kevents(void) +{ + ipipe_enable_notifier(current); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/sched.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/sched.c new file mode 100644 index 0000000..3104e50 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/sched.c @@ -0,0 +1,198 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2001-2020 Philippe Gerum <rpm@xenomai.org>. + */ + +#include <cobalt/kernel/thread.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/assert.h> +#include <pipeline/sched.h> +#include <trace/events/cobalt-core.h> + +int pipeline_schedule(struct xnsched *sched) +{ + int ret = 0; + + XENO_WARN_ON_ONCE(COBALT, + !hard_irqs_disabled() && is_secondary_domain()); + + if (!xnarch_escalate()) + ret = ___xnsched_run(sched); + + return ret; +} +EXPORT_SYMBOL_GPL(pipeline_schedule); + +void pipeline_prep_switch_oob(struct xnthread *root) +{ + struct xnarchtcb *rootcb = xnthread_archtcb(root); + struct task_struct *p = current; + + ipipe_notify_root_preemption(); + /* Remember the preempted Linux task pointer. */ + rootcb->core.host_task = p; + rootcb->core.tsp = &p->thread; + rootcb->core.mm = rootcb->core.active_mm = ipipe_get_active_mm(); + rootcb->core.tip = task_thread_info(p); + xnarch_leave_root(root); +} + +#ifdef CONFIG_XENO_ARCH_FPU + +static void switch_fpu(void) +{ + struct xnsched *sched = xnsched_current(); + struct xnthread *curr = sched->curr; + + if (!xnthread_test_state(curr, XNFPU)) + return; + + xnarch_switch_fpu(sched->fpuholder, curr); + sched->fpuholder = curr; +} + +static void giveup_fpu(struct xnthread *thread) +{ + struct xnsched *sched = thread->sched; + + if (thread == sched->fpuholder) + sched->fpuholder = NULL; +} + +#else + +static inline void giveup_fpu(struct xnthread *thread) +{ } + +#endif /* !CONFIG_XENO_ARCH_FPU */ + +bool pipeline_switch_to(struct xnthread *prev, struct xnthread *next, + bool leaving_inband) +{ + xnarch_switch_to(prev, next); + + /* + * Test whether we transitioned from primary mode to secondary + * over a shadow thread, caused by a call to xnthread_relax(). + * In such a case, we are running over the regular schedule() + * tail code, so we have to tell the caller to skip the Cobalt + * tail code. 
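+	 * Editor's note (flow sketch): a relaxing thread gets here
+	 * because xnthread_relax() suspended it with XNRELAX, the
+	 * Cobalt scheduler switched it out, and the task later resumes
+	 * inside the regular Linux schedule() tail. Returning true
+	 * tells the caller to skip the Cobalt switch-tail code, since
+	 * Linux already owns the context at that point.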
+ */ + if (!leaving_inband && is_secondary_domain()) { + __ipipe_complete_domain_migration(); + XENO_BUG_ON(COBALT, xnthread_current() == NULL); + /* + * Interrupts must be disabled here (has to be done on + * entry of the Linux [__]switch_to function), but it + * is what callers expect, specifically the reschedule + * of an IRQ handler that hit before we call + * xnsched_run in xnthread_suspend() when relaxing a + * thread. + */ + XENO_BUG_ON(COBALT, !hard_irqs_disabled()); + return true; + } + + switch_fpu(); + + return false; +} + +void pipeline_init_shadow_tcb(struct xnthread *thread) +{ + struct xnarchtcb *tcb = xnthread_archtcb(thread); + struct task_struct *p = current; + + /* + * If the current task is a kthread, the pipeline will take + * the necessary steps to make the FPU usable in such + * context. The kernel already took care of this issue for + * userland tasks (e.g. setting up a clean backup area). + */ + __ipipe_share_current(0); + + tcb->core.host_task = p; + tcb->core.tsp = &p->thread; + tcb->core.mm = p->mm; + tcb->core.active_mm = p->mm; + tcb->core.tip = task_thread_info(p); +#ifdef CONFIG_XENO_ARCH_FPU + tcb->core.user_fpu_owner = p; +#endif /* CONFIG_XENO_ARCH_FPU */ + xnarch_init_shadow_tcb(thread); + + trace_cobalt_shadow_map(thread); +} + +void pipeline_init_root_tcb(struct xnthread *thread) +{ + struct xnarchtcb *tcb = xnthread_archtcb(thread); + struct task_struct *p = current; + + tcb->core.host_task = p; + tcb->core.tsp = &tcb->core.ts; + tcb->core.mm = p->mm; + tcb->core.tip = NULL; + xnarch_init_root_tcb(thread); +} + +int pipeline_leave_inband(void) +{ + int ret; + + ret = __ipipe_migrate_head(); + if (ret) + return ret; + + switch_fpu(); + + return 0; +} + +int pipeline_leave_oob_prepare(void) +{ + struct xnthread *curr = xnthread_current(); + struct task_struct *p = current; + int suspmask = XNRELAX; + + set_current_state(p->state & ~TASK_NOWAKEUP); + + /* + * If current is being debugged, record that it should migrate + * back in case it resumes in userspace. If it resumes in + * kernel space, i.e. over a restarting syscall, the + * associated hardening will both clear XNCONTHI and disable + * the user return notifier again. + */ + if (xnthread_test_state(curr, XNSSTEP)) { + xnthread_set_info(curr, XNCONTHI); + ipipe_enable_user_intret_notifier(); + suspmask |= XNDBGSTOP; + } + /* + * Return the suspension bits the caller should pass to + * xnthread_suspend(). + */ + return suspmask; +} + +void pipeline_leave_oob_finish(void) +{ + __ipipe_reenter_root(); +} + +void pipeline_finalize_thread(struct xnthread *thread) +{ + giveup_fpu(thread); +} + +void pipeline_raise_mayday(struct task_struct *tsk) +{ + ipipe_raise_mayday(tsk); +} + +void pipeline_clear_mayday(void) /* May solely affect current. 
 */
+{
+	ipipe_clear_thread_flag(TIP_MAYDAY);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/syscall.c
new file mode 100644
index 0000000..867a81e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/syscall.c
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>
+ * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+
+#include <pipeline/pipeline.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/assert.h>
+#include <xenomai/posix/syscall.h>
+
+int ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs)
+{
+	if (unlikely(is_secondary_domain()))
+		return handle_root_syscall(regs);
+
+	return handle_head_syscall(ipd != &xnsched_primary_domain, regs);
+}
+
+int ipipe_fastcall_hook(struct pt_regs *regs)
+{
+	int ret;
+
+	ret = handle_head_syscall(false, regs);
+	XENO_BUG_ON(COBALT, ret == KEVENT_PROPAGATE);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/tick.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/tick.c
new file mode 100644
index 0000000..db6e37c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/tick.c
@@ -0,0 +1,286 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+#include <linux/ipipe.h>
+#include <linux/ipipe_tickdev.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/arith.h>
+
+extern struct xnintr nktimer;
+
+/**
+ * @internal
+ * @fn static int program_htick_shot(unsigned long delay, struct clock_event_device *cdev)
+ *
+ * @brief Program next host tick as a Xenomai timer event.
+ *
+ * Program the next shot for the host tick on the current CPU.
+ * Emulation is done using a nucleus timer attached to the master
+ * timebase.
+ *
+ * @param delay The time delta from the current date to the next tick,
+ * expressed as a count of nanoseconds.
+ *
+ * @param cdev A pointer to the clock device which notifies us.
+ *
+ * @coretags{unrestricted}
+ */
+static int program_htick_shot(unsigned long delay,
+			      struct clock_event_device *cdev)
+{
+	struct xnsched *sched;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_current();
+	ret = xntimer_start(&sched->htimer, delay, XN_INFINITE, XN_RELATIVE);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret ? -ETIME : 0;
+}
+
+/**
+ * @internal
+ * @fn void switch_htick_mode(enum clock_event_mode mode, struct clock_event_device *cdev)
+ *
+ * @brief Tick mode switch emulation callback.
+ *
+ * Changes the host tick mode for the tick device of the current CPU.
+ *
+ * @param mode The new mode to switch to. The possible values are:
+ *
+ * - CLOCK_EVT_MODE_ONESHOT, for a switch to oneshot mode.
+ *
+ * - CLOCK_EVT_MODE_PERIODIC, for a switch to periodic mode. The
+ * current implementation of the generic clockevent layer in Linux
+ * should never downgrade from a oneshot to a periodic tick mode, so
+ * this mode should not be encountered. This said, the associated code
+ * is provided, basically for illustration purposes.
+ *
+ * - CLOCK_EVT_MODE_SHUTDOWN, indicates the removal of the current
+ * tick device.
Normally, the nucleus only interposes on tick devices + * which should never be shut down, so this mode should not be + * encountered. + * + * @param cdev An opaque pointer to the clock device which notifies us. + * + * @coretags{unrestricted} + * + * @note GENERIC_CLOCKEVENTS is required from the host kernel. + */ +static void switch_htick_mode(enum clock_event_mode mode, + struct clock_event_device *cdev) +{ + struct xnsched *sched; + xnticks_t tickval; + spl_t s; + + if (mode == CLOCK_EVT_MODE_ONESHOT) + return; + + xnlock_get_irqsave(&nklock, s); + + sched = xnsched_current(); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + tickval = 1000000000UL / HZ; + xntimer_start(&sched->htimer, tickval, tickval, XN_RELATIVE); + break; + case CLOCK_EVT_MODE_SHUTDOWN: + xntimer_stop(&sched->htimer); + break; + default: + XENO_BUG(COBALT); + } + + xnlock_put_irqrestore(&nklock, s); +} + +static int grab_timer_on_cpu(int cpu) +{ + int tickval, ret; + + ret = ipipe_timer_start(xnintr_core_clock_handler, + switch_htick_mode, program_htick_shot, cpu); + switch (ret) { + case CLOCK_EVT_MODE_PERIODIC: + /* + * Oneshot tick emulation callback won't be used, ask + * the caller to start an internal timer for emulating + * a periodic tick. + */ + tickval = 1000000000UL / HZ; + break; + + case CLOCK_EVT_MODE_ONESHOT: + /* oneshot tick emulation */ + tickval = 1; + break; + + case CLOCK_EVT_MODE_UNUSED: + /* we don't need to emulate the tick at all. */ + tickval = 0; + break; + + case CLOCK_EVT_MODE_SHUTDOWN: + return -ENODEV; + + default: + return ret; + } + + return tickval; +} + +/** + * @fn int pipeline_install_tick_proxy(void) + * @brief Grab the hardware timer on all real-time CPUs. + * + * pipeline_install_tick_proxy() grabs and tunes the hardware timer for all + * real-time CPUs. + * + * Host tick emulation is performed for sharing the clock chip between + * Linux and Xenomai. + * + * @return a positive value is returned on success, representing the + * duration of a Linux periodic tick expressed as a count of + * nanoseconds; zero should be returned when the Linux kernel does not + * undergo periodic timing on the given CPU (e.g. oneshot + * mode). Otherwise: + * + * - -EBUSY is returned if the hardware timer has already been + * grabbed. xntimer_release_hardware() must be issued before + * pipeline_install_tick_proxy() is called again. + * + * - -ENODEV is returned if the hardware timer cannot be used. This + * situation may occur after the kernel disabled the timer due to + * invalid calibration results; in such a case, such hardware is + * unusable for any timing duties. + * + * @coretags{secondary-only} + */ + +int pipeline_install_tick_proxy(void) +{ + struct xnsched *sched; + int ret, cpu, _cpu; + spl_t s; + +#ifdef CONFIG_XENO_OPT_STATS_IRQS + /* + * Only for statistical purpose, the timer interrupt is + * attached by pipeline_install_tick_proxy(). + */ + xnintr_init(&nktimer, "[timer]", + per_cpu(ipipe_percpu.hrtimer_irq, 0), NULL, NULL, 0); +#endif /* CONFIG_XENO_OPT_STATS_IRQS */ + +#ifdef CONFIG_SMP + ret = ipipe_request_irq(&cobalt_pipeline.domain, + IPIPE_HRTIMER_IPI, + (ipipe_irq_handler_t)xnintr_core_clock_handler, + NULL, NULL); + if (ret) + return ret; +#endif + + for_each_realtime_cpu(cpu) { + ret = grab_timer_on_cpu(cpu); + if (ret < 0) + goto fail; + + xnlock_get_irqsave(&nklock, s); + + /* + * If the current tick device for the target CPU is + * periodic, we won't be called back for host tick + * emulation. 
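+		 * (Editor's note: grab_timer_on_cpu() encodes the
+		 * emulation mode in its return value, which the code
+		 * after this comment maps to a host timer setup:
+		 *
+		 *   ret > 1  -> periodic tick, period = ret nanoseconds:
+		 *               xntimer_start(&sched->htimer, ret, ret, XN_RELATIVE);
+		 *   ret == 1 -> oneshot emulation, rearmed by program_htick_shot():
+		 *               xntimer_start(&sched->htimer, 0, 0, XN_RELATIVE);
+		 *   ret == 0 -> no host tick emulation is needed.)
+		 *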
Therefore, we need to start a periodic + * nucleus timer which will emulate the ticking for + * that CPU, since we are going to hijack the hw clock + * chip for managing our own system timer. + * + * CAUTION: + * + * - nucleus timers may be started only _after_ the hw + * timer has been set up for the target CPU through a + * call to pipeline_install_tick_proxy(). + * + * - we don't compensate for the elapsed portion of + * the current host tick, since we cannot get this + * information easily for all CPUs except the current + * one, and also because of the declining relevance of + * the jiffies clocksource anyway. + * + * - we must not hold the nklock across calls to + * pipeline_install_tick_proxy(). + */ + + sched = xnsched_struct(cpu); + /* Set up timer with host tick period if valid. */ + if (ret > 1) + xntimer_start(&sched->htimer, ret, ret, XN_RELATIVE); + else if (ret == 1) + xntimer_start(&sched->htimer, 0, 0, XN_RELATIVE); + + xnlock_put_irqrestore(&nklock, s); + } + + return 0; +fail: + for_each_realtime_cpu(_cpu) { + if (_cpu == cpu) + break; + xnlock_get_irqsave(&nklock, s); + sched = xnsched_struct(cpu); + xntimer_stop(&sched->htimer); + xnlock_put_irqrestore(&nklock, s); + ipipe_timer_stop(_cpu); + } + +#ifdef CONFIG_SMP + ipipe_free_irq(&cobalt_pipeline.domain, + IPIPE_HRTIMER_IPI); +#endif + + return ret; +} + +/** + * @fn void pipeline_uninstall_tick_proxy(void) + * @brief Release hardware timers. + * + * Releases hardware timers previously grabbed by a call to + * pipeline_install_tick_proxy(). + * + * @coretags{secondary-only} + */ +void pipeline_uninstall_tick_proxy(void) +{ + int cpu; + + /* + * We must not hold the nklock while stopping the hardware + * timer, since this could cause deadlock situations to arise + * on SMP systems. + */ + for_each_realtime_cpu(cpu) + ipipe_timer_stop(cpu); + +#ifdef CONFIG_SMP + ipipe_free_irq(&cobalt_pipeline.domain, + IPIPE_HRTIMER_IPI); +#endif + +#ifdef CONFIG_XENO_OPT_STATS_IRQS + xnintr_destroy(&nktimer); +#endif /* CONFIG_XENO_OPT_STATS_IRQS */ +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/lock.c b/kernel/xenomai-v3.2.4/kernel/cobalt/lock.c new file mode 100644 index 0000000..e48072d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/lock.c @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2001-2012 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/module.h> +#include <cobalt/kernel/lock.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_lock Locking services + * + * The Xenomai core deals with concurrent activities from two distinct + * kernels running side-by-side. 
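+ * (Editor's note: the canonical usage pattern of the nklock services
+ * defined below, as seen throughout this patch, is:
+ *
+ * @code
+ * spl_t s;
+ *
+ * xnlock_get_irqsave(&nklock, s);
+ * // ... manipulate Cobalt core state ...
+ * xnlock_put_irqrestore(&nklock, s);
+ * @endcode
+ *
+ * where the "s" cookie saves and restores the hard interrupt state.)
+ *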
When interrupts are involved, the + * services from this section control the @b hard interrupt state + * exclusively, for protecting against processor-local or SMP + * concurrency. + * + * @note In a dual kernel configuration, <i>hard interrupts</i> are + * gated by the CPU. When enabled, hard interrupts are immediately + * delivered to the Xenomai core if they belong to a real-time source, + * or deferred until enabled by a second-stage virtual interrupt mask, + * if they belong to regular Linux devices/sources. + * + * @{ + */ +DEFINE_XNLOCK(nklock); +#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING) +EXPORT_SYMBOL_GPL(nklock); + +#ifdef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK +int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + return ____xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT); +} +EXPORT_SYMBOL_GPL(___xnlock_get); + +void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + ____xnlock_put(lock /* , */ XNLOCK_DBG_PASS_CONTEXT); +} +EXPORT_SYMBOL_GPL(___xnlock_put); +#endif /* out of line xnlock */ +#endif /* CONFIG_SMP || XENO_DEBUG(LOCKING) */ + +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING +DEFINE_PER_CPU(struct xnlockinfo, xnlock_stats); +EXPORT_PER_CPU_SYMBOL_GPL(xnlock_stats); +#endif + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/map.c b/kernel/xenomai-v3.2.4/kernel/cobalt/map.c new file mode 100644 index 0000000..161d24c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/map.c @@ -0,0 +1,265 @@ +/* + * Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/module.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/map.h> +#include <asm/xenomai/machine.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_map Lightweight key-to-object mapping service + * + * A map is a simple indexing structure which associates unique + * integer keys with pointers to objects. The current implementation + * supports reservation, for naming/indexing objects, either on a + * fixed, user-provided integer (i.e. a reserved key value), or by + * drawing the next available key internally if the caller did not + * specify any fixed key. For instance, in some given map, the key + * space ranging from 0 to 255 could be reserved for fixed keys, + * whilst the range from 256 to 511 could be available for drawing + * free keys dynamically. + * + * A maximum of 1024 unique keys per map is supported on 32bit + * machines. + * + * (This implementation should not be confused with C++ STL maps, + * which are dynamically expandable and allow arbitrary key types; + * Xenomai maps don't). + * + * @{ + */ + +/** + * @fn void xnmap_create(int nkeys, int reserve, int offset) + * @brief Create a map. + * + * Allocates a new map with the specified addressing capabilities. 
The + * memory is obtained from the Xenomai system heap. + * + * @param nkeys The maximum number of unique keys the map will be able + * to hold. This value cannot exceed the static limit represented by + * XNMAP_MAX_KEYS, and must be a power of two. + * + * @param reserve The number of keys which should be kept for + * reservation within the index space. Reserving a key means to + * specify a valid key to the xnmap_enter() service, which will then + * attempt to register this exact key, instead of drawing the next + * available key from the unreserved index space. When reservation is + * in effect, the unreserved index space will hold key values greater + * than @a reserve, keeping the low key values for the reserved space. + * For instance, passing @a reserve = 32 would cause the index range [ + * 0 .. 31 ] to be kept for reserved keys. When non-zero, @a reserve + * is rounded to the next multiple of BITS_PER_LONG. If @a reserve is + * zero no reservation will be available from the map. + * + * @param offset The lowest key value xnmap_enter() will return to the + * caller. Key values will be in the range [ 0 + offset .. @a nkeys + + * offset - 1 ]. Negative offsets are valid. + * + * @return the address of the new map is returned on success; + * otherwise, NULL is returned if @a nkeys is invalid. + * + * @coretags{task-unrestricted} + */ +struct xnmap *xnmap_create(int nkeys, int reserve, int offset) +{ + struct xnmap *map; + int mapsize; + + if (nkeys <= 0 || (nkeys & (nkeys - 1)) != 0) + return NULL; + + mapsize = sizeof(*map) + (nkeys - 1) * sizeof(map->objarray[0]); + map = xnmalloc(mapsize); + + if (!map) + return NULL; + + map->ukeys = 0; + map->nkeys = nkeys; + map->offset = offset; + map->himask = (1 << ((reserve + BITS_PER_LONG - 1) / BITS_PER_LONG)) - 1; + map->himap = ~0; + memset(map->lomap, ~0, sizeof(map->lomap)); + memset(map->objarray, 0, sizeof(map->objarray[0]) * nkeys); + + return map; +} +EXPORT_SYMBOL_GPL(xnmap_create); + +/** + * @fn void xnmap_delete(struct xnmap *map) + * @brief Delete a map. + * + * Deletes a map, freeing any associated memory back to the Xenomai + * system heap. + * + * @param map The address of the map to delete. + * + * @coretags{task-unrestricted} + */ +void xnmap_delete(struct xnmap *map) +{ + xnfree(map); +} +EXPORT_SYMBOL_GPL(xnmap_delete); + +/** + * @fn void xnmap_enter(struct xnmap *map, int key, void *objaddr) + * @brief Index an object into a map. + * + * Insert a new object into the given map. + * + * @param map The address of the map to insert into. + * + * @param key The key to index the object on. If this key is within + * the valid index range [ 0 - offset .. nkeys - offset - 1 ], then an + * attempt to reserve this exact key is made. If @a key has an + * out-of-range value lower or equal to 0 - offset - 1, then an + * attempt is made to draw a free key from the unreserved index space. + * + * @param objaddr The address of the object to index on the key. This + * value will be returned by a successful call to xnmap_fetch() with + * the same key. + * + * @return a valid key is returned on success, either @a key if + * reserved, or the next free key. Otherwise: + * + * - -EEXIST is returned upon attempt to reserve a busy key. + * + * - -ENOSPC when no more free key is available. 
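+ *
+ * As an illustrative sketch only (not part of the original sources),
+ * a minimal create/enter/fetch/remove sequence could look like the
+ * following, assuming obj points at some caller-owned object:
+ *
+ * @code
+ * struct xnmap *map;
+ * void *obj, *found;
+ * int key;
+ *
+ * map = xnmap_create(256, 32, 0); // 256 keys, low range reserved
+ * if (map == NULL)
+ *         return -ENOMEM;
+ *
+ * // obj: caller-owned object, initialization elided
+ * key = xnmap_enter(map, -1, obj); // out-of-range key: draw a free one
+ * if (key < 0)
+ *         return key;
+ *
+ * found = xnmap_fetch(map, key); // yields obj again
+ * xnmap_remove(map, key); // release the key
+ * xnmap_delete(map);
+ * @endcode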
+ *
+ * @coretags{unrestricted}
+ */
+int xnmap_enter(struct xnmap *map, int key, void *objaddr)
+{
+ int hi, lo, ofkey = key - map->offset;
+ spl_t s;
+
+ xnlock_get_irqsave(&nklock, s);
+
+ if (ofkey >= 0 && ofkey < map->nkeys) {
+ if (map->objarray[ofkey] != NULL) {
+ key = -EEXIST;
+ goto unlock_and_exit;
+ }
+ } else if (map->ukeys >= map->nkeys) {
+ key = -ENOSPC;
+ goto unlock_and_exit;
+ } else {
+ /* The himask implements a namespace reservation of
+ half of the bitmap space which cannot be used to
+ draw keys. */
+
+ hi = ffnz(map->himap & ~map->himask);
+ lo = ffnz(map->lomap[hi]);
+ ofkey = hi * BITS_PER_LONG + lo;
+ ++map->ukeys;
+
+ map->lomap[hi] &= ~(1UL << lo);
+ if (map->lomap[hi] == 0)
+ map->himap &= ~(1UL << hi);
+ }
+
+ map->objarray[ofkey] = objaddr;
+ key = ofkey + map->offset;
+
+ unlock_and_exit:
+
+ xnlock_put_irqrestore(&nklock, s);
+
+ return key;
+}
+EXPORT_SYMBOL_GPL(xnmap_enter);
+
+/**
+ * @fn void xnmap_remove(struct xnmap *map, int key)
+ * @brief Remove an object reference from a map.
+ *
+ * Removes an object reference from the given map, releasing the
+ * associated key.
+ *
+ * @param map The address of the map to remove from.
+ *
+ * @param key The key the object reference to be removed is indexed
+ * on.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ESRCH is returned if @a key is invalid.
+ *
+ * @coretags{unrestricted}
+ */
+int xnmap_remove(struct xnmap *map, int key)
+{
+ int ofkey = key - map->offset, hi, lo;
+ spl_t s;
+
+ if (ofkey < 0 || ofkey >= map->nkeys)
+ return -ESRCH;
+
+ hi = ofkey / BITS_PER_LONG;
+ lo = ofkey % BITS_PER_LONG;
+ xnlock_get_irqsave(&nklock, s);
+ map->objarray[ofkey] = NULL;
+ map->himap |= (1UL << hi);
+ map->lomap[hi] |= (1UL << lo);
+ --map->ukeys;
+ xnlock_put_irqrestore(&nklock, s);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xnmap_remove);
+
+/**
+ * @fn void xnmap_fetch(struct xnmap *map, int key)
+ * @brief Search for an object in a map.
+ *
+ * Retrieve an object reference from the given map by its index key.
+ *
+ * @param map The address of the map to retrieve from.
+ *
+ * @param key The key to be searched for in the map index.
+ *
+ * @return The indexed object address is returned on success,
+ * otherwise NULL is returned when @a key is invalid or no object is
+ * currently indexed on it.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void xnmap_fetch_nocheck(struct xnmap *map, int key)
+ * @brief Search for an object in a map - unchecked form.
+ *
+ * Retrieves an object reference from the given map by its index key,
+ * but performs no sanity check on the provided key.
+ *
+ * @param map The address of the map to retrieve from.
+ *
+ * @param key The key to be searched for in the map index.
+ *
+ * @return The indexed object address is returned on success,
+ * otherwise NULL is returned when no object is currently indexed on
+ * @a key.
+ *
+ * @coretags{unrestricted}
+ */
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c b/kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c
new file mode 100644
index 0000000..e846303
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c
@@ -0,0 +1,1201 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2005 Dmitry Adamushko <dmitry.adamushko@gmail.com> + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA + * 02139, USA; either version 2 of the License, or (at your option) + * any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/fcntl.h> +#include <linux/poll.h> +#include <linux/termios.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/uaccess.h> +#include <linux/compat.h> +#include <asm/io.h> +#include <asm/xenomai/syscall.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/pipe.h> +#include <pipeline/sirq.h> + +static int xnpipe_asyncsig = SIGIO; + +struct xnpipe_state xnpipe_states[XNPIPE_NDEVS]; +EXPORT_SYMBOL_GPL(xnpipe_states); + +#define XNPIPE_BITMAP_SIZE ((XNPIPE_NDEVS + BITS_PER_LONG - 1) / BITS_PER_LONG) + +static unsigned long xnpipe_bitmap[XNPIPE_BITMAP_SIZE]; + +static LIST_HEAD(xnpipe_sleepq); + +static LIST_HEAD(xnpipe_asyncq); + +static int xnpipe_wakeup_virq; + +static struct class *xnpipe_class; + +/* Allocation of minor values */ + +static inline int xnpipe_minor_alloc(int minor) +{ + spl_t s; + + if ((minor < 0 && minor != XNPIPE_MINOR_AUTO) || minor >= XNPIPE_NDEVS) + return -ENODEV; + + xnlock_get_irqsave(&nklock, s); + + if (minor == XNPIPE_MINOR_AUTO) + minor = find_first_zero_bit(xnpipe_bitmap, XNPIPE_NDEVS); + + if (minor == XNPIPE_NDEVS || + (xnpipe_bitmap[minor / BITS_PER_LONG] & + (1UL << (minor % BITS_PER_LONG)))) + minor = -EBUSY; + else + xnpipe_bitmap[minor / BITS_PER_LONG] |= + (1UL << (minor % BITS_PER_LONG)); + + xnlock_put_irqrestore(&nklock, s); + + return minor; +} + +static inline void xnpipe_minor_free(int minor) +{ + xnpipe_bitmap[minor / BITS_PER_LONG] &= + ~(1UL << (minor % BITS_PER_LONG)); +} + +static inline void xnpipe_enqueue_wait(struct xnpipe_state *state, int mask) +{ + if (state->wcount != 0x7fffffff && state->wcount++ == 0) + list_add_tail(&state->slink, &xnpipe_sleepq); + + state->status |= mask; +} + +static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask) +{ + if (state->status & mask) + if (--state->wcount == 0) { + list_del(&state->slink); + state->status &= ~mask; + } +} + +static inline void xnpipe_dequeue_all(struct xnpipe_state *state, int mask) +{ + if (state->status & mask) { + if (state->wcount) { + state->wcount = 0; + list_del(&state->slink); + state->status &= ~mask; + } + } +} + +/* Must be entered with nklock held, interrupts off. 
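+ * The nklock is dropped while the caller sleeps and re-grabbed
+ * before the expansion returns; the result is non-zero whenever a
+ * Linux signal is pending for the caller.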
*/ +#define xnpipe_wait(__state, __mask, __s, __cond) \ +({ \ + wait_queue_head_t *__waitq; \ + DEFINE_WAIT(__wait); \ + int __sigpending; \ + \ + if ((__mask) & XNPIPE_USER_WREAD) \ + __waitq = &(__state)->readq; \ + else \ + __waitq = &(__state)->syncq; \ + \ + xnpipe_enqueue_wait(__state, __mask); \ + xnlock_put_irqrestore(&nklock, __s); \ + \ + for (;;) { \ + __sigpending = signal_pending(current); \ + if (__sigpending) \ + break; \ + prepare_to_wait_exclusive(__waitq, &__wait, TASK_INTERRUPTIBLE); \ + if (__cond || (__state)->status & XNPIPE_KERN_LCLOSE) \ + break; \ + schedule(); \ + } \ + \ + finish_wait(__waitq, &__wait); \ + \ + /* Restore the interrupt state initially set by the caller. */ \ + xnlock_get_irqsave(&nklock, __s); \ + xnpipe_dequeue_wait(__state, __mask); \ + \ + __sigpending; \ +}) + +static irqreturn_t xnpipe_wakeup_proc(int sirq, void *dev_id) +{ + struct xnpipe_state *state; + unsigned long rbits; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + /* + * NOTE: sleepers might enter/leave the queue while we don't + * hold the nklock in these wakeup loops. So we iterate over + * each sleeper list until we find no more candidate for + * wakeup after an entire scan, redoing the scan from the list + * head otherwise. + */ + for (;;) { + if (list_empty(&xnpipe_sleepq)) + goto check_async; + + state = list_first_entry(&xnpipe_sleepq, struct xnpipe_state, slink); + + for (;;) { + rbits = state->status & XNPIPE_USER_ALL_READY; + if (rbits) + break; + if (list_is_last(&state->slink, &xnpipe_sleepq)) + goto check_async; + state = list_next_entry(state, slink); + } + + state->status &= ~rbits; + + if ((rbits & XNPIPE_USER_WREAD_READY) != 0) { + if (waitqueue_active(&state->readq)) { + xnlock_put_irqrestore(&nklock, s); + wake_up_interruptible(&state->readq); + xnlock_get_irqsave(&nklock, s); + } + } + if ((rbits & XNPIPE_USER_WSYNC_READY) != 0) { + if (waitqueue_active(&state->syncq)) { + xnlock_put_irqrestore(&nklock, s); + wake_up_interruptible(&state->syncq); + xnlock_get_irqsave(&nklock, s); + } + } + } + +check_async: + /* + * Scan the async queue, sending the proper signal to + * subscribers. + */ + for (;;) { + if (list_empty(&xnpipe_asyncq)) + goto out; + + state = list_first_entry(&xnpipe_asyncq, struct xnpipe_state, alink); + + for (;;) { + if (state->status & XNPIPE_USER_SIGIO) + break; + if (list_is_last(&state->alink, &xnpipe_asyncq)) + goto out; + state = list_next_entry(state, alink); + } + + state->status &= ~XNPIPE_USER_SIGIO; + xnlock_put_irqrestore(&nklock, s); + kill_fasync(&state->asyncq, xnpipe_asyncsig, POLL_IN); + xnlock_get_irqsave(&nklock, s); + } +out: + xnlock_put_irqrestore(&nklock, s); + + return IRQ_HANDLED; +} + +static inline void xnpipe_schedule_request(void) /* hw IRQs off */ +{ + pipeline_post_sirq(xnpipe_wakeup_virq); +} + +static inline ssize_t xnpipe_flush_bufq(void (*fn)(void *buf, void *xstate), + struct list_head *q, + void *xstate) +{ + struct xnpipe_mh *mh, *tmp; + ssize_t n = 0; + + if (list_empty(q)) + return 0; + + /* Queue is private, no locking is required. */ + list_for_each_entry_safe(mh, tmp, q, link) { + list_del(&mh->link); + n += xnpipe_m_size(mh); + fn(mh, xstate); + } + + /* Return the overall count of bytes flushed. */ + return n; +} + +/* + * Move the specified queue contents to a private queue, then call the + * flush handler to purge it. The latter runs without locking. + * Returns the number of bytes flushed. Must be entered with nklock + * held, interrupts off. 
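+ * Since the nklock is temporarily released while the flush handler
+ * runs, the pipe state may change across a call; callers must
+ * re-check any condition they rely on afterwards.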
+ */ +#define xnpipe_flushq(__state, __q, __f, __s) \ +({ \ + LIST_HEAD(__privq); \ + ssize_t __n; \ + \ + list_splice_init(&(state)->__q, &__privq); \ + (__state)->nr ## __q = 0; \ + xnlock_put_irqrestore(&nklock, (__s)); \ + __n = xnpipe_flush_bufq((__state)->ops.__f, &__privq, (__state)->xstate); \ + xnlock_get_irqsave(&nklock, (__s)); \ + \ + __n; \ +}) + +static void *xnpipe_default_alloc_ibuf(size_t size, void *xstate) +{ + void *buf; + + buf = xnmalloc(size); + if (likely(buf != NULL)) + return buf; + + if (size > xnheap_get_size(&cobalt_heap)) + /* Request will never succeed. */ + return (struct xnpipe_mh *)-1; + + return NULL; +} + +static void xnpipe_default_free_ibuf(void *buf, void *xstate) +{ + xnfree(buf); +} + +static void xnpipe_default_release(void *xstate) +{ +} + +static inline int xnpipe_set_ops(struct xnpipe_state *state, + struct xnpipe_operations *ops) +{ + state->ops = *ops; + + if (ops->free_obuf == NULL) + /* + * Caller must provide a way to free unread outgoing + * buffers. + */ + return -EINVAL; + + /* Set some default handlers for common usage. */ + if (ops->alloc_ibuf == NULL) + state->ops.alloc_ibuf = xnpipe_default_alloc_ibuf; + if (ops->free_ibuf == NULL) + state->ops.free_ibuf = xnpipe_default_free_ibuf; + if (ops->release == NULL) + state->ops.release = xnpipe_default_release; + + return 0; +} + +int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate) +{ + struct xnpipe_state *state; + int need_sched = 0, ret; + spl_t s; + + minor = xnpipe_minor_alloc(minor); + if (minor < 0) + return minor; + + state = &xnpipe_states[minor]; + + xnlock_get_irqsave(&nklock, s); + + ret = xnpipe_set_ops(state, ops); + if (ret) { + xnlock_put_irqrestore(&nklock, s); + return ret; + } + + state->status |= XNPIPE_KERN_CONN; + xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL); + state->xstate = xstate; + state->ionrd = 0; + + if (state->status & XNPIPE_USER_CONN) { + if (state->status & XNPIPE_USER_WREAD) { + /* + * Wake up the regular Linux task waiting for + * the kernel side to connect (xnpipe_open). + */ + state->status |= XNPIPE_USER_WREAD_READY; + need_sched = 1; + } + + if (state->asyncq) { /* Schedule asynch sig. */ + state->status |= XNPIPE_USER_SIGIO; + need_sched = 1; + } + } + + if (need_sched) + xnpipe_schedule_request(); + + xnlock_put_irqrestore(&nklock, s); + + return minor; +} +EXPORT_SYMBOL_GPL(xnpipe_connect); + +int xnpipe_disconnect(int minor) +{ + struct xnpipe_state *state; + int need_sched = 0; + spl_t s; + + if (minor < 0 || minor >= XNPIPE_NDEVS) + return -ENODEV; + + state = &xnpipe_states[minor]; + + xnlock_get_irqsave(&nklock, s); + + if ((state->status & XNPIPE_KERN_CONN) == 0) { + xnlock_put_irqrestore(&nklock, s); + return -EBADF; + } + + state->status &= ~XNPIPE_KERN_CONN; + + state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s); + + if ((state->status & XNPIPE_USER_CONN) == 0) + goto cleanup; + + xnpipe_flushq(state, inq, free_ibuf, s); + + if (xnsynch_destroy(&state->synchbase) == XNSYNCH_RESCHED) + xnsched_run(); + + if (state->status & XNPIPE_USER_WREAD) { + /* + * Wake up the regular Linux task waiting for some + * operation from the Xenomai side (read/write or + * poll). + */ + state->status |= XNPIPE_USER_WREAD_READY; + need_sched = 1; + } + + if (state->asyncq) { /* Schedule asynch sig. */ + state->status |= XNPIPE_USER_SIGIO; + need_sched = 1; + } + +cleanup: + /* + * If xnpipe_release() has not fully run, enter lingering + * close. This will prevent the extra state from being wiped + * out until then. 
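+ * The final cleanup, i.e. releasing the extra state and freeing the
+ * minor, is then performed by xnpipe_cleanup_user_conn() when
+ * xnpipe_release() eventually runs and sees XNPIPE_KERN_LCLOSE.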
+ */ + if (state->status & XNPIPE_USER_CONN) + state->status |= XNPIPE_KERN_LCLOSE; + else { + xnlock_put_irqrestore(&nklock, s); + state->ops.release(state->xstate); + xnlock_get_irqsave(&nklock, s); + xnpipe_minor_free(minor); + } + + if (need_sched) + xnpipe_schedule_request(); + + xnlock_put_irqrestore(&nklock, s); + + return 0; +} +EXPORT_SYMBOL_GPL(xnpipe_disconnect); + +ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags) +{ + struct xnpipe_state *state; + int need_sched = 0; + spl_t s; + + if (minor < 0 || minor >= XNPIPE_NDEVS) + return -ENODEV; + + if (size <= sizeof(*mh)) + return -EINVAL; + + state = &xnpipe_states[minor]; + + xnlock_get_irqsave(&nklock, s); + + if ((state->status & XNPIPE_KERN_CONN) == 0) { + xnlock_put_irqrestore(&nklock, s); + return -EBADF; + } + + xnpipe_m_size(mh) = size - sizeof(*mh); + xnpipe_m_rdoff(mh) = 0; + state->ionrd += xnpipe_m_size(mh); + + if (flags & XNPIPE_URGENT) + list_add(&mh->link, &state->outq); + else + list_add_tail(&mh->link, &state->outq); + + state->nroutq++; + + if ((state->status & XNPIPE_USER_CONN) == 0) { + xnlock_put_irqrestore(&nklock, s); + return (ssize_t) size; + } + + if (state->status & XNPIPE_USER_WREAD) { + /* + * Wake up the regular Linux task waiting for input + * from the Xenomai side. + */ + state->status |= XNPIPE_USER_WREAD_READY; + need_sched = 1; + } + + if (state->asyncq) { /* Schedule asynch sig. */ + state->status |= XNPIPE_USER_SIGIO; + need_sched = 1; + } + + if (need_sched) + xnpipe_schedule_request(); + + xnlock_put_irqrestore(&nklock, s); + + return (ssize_t) size; +} +EXPORT_SYMBOL_GPL(xnpipe_send); + +ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size) +{ + struct xnpipe_state *state; + spl_t s; + + if (minor < 0 || minor >= XNPIPE_NDEVS) + return -ENODEV; + + if (size < 0) + return -EINVAL; + + state = &xnpipe_states[minor]; + + xnlock_get_irqsave(&nklock, s); + + if ((state->status & XNPIPE_KERN_CONN) == 0) { + xnlock_put_irqrestore(&nklock, s); + return -EBADF; + } + + xnpipe_m_size(mh) += size; + state->ionrd += size; + + xnlock_put_irqrestore(&nklock, s); + + return (ssize_t) size; +} +EXPORT_SYMBOL_GPL(xnpipe_mfixup); + +ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout) +{ + struct xnpipe_state *state; + struct xnpipe_mh *mh; + xntmode_t mode; + ssize_t ret; + int info; + spl_t s; + + if (minor < 0 || minor >= XNPIPE_NDEVS) + return -ENODEV; + + if (xnsched_interrupt_p()) + return -EPERM; + + state = &xnpipe_states[minor]; + + xnlock_get_irqsave(&nklock, s); + + if ((state->status & XNPIPE_KERN_CONN) == 0) { + ret = -EBADF; + goto unlock_and_exit; + } + + /* + * If we received a relative timespec, rescale it to an + * absolute time value based on the monotonic clock. 
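+ * XN_INFINITE and XN_NONBLOCK are special values which are passed
+ * through unchanged, in which case the sleep mode remains
+ * XN_RELATIVE.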
+ */ + mode = XN_RELATIVE; + if (timeout != XN_NONBLOCK && timeout != XN_INFINITE) { + mode = XN_ABSOLUTE; + timeout += xnclock_read_monotonic(&nkclock); + } + + for (;;) { + if (!list_empty(&state->inq)) + break; + + if (timeout == XN_NONBLOCK) { + ret = -EWOULDBLOCK; + goto unlock_and_exit; + } + + info = xnsynch_sleep_on(&state->synchbase, timeout, mode); + if (info & XNTIMEO) { + ret = -ETIMEDOUT; + goto unlock_and_exit; + } + if (info & XNBREAK) { + ret = -EINTR; + goto unlock_and_exit; + } + if (info & XNRMID) { + ret = -EIDRM; + goto unlock_and_exit; + } + } + + mh = list_get_entry(&state->inq, struct xnpipe_mh, link); + *pmh = mh; + state->nrinq--; + ret = (ssize_t)xnpipe_m_size(mh); + + if (state->status & XNPIPE_USER_WSYNC) { + state->status |= XNPIPE_USER_WSYNC_READY; + xnpipe_schedule_request(); + } + +unlock_and_exit: + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnpipe_recv); + +int xnpipe_flush(int minor, int mode) +{ + struct xnpipe_state *state; + int msgcount; + spl_t s; + + if (minor < 0 || minor >= XNPIPE_NDEVS) + return -ENODEV; + + state = &xnpipe_states[minor]; + + xnlock_get_irqsave(&nklock, s); + + if ((state->status & XNPIPE_KERN_CONN) == 0) { + xnlock_put_irqrestore(&nklock, s); + return -EBADF; + } + + msgcount = state->nroutq + state->nrinq; + + if (mode & XNPIPE_OFLUSH) + state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s); + + if (mode & XNPIPE_IFLUSH) + xnpipe_flushq(state, inq, free_ibuf, s); + + if ((state->status & XNPIPE_USER_WSYNC) && + msgcount > state->nroutq + state->nrinq) { + state->status |= XNPIPE_USER_WSYNC_READY; + xnpipe_schedule_request(); + } + + xnlock_put_irqrestore(&nklock, s); + + return 0; +} +EXPORT_SYMBOL_GPL(xnpipe_flush); + +int xnpipe_pollstate(int minor, unsigned int *mask_r) +{ + struct xnpipe_state *state; + int ret = 0; + spl_t s; + + if (minor < 0 || minor >= XNPIPE_NDEVS) + return -ENODEV; + + state = xnpipe_states + minor; + + xnlock_get_irqsave(&nklock, s); + + if (state->status & XNPIPE_KERN_CONN) { + *mask_r = POLLOUT; + if (!list_empty(&state->inq)) + *mask_r |= POLLIN; + } else + ret = -EIO; + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnpipe_pollstate); + +/* Must be entered with nklock held, interrupts off. */ +#define xnpipe_cleanup_user_conn(__state, __s) \ + do { \ + xnpipe_flushq((__state), outq, free_obuf, (__s)); \ + xnpipe_flushq((__state), inq, free_ibuf, (__s)); \ + (__state)->status &= ~XNPIPE_USER_CONN; \ + if ((__state)->status & XNPIPE_KERN_LCLOSE) { \ + (__state)->status &= ~XNPIPE_KERN_LCLOSE; \ + xnlock_put_irqrestore(&nklock, (__s)); \ + (__state)->ops.release((__state)->xstate); \ + xnlock_get_irqsave(&nklock, (__s)); \ + xnpipe_minor_free(xnminor_from_state(__state)); \ + } \ + } while(0) + +/* + * Open the pipe from user-space. + */ + +static int xnpipe_open(struct inode *inode, struct file *file) +{ + int minor, err = 0, sigpending; + struct xnpipe_state *state; + spl_t s; + + minor = MINOR(inode->i_rdev); + + if (minor >= XNPIPE_NDEVS) + return -ENXIO; /* TssTss... stop playing with mknod() ;o) */ + + state = &xnpipe_states[minor]; + + xnlock_get_irqsave(&nklock, s); + + /* Enforce exclusive open for the message queues. 
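+ * XNPIPE_USER_LCONN flags an open() which is still in progress, so
+ * that a concurrent open attempt fails with -EBUSY even before
+ * XNPIPE_USER_CONN is raised.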
*/ + if (state->status & (XNPIPE_USER_CONN | XNPIPE_USER_LCONN)) { + xnlock_put_irqrestore(&nklock, s); + return -EBUSY; + } + + state->status |= XNPIPE_USER_LCONN; + + xnlock_put_irqrestore(&nklock, s); + + file->private_data = state; + init_waitqueue_head(&state->readq); + init_waitqueue_head(&state->syncq); + + xnlock_get_irqsave(&nklock, s); + + state->status |= XNPIPE_USER_CONN; + state->status &= ~XNPIPE_USER_LCONN; + state->wcount = 0; + + state->status &= + ~(XNPIPE_USER_ALL_WAIT | XNPIPE_USER_ALL_READY | + XNPIPE_USER_SIGIO); + + if ((state->status & XNPIPE_KERN_CONN) == 0) { + if (file->f_flags & O_NONBLOCK) { + xnpipe_cleanup_user_conn(state, s); + xnlock_put_irqrestore(&nklock, s); + return -EWOULDBLOCK; + } + + sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s, + state->status & XNPIPE_KERN_CONN); + if (sigpending) { + xnpipe_cleanup_user_conn(state, s); + xnlock_put_irqrestore(&nklock, s); + return -ERESTARTSYS; + } + } + + if (err) + xnpipe_cleanup_user_conn(state, s); + + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +static int xnpipe_release(struct inode *inode, struct file *file) +{ + struct xnpipe_state *state = file->private_data; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + xnpipe_dequeue_all(state, XNPIPE_USER_WREAD); + xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC); + + if (state->status & XNPIPE_KERN_CONN) { + /* Unblock waiters. */ + if (xnsynch_pended_p(&state->synchbase)) { + xnsynch_flush(&state->synchbase, XNRMID); + xnsched_run(); + } + } + + if (state->ops.input) + state->ops.input(NULL, -EPIPE, state->xstate); + + if (state->asyncq) { /* Clear the async queue */ + list_del(&state->alink); + state->status &= ~XNPIPE_USER_SIGIO; + xnlock_put_irqrestore(&nklock, s); + fasync_helper(-1, file, 0, &state->asyncq); + xnlock_get_irqsave(&nklock, s); + } + + xnpipe_cleanup_user_conn(state, s); + /* + * The extra state may not be available from now on, if + * xnpipe_disconnect() entered lingering close before we got + * there; so calling xnpipe_cleanup_user_conn() should be the + * last thing we do. + */ + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static ssize_t xnpipe_read(struct file *file, + char *buf, size_t count, loff_t *ppos) +{ + struct xnpipe_state *state = file->private_data; + int sigpending, err = 0; + size_t nbytes, inbytes; + struct xnpipe_mh *mh; + ssize_t ret; + spl_t s; + + if (!access_wok(buf, count)) + return -EFAULT; + + xnlock_get_irqsave(&nklock, s); + + if ((state->status & XNPIPE_KERN_CONN) == 0) { + xnlock_put_irqrestore(&nklock, s); + return -EPIPE; + } + /* + * Queue probe and proc enqueuing must be seen atomically, + * including from the Xenomai side. + */ + if (list_empty(&state->outq)) { + if (file->f_flags & O_NONBLOCK) { + xnlock_put_irqrestore(&nklock, s); + return -EWOULDBLOCK; + } + + sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s, + !list_empty(&state->outq)); + + if (list_empty(&state->outq)) { + xnlock_put_irqrestore(&nklock, s); + return sigpending ? -ERESTARTSYS : 0; + } + } + + mh = list_get_entry(&state->outq, struct xnpipe_mh, link); + state->nroutq--; + + /* + * We allow more data to be appended to the current message + * bucket while its contents is being copied to the user + * buffer, therefore, we need to loop until: 1) all the data + * has been copied, 2) we consumed the user buffer space + * entirely. 
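+ * Any unread remainder is put back at the head of the output queue
+ * afterwards, so that the next read() resumes from the same message.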
+ */ + + inbytes = 0; + + for (;;) { + nbytes = xnpipe_m_size(mh) - xnpipe_m_rdoff(mh); + + if (nbytes + inbytes > count) + nbytes = count - inbytes; + + if (nbytes == 0) + break; + + xnlock_put_irqrestore(&nklock, s); + + /* More data could be appended while doing this: */ + err = __copy_to_user(buf + inbytes, + xnpipe_m_data(mh) + xnpipe_m_rdoff(mh), + nbytes); + + xnlock_get_irqsave(&nklock, s); + + if (err) { + err = -EFAULT; + break; + } + + inbytes += nbytes; + xnpipe_m_rdoff(mh) += nbytes; + } + + state->ionrd -= inbytes; + ret = inbytes; + + if (xnpipe_m_size(mh) > xnpipe_m_rdoff(mh)) { + list_add(&mh->link, &state->outq); + state->nroutq++; + } else { + /* + * We always want to fire the output handler because + * whatever the error state is for userland (e.g + * -EFAULT), we did pull a message from our output + * queue. + */ + if (state->ops.output) + state->ops.output(mh, state->xstate); + xnlock_put_irqrestore(&nklock, s); + state->ops.free_obuf(mh, state->xstate); + xnlock_get_irqsave(&nklock, s); + if (state->status & XNPIPE_USER_WSYNC) { + state->status |= XNPIPE_USER_WSYNC_READY; + xnpipe_schedule_request(); + } + } + + xnlock_put_irqrestore(&nklock, s); + + return err ? : ret; +} + +static ssize_t xnpipe_write(struct file *file, + const char *buf, size_t count, loff_t *ppos) +{ + struct xnpipe_state *state = file->private_data; + struct xnpipe_mh *mh; + int pollnum, ret; + spl_t s; + + if (count == 0) + return 0; + + if (!access_rok(buf, count)) + return -EFAULT; + + xnlock_get_irqsave(&nklock, s); + +retry: + if ((state->status & XNPIPE_KERN_CONN) == 0) { + xnlock_put_irqrestore(&nklock, s); + return -EPIPE; + } + + pollnum = state->nrinq + state->nroutq; + xnlock_put_irqrestore(&nklock, s); + + mh = state->ops.alloc_ibuf(count + sizeof(*mh), state->xstate); + if (mh == (struct xnpipe_mh *)-1) + return -ENOMEM; + + if (mh == NULL) { + if (file->f_flags & O_NONBLOCK) + return -EWOULDBLOCK; + + xnlock_get_irqsave(&nklock, s); + if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s, + pollnum > state->nrinq + state->nroutq)) { + xnlock_put_irqrestore(&nklock, s); + return -ERESTARTSYS; + } + goto retry; + } + + xnpipe_m_size(mh) = count; + xnpipe_m_rdoff(mh) = 0; + + if (copy_from_user(xnpipe_m_data(mh), buf, count)) { + state->ops.free_ibuf(mh, state->xstate); + return -EFAULT; + } + + xnlock_get_irqsave(&nklock, s); + + list_add_tail(&mh->link, &state->inq); + state->nrinq++; + + /* Wake up a Xenomai sleeper if any. 
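+ * A non-zero return from xnsynch_wakeup_one_sleeper() means that a
+ * blocked reader was readied, hence the call to xnsched_run() to
+ * commit the rescheduling.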
*/ + if (xnsynch_wakeup_one_sleeper(&state->synchbase)) + xnsched_run(); + + if (state->ops.input) { + ret = state->ops.input(mh, 0, state->xstate); + if (ret) + count = (size_t)ret; + } + + if (file->f_flags & O_SYNC) { + if (!list_empty(&state->inq)) { + if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s, + list_empty(&state->inq))) + count = -ERESTARTSYS; + } + } + + xnlock_put_irqrestore(&nklock, s); + + return (ssize_t)count; +} + +static long xnpipe_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct xnpipe_state *state = file->private_data; + int ret = 0; + ssize_t n; + spl_t s; + + switch (cmd) { + case XNPIPEIOC_GET_NRDEV: + + if (put_user(XNPIPE_NDEVS, (int *)arg)) + return -EFAULT; + + break; + + case XNPIPEIOC_OFLUSH: + + xnlock_get_irqsave(&nklock, s); + + if ((state->status & XNPIPE_KERN_CONN) == 0) { + xnlock_put_irqrestore(&nklock, s); + return -EPIPE; + } + + n = xnpipe_flushq(state, outq, free_obuf, s); + state->ionrd -= n; + goto kick_wsync; + + case XNPIPEIOC_IFLUSH: + + xnlock_get_irqsave(&nklock, s); + + if ((state->status & XNPIPE_KERN_CONN) == 0) { + xnlock_put_irqrestore(&nklock, s); + return -EPIPE; + } + + n = xnpipe_flushq(state, inq, free_ibuf, s); + + kick_wsync: + + if (n > 0 && (state->status & XNPIPE_USER_WSYNC)) { + state->status |= XNPIPE_USER_WSYNC_READY; + xnpipe_schedule_request(); + } + + xnlock_put_irqrestore(&nklock, s); + ret = n; + break; + + case XNPIPEIOC_SETSIG: + + if (arg < 1 || arg >= _NSIG) + return -EINVAL; + + xnpipe_asyncsig = arg; + break; + + case FIONREAD: + + n = (state->status & XNPIPE_KERN_CONN) ? state->ionrd : 0; + + if (put_user(n, (int *)arg)) + return -EFAULT; + + break; + + case TCGETS: + /* For isatty() probing. */ + return -ENOTTY; + + default: + + return -EINVAL; + } + + return ret; +} + +#ifdef CONFIG_COMPAT +/* + * Could be replaced with compat_ptr_ioctl if support for kernels < 5.4 is + * dropped. + */ +static long xnpipe_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return xnpipe_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); +} +#else +#define xnpipe_compat_ioctl NULL +#endif + +static int xnpipe_fasync(int fd, struct file *file, int on) +{ + struct xnpipe_state *state = file->private_data; + int ret, queued; + spl_t s; + + queued = (state->asyncq != NULL); + ret = fasync_helper(fd, file, on, &state->asyncq); + + if (state->asyncq) { + if (!queued) { + xnlock_get_irqsave(&nklock, s); + list_add_tail(&state->alink, &xnpipe_asyncq); + xnlock_put_irqrestore(&nklock, s); + } + } else if (queued) { + xnlock_get_irqsave(&nklock, s); + list_del(&state->alink); + xnlock_put_irqrestore(&nklock, s); + } + + return ret; +} + +static unsigned xnpipe_poll(struct file *file, poll_table *pt) +{ + struct xnpipe_state *state = file->private_data; + unsigned r_mask = 0, w_mask = 0; + spl_t s; + + poll_wait(file, &state->readq, pt); + + xnlock_get_irqsave(&nklock, s); + + if (state->status & XNPIPE_KERN_CONN) + w_mask |= (POLLOUT | POLLWRNORM); + else + r_mask |= POLLHUP; + + if (!list_empty(&state->outq)) + r_mask |= (POLLIN | POLLRDNORM); + else + /* + * Procs which have issued a timed out poll req will + * remain linked to the sleepers queue, and will be + * silently unlinked the next time the Xenomai side + * kicks xnpipe_wakeup_proc(). 
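+ * Such stale sleepers are also dropped by xnpipe_release() via
+ * xnpipe_dequeue_all() when the file goes away.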
+ */ + xnpipe_enqueue_wait(state, XNPIPE_USER_WREAD); + + xnlock_put_irqrestore(&nklock, s); + + return r_mask | w_mask; +} + +static struct file_operations xnpipe_fops = { + .read = xnpipe_read, + .write = xnpipe_write, + .poll = xnpipe_poll, + .unlocked_ioctl = xnpipe_ioctl, + .compat_ioctl = xnpipe_compat_ioctl, + .open = xnpipe_open, + .release = xnpipe_release, + .fasync = xnpipe_fasync +}; + +int xnpipe_mount(void) +{ + struct xnpipe_state *state; + struct device *cldev; + int i; + + for (state = &xnpipe_states[0]; + state < &xnpipe_states[XNPIPE_NDEVS]; state++) { + state->status = 0; + state->asyncq = NULL; + INIT_LIST_HEAD(&state->inq); + state->nrinq = 0; + INIT_LIST_HEAD(&state->outq); + state->nroutq = 0; + } + + xnpipe_class = class_create(THIS_MODULE, "rtpipe"); + if (IS_ERR(xnpipe_class)) { + printk(XENO_ERR "error creating rtpipe class, err=%ld\n", + PTR_ERR(xnpipe_class)); + return -EBUSY; + } + + for (i = 0; i < XNPIPE_NDEVS; i++) { + cldev = device_create(xnpipe_class, NULL, + MKDEV(XNPIPE_DEV_MAJOR, i), + NULL, "rtp%d", i); + if (IS_ERR(cldev)) { + printk(XENO_ERR + "can't add device class, major=%d, minor=%d, err=%ld\n", + XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev)); + class_destroy(xnpipe_class); + return -EBUSY; + } + } + + if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) { + printk(XENO_ERR + "unable to reserve major #%d for message pipes\n", + XNPIPE_DEV_MAJOR); + return -EBUSY; + } + + xnpipe_wakeup_virq = pipeline_create_inband_sirq(xnpipe_wakeup_proc); + if (xnpipe_wakeup_virq < 0) { + printk(XENO_ERR + "unable to reserve synthetic IRQ for message pipes\n"); + return xnpipe_wakeup_virq; + } + + return 0; +} + +void xnpipe_umount(void) +{ + int i; + + pipeline_delete_inband_sirq(xnpipe_wakeup_virq); + + unregister_chrdev(XNPIPE_DEV_MAJOR, "rtpipe"); + + for (i = 0; i < XNPIPE_NDEVS; i++) + device_destroy(xnpipe_class, MKDEV(XNPIPE_DEV_MAJOR, i)); + + class_destroy(xnpipe_class); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/COPYING b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/COPYING new file mode 100644 index 0000000..0d72637 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/COPYING @@ -0,0 +1,281 @@ + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. 
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) 
+ +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile new file mode 100644 index 0000000..5b4f321 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile @@ -0,0 +1,38 @@ + +ccflags-y += -I$(srctree)/kernel + +obj-$(CONFIG_XENOMAI) += xenomai.o + +xenomai-y := \ + clock.o \ + cond.o \ + corectl.o \ + event.o \ + io.o \ + memory.o \ + monitor.o \ + mqueue.o \ + mutex.o \ + nsem.o \ + process.o \ + sched.o \ + sem.o \ + signal.o \ + syscall.o \ + thread.o \ + timer.o \ + timerfd.o + +syscall_entries := $(srctree)/$(src)/gen-syscall-entries.sh + +quiet_cmd_syscall_entries = GEN $@ + cmd_syscall_entries = $(CONFIG_SHELL) '$(syscall_entries)' $(filter-out FORCE,$^) > $@ + +$(obj)/syscall_entries.h: $(syscall_entries) $(wildcard $(srctree)/$(src)/*.c) FORCE + $(call if_changed,syscall_entries) + +target += syscall_entries.h + +$(obj)/syscall.o: $(obj)/syscall_entries.h + +xenomai-$(CONFIG_XENO_ARCH_SYS3264) += compat.o syscall32.o diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c new file mode 100644 index 0000000..71d14db --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c @@ -0,0 +1,497 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/clocksource.h> +#include <linux/bitmap.h> +#include <cobalt/kernel/clock.h> +#include "internal.h" +#include "thread.h" +#include "clock.h" +#include <trace/events/cobalt-posix.h> +#include <cobalt/kernel/time.h> + +static struct xnclock *external_clocks[COBALT_MAX_EXTCLOCKS]; + +DECLARE_BITMAP(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS); + +#define do_ext_clock(__clock_id, __handler, __ret, __args...) 
\ +({ \ + struct xnclock *__clock; \ + int __val = 0, __nr; \ + spl_t __s; \ + \ + if (!__COBALT_CLOCK_EXT_P(__clock_id)) \ + __val = -EINVAL; \ + else { \ + __nr = __COBALT_CLOCK_EXT_INDEX(__clock_id); \ + xnlock_get_irqsave(&nklock, __s); \ + if (!test_bit(__nr, cobalt_clock_extids)) { \ + xnlock_put_irqrestore(&nklock, __s); \ + __val = -EINVAL; \ + } else { \ + __clock = external_clocks[__nr]; \ + (__ret) = xnclock_ ## __handler(__clock, ##__args); \ + xnlock_put_irqrestore(&nklock, __s); \ + } \ + } \ + __val; \ +}) + +int __cobalt_clock_getres(clockid_t clock_id, struct timespec64 *ts) +{ + xnticks_t ns; + int ret; + + switch (clock_id) { + case CLOCK_REALTIME: + case CLOCK_MONOTONIC: + case CLOCK_MONOTONIC_RAW: + ns2ts(ts, 1); + break; + default: + ret = do_ext_clock(clock_id, get_resolution, ns); + if (ret) + return ret; + ns2ts(ts, ns); + } + + trace_cobalt_clock_getres(clock_id, ts); + + return 0; +} + +COBALT_SYSCALL(clock_getres, current, + (clockid_t clock_id, struct __user_old_timespec __user *u_ts)) +{ + struct timespec64 ts; + int ret; + + ret = __cobalt_clock_getres(clock_id, &ts); + if (ret) + return ret; + + if (u_ts && cobalt_put_u_timespec(u_ts, &ts)) + return -EFAULT; + + trace_cobalt_clock_getres(clock_id, &ts); + + return 0; +} + +int __cobalt_clock_getres64(clockid_t clock_id, + struct __kernel_timespec __user *u_ts) +{ + struct timespec64 ts; + int ret; + + ret = __cobalt_clock_getres(clock_id, &ts); + if (ret) + return ret; + + if (cobalt_put_timespec64(&ts, u_ts)) + return -EFAULT; + + trace_cobalt_clock_getres(clock_id, &ts); + + return 0; +} + +COBALT_SYSCALL(clock_getres64, current, + (clockid_t clock_id, struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_clock_getres64(clock_id, u_ts); +} + +int __cobalt_clock_gettime(clockid_t clock_id, struct timespec64 *ts) +{ + xnticks_t ns; + int ret; + + switch (clock_id) { + case CLOCK_REALTIME: + ns2ts(ts, xnclock_read_realtime(&nkclock)); + break; + case CLOCK_MONOTONIC: + case CLOCK_MONOTONIC_RAW: + ns2ts(ts, xnclock_read_monotonic(&nkclock)); + break; + case CLOCK_HOST_REALTIME: + if (pipeline_get_host_time(ts) != 0) + return -EINVAL; + break; + default: + ret = do_ext_clock(clock_id, read_monotonic, ns); + if (ret) + return ret; + ns2ts(ts, ns); + } + + trace_cobalt_clock_gettime(clock_id, ts); + + return 0; +} + +COBALT_SYSCALL(clock_gettime, current, + (clockid_t clock_id, struct __user_old_timespec __user *u_ts)) +{ + struct timespec64 ts; + int ret; + + ret = __cobalt_clock_gettime(clock_id, &ts); + if (ret) + return ret; + + if (cobalt_put_u_timespec(u_ts, &ts)) + return -EFAULT; + + return 0; +} + +int __cobalt_clock_gettime64(clockid_t clock_id, + struct __kernel_timespec __user *u_ts) +{ + struct timespec64 ts; + int ret; + + ret = __cobalt_clock_gettime(clock_id, &ts); + if (ret) + return ret; + + if (cobalt_put_timespec64(&ts, u_ts)) + return -EFAULT; + + return 0; +} + +COBALT_SYSCALL(clock_gettime64, current, + (clockid_t clock_id, struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_clock_gettime64(clock_id, u_ts); +} + +int __cobalt_clock_settime(clockid_t clock_id, const struct timespec64 *ts) +{ + int _ret, ret = 0; + + if ((unsigned long)ts->tv_nsec >= ONE_BILLION) + return -EINVAL; + + switch (clock_id) { + case CLOCK_REALTIME: + ret = pipeline_set_wallclock(ts2ns(ts)); + break; + default: + _ret = do_ext_clock(clock_id, set_time, ret, ts); + if (_ret || ret) + return _ret ?: ret; + } + + trace_cobalt_clock_settime(clock_id, ts); + + return ret; +} + +int 
__cobalt_clock_adjtime(clockid_t clock_id, struct __kernel_timex *tx) +{ + int _ret, ret = 0; + + switch (clock_id) { + case CLOCK_REALTIME: + case CLOCK_MONOTONIC: + case CLOCK_MONOTONIC_RAW: + case CLOCK_HOST_REALTIME: + return -EOPNOTSUPP; + default: + _ret = do_ext_clock(clock_id, adjust_time, ret, tx); + if (_ret || ret) + return _ret ?: ret; + } + + trace_cobalt_clock_adjtime(clock_id, tx); + + return 0; +} + +COBALT_SYSCALL(clock_settime, current, + (clockid_t clock_id, const struct __user_old_timespec __user *u_ts)) +{ + struct timespec64 ts; + + if (cobalt_get_u_timespec(&ts, u_ts)) + return -EFAULT; + + return __cobalt_clock_settime(clock_id, &ts); +} + +int __cobalt_clock_settime64(clockid_t clock_id, + const struct __kernel_timespec __user *u_ts) +{ + struct timespec64 ts64; + + if (cobalt_get_timespec64(&ts64, u_ts)) + return -EFAULT; + + return __cobalt_clock_settime(clock_id, &ts64); +} + +COBALT_SYSCALL(clock_settime64, current, + (clockid_t clock_id, const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_clock_settime64(clock_id, u_ts); +} + +COBALT_SYSCALL(clock_adjtime, current, + (clockid_t clock_id, struct __user_old_timex __user *u_tx)) +{ + struct __kernel_timex tx; + int ret; + + if (cobalt_copy_from_user(&tx, u_tx, sizeof(tx))) + return -EFAULT; + + ret = __cobalt_clock_adjtime(clock_id, &tx); + if (ret) + return ret; + + return cobalt_copy_to_user(u_tx, &tx, sizeof(tx)); +} + +int __cobalt_clock_adjtime64(clockid_t clock_id, + struct __kernel_timex __user *u_tx) +{ + struct __kernel_timex tx; + int ret; + + if (cobalt_copy_from_user(&tx, u_tx, sizeof(tx))) + return -EFAULT; + + ret = __cobalt_clock_adjtime(clock_id, &tx); + if (ret) + return ret; + + return cobalt_copy_to_user(u_tx, &tx, sizeof(tx)); +} + +COBALT_SYSCALL(clock_adjtime64, current, + (clockid_t clock_id, struct __kernel_timex __user *u_tx)) +{ + return __cobalt_clock_adjtime64(clock_id, u_tx); +} + +int __cobalt_clock_nanosleep(clockid_t clock_id, int flags, + const struct timespec64 *rqt, + struct timespec64 *rmt) +{ + struct restart_block *restart; + struct xnthread *cur; + xnsticks_t timeout, rem; + spl_t s; + + trace_cobalt_clock_nanosleep(clock_id, flags, rqt); + + if (clock_id != CLOCK_MONOTONIC && + clock_id != CLOCK_MONOTONIC_RAW && + clock_id != CLOCK_REALTIME) + return -EOPNOTSUPP; + + if (rqt->tv_sec < 0) + return -EINVAL; + + if ((unsigned long)rqt->tv_nsec >= ONE_BILLION) + return -EINVAL; + + if (flags & ~TIMER_ABSTIME) + return -EINVAL; + + cur = xnthread_current(); + + if (xnthread_test_localinfo(cur, XNSYSRST)) { + xnthread_clear_localinfo(cur, XNSYSRST); + + restart = cobalt_get_restart_block(current); + + if (restart->fn != cobalt_restart_syscall_placeholder) { + if (rmt) { + xnlock_get_irqsave(&nklock, s); + rem = xntimer_get_timeout_stopped(&cur->rtimer); + xnlock_put_irqrestore(&nklock, s); + ns2ts(rmt, rem > 1 ? rem : 0); + } + return -EINTR; + } + + timeout = restart->nanosleep.expires; + } else + timeout = ts2ns(rqt); + + xnlock_get_irqsave(&nklock, s); + + xnthread_suspend(cur, XNDELAY, timeout + 1, + clock_flag(flags, clock_id), NULL); + + if (xnthread_test_info(cur, XNBREAK)) { + if (signal_pending(current)) { + restart = cobalt_get_restart_block(current); + restart->nanosleep.expires = + (flags & TIMER_ABSTIME) ? 
timeout : + xntimer_get_timeout_stopped(&cur->rtimer); + xnlock_put_irqrestore(&nklock, s); + restart->fn = cobalt_restart_syscall_placeholder; + + xnthread_set_localinfo(cur, XNSYSRST); + + return -ERESTARTSYS; + } + + if (flags == 0 && rmt) { + rem = xntimer_get_timeout_stopped(&cur->rtimer); + xnlock_put_irqrestore(&nklock, s); + ns2ts(rmt, rem > 1 ? rem : 0); + } else + xnlock_put_irqrestore(&nklock, s); + + return -EINTR; + } + + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +COBALT_SYSCALL(clock_nanosleep, primary, + (clockid_t clock_id, int flags, + const struct __user_old_timespec __user *u_rqt, + struct __user_old_timespec __user *u_rmt)) +{ + struct timespec64 rqt, rmt, *rmtp = NULL; + int ret; + + if (u_rmt) + rmtp = &rmt; + + if (cobalt_get_u_timespec(&rqt, u_rqt)) + return -EFAULT; + + ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp); + if (ret == -EINTR && flags == 0 && rmtp) { + if (cobalt_put_u_timespec(u_rmt, rmtp)) + return -EFAULT; + } + + return ret; +} + +int __cobalt_clock_nanosleep64(clockid_t clock_id, int flags, + const struct __kernel_timespec __user *u_rqt, + struct __kernel_timespec __user *u_rmt) +{ + struct timespec64 rqt, rmt, *rmtp = NULL; + int ret; + + if (u_rmt) + rmtp = &rmt; + + if (cobalt_get_timespec64(&rqt, u_rqt)) + return -EFAULT; + + ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp); + if (ret == -EINTR && flags == 0 && rmtp) { + if (cobalt_put_timespec64(rmtp, u_rmt)) + return -EFAULT; + } + + return ret; +} + +COBALT_SYSCALL(clock_nanosleep64, primary, + (clockid_t clock_id, int flags, + const struct __kernel_timespec __user *u_rqt, + struct __kernel_timespec __user *u_rmt)) +{ + return __cobalt_clock_nanosleep64(clock_id, flags, u_rqt, u_rmt); +} + +int cobalt_clock_register(struct xnclock *clock, const cpumask_t *affinity, + clockid_t *clk_id) +{ + int ret, nr; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + nr = find_first_zero_bit(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS); + if (nr >= COBALT_MAX_EXTCLOCKS) { + xnlock_put_irqrestore(&nklock, s); + return -EAGAIN; + } + + /* + * CAUTION: a bit raised in cobalt_clock_extids means that the + * corresponding entry in external_clocks[] is valid. The + * converse assumption is NOT true. 
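+	 * A cleared bit may leave a stale pointer behind in
+	 * external_clocks[] (see cobalt_clock_deregister()), so
+	 * readers must always test the bit before dereferencing
+	 * the array slot.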
+ */ + __set_bit(nr, cobalt_clock_extids); + external_clocks[nr] = clock; + + xnlock_put_irqrestore(&nklock, s); + + ret = xnclock_register(clock, affinity); + if (ret) + return ret; + + clock->id = nr; + *clk_id = __COBALT_CLOCK_EXT(clock->id); + + trace_cobalt_clock_register(clock->name, *clk_id); + + return 0; +} +EXPORT_SYMBOL_GPL(cobalt_clock_register); + +void cobalt_clock_deregister(struct xnclock *clock) +{ + trace_cobalt_clock_deregister(clock->name, clock->id); + clear_bit(clock->id, cobalt_clock_extids); + smp_mb__after_atomic(); + external_clocks[clock->id] = NULL; + xnclock_deregister(clock); +} +EXPORT_SYMBOL_GPL(cobalt_clock_deregister); + +struct xnclock *cobalt_clock_find(clockid_t clock_id) +{ + struct xnclock *clock = ERR_PTR(-EINVAL); + spl_t s; + int nr; + + if (clock_id == CLOCK_MONOTONIC || + clock_id == CLOCK_MONOTONIC_RAW || + clock_id == CLOCK_REALTIME) + return &nkclock; + + if (__COBALT_CLOCK_EXT_P(clock_id)) { + nr = __COBALT_CLOCK_EXT_INDEX(clock_id); + xnlock_get_irqsave(&nklock, s); + if (test_bit(nr, cobalt_clock_extids)) + clock = external_clocks[nr]; + xnlock_put_irqrestore(&nklock, s); + } + + return clock; +} +EXPORT_SYMBOL_GPL(cobalt_clock_find); diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h new file mode 100644 index 0000000..e183739 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h @@ -0,0 +1,174 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_POSIX_CLOCK_H +#define _COBALT_POSIX_CLOCK_H + +#include <linux/types.h> +#include <linux/time.h> +#include <linux/cpumask.h> +#include <cobalt/uapi/time.h> +#include <xenomai/posix/syscall.h> + +#define ONE_BILLION 1000000000 + +struct xnclock; + +static inline void ns2ts(struct timespec64 *ts, xnticks_t nsecs) +{ + ts->tv_sec = xnclock_divrem_billion(nsecs, &ts->tv_nsec); +} + +static inline void u_ns2ts(struct __user_old_timespec *ts, xnticks_t nsecs) +{ + ts->tv_sec = xnclock_divrem_billion(nsecs, &ts->tv_nsec); +} + +static inline xnticks_t ts2ns(const struct timespec64 *ts) +{ + xnticks_t nsecs = ts->tv_nsec; + + if (ts->tv_sec) + nsecs += (xnticks_t)ts->tv_sec * ONE_BILLION; + + return nsecs; +} + +static inline xnticks_t u_ts2ns(const struct __user_old_timespec *ts) +{ + xnticks_t nsecs = ts->tv_nsec; + + if (ts->tv_sec) + nsecs += (xnticks_t)ts->tv_sec * ONE_BILLION; + + return nsecs; +} + +static inline xnticks_t tv2ns(const struct __kernel_old_timeval *tv) +{ + xnticks_t nsecs = tv->tv_usec * 1000; + + if (tv->tv_sec) + nsecs += (xnticks_t)tv->tv_sec * ONE_BILLION; + + return nsecs; +} + +static inline void ticks2tv(struct __kernel_old_timeval *tv, xnticks_t ticks) +{ + unsigned long nsecs; + + tv->tv_sec = xnclock_divrem_billion(ticks, &nsecs); + tv->tv_usec = nsecs / 1000; +} + +static inline xnticks_t clock_get_ticks(clockid_t clock_id) +{ + return clock_id == CLOCK_REALTIME ? + xnclock_read_realtime(&nkclock) : + xnclock_read_monotonic(&nkclock); +} + +static inline int clock_flag(int flag, clockid_t clock_id) +{ + if ((flag & TIMER_ABSTIME) == 0) + return XN_RELATIVE; + + if (clock_id == CLOCK_REALTIME) + return XN_REALTIME; + + return XN_ABSOLUTE; +} + +int __cobalt_clock_getres(clockid_t clock_id, + struct timespec64 *ts); + +int __cobalt_clock_getres64(clockid_t clock_id, + struct __kernel_timespec __user *u_ts); + +int __cobalt_clock_gettime(clockid_t clock_id, + struct timespec64 *ts); + +int __cobalt_clock_gettime64(clockid_t clock_id, + struct __kernel_timespec __user *u_ts); + +int __cobalt_clock_settime(clockid_t clock_id, + const struct timespec64 *ts); + +int __cobalt_clock_settime64(clockid_t clock_id, + const struct __kernel_timespec __user *u_ts); + +int __cobalt_clock_adjtime(clockid_t clock_id, + struct __kernel_timex *tx); + +int __cobalt_clock_adjtime64(clockid_t clock_id, + struct __kernel_timex __user *u_tx); + +int __cobalt_clock_nanosleep(clockid_t clock_id, int flags, + const struct timespec64 *rqt, + struct timespec64 *rmt); + +int __cobalt_clock_nanosleep64(clockid_t clock_id, int flags, + const struct __kernel_timespec __user *u_rqt, + struct __kernel_timespec __user *u_rmt); + +COBALT_SYSCALL_DECL(clock_getres, + (clockid_t clock_id, struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(clock_getres64, + (clockid_t clock_id, struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(clock_gettime, + (clockid_t clock_id, struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(clock_gettime64, + (clockid_t clock_id, struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(clock_settime, + (clockid_t clock_id, const struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(clock_settime64, + (clockid_t clock_id, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(clock_adjtime, + (clockid_t clock_id, struct __user_old_timex __user *u_tx)); + +COBALT_SYSCALL_DECL(clock_adjtime64, + (clockid_t clock_id, struct __kernel_timex __user *u_tx)); + 
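+/*
+ * Usage sketch (illustrative only, not part of the original code):
+ * the conversion helpers earlier in this header map both ways
+ * between timespec64 values and plain nanosecond counts, e.g.
+ *
+ *	struct timespec64 ts = { .tv_sec = 1, .tv_nsec = 500000000 };
+ *	xnticks_t ns = ts2ns(&ts);	yields 1500000000
+ *	ns2ts(&ts, ns);			back to { 1, 500000000 }
+ *
+ * clock_flag() then selects the timer mode for such a value:
+ * XN_RELATIVE unless TIMER_ABSTIME is set, XN_REALTIME for an
+ * absolute CLOCK_REALTIME date, XN_ABSOLUTE for the other clocks.
+ */
+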
+COBALT_SYSCALL_DECL(clock_nanosleep, + (clockid_t clock_id, int flags, + const struct __user_old_timespec __user *u_rqt, + struct __user_old_timespec __user *u_rmt)); + +COBALT_SYSCALL_DECL(clock_nanosleep64, + (clockid_t clock_id, int flags, + const struct __kernel_timespec __user *u_rqt, + struct __kernel_timespec __user *u_rmt)); + +int cobalt_clock_register(struct xnclock *clock, + const cpumask_t *affinity, + clockid_t *clk_id); + +void cobalt_clock_deregister(struct xnclock *clock); + +struct xnclock *cobalt_clock_find(clockid_t clock_id); + +extern DECLARE_BITMAP(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS); + +#endif /* !_COBALT_POSIX_CLOCK_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c new file mode 100644 index 0000000..2ec4608 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c @@ -0,0 +1,544 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/err.h> +#include <linux/memory.h> +#include <linux/module.h> +#include <cobalt/kernel/compat.h> +#include <asm/xenomai/syscall.h> +#include <xenomai/posix/mqueue.h> + +int sys32_get_timespec(struct timespec64 *ts, + const struct old_timespec32 __user *u_cts) +{ + struct old_timespec32 cts; + + if (u_cts == NULL || !access_rok(u_cts, sizeof(*u_cts))) + return -EFAULT; + + if (__xn_get_user(cts.tv_sec, &u_cts->tv_sec) || + __xn_get_user(cts.tv_nsec, &u_cts->tv_nsec)) + return -EFAULT; + + ts->tv_sec = cts.tv_sec; + ts->tv_nsec = cts.tv_nsec; + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_get_timespec); + +int sys32_put_timespec(struct old_timespec32 __user *u_cts, + const struct timespec64 *ts) +{ + struct old_timespec32 cts; + + if (u_cts == NULL || !access_wok(u_cts, sizeof(*u_cts))) + return -EFAULT; + + cts.tv_sec = ts->tv_sec; + cts.tv_nsec = ts->tv_nsec; + + if (__xn_put_user(cts.tv_sec, &u_cts->tv_sec) || + __xn_put_user(cts.tv_nsec, &u_cts->tv_nsec)) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_put_timespec); + +int sys32_get_itimerspec(struct itimerspec64 *its, + const struct old_itimerspec32 __user *cits) +{ + int ret = sys32_get_timespec(&its->it_value, &cits->it_value); + + return ret ?: sys32_get_timespec(&its->it_interval, &cits->it_interval); +} +EXPORT_SYMBOL_GPL(sys32_get_itimerspec); + +int sys32_put_itimerspec(struct old_itimerspec32 __user *cits, + const struct itimerspec64 *its) +{ + int ret = sys32_put_timespec(&cits->it_value, &its->it_value); + + return ret ?: sys32_put_timespec(&cits->it_interval, &its->it_interval); +} +EXPORT_SYMBOL_GPL(sys32_put_itimerspec); + +int sys32_get_timeval(struct __kernel_old_timeval *tv, + const struct old_timeval32 __user *ctv) +{ + return (ctv == NULL || + !access_rok(ctv, sizeof(*ctv)) || + __xn_get_user(tv->tv_sec, &ctv->tv_sec) || + 
__xn_get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; +} +EXPORT_SYMBOL_GPL(sys32_get_timeval); + +int sys32_put_timeval(struct old_timeval32 __user *ctv, + const struct __kernel_old_timeval *tv) +{ + return (ctv == NULL || + !access_wok(ctv, sizeof(*ctv)) || + __xn_put_user(tv->tv_sec, &ctv->tv_sec) || + __xn_put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0; +} +EXPORT_SYMBOL_GPL(sys32_put_timeval); + +int sys32_get_timex(struct __kernel_timex *tx, + const struct old_timex32 __user *ctx) +{ + struct __kernel_old_timeval time; + int ret; + + memset(tx, 0, sizeof(*tx)); + + ret = sys32_get_timeval(&time, &ctx->time); + if (ret) + return ret; + + tx->time.tv_sec = time.tv_sec; + tx->time.tv_usec = time.tv_usec; + + if (!access_rok(ctx, sizeof(*ctx)) || + __xn_get_user(tx->modes, &ctx->modes) || + __xn_get_user(tx->offset, &ctx->offset) || + __xn_get_user(tx->freq, &ctx->freq) || + __xn_get_user(tx->maxerror, &ctx->maxerror) || + __xn_get_user(tx->esterror, &ctx->esterror) || + __xn_get_user(tx->status, &ctx->status) || + __xn_get_user(tx->constant, &ctx->constant) || + __xn_get_user(tx->precision, &ctx->precision) || + __xn_get_user(tx->tolerance, &ctx->tolerance) || + __xn_get_user(tx->tick, &ctx->tick) || + __xn_get_user(tx->ppsfreq, &ctx->ppsfreq) || + __xn_get_user(tx->jitter, &ctx->jitter) || + __xn_get_user(tx->shift, &ctx->shift) || + __xn_get_user(tx->stabil, &ctx->stabil) || + __xn_get_user(tx->jitcnt, &ctx->jitcnt) || + __xn_get_user(tx->calcnt, &ctx->calcnt) || + __xn_get_user(tx->errcnt, &ctx->errcnt) || + __xn_get_user(tx->stbcnt, &ctx->stbcnt)) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_get_timex); + +int sys32_put_timex(struct old_timex32 __user *ctx, + const struct __kernel_timex *tx) +{ + struct __kernel_old_timeval time; + int ret; + + time.tv_sec = tx->time.tv_sec; + time.tv_usec = tx->time.tv_usec; + + ret = sys32_put_timeval(&ctx->time, &time); + if (ret) + return ret; + + if (!access_wok(ctx, sizeof(*ctx)) || + __xn_put_user(tx->modes, &ctx->modes) || + __xn_put_user(tx->offset, &ctx->offset) || + __xn_put_user(tx->freq, &ctx->freq) || + __xn_put_user(tx->maxerror, &ctx->maxerror) || + __xn_put_user(tx->esterror, &ctx->esterror) || + __xn_put_user(tx->status, &ctx->status) || + __xn_put_user(tx->constant, &ctx->constant) || + __xn_put_user(tx->precision, &ctx->precision) || + __xn_put_user(tx->tolerance, &ctx->tolerance) || + __xn_put_user(tx->tick, &ctx->tick) || + __xn_put_user(tx->ppsfreq, &ctx->ppsfreq) || + __xn_put_user(tx->jitter, &ctx->jitter) || + __xn_put_user(tx->shift, &ctx->shift) || + __xn_put_user(tx->stabil, &ctx->stabil) || + __xn_put_user(tx->jitcnt, &ctx->jitcnt) || + __xn_put_user(tx->calcnt, &ctx->calcnt) || + __xn_put_user(tx->errcnt, &ctx->errcnt) || + __xn_put_user(tx->stbcnt, &ctx->stbcnt)) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_put_timex); + +int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds, + size_t cfdsize) +{ + int rdpos, wrpos, rdlim = cfdsize / sizeof(compat_ulong_t); + + if (cfds == NULL || !access_rok(cfds, cfdsize)) + return -EFAULT; + + for (rdpos = 0, wrpos = 0; rdpos < rdlim; rdpos++, wrpos++) + if (__xn_get_user(fds->fds_bits[wrpos], cfds->fds_bits + rdpos)) + return -EFAULT; + + return 0; +} + +int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds, + size_t fdsize) +{ + int rdpos, wrpos, wrlim = fdsize / sizeof(long); + + if (cfds == NULL || !access_wok(cfds, wrlim * sizeof(compat_ulong_t))) + return -EFAULT; + + for (rdpos = 0, wrpos = 0; wrpos < wrlim; 
rdpos++, wrpos++) + if (__xn_put_user(fds->fds_bits[rdpos], cfds->fds_bits + wrpos)) + return -EFAULT; + + return 0; +} + +int sys32_get_param_ex(int policy, + struct sched_param_ex *p, + const struct compat_sched_param_ex __user *u_cp) +{ + struct compat_sched_param_ex cpex; + + if (u_cp == NULL || cobalt_copy_from_user(&cpex, u_cp, sizeof(cpex))) + return -EFAULT; + + p->sched_priority = cpex.sched_priority; + + switch (policy) { + case SCHED_SPORADIC: + p->sched_ss_low_priority = cpex.sched_ss_low_priority; + p->sched_ss_max_repl = cpex.sched_ss_max_repl; + p->sched_ss_repl_period.tv_sec = cpex.sched_ss_repl_period.tv_sec; + p->sched_ss_repl_period.tv_nsec = cpex.sched_ss_repl_period.tv_nsec; + p->sched_ss_init_budget.tv_sec = cpex.sched_ss_init_budget.tv_sec; + p->sched_ss_init_budget.tv_nsec = cpex.sched_ss_init_budget.tv_nsec; + break; + case SCHED_RR: + p->sched_rr_quantum.tv_sec = cpex.sched_rr_quantum.tv_sec; + p->sched_rr_quantum.tv_nsec = cpex.sched_rr_quantum.tv_nsec; + break; + case SCHED_TP: + p->sched_tp_partition = cpex.sched_tp_partition; + break; + case SCHED_QUOTA: + p->sched_quota_group = cpex.sched_quota_group; + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_get_param_ex); + +int sys32_put_param_ex(int policy, + struct compat_sched_param_ex __user *u_cp, + const struct sched_param_ex *p) +{ + struct compat_sched_param_ex cpex; + + if (u_cp == NULL) + return -EFAULT; + + cpex.sched_priority = p->sched_priority; + + switch (policy) { + case SCHED_SPORADIC: + cpex.sched_ss_low_priority = p->sched_ss_low_priority; + cpex.sched_ss_max_repl = p->sched_ss_max_repl; + cpex.sched_ss_repl_period.tv_sec = p->sched_ss_repl_period.tv_sec; + cpex.sched_ss_repl_period.tv_nsec = p->sched_ss_repl_period.tv_nsec; + cpex.sched_ss_init_budget.tv_sec = p->sched_ss_init_budget.tv_sec; + cpex.sched_ss_init_budget.tv_nsec = p->sched_ss_init_budget.tv_nsec; + break; + case SCHED_RR: + cpex.sched_rr_quantum.tv_sec = p->sched_rr_quantum.tv_sec; + cpex.sched_rr_quantum.tv_nsec = p->sched_rr_quantum.tv_nsec; + break; + case SCHED_TP: + cpex.sched_tp_partition = p->sched_tp_partition; + break; + case SCHED_QUOTA: + cpex.sched_quota_group = p->sched_quota_group; + break; + } + + return cobalt_copy_to_user(u_cp, &cpex, sizeof(cpex)); +} +EXPORT_SYMBOL_GPL(sys32_put_param_ex); + +int sys32_get_mqattr(struct mq_attr *ap, + const struct compat_mq_attr __user *u_cap) +{ + struct compat_mq_attr cattr; + + if (u_cap == NULL || + cobalt_copy_from_user(&cattr, u_cap, sizeof(cattr))) + return -EFAULT; + + ap->mq_flags = cattr.mq_flags; + ap->mq_maxmsg = cattr.mq_maxmsg; + ap->mq_msgsize = cattr.mq_msgsize; + ap->mq_curmsgs = cattr.mq_curmsgs; + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_get_mqattr); + +int sys32_put_mqattr(struct compat_mq_attr __user *u_cap, + const struct mq_attr *ap) +{ + struct compat_mq_attr cattr; + + cattr.mq_flags = ap->mq_flags; + cattr.mq_maxmsg = ap->mq_maxmsg; + cattr.mq_msgsize = ap->mq_msgsize; + cattr.mq_curmsgs = ap->mq_curmsgs; + + return u_cap == NULL ? 
-EFAULT : + cobalt_copy_to_user(u_cap, &cattr, sizeof(cattr)); +} +EXPORT_SYMBOL_GPL(sys32_put_mqattr); + +int sys32_get_sigevent(struct sigevent *ev, + const struct compat_sigevent *__user u_cev) +{ + struct compat_sigevent cev; + compat_int_t *cp; + int ret, *p; + + if (u_cev == NULL) + return -EFAULT; + + ret = cobalt_copy_from_user(&cev, u_cev, sizeof(cev)); + if (ret) + return ret; + + memset(ev, 0, sizeof(*ev)); + ev->sigev_value.sival_ptr = compat_ptr(cev.sigev_value.sival_ptr); + ev->sigev_signo = cev.sigev_signo; + ev->sigev_notify = cev.sigev_notify; + /* + * Extensions may define extra fields we don't know about in + * the padding area, so we have to load it entirely. + */ + p = ev->_sigev_un._pad; + cp = cev._sigev_un._pad; + while (p < &ev->_sigev_un._pad[ARRAY_SIZE(ev->_sigev_un._pad)] && + cp < &cev._sigev_un._pad[ARRAY_SIZE(cev._sigev_un._pad)]) + *p++ = *cp++; + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_get_sigevent); + +int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset) +{ +#ifdef __BIG_ENDIAN + compat_sigset_t v; + + if (cobalt_copy_from_user(&v, u_cset, sizeof(compat_sigset_t))) + return -EFAULT; + switch (_NSIG_WORDS) { + case 4: set->sig[3] = v.sig[6] | (((long)v.sig[7]) << 32 ); + case 3: set->sig[2] = v.sig[4] | (((long)v.sig[5]) << 32 ); + case 2: set->sig[1] = v.sig[2] | (((long)v.sig[3]) << 32 ); + case 1: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32 ); + } +#else + if (cobalt_copy_from_user(set, u_cset, sizeof(compat_sigset_t))) + return -EFAULT; +#endif + return 0; +} +EXPORT_SYMBOL_GPL(sys32_get_sigset); + +int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set) +{ +#ifdef __BIG_ENDIAN + compat_sigset_t v; + switch (_NSIG_WORDS) { + case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; + case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; + case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; + case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; + } + return cobalt_copy_to_user(u_cset, &v, sizeof(*u_cset)) ? -EFAULT : 0; +#else + return cobalt_copy_to_user(u_cset, set, sizeof(*u_cset)) ? -EFAULT : 0; +#endif +} +EXPORT_SYMBOL_GPL(sys32_put_sigset); + +int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval) +{ + union compat_sigval cval; + int ret; + + if (u_cval == NULL) + return -EFAULT; + + ret = cobalt_copy_from_user(&cval, u_cval, sizeof(cval)); + if (ret) + return ret; + + val->sival_ptr = compat_ptr(cval.sival_ptr); + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_get_sigval); + +int sys32_put_siginfo(void __user *u_si, const struct siginfo *si, + int overrun) +{ + struct compat_siginfo __user *u_p = u_si; + int ret; + + if (u_p == NULL) + return -EFAULT; + + ret = __xn_put_user(si->si_signo, &u_p->si_signo); + ret |= __xn_put_user(si->si_errno, &u_p->si_errno); + ret |= __xn_put_user(si->si_code, &u_p->si_code); + + /* + * Copy the generic/standard siginfo bits to userland. 
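+	 * What follows depends on si_code: SI_TIMER adds the timer
+	 * id, sigval pointer and overrun count; SI_QUEUE/SI_MESGQ
+	 * add the queued sigval, then fall through to the pid/uid
+	 * pair also copied for SI_USER.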
+ */ + switch (si->si_code) { + case SI_TIMER: + ret |= __xn_put_user(si->si_tid, &u_p->si_tid); + ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr); + ret |= __xn_put_user(overrun, &u_p->si_overrun); + break; + case SI_QUEUE: + case SI_MESGQ: + ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr); + fallthrough; + case SI_USER: + ret |= __xn_put_user(si->si_pid, &u_p->si_pid); + ret |= __xn_put_user(si->si_uid, &u_p->si_uid); + } + + return ret; +} +EXPORT_SYMBOL_GPL(sys32_put_siginfo); + +int sys32_get_msghdr(struct user_msghdr *msg, + const struct compat_msghdr __user *u_cmsg) +{ + compat_uptr_t tmp1, tmp2, tmp3; + + if (u_cmsg == NULL || + !access_rok(u_cmsg, sizeof(*u_cmsg)) || + __xn_get_user(tmp1, &u_cmsg->msg_name) || + __xn_get_user(msg->msg_namelen, &u_cmsg->msg_namelen) || + __xn_get_user(tmp2, &u_cmsg->msg_iov) || + __xn_get_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) || + __xn_get_user(tmp3, &u_cmsg->msg_control) || + __xn_get_user(msg->msg_controllen, &u_cmsg->msg_controllen) || + __xn_get_user(msg->msg_flags, &u_cmsg->msg_flags)) + return -EFAULT; + + if (msg->msg_namelen > sizeof(struct sockaddr_storage)) + msg->msg_namelen = sizeof(struct sockaddr_storage); + + msg->msg_name = compat_ptr(tmp1); + msg->msg_iov = compat_ptr(tmp2); + msg->msg_control = compat_ptr(tmp3); + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_get_msghdr); + +int sys32_get_mmsghdr(struct mmsghdr *mmsg, + const struct compat_mmsghdr __user *u_cmmsg) +{ + if (u_cmmsg == NULL || + !access_rok(u_cmmsg, sizeof(*u_cmmsg)) || + __xn_get_user(mmsg->msg_len, &u_cmmsg->msg_len)) + return -EFAULT; + + return sys32_get_msghdr(&mmsg->msg_hdr, &u_cmmsg->msg_hdr); +} +EXPORT_SYMBOL_GPL(sys32_get_mmsghdr); + +int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg, + const struct user_msghdr *msg) +{ + if (u_cmsg == NULL || + !access_wok(u_cmsg, sizeof(*u_cmsg)) || + __xn_put_user(ptr_to_compat(msg->msg_name), &u_cmsg->msg_name) || + __xn_put_user(msg->msg_namelen, &u_cmsg->msg_namelen) || + __xn_put_user(ptr_to_compat(msg->msg_iov), &u_cmsg->msg_iov) || + __xn_put_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) || + __xn_put_user(ptr_to_compat(msg->msg_control), &u_cmsg->msg_control) || + __xn_put_user(msg->msg_controllen, &u_cmsg->msg_controllen) || + __xn_put_user(msg->msg_flags, &u_cmsg->msg_flags)) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_put_msghdr); + +int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg, + const struct mmsghdr *mmsg) +{ + if (u_cmmsg == NULL || + !access_wok(u_cmmsg, sizeof(*u_cmmsg)) || + __xn_put_user(mmsg->msg_len, &u_cmmsg->msg_len)) + return -EFAULT; + + return sys32_put_msghdr(&u_cmmsg->msg_hdr, &mmsg->msg_hdr); +} +EXPORT_SYMBOL_GPL(sys32_put_mmsghdr); + +int sys32_get_iovec(struct iovec *iov, + const struct compat_iovec __user *u_ciov, + int ciovlen) +{ + const struct compat_iovec __user *p; + struct compat_iovec ciov; + int ret, n; + + for (n = 0, p = u_ciov; n < ciovlen; n++, p++) { + ret = cobalt_copy_from_user(&ciov, p, sizeof(ciov)); + if (ret) + return ret; + iov[n].iov_base = compat_ptr(ciov.iov_base); + iov[n].iov_len = ciov.iov_len; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_get_iovec); + +int sys32_put_iovec(struct compat_iovec __user *u_ciov, + const struct iovec *iov, + int iovlen) +{ + struct compat_iovec __user *p; + struct compat_iovec ciov; + int ret, n; + + for (n = 0, p = u_ciov; n < iovlen; n++, p++) { + ciov.iov_base = ptr_to_compat(iov[n].iov_base); + ciov.iov_len = iov[n].iov_len; + ret = cobalt_copy_to_user(p, &ciov, 
sizeof(*p)); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sys32_put_iovec); diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c new file mode 100644 index 0000000..bb18fe3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c @@ -0,0 +1,424 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include "internal.h" +#include "thread.h" +#include "mutex.h" +#include "cond.h" +#include "clock.h" +#include <trace/events/cobalt-posix.h> + +static inline int +pthread_cond_init(struct cobalt_cond_shadow *cnd, const struct cobalt_condattr *attr) +{ + int synch_flags = XNSYNCH_PRIO, ret; + struct cobalt_cond *cond, *old_cond; + struct cobalt_cond_state *state; + struct cobalt_ppd *sys_ppd; + struct list_head *condq; + spl_t s; + + cond = xnmalloc(sizeof(*cond)); + if (cond == NULL) + return -ENOMEM; + + sys_ppd = cobalt_ppd_get(attr->pshared); + state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state)); + if (state == NULL) { + ret = -EAGAIN; + goto fail_umm; + } + cond->state = state; + state->pending_signals = 0; + state->mutex_state_offset = ~0U; + + xnlock_get_irqsave(&nklock, s); + + condq = &cobalt_current_resources(attr->pshared)->condq; + if (cnd->magic == COBALT_COND_MAGIC && !list_empty(condq)) { + old_cond = xnregistry_lookup(cnd->handle, NULL); + if (cobalt_obj_active(old_cond, COBALT_COND_MAGIC, + typeof(*old_cond))) { + ret = -EBUSY; + goto fail_register; + } + } + + ret = xnregistry_enter_anon(cond, &cond->resnode.handle); + if (ret < 0) + goto fail_register; + if (attr->pshared) + cond->resnode.handle |= XNSYNCH_PSHARED; + cond->magic = COBALT_COND_MAGIC; + xnsynch_init(&cond->synchbase, synch_flags, NULL); + cond->attr = *attr; + cond->mutex = NULL; + cobalt_add_resource(&cond->resnode, cond, attr->pshared); + + cnd->handle = cond->resnode.handle; + cnd->state_offset = cobalt_umm_offset(&sys_ppd->umm, state); + cnd->magic = COBALT_COND_MAGIC; + + xnlock_put_irqrestore(&nklock, s); + + return 0; +fail_register: + xnlock_put_irqrestore(&nklock, s); + cobalt_umm_free(&sys_ppd->umm, state); +fail_umm: + xnfree(cond); + + return ret; +} + +static inline int pthread_cond_destroy(struct cobalt_cond_shadow *cnd) +{ + struct cobalt_cond *cond; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + cond = xnregistry_lookup(cnd->handle, NULL); + if (cond == NULL) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + + if (!cobalt_obj_active(cnd, COBALT_COND_MAGIC, struct cobalt_cond_shadow) + || !cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + + if (cond->resnode.scope != + cobalt_current_resources(cond->attr.pshared)) { + xnlock_put_irqrestore(&nklock, s); + return 
-EPERM;
+	}
+
+	if (xnsynch_pended_p(&cond->synchbase) || cond->mutex) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	cobalt_cond_reclaim(&cond->resnode, s); /* drops lock */
+
+	cobalt_mark_deleted(cnd);
+
+	return 0;
+}
+
+static inline int cobalt_cond_timedwait_prologue(struct xnthread *cur,
+						 struct cobalt_cond *cond,
+						 struct cobalt_mutex *mutex,
+						 xnticks_t abs_to)
+{
+	int err, ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/* Reject a stale condvar, or one which other waiters have
+	   already bound to a different mutex. */
+	if (!cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)
+	    || (cond->mutex && cond->mutex != mutex)) {
+		err = -EINVAL;
+		goto unlock_and_return;
+	}
+
+	if (cond->resnode.scope !=
+	    cobalt_current_resources(cond->attr.pshared)) {
+		err = -EPERM;
+		goto unlock_and_return;
+	}
+
+	if (mutex->attr.pshared != cond->attr.pshared) {
+		err = -EINVAL;
+		goto unlock_and_return;
+	}
+
+	/* Unlock mutex. */
+	err = cobalt_mutex_release(cur, mutex);
+	if (err < 0)
+		goto unlock_and_return;
+
+	/* err == 1 means a reschedule is needed, but we must not
+	   reschedule here: releasing the mutex and suspending must
+	   happen atomically in pthread_cond_*wait. */
+
+	/* Bind mutex to cond. */
+	if (cond->mutex == NULL) {
+		cond->mutex = mutex;
+		list_add_tail(&cond->mutex_link, &mutex->conds);
+	}
+
+	/* Wait for another thread to signal the condition. */
+	if (abs_to != XN_INFINITE)
+		ret = xnsynch_sleep_on(&cond->synchbase, abs_to,
+				       clock_flag(TIMER_ABSTIME, cond->attr.clock));
+	else
+		ret = xnsynch_sleep_on(&cond->synchbase, XN_INFINITE, XN_RELATIVE);
+
+	/* There are three possible wakeup conditions:
+	   - cond_signal / cond_broadcast: no status bit is set, and
+	     the function returns 0;
+	   - timeout: the XNTIMEO status bit is set, and the function
+	     returns ETIMEDOUT;
+	   - pthread_kill: the XNBREAK status bit is set but ignored,
+	     and the function simply returns EINTR (used only by the
+	     user-space interface, replaced by 0 anywhere else); the
+	     wakeup may or may not be spurious, depending on whether
+	     pthread_cond_signal was called between pthread_kill and
+	     the moment xnsynch_sleep_on returned. */
+
+	err = 0;
+
+	if (ret & XNBREAK)
+		err = -EINTR;
+	else if (ret & XNTIMEO)
+		err = -ETIMEDOUT;
+
+unlock_and_return:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static inline int cobalt_cond_timedwait_epilogue(struct xnthread *cur,
+						 struct cobalt_cond *cond,
+						 struct cobalt_mutex *mutex)
+{
+	int err;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	err = __cobalt_mutex_acquire_unchecked(cur, mutex, NULL);
+	if (err == -EINTR)
+		goto unlock_and_return;
+
+	/*
+	 * Unbind the mutex from the cond if no other thread is
+	 * waiting and the job was not already done.
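+	 * Only the last waiter leaving the condvar performs the
+	 * unbinding, hence the xnsynch_pended_p() check below.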
+ */ + if (!xnsynch_pended_p(&cond->synchbase) && cond->mutex == mutex) { + cond->mutex = NULL; + list_del(&cond->mutex_link); + } + +unlock_and_return: + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +COBALT_SYSCALL(cond_init, current, + (struct cobalt_cond_shadow __user *u_cnd, + const struct cobalt_condattr __user *u_attr)) +{ + struct cobalt_cond_shadow cnd; + struct cobalt_condattr attr; + int err; + + if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd))) + return -EFAULT; + + if (cobalt_copy_from_user(&attr, u_attr, sizeof(attr))) + return -EFAULT; + + trace_cobalt_cond_init(u_cnd, &attr); + + err = pthread_cond_init(&cnd, &attr); + if (err < 0) + return err; + + return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd)); +} + +COBALT_SYSCALL(cond_destroy, current, + (struct cobalt_cond_shadow __user *u_cnd)) +{ + struct cobalt_cond_shadow cnd; + int err; + + if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd))) + return -EFAULT; + + trace_cobalt_cond_destroy(u_cnd); + + err = pthread_cond_destroy(&cnd); + if (err < 0) + return err; + + return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd)); +} + +struct us_cond_data { + int err; +}; + +static inline int cond_fetch_timeout(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts); +} + +int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx, + int *u_err, + void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct xnthread *cur = xnthread_current(); + struct cobalt_cond *cond; + struct cobalt_mutex *mx; + struct us_cond_data d; + struct timespec64 ts; + xnhandle_t handle; + int err, perr = 0; + __u32 offset; + + handle = cobalt_get_handle_from_user(&u_cnd->handle); + cond = xnregistry_lookup(handle, NULL); + + handle = cobalt_get_handle_from_user(&u_mx->handle); + mx = xnregistry_lookup(handle, NULL); + + if (cond->mutex == NULL) { + __xn_get_user(offset, &u_mx->state_offset); + cond->state->mutex_state_offset = offset; + } + + if (fetch_timeout) { + err = fetch_timeout(&ts, u_ts); + if (err == 0) { + trace_cobalt_cond_timedwait(u_cnd, u_mx, &ts); + err = cobalt_cond_timedwait_prologue(cur, cond, mx, + ts2ns(&ts) + 1); + } + } else { + trace_cobalt_cond_wait(u_cnd, u_mx); + err = cobalt_cond_timedwait_prologue(cur, cond, mx, XN_INFINITE); + } + + switch(err) { + case 0: + case -ETIMEDOUT: + perr = d.err = err; + err = cobalt_cond_timedwait_epilogue(cur, cond, mx); + break; + + case -EINTR: + perr = err; + d.err = 0; /* epilogue should return 0. */ + break; + + default: + /* Please gcc and handle the case which will never + happen */ + d.err = EINVAL; + } + + if (cond->mutex == NULL) + cond->state->mutex_state_offset = ~0U; + + if (err == -EINTR) + __xn_put_user(d.err, u_err); + + return err == 0 ? perr : err; +} + +/* pthread_cond_wait_prologue(cond, mutex, count_ptr, timed, timeout) */ +COBALT_SYSCALL(cond_wait_prologue, nonrestartable, + (struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx, + int *u_err, + unsigned int timed, + struct __user_old_timespec __user *u_ts)) +{ + return __cobalt_cond_wait_prologue(u_cnd, u_mx, u_err, u_ts, + timed ? 
cond_fetch_timeout : NULL); +} + +COBALT_SYSCALL(cond_wait_epilogue, primary, + (struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx)) +{ + struct xnthread *cur = xnthread_current(); + struct cobalt_cond *cond; + struct cobalt_mutex *mx; + xnhandle_t handle; + int err; + + handle = cobalt_get_handle_from_user(&u_cnd->handle); + cond = xnregistry_lookup(handle, NULL); + + handle = cobalt_get_handle_from_user(&u_mx->handle); + mx = xnregistry_lookup(handle, NULL); + err = cobalt_cond_timedwait_epilogue(cur, cond, mx); + + if (cond->mutex == NULL) + cond->state->mutex_state_offset = ~0U; + + return err; +} + +int cobalt_cond_deferred_signals(struct cobalt_cond *cond) +{ + struct cobalt_cond_state *state; + __u32 pending_signals; + int need_resched; + + state = cond->state; + pending_signals = state->pending_signals; + + switch(pending_signals) { + default: + state->pending_signals = 0; + need_resched = xnsynch_wakeup_many_sleepers(&cond->synchbase, + pending_signals); + break; + + case ~0U: + need_resched = + xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED; + state->pending_signals = 0; + break; + + case 0: + need_resched = 0; + break; + } + + return need_resched; +} + +void cobalt_cond_reclaim(struct cobalt_resnode *node, spl_t s) +{ + struct cobalt_cond *cond; + + cond = container_of(node, struct cobalt_cond, resnode); + xnregistry_remove(node->handle); + cobalt_del_resource(node); + xnsynch_destroy(&cond->synchbase); + cobalt_mark_deleted(cond); + xnlock_put_irqrestore(&nklock, s); + + cobalt_umm_free(&cobalt_ppd_get(cond->attr.pshared)->umm, + cond->state); + xnfree(cond); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h new file mode 100644 index 0000000..7bec2a6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h @@ -0,0 +1,71 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_POSIX_COND_H +#define _COBALT_POSIX_COND_H + +#include <linux/types.h> +#include <linux/time.h> +#include <linux/list.h> +#include <cobalt/kernel/synch.h> +#include <cobalt/uapi/thread.h> +#include <cobalt/uapi/cond.h> +#include <xenomai/posix/syscall.h> +#include <xenomai/posix/process.h> + +struct cobalt_mutex; + +struct cobalt_cond { + unsigned int magic; + struct xnsynch synchbase; + struct list_head mutex_link; + struct cobalt_cond_state *state; + struct cobalt_condattr attr; + struct cobalt_mutex *mutex; + struct cobalt_resnode resnode; +}; + +int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx, + int *u_err, + void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)); +COBALT_SYSCALL_DECL(cond_init, + (struct cobalt_cond_shadow __user *u_cnd, + const struct cobalt_condattr __user *u_attr)); + +COBALT_SYSCALL_DECL(cond_destroy, + (struct cobalt_cond_shadow __user *u_cnd)); + +COBALT_SYSCALL_DECL(cond_wait_prologue, + (struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx, + int *u_err, + unsigned int timed, + struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(cond_wait_epilogue, + (struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx)); + +int cobalt_cond_deferred_signals(struct cobalt_cond *cond); + +void cobalt_cond_reclaim(struct cobalt_resnode *node, + spl_t s); + +#endif /* !_COBALT_POSIX_COND_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c new file mode 100644 index 0000000..fd012d0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kconfig.h> +#include <linux/atomic.h> +#include <linux/printk.h> +#include <cobalt/kernel/init.h> +#include <cobalt/kernel/thread.h> +#include <xenomai/version.h> +#include <pipeline/tick.h> +#include <asm/xenomai/syscall.h> +#include "corectl.h" + +static BLOCKING_NOTIFIER_HEAD(config_notifier_list); + +static int do_conf_option(int option, void __user *u_buf, size_t u_bufsz) +{ + struct cobalt_config_vector vec; + int ret, val = 0; + + if (option <= _CC_COBALT_GET_CORE_STATUS && u_bufsz < sizeof(val)) + return -EINVAL; + + switch (option) { + case _CC_COBALT_GET_VERSION: + val = XENO_VERSION_CODE; + break; + case _CC_COBALT_GET_NR_PIPES: +#ifdef CONFIG_XENO_OPT_PIPE + val = CONFIG_XENO_OPT_PIPE_NRDEV; +#endif + break; + case _CC_COBALT_GET_NR_TIMERS: + val = CONFIG_XENO_OPT_NRTIMERS; + break; + case _CC_COBALT_GET_POLICIES: + val = _CC_COBALT_SCHED_FIFO|_CC_COBALT_SCHED_RR; + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK)) + val |= _CC_COBALT_SCHED_WEAK; + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_SPORADIC)) + val |= _CC_COBALT_SCHED_SPORADIC; + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_QUOTA)) + val |= _CC_COBALT_SCHED_QUOTA; + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_TP)) + val |= _CC_COBALT_SCHED_TP; + break; + case _CC_COBALT_GET_DEBUG: + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_COBALT)) + val |= _CC_COBALT_DEBUG_ASSERT; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_CONTEXT)) + val |= _CC_COBALT_DEBUG_CONTEXT; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LOCKING)) + val |= _CC_COBALT_DEBUG_LOCKING; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_USER)) + val |= _CC_COBALT_DEBUG_USER; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED)) + val |= _CC_COBALT_DEBUG_MUTEX_RELAXED; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) + val |= _CC_COBALT_DEBUG_MUTEX_SLEEP; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY)) + val |= _CC_COBALT_DEBUG_LEGACY; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_TRACE_RELAX)) + val |= _CC_COBALT_DEBUG_TRACE_RELAX; + if (IS_ENABLED(CONFIG_XENO_DRIVERS_RTNET_CHECKED)) + val |= _CC_COBALT_DEBUG_NET; + break; + case _CC_COBALT_GET_WATCHDOG: +#ifdef CONFIG_XENO_OPT_WATCHDOG + val = CONFIG_XENO_OPT_WATCHDOG_TIMEOUT; +#endif + break; + case _CC_COBALT_GET_CORE_STATUS: + val = realtime_core_state(); + break; + default: + if (is_primary_domain()) + /* Switch to secondary mode first. */ + return -ENOSYS; + vec.u_buf = u_buf; + vec.u_bufsz = u_bufsz; + ret = blocking_notifier_call_chain(&config_notifier_list, + option, &vec); + if (ret == NOTIFY_DONE) + return -EINVAL; /* Nobody cared. */ + return notifier_to_errno(ret); + } + + ret = cobalt_copy_to_user(u_buf, &val, sizeof(val)); + + return ret ? -EFAULT : 0; +} + +static int stop_services(const void __user *u_buf, size_t u_bufsz) +{ + const u32 final_grace_period = 3; /* seconds */ + enum cobalt_run_states state; + __u32 grace_period; + int ret; + + /* + * XXX: we don't have any syscall for unbinding a thread from + * the Cobalt core, so we deny real-time threads from stopping + * Cobalt services. i.e. _CC_COBALT_STOP_CORE must be issued + * from a plain regular linux thread. + */ + if (xnthread_current()) + return -EPERM; + + if (u_bufsz != sizeof(__u32)) + return -EINVAL; + + ret = cobalt_copy_from_user(&grace_period, + u_buf, sizeof(grace_period)); + if (ret) + return ret; + + state = atomic_cmpxchg(&cobalt_runstate, + COBALT_STATE_RUNNING, + COBALT_STATE_TEARDOWN); + switch (state) { + case COBALT_STATE_STOPPED: + break; + case COBALT_STATE_RUNNING: + /* Kill user threads. 
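+		 * Teardown is two-phase: user threads are evicted
+		 * first within the caller-supplied grace period, then
+		 * any lingering RTDM kernel task gets the fixed
+		 * final_grace_period before we give up with a warning.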
+		 */
+		ret = xnthread_killall(grace_period, XNUSER);
+		if (ret) {
+			set_realtime_core_state(state);
+			return ret;
+		}
+		cobalt_call_state_chain(COBALT_STATE_TEARDOWN);
+		/* Kill lingering RTDM tasks. */
+		ret = xnthread_killall(final_grace_period, 0);
+		if (ret == -EAGAIN)
+			printk(XENO_WARNING "some RTDM tasks won't stop\n");
+		pipeline_uninstall_tick_proxy();
+		set_realtime_core_state(COBALT_STATE_STOPPED);
+		printk(XENO_INFO "services stopped\n");
+		break;
+	default:
+		ret = -EINPROGRESS;
+	}
+
+	return ret;
+}
+
+static int start_services(void)
+{
+	enum cobalt_run_states state;
+	int ret = 0;
+
+	state = atomic_cmpxchg(&cobalt_runstate,
+			       COBALT_STATE_STOPPED,
+			       COBALT_STATE_WARMUP);
+	switch (state) {
+	case COBALT_STATE_RUNNING:
+		break;
+	case COBALT_STATE_STOPPED:
+		pipeline_install_tick_proxy();
+		cobalt_call_state_chain(COBALT_STATE_WARMUP);
+		set_realtime_core_state(COBALT_STATE_RUNNING);
+		printk(XENO_INFO "services started\n");
+		break;
+	default:
+		ret = -EINPROGRESS;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(corectl, probing,
+	       (int request, void __user *u_buf, size_t u_bufsz))
+{
+	int ret;
+
+	switch (request) {
+	case _CC_COBALT_STOP_CORE:
+		ret = stop_services(u_buf, u_bufsz);
+		break;
+	case _CC_COBALT_START_CORE:
+		ret = start_services();
+		break;
+	default:
+		ret = do_conf_option(request, u_buf, u_bufsz);
+	}
+
+	return ret;
+}
+
+void cobalt_add_config_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&config_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_add_config_chain);
+
+void cobalt_remove_config_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&config_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_remove_config_chain);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h
new file mode 100644
index 0000000..b9bcf3b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ +#ifndef _COBALT_POSIX_CORECTL_H +#define _COBALT_POSIX_CORECTL_H + +#include <linux/types.h> +#include <linux/notifier.h> +#include <xenomai/posix/syscall.h> +#include <cobalt/uapi/corectl.h> + +struct cobalt_config_vector { + void __user *u_buf; + size_t u_bufsz; +}; + +COBALT_SYSCALL_DECL(corectl, + (int request, void __user *u_buf, size_t u_bufsz)); + +void cobalt_add_config_chain(struct notifier_block *nb); + +void cobalt_remove_config_chain(struct notifier_block *nb); + +#endif /* !_COBALT_POSIX_CORECTL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c new file mode 100644 index 0000000..052c686 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c @@ -0,0 +1,415 @@ +/* + * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include "internal.h" +#include "thread.h" +#include "clock.h" +#include "event.h" +#include <trace/events/cobalt-posix.h> +#include <cobalt/kernel/time.h> + +/* + * Cobalt event notification services + * + * An event flag group is a synchronization object represented by a + * regular native integer; every available bit in such word can be + * used to map a user-defined event flag. When a flag is set, the + * associated event is said to have occurred. + * + * Xenomai threads and interrupt handlers can use event flags to + * signal the occurrence of events to other threads; those threads can + * either wait for the events to occur in a conjunctive manner (all + * awaited events must have occurred to wake up), or in a disjunctive + * way (at least one of the awaited events must have occurred to wake + * up). + * + * We expose this non-POSIX feature through the internal API, as a + * fast IPC mechanism available to the Copperplate interface. + */ + +struct event_wait_context { + struct xnthread_wait_context wc; + unsigned int value; + int mode; +}; + +COBALT_SYSCALL(event_init, current, + (struct cobalt_event_shadow __user *u_event, + unsigned int value, int flags)) +{ + struct cobalt_event_shadow shadow; + struct cobalt_event_state *state; + int pshared, synflags, ret; + struct cobalt_event *event; + struct cobalt_umm *umm; + unsigned long stateoff; + spl_t s; + + trace_cobalt_event_init(u_event, value, flags); + + event = xnmalloc(sizeof(*event)); + if (event == NULL) + return -ENOMEM; + + pshared = (flags & COBALT_EVENT_SHARED) != 0; + umm = &cobalt_ppd_get(pshared)->umm; + state = cobalt_umm_alloc(umm, sizeof(*state)); + if (state == NULL) { + xnfree(event); + return -EAGAIN; + } + + ret = xnregistry_enter_anon(event, &event->resnode.handle); + if (ret) { + cobalt_umm_free(umm, state); + xnfree(event); + return ret; + } + + event->state = state; + event->flags = flags; + synflags = (flags & COBALT_EVENT_PRIO) ? 
XNSYNCH_PRIO : XNSYNCH_FIFO; + xnsynch_init(&event->synch, synflags, NULL); + state->value = value; + state->flags = 0; + state->nwaiters = 0; + stateoff = cobalt_umm_offset(umm, state); + XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff); + + xnlock_get_irqsave(&nklock, s); + cobalt_add_resource(&event->resnode, event, pshared); + event->magic = COBALT_EVENT_MAGIC; + xnlock_put_irqrestore(&nklock, s); + + shadow.flags = flags; + shadow.handle = event->resnode.handle; + shadow.state_offset = (__u32)stateoff; + + return cobalt_copy_to_user(u_event, &shadow, sizeof(*u_event)); +} + +int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct timespec64 *ts) +{ + unsigned int rbits = 0, testval; + xnticks_t timeout = XN_INFINITE; + struct cobalt_event_state *state; + xntmode_t tmode = XN_RELATIVE; + struct event_wait_context ewc; + struct cobalt_event *event; + xnhandle_t handle; + int ret = 0, info; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_event->handle); + + if (ts) { + if (!timespec64_valid(ts)) + return -EINVAL; + + timeout = ts2ns(ts); + if (timeout) { + timeout++; + tmode = XN_ABSOLUTE; + } else + timeout = XN_NONBLOCK; + trace_cobalt_event_timedwait(u_event, bits, mode, ts); + } else + trace_cobalt_event_wait(u_event, bits, mode); + + xnlock_get_irqsave(&nklock, s); + + event = xnregistry_lookup(handle, NULL); + if (event == NULL || event->magic != COBALT_EVENT_MAGIC) { + ret = -EINVAL; + goto out; + } + + state = event->state; + + if (bits == 0) { + /* + * Special case: we don't wait for any event, we only + * return the current flag group value. + */ + rbits = state->value; + goto out; + } + + state->flags |= COBALT_EVENT_PENDED; + rbits = state->value & bits; + testval = mode & COBALT_EVENT_ANY ? rbits : bits; + if (rbits && rbits == testval) + goto done; + + if (timeout == XN_NONBLOCK) { + ret = -EWOULDBLOCK; + goto done; + } + + ewc.value = bits; + ewc.mode = mode; + xnthread_prepare_wait(&ewc.wc); + state->nwaiters++; + info = xnsynch_sleep_on(&event->synch, timeout, tmode); + if (info & XNRMID) { + ret = -EIDRM; + goto out; + } + if (info & (XNBREAK|XNTIMEO)) { + state->nwaiters--; + ret = (info & XNBREAK) ? 
-EINTR : -ETIMEDOUT; + } else + rbits = ewc.value; +done: + if (!xnsynch_pended_p(&event->synch)) + state->flags &= ~COBALT_EVENT_PENDED; +out: + xnlock_put_irqrestore(&nklock, s); + + if (ret == 0 && + cobalt_copy_to_user(u_bits_r, &rbits, sizeof(rbits))) + return -EFAULT; + + return ret; +} + +int __cobalt_event_wait64(struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct __kernel_timespec __user *u_ts) +{ + struct timespec64 ts, *tsp = NULL; + int ret; + + if (u_ts) { + tsp = &ts; + ret = cobalt_get_timespec64(&ts, u_ts); + if (ret) + return ret; + } + + return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp); +} + +COBALT_SYSCALL(event_wait, primary, + (struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct __user_old_timespec __user *u_ts)) +{ + struct timespec64 ts, *tsp = NULL; + int ret; + + if (u_ts) { + tsp = &ts; + ret = cobalt_get_u_timespec(&ts, u_ts); + if (ret) + return ret; + } + + return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp); +} + +COBALT_SYSCALL(event_wait64, primary, + (struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_event_wait64(u_event, bits, u_bits_r, mode, u_ts); +} + +COBALT_SYSCALL(event_sync, current, + (struct cobalt_event_shadow __user *u_event)) +{ + unsigned int bits, waitval, testval; + struct xnthread_wait_context *wc; + struct cobalt_event_state *state; + struct event_wait_context *ewc; + struct cobalt_event *event; + struct xnthread *p, *tmp; + xnhandle_t handle; + int ret = 0; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_event->handle); + + xnlock_get_irqsave(&nklock, s); + + event = xnregistry_lookup(handle, NULL); + if (event == NULL || event->magic != COBALT_EVENT_MAGIC) { + ret = -EINVAL; + goto out; + } + + /* + * Userland has already updated the bitmask, our job is to + * wake up any thread which could be satisfied by its current + * value. + */ + state = event->state; + bits = state->value; + + xnsynch_for_each_sleeper_safe(p, tmp, &event->synch) { + wc = xnthread_get_wait_context(p); + ewc = container_of(wc, struct event_wait_context, wc); + waitval = ewc->value & bits; + testval = ewc->mode & COBALT_EVENT_ANY ? 
waitval : ewc->value; + if (waitval && waitval == testval) { + state->nwaiters--; + ewc->value = waitval; + xnsynch_wakeup_this_sleeper(&event->synch, p); + } + } + + xnsched_run(); +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(event_destroy, current, + (struct cobalt_event_shadow __user *u_event)) +{ + struct cobalt_event *event; + xnhandle_t handle; + spl_t s; + + trace_cobalt_event_destroy(u_event); + + handle = cobalt_get_handle_from_user(&u_event->handle); + + xnlock_get_irqsave(&nklock, s); + + event = xnregistry_lookup(handle, NULL); + if (event == NULL || event->magic != COBALT_EVENT_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + + cobalt_event_reclaim(&event->resnode, s); /* drops lock */ + + return 0; +} + +COBALT_SYSCALL(event_inquire, current, + (struct cobalt_event_shadow __user *u_event, + struct cobalt_event_info __user *u_info, + pid_t __user *u_waitlist, + size_t waitsz)) +{ + int nrpend = 0, nrwait = 0, nrpids, ret = 0; + unsigned long pstamp, nstamp = 0; + struct cobalt_event_info info; + struct cobalt_event *event; + pid_t *t = NULL, fbuf[16]; + struct xnthread *thread; + xnhandle_t handle; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_event->handle); + + nrpids = waitsz / sizeof(pid_t); + + xnlock_get_irqsave(&nklock, s); + + for (;;) { + pstamp = nstamp; + event = xnregistry_lookup(handle, &nstamp); + if (event == NULL || event->magic != COBALT_EVENT_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + /* + * Allocate memory to return the wait list without + * holding any lock, then revalidate the handle. + */ + if (t == NULL) { + nrpend = 0; + if (!xnsynch_pended_p(&event->synch)) + break; + xnsynch_for_each_sleeper(thread, &event->synch) + nrpend++; + if (u_waitlist == NULL) + break; + xnlock_put_irqrestore(&nklock, s); + if (nrpids > nrpend) + nrpids = nrpend; + if (nrpend <= ARRAY_SIZE(fbuf)) + t = fbuf; /* Use fast buffer. 
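+					   (on-stack fbuf[16]; larger
+					   wait lists fall back to
+					   xnmalloc() below)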
*/ + else { + t = xnmalloc(nrpend * sizeof(pid_t)); + if (t == NULL) + return -ENOMEM; + } + xnlock_get_irqsave(&nklock, s); + } else if (pstamp == nstamp) + break; + else { + xnlock_put_irqrestore(&nklock, s); + if (t != fbuf) + xnfree(t); + t = NULL; + xnlock_get_irqsave(&nklock, s); + } + } + + info.flags = event->flags; + info.value = event->value; + info.nrwait = nrpend; + + if (xnsynch_pended_p(&event->synch) && u_waitlist != NULL) { + xnsynch_for_each_sleeper(thread, &event->synch) { + if (nrwait >= nrpids) + break; + t[nrwait++] = xnthread_host_pid(thread); + } + } + + xnlock_put_irqrestore(&nklock, s); + + ret = cobalt_copy_to_user(u_info, &info, sizeof(info)); + if (ret == 0 && nrwait > 0) + ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t)); + + if (t && t != fbuf) + xnfree(t); + + return ret ?: nrwait; +} + +void cobalt_event_reclaim(struct cobalt_resnode *node, spl_t s) +{ + struct cobalt_event *event; + struct cobalt_umm *umm; + int pshared; + + event = container_of(node, struct cobalt_event, resnode); + xnregistry_remove(node->handle); + cobalt_del_resource(node); + xnsynch_destroy(&event->synch); + pshared = (event->flags & COBALT_EVENT_SHARED) != 0; + xnlock_put_irqrestore(&nklock, s); + + umm = &cobalt_ppd_get(pshared)->umm; + cobalt_umm_free(umm, event->state); + xnfree(event); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h new file mode 100644 index 0000000..919774c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _COBALT_POSIX_EVENT_H +#define _COBALT_POSIX_EVENT_H + +#include <cobalt/kernel/synch.h> +#include <cobalt/uapi/event.h> +#include <xenomai/posix/syscall.h> +#include <xenomai/posix/process.h> + +struct cobalt_resources; +struct cobalt_process; + +struct cobalt_event { + unsigned int magic; + unsigned int value; + int flags; + struct xnsynch synch; + struct cobalt_event_state *state; + struct cobalt_resnode resnode; +}; + +int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct timespec64 *ts); + +int __cobalt_event_wait64(struct cobalt_event_shadow __user *u_event, + unsigned int bits, unsigned int __user *u_bits_r, + int mode, + const struct __kernel_timespec __user *u_ts); + +COBALT_SYSCALL_DECL(event_init, + (struct cobalt_event_shadow __user *u_evtsh, + unsigned int value, + int flags)); + +COBALT_SYSCALL_DECL(event_wait, + (struct cobalt_event_shadow __user *u_evtsh, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, + const struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(event_wait64, + (struct cobalt_event_shadow __user *u_evtsh, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(event_sync, + (struct cobalt_event_shadow __user *u_evtsh)); + +COBALT_SYSCALL_DECL(event_destroy, + (struct cobalt_event_shadow __user *u_evtsh)); + +COBALT_SYSCALL_DECL(event_inquire, + (struct cobalt_event_shadow __user *u_event, + struct cobalt_event_info __user *u_info, + pid_t __user *u_waitlist, + size_t waitsz)); + +void cobalt_event_reclaim(struct cobalt_resnode *node, + spl_t s); + +#endif /* !_COBALT_POSIX_EVENT_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h new file mode 100644 index 0000000..e23c26c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_EXTENSION_H +#define _COBALT_POSIX_EXTENSION_H + +#include <linux/time.h> +#include <linux/list.h> + +#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION + +#include <cobalt/kernel/thread.h> + +struct cobalt_timer; +struct cobalt_sigpending; +struct cobalt_extref; +struct siginfo; +struct xnsched_class; +union xnsched_policy_param; + +struct cobalt_extension { + struct xnthread_personality core; + struct { + struct cobalt_thread * + (*timer_init)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */ + const struct sigevent *__restrict__ evp); + int (*timer_settime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. 
*/ + const struct itimerspec64 *__restrict__ value, + int flags); + int (*timer_gettime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */ + struct itimerspec64 *__restrict__ value); + int (*timer_delete)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */ + int (*timer_cleanup)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */ + int (*signal_deliver)(struct cobalt_extref *refthread, + struct siginfo *si, + struct cobalt_sigpending *sigp); + int (*signal_queue)(struct cobalt_extref *refthread, + struct cobalt_sigpending *sigp); + int (*signal_copyinfo)(struct cobalt_extref *refthread, + void __user *u_si, + const struct siginfo *si, + int overrun); + int (*signal_copyinfo_compat)(struct cobalt_extref *refthread, + void __user *u_si, + const struct siginfo *si, + int overrun); + int (*sched_yield)(struct cobalt_extref *curref); + int (*thread_setsched)(struct cobalt_extref *refthread, /* nklocked, IRQs off. */ + struct xnsched_class *sched_class, + union xnsched_policy_param *param); + } ops; +}; + +struct cobalt_extref { + struct cobalt_extension *extension; + struct list_head next; + void *private; +}; + +static inline void cobalt_set_extref(struct cobalt_extref *ref, + struct cobalt_extension *ext, + void *priv) +{ + ref->extension = ext; + ref->private = priv; +} + +/** + * All macros return non-zero if some thread-level extension code was + * called, leaving the output value into __ret. Otherwise, the __ret + * value is undefined. + */ +#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...) \ + ({ \ + int __val = 0; \ + if ((__owner) && (__owner)->extref.extension) { \ + (__extref)->extension = (__owner)->extref.extension; \ + if ((__extref)->extension->ops.__extfn) { \ + (__ret) = (__extref)->extension->ops. \ + __extfn(__extref, ##__args ); \ + __val = 1; \ + } \ + } else \ + (__extref)->extension = NULL; \ + __val; \ + }) + +#define cobalt_call_extension(__extfn, __extref, __ret, __args...) \ + ({ \ + int __val = 0; \ + if ((__extref)->extension && \ + (__extref)->extension->ops.__extfn) { \ + (__ret) = (__extref)->extension->ops. \ + __extfn(__extref, ##__args ); \ + __val = 1; \ + } \ + __val; \ + }) + +#else /* !CONFIG_XENO_OPT_COBALT_EXTENSION */ + +struct cobalt_extension; + +struct cobalt_extref { +}; + +static inline void cobalt_set_extref(struct cobalt_extref *ref, + struct cobalt_extension *ext, + void *priv) +{ +} + +#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...) \ + ({ (void)(__owner); (void)(__ret); 0; }) + +#define cobalt_call_extension(__extfn, __extref, __ret, __args...) \ + ({ (void)(__ret); 0; }) + +#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */ + +#endif /* !_COBALT_POSIX_EXTENSION_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh new file mode 100755 index 0000000..0f99fff --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh @@ -0,0 +1,32 @@ +#! 
/bin/sh + +set -e + +shift + +awk ' +match($0, /COBALT_SYSCALL\([^,]*,[ \t]*[^,]*/) { + str=substr($0, RSTART + 15, RLENGTH - 15) + match(str, /[^, \t]*/) + syscall=substr(str, RSTART, RLENGTH) + + if (syscall == "") { + print "Failed to find syscall name in line " $0 > "/dev/stderr" + exit 1 + } + + calls = calls " __COBALT_CALL_ENTRY(" syscall ") \\\n" + modes = modes " __COBALT_MODE(" str ") \\\n" + next +} + +/COBALT_SYSCALL\(/ { + print "Failed to parse line " $0 > "/dev/stderr" + exit 1 +} + +END { + print "#define __COBALT_CALL_ENTRIES \\\n" calls " /* end */" + print "#define __COBALT_CALL_MODES \\\n" modes " /* end */" +} +' $* diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h new file mode 100644 index 0000000..8b134d0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h @@ -0,0 +1,62 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_INTERNAL_H +#define _COBALT_POSIX_INTERNAL_H + +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/ppd.h> +#include <cobalt/kernel/assert.h> +#include <cobalt/kernel/list.h> +#include <cobalt/kernel/arith.h> +#include <asm/xenomai/syscall.h> +#include "process.h" +#include "extension.h" +#include "syscall.h" +#include "memory.h" + +#define COBALT_MAXNAME 64 +#define COBALT_PERMS_MASK (O_RDONLY | O_WRONLY | O_RDWR) + +#define COBALT_MAGIC(n) (0x8686##n##n) +#define COBALT_ANY_MAGIC COBALT_MAGIC(00) +#define COBALT_THREAD_MAGIC COBALT_MAGIC(01) +#define COBALT_MQ_MAGIC COBALT_MAGIC(0A) +#define COBALT_MQD_MAGIC COBALT_MAGIC(0B) +#define COBALT_EVENT_MAGIC COBALT_MAGIC(0F) +#define COBALT_MONITOR_MAGIC COBALT_MAGIC(10) +#define COBALT_TIMERFD_MAGIC COBALT_MAGIC(11) + +#define cobalt_obj_active(h,m,t) \ + ((h) && ((t *)(h))->magic == (m)) + +#define cobalt_mark_deleted(t) ((t)->magic = ~(t)->magic) + +extern struct xnptree posix_ptree; + +static inline xnhandle_t cobalt_get_handle_from_user(xnhandle_t *u_h) +{ + xnhandle_t handle; + return __xn_get_user(handle, u_h) ? 0 : handle; +} + +int cobalt_init(void); + +long cobalt_restart_syscall_placeholder(struct restart_block *param); + +#endif /* !_COBALT_POSIX_INTERNAL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c new file mode 100644 index 0000000..b95dfbc --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c @@ -0,0 +1,394 @@ +/* + * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>. + * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>. 
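
For reference, the generator above turns each declaration it scans, e.g.:

	COBALT_SYSCALL(open, lostage, (const char __user *u_path, int oflag))

into one record per accumulating macro; the captured text is "open, lostage", i.e. everything between the opening parenthesis and the second comma, so the emitted header has this shape:

	#define __COBALT_CALL_ENTRIES \
	 __COBALT_CALL_ENTRY(open) \
	 /* end */
	#define __COBALT_CALL_MODES \
	 __COBALT_MODE(open, lostage) \
	 /* end */
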
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/err.h> +#include <linux/fs.h> +#include <cobalt/kernel/compat.h> +#include <cobalt/kernel/ppd.h> +#include <cobalt/kernel/time.h> +#include <xenomai/rtdm/internal.h> +#include "process.h" +#include "internal.h" +#include "clock.h" +#include "io.h" + +COBALT_SYSCALL(open, lostage, + (const char __user *u_path, int oflag)) +{ + struct filename *filename; + int ufd; + + filename = getname(u_path); + if (IS_ERR(filename)) + return PTR_ERR(filename); + + ufd = __rtdm_dev_open(filename->name, oflag); + putname(filename); + + return ufd; +} + +COBALT_SYSCALL(socket, lostage, + (int protocol_family, int socket_type, int protocol)) +{ + return __rtdm_dev_socket(protocol_family, socket_type, protocol); +} + +COBALT_SYSCALL(close, lostage, (int fd)) +{ + return rtdm_fd_close(fd, 0); +} + +COBALT_SYSCALL(fcntl, current, (int fd, int cmd, long arg)) +{ + return rtdm_fd_fcntl(fd, cmd, arg); +} + +COBALT_SYSCALL(ioctl, handover, + (int fd, unsigned int request, void __user *arg)) +{ + return rtdm_fd_ioctl(fd, request, arg); +} + +COBALT_SYSCALL(read, handover, + (int fd, void __user *buf, size_t size)) +{ + return rtdm_fd_read(fd, buf, size); +} + +COBALT_SYSCALL(write, handover, + (int fd, const void __user *buf, size_t size)) +{ + return rtdm_fd_write(fd, buf, size); +} + +COBALT_SYSCALL(recvmsg, handover, + (int fd, struct user_msghdr __user *umsg, int flags)) +{ + struct user_msghdr m; + ssize_t ret; + + ret = cobalt_copy_from_user(&m, umsg, sizeof(m)); + if (ret) + return ret; + + ret = rtdm_fd_recvmsg(fd, &m, flags); + if (ret < 0) + return ret; + + return cobalt_copy_to_user(umsg, &m, sizeof(*umsg)) ?: ret; +} + +static int get_timespec(struct timespec64 *ts, + const void __user *u_ts) +{ + return cobalt_get_u_timespec(ts, u_ts); +} + +static int get_mmsg(struct mmsghdr *mmsg, void __user *u_mmsg) +{ + return cobalt_copy_from_user(mmsg, u_mmsg, sizeof(*mmsg)); +} + +static int put_mmsg(void __user **u_mmsg_p, const struct mmsghdr *mmsg) +{ + struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p, + *q __user = (*p)++; + + return cobalt_copy_to_user(q, mmsg, sizeof(*q)); +} + +COBALT_SYSCALL(recvmmsg, primary, + (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen, + unsigned int flags, struct __user_old_timespec __user *u_timeout)) +{ + return __rtdm_fd_recvmmsg(fd, u_msgvec, vlen, flags, u_timeout, + get_mmsg, put_mmsg, get_timespec); +} + +COBALT_SYSCALL(recvmmsg64, primary, + (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen, + unsigned int flags, struct __kernel_timespec __user *u_timeout)) +{ + return __rtdm_fd_recvmmsg64(fd, u_msgvec, vlen, flags, u_timeout, + get_mmsg, put_mmsg); +} + +COBALT_SYSCALL(sendmsg, handover, + (int fd, struct user_msghdr __user *umsg, int 
flags)) +{ + struct user_msghdr m; + int ret; + + ret = cobalt_copy_from_user(&m, umsg, sizeof(m)); + + return ret ?: rtdm_fd_sendmsg(fd, &m, flags); +} + +static int put_mmsglen(void __user **u_mmsg_p, const struct mmsghdr *mmsg) +{ + struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p, + *q __user = (*p)++; + + return __xn_put_user(mmsg->msg_len, &q->msg_len); +} + +COBALT_SYSCALL(sendmmsg, primary, + (int fd, struct mmsghdr __user *u_msgvec, + unsigned int vlen, unsigned int flags)) +{ + return __rtdm_fd_sendmmsg(fd, u_msgvec, vlen, flags, + get_mmsg, put_mmsglen); +} + +COBALT_SYSCALL(mmap, lostage, + (int fd, struct _rtdm_mmap_request __user *u_rma, + void __user **u_addrp)) +{ + struct _rtdm_mmap_request rma; + void *u_addr = NULL; + int ret; + + ret = cobalt_copy_from_user(&rma, u_rma, sizeof(rma)); + if (ret) + return ret; + + ret = rtdm_fd_mmap(fd, &rma, &u_addr); + if (ret) + return ret; + + return cobalt_copy_to_user(u_addrp, &u_addr, sizeof(u_addr)); +} + +static int __cobalt_first_fd_valid_p(fd_set *fds[XNSELECT_MAX_TYPES], int nfds) +{ + int i, fd; + + for (i = 0; i < XNSELECT_MAX_TYPES; i++) + if (fds[i] + && (fd = find_first_bit(fds[i]->fds_bits, nfds)) < nfds) + return rtdm_fd_valid_p(fd); + + /* All empty is correct, used as a "sleep" mechanism by strange + applications. */ + return 1; +} + +static int __cobalt_select_bind_all(struct xnselector *selector, + fd_set *fds[XNSELECT_MAX_TYPES], int nfds) +{ + bool first_fd = true; + unsigned fd, type; + int err; + + for (type = 0; type < XNSELECT_MAX_TYPES; type++) { + fd_set *set = fds[type]; + if (set) + for (fd = find_first_bit(set->fds_bits, nfds); + fd < nfds; + fd = find_next_bit(set->fds_bits, nfds, fd + 1)) { + err = rtdm_fd_select(fd, selector, type); + if (err) { + /* + * Do not needlessly signal "retry + * under Linux" for mixed fd sets. 
+ */ + if (err == -EADV && !first_fd) + return -EBADF; + return err; + } + first_fd = false; + } + } + + return 0; +} + +int __cobalt_select(int nfds, void __user *u_rfds, void __user *u_wfds, + void __user *u_xfds, void __user *u_tv, bool compat) +{ + void __user *ufd_sets[XNSELECT_MAX_TYPES] = { + [XNSELECT_READ] = u_rfds, + [XNSELECT_WRITE] = u_wfds, + [XNSELECT_EXCEPT] = u_xfds + }; + fd_set *in_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL}; + fd_set *out_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL}; + fd_set in_fds_storage[XNSELECT_MAX_TYPES], + out_fds_storage[XNSELECT_MAX_TYPES]; + xnticks_t timeout = XN_INFINITE; + struct restart_block *restart; + xntmode_t mode = XN_RELATIVE; + struct xnselector *selector; + struct xnthread *curr; + struct __kernel_old_timeval tv; + size_t fds_size; + int i, err; + + curr = xnthread_current(); + + if (u_tv) { + if (xnthread_test_localinfo(curr, XNSYSRST)) { + xnthread_clear_localinfo(curr, XNSYSRST); + + restart = cobalt_get_restart_block(current); + timeout = restart->nanosleep.expires; + + if (restart->fn != cobalt_restart_syscall_placeholder) { + err = -EINTR; + goto out; + } + } else { +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + if (sys32_get_timeval(&tv, u_tv)) + return -EFAULT; + } else +#endif + { + if (!access_wok(u_tv, sizeof(tv)) + || cobalt_copy_from_user(&tv, u_tv, + sizeof(tv))) + return -EFAULT; + } + + if (tv.tv_usec >= 1000000) + return -EINVAL; + + timeout = clock_get_ticks(CLOCK_MONOTONIC) + tv2ns(&tv); + } + + mode = XN_ABSOLUTE; + } + + fds_size = __FDELT__(nfds + __NFDBITS__ - 1) * sizeof(long); + + for (i = 0; i < XNSELECT_MAX_TYPES; i++) + if (ufd_sets[i]) { + in_fds[i] = &in_fds_storage[i]; + out_fds[i] = &out_fds_storage[i]; +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + if (sys32_get_fdset(in_fds[i], ufd_sets[i], + fds_size)) + return -EFAULT; + } else +#endif + { + if (!access_wok((void __user *) ufd_sets[i], + sizeof(fd_set)) + || cobalt_copy_from_user(in_fds[i], + (void __user *)ufd_sets[i], + fds_size)) + return -EFAULT; + } + } + + selector = curr->selector; + if (!selector) { + /* This function may be called from pure Linux fd_sets, we want + to avoid the xnselector allocation in this case, so, we do a + simple test: test if the first file descriptor we find in the + fd_set is an RTDM descriptor or a message queue descriptor. 
*/ + if (!__cobalt_first_fd_valid_p(in_fds, nfds)) + return -EADV; + + selector = xnmalloc(sizeof(*curr->selector)); + if (selector == NULL) + return -ENOMEM; + xnselector_init(selector); + curr->selector = selector; + + /* Bind directly the file descriptors, we do not need to go + through xnselect returning -ECHRNG */ + err = __cobalt_select_bind_all(selector, in_fds, nfds); + if (err) + return err; + } + + do { + err = xnselect(selector, out_fds, in_fds, nfds, timeout, mode); + if (err == -ECHRNG) { + int bind_err = __cobalt_select_bind_all(selector, + out_fds, nfds); + if (bind_err) + return bind_err; + } + } while (err == -ECHRNG); + + if (err == -EINTR && signal_pending(current)) { + xnthread_set_localinfo(curr, XNSYSRST); + + restart = cobalt_get_restart_block(current); + restart->fn = cobalt_restart_syscall_placeholder; + restart->nanosleep.expires = timeout; + + return -ERESTARTSYS; + } + +out: + if (u_tv && (err > 0 || err == -EINTR)) { + xnsticks_t diff = timeout - clock_get_ticks(CLOCK_MONOTONIC); + if (diff > 0) + ticks2tv(&tv, diff); + else + tv.tv_sec = tv.tv_usec = 0; + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + if (sys32_put_timeval(u_tv, &tv)) + return -EFAULT; + } else +#endif + { + if (cobalt_copy_to_user(u_tv, &tv, sizeof(tv))) + return -EFAULT; + } + } + + if (err >= 0) + for (i = 0; i < XNSELECT_MAX_TYPES; i++) { + if (!ufd_sets[i]) + continue; +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + if (sys32_put_fdset(ufd_sets[i], out_fds[i], + sizeof(fd_set))) + return -EFAULT; + } else +#endif + { + if (cobalt_copy_to_user((void __user *)ufd_sets[i], + out_fds[i], sizeof(fd_set))) + return -EFAULT; + } + } + return err; +} + +/* int select(int, fd_set *, fd_set *, fd_set *, struct __kernel_old_timeval *) */ +COBALT_SYSCALL(select, primary, + (int nfds, + fd_set __user *u_rfds, + fd_set __user *u_wfds, + fd_set __user *u_xfds, + struct __kernel_old_timeval __user *u_tv)) +{ + return __cobalt_select(nfds, u_rfds, u_wfds, u_xfds, u_tv, false); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h new file mode 100644 index 0000000..1d9ee09 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>. + * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
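
From the application side, all of the above is reached through a plain select(2) issued from a Cobalt thread, which libcobalt routes to COBALT_SYSCALL(select). A minimal caller sketch, assuming fd is an RTDM file descriptor (device and protocol are up to the driver):

	#include <sys/select.h>
	#include <sys/time.h>
	#include <errno.h>

	/* Wait up to one second for fd to become readable. */
	static int wait_readable(int fd)
	{
		struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
		fd_set rfds;
		int ret;

		FD_ZERO(&rfds);
		FD_SET(fd, &rfds);

		ret = select(fd + 1, &rfds, NULL, NULL, &tv);
		if (ret > 0)
			return 0;	/* fd is readable */

		return ret == 0 ? -ETIMEDOUT : -errno;
	}

Note that tv is written back with the remaining time on normal or interrupted return, matching the u_tv copy-out at the end of __cobalt_select().
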
+ */ +#ifndef _COBALT_POSIX_IO_H +#define _COBALT_POSIX_IO_H + +#include <rtdm/rtdm.h> +#include <xenomai/posix/syscall.h> +#include <cobalt/kernel/select.h> + +int __cobalt_select(int nfds, void __user *u_rfds, void __user *u_wfds, + void __user *u_xfds, void __user *u_tv, bool compat); + +COBALT_SYSCALL_DECL(open, + (const char __user *u_path, int oflag)); + +COBALT_SYSCALL_DECL(socket, + (int protocol_family, + int socket_type, int protocol)); + +COBALT_SYSCALL_DECL(close, (int fd)); + +COBALT_SYSCALL_DECL(fcntl, (int fd, int cmd, long arg)); + +COBALT_SYSCALL_DECL(ioctl, + (int fd, unsigned int request, void __user *arg)); + +COBALT_SYSCALL_DECL(read, + (int fd, void __user *buf, size_t size)); + +COBALT_SYSCALL_DECL(write, + (int fd, const void __user *buf, size_t size)); + +COBALT_SYSCALL_DECL(recvmsg, + (int fd, struct user_msghdr __user *umsg, int flags)); + +COBALT_SYSCALL_DECL(recvmmsg, + (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen, + unsigned int flags, struct __user_old_timespec __user *u_timeout)); + +COBALT_SYSCALL_DECL(recvmmsg64, + (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen, + unsigned int flags, + struct __kernel_timespec __user *u_timeout)); + +COBALT_SYSCALL_DECL(sendmsg, + (int fd, struct user_msghdr __user *umsg, int flags)); + +COBALT_SYSCALL_DECL(sendmmsg, + (int fd, struct mmsghdr __user *u_msgvec, + unsigned int vlen, unsigned int flags)); + +COBALT_SYSCALL_DECL(mmap, + (int fd, struct _rtdm_mmap_request __user *u_rma, + void __user * __user *u_addrp)); + +COBALT_SYSCALL_DECL(select, + (int nfds, + fd_set __user *u_rfds, + fd_set __user *u_wfds, + fd_set __user *u_xfds, + struct __kernel_old_timeval __user *u_tv)); + +#endif /* !_COBALT_POSIX_IO_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c new file mode 100644 index 0000000..fc88e26 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c @@ -0,0 +1,354 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <rtdm/driver.h>
+#include <cobalt/kernel/vdso.h>
+#include "process.h"
+#include "memory.h"
+
+#define UMM_PRIVATE 0 /* Per-process user-mapped memory heap */
+#define UMM_SHARED 1 /* Shared user-mapped memory heap */
+#define SYS_GLOBAL 2 /* System heap (not mmapped) */
+
+struct xnvdso *nkvdso;
+EXPORT_SYMBOL_GPL(nkvdso);
+
+static void umm_vmopen(struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm = vma->vm_private_data;
+
+	atomic_inc(&umm->refcount);
+}
+
+static void umm_vmclose(struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm = vma->vm_private_data;
+
+	cobalt_umm_destroy(umm);
+}
+
+static struct vm_operations_struct umm_vmops = {
+	.open = umm_vmopen,
+	.close = umm_vmclose,
+};
+
+static struct cobalt_umm *umm_from_fd(struct rtdm_fd *fd)
+{
+	struct cobalt_process *process;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return NULL;
+
+	if (rtdm_fd_minor(fd) == UMM_PRIVATE)
+		return &process->sys_ppd.umm;
+
+	return &cobalt_kernel_ppd.umm;
+}
+
+static int umm_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm;
+	size_t len;
+	int ret;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	len = vma->vm_end - vma->vm_start;
+	if (len != xnheap_get_size(&umm->heap))
+		return -EINVAL;
+
+	vma->vm_private_data = umm;
+	vma->vm_ops = &umm_vmops;
+	if (xnarch_cache_aliasing())
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	ret = rtdm_mmap_vmem(vma, xnheap_get_membase(&umm->heap));
+	if (ret)
+		return ret;
+
+	atomic_inc(&umm->refcount);
+
+	return 0;
+}
+
+#ifndef CONFIG_MMU
+static unsigned long umm_get_unmapped_area(struct rtdm_fd *fd,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags)
+{
+	struct cobalt_umm *umm;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	if (pgoff == 0)
+		return (unsigned long)xnheap_get_membase(&umm->heap);
+
+	return pgoff << PAGE_SHIFT;
+}
+#else
+#define umm_get_unmapped_area NULL
+#endif
+
+static int stat_umm(struct rtdm_fd *fd,
+		    struct cobalt_memdev_stat __user *u_stat)
+{
+	struct cobalt_memdev_stat stat;
+	struct cobalt_umm *umm;
+	spl_t s;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	xnlock_get_irqsave(&umm->heap.lock, s);
+	stat.size = xnheap_get_size(&umm->heap);
+	stat.free = xnheap_get_free(&umm->heap);
+	xnlock_put_irqrestore(&umm->heap.lock, s);
+
+	return rtdm_safe_copy_to_user(fd, u_stat, &stat, sizeof(stat));
+}
+
+static int do_umm_ioctls(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg)
+{
+	int ret;
+
+	switch (request) {
+	case MEMDEV_RTIOC_STAT:
+		ret = stat_umm(fd, arg);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int umm_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	return do_umm_ioctls(fd, request, arg);
+}
+
+static int umm_ioctl_nrt(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg)
+{
+	return do_umm_ioctls(fd, request, arg);
+}
+
+static int sysmem_open(struct rtdm_fd *fd, int oflags)
+{
+	if ((oflags & O_ACCMODE) != O_RDONLY)
+		return -EACCES;
+
+	return 0;
+}
+
+static int do_sysmem_ioctls(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct cobalt_memdev_stat stat;
+	spl_t s;
+	int ret;
+
+	switch (request) {
+	case MEMDEV_RTIOC_STAT:
+		xnlock_get_irqsave(&cobalt_heap.lock, s);
+		stat.size = xnheap_get_size(&cobalt_heap);
+		stat.free =
xnheap_get_free(&cobalt_heap); + xnlock_put_irqrestore(&cobalt_heap.lock, s); + ret = rtdm_safe_copy_to_user(fd, arg, &stat, sizeof(stat)); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static int sysmem_ioctl_rt(struct rtdm_fd *fd, + unsigned int request, void __user *arg) +{ + return do_sysmem_ioctls(fd, request, arg); +} + +static int sysmem_ioctl_nrt(struct rtdm_fd *fd, + unsigned int request, void __user *arg) +{ + return do_sysmem_ioctls(fd, request, arg); +} + +static struct rtdm_driver umm_driver = { + .profile_info = RTDM_PROFILE_INFO(umm, + RTDM_CLASS_MEMORY, + RTDM_SUBCLASS_GENERIC, + 0), + .device_flags = RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR, + .device_count = 2, + .ops = { + .ioctl_rt = umm_ioctl_rt, + .ioctl_nrt = umm_ioctl_nrt, + .mmap = umm_mmap, + .get_unmapped_area = umm_get_unmapped_area, + }, +}; + +static struct rtdm_device umm_devices[] = { + [ UMM_PRIVATE ] = { + .driver = &umm_driver, + .label = COBALT_MEMDEV_PRIVATE, + .minor = UMM_PRIVATE, + }, + [ UMM_SHARED ] = { + .driver = &umm_driver, + .label = COBALT_MEMDEV_SHARED, + .minor = UMM_SHARED, + }, +}; + +static struct rtdm_driver sysmem_driver = { + .profile_info = RTDM_PROFILE_INFO(sysmem, + RTDM_CLASS_MEMORY, + SYS_GLOBAL, + 0), + .device_flags = RTDM_NAMED_DEVICE, + .device_count = 1, + .ops = { + .open = sysmem_open, + .ioctl_rt = sysmem_ioctl_rt, + .ioctl_nrt = sysmem_ioctl_nrt, + }, +}; + +static struct rtdm_device sysmem_device = { + .driver = &sysmem_driver, + .label = COBALT_MEMDEV_SYS, +}; + +static inline void init_vdso(void) +{ + nkvdso->features = XNVDSO_FEATURES; + nkvdso->wallclock_offset = nkclock.wallclock_offset; +} + +int cobalt_memdev_init(void) +{ + int ret; + + ret = cobalt_umm_init(&cobalt_kernel_ppd.umm, + CONFIG_XENO_OPT_SHARED_HEAPSZ * 1024, NULL); + if (ret) + return ret; + + cobalt_umm_set_name(&cobalt_kernel_ppd.umm, "shared heap"); + + nkvdso = cobalt_umm_alloc(&cobalt_kernel_ppd.umm, sizeof(*nkvdso)); + if (nkvdso == NULL) { + ret = -ENOMEM; + goto fail_vdso; + } + + init_vdso(); + + ret = rtdm_dev_register(umm_devices + UMM_PRIVATE); + if (ret) + goto fail_private; + + ret = rtdm_dev_register(umm_devices + UMM_SHARED); + if (ret) + goto fail_shared; + + ret = rtdm_dev_register(&sysmem_device); + if (ret) + goto fail_sysmem; + + return 0; + +fail_sysmem: + rtdm_dev_unregister(umm_devices + UMM_SHARED); +fail_shared: + rtdm_dev_unregister(umm_devices + UMM_PRIVATE); +fail_private: + cobalt_umm_free(&cobalt_kernel_ppd.umm, nkvdso); +fail_vdso: + cobalt_umm_destroy(&cobalt_kernel_ppd.umm); + + return ret; +} + +void cobalt_memdev_cleanup(void) +{ + rtdm_dev_unregister(&sysmem_device); + rtdm_dev_unregister(umm_devices + UMM_SHARED); + rtdm_dev_unregister(umm_devices + UMM_PRIVATE); + cobalt_umm_free(&cobalt_kernel_ppd.umm, nkvdso); + cobalt_umm_destroy(&cobalt_kernel_ppd.umm); +} + +int cobalt_umm_init(struct cobalt_umm *umm, u32 size, + void (*release)(struct cobalt_umm *umm)) +{ + void *basemem; + int ret; + + secondary_mode_only(); + + /* We don't support CPUs with VIVT caches and the like. 
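
Userland consumes the umm devices registered above by mapping the whole heap, which is exactly what umm_mmap() enforces. A hedged sketch of that handshake follows; the /dev/rtdm/memdev-private node name is an assumption derived from the COBALT_MEMDEV_PRIVATE label, and MEMDEV_RTIOC_STAT / struct cobalt_memdev_stat come from the Cobalt UAPI headers added elsewhere in this patch:

	#include <sys/mman.h>
	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <unistd.h>

	static void *map_private_heap(size_t *size_r)
	{
		struct cobalt_memdev_stat statbuf;
		void *base;
		int fd;

		fd = open("/dev/rtdm/memdev-private", O_RDWR); /* node name is an assumption */
		if (fd < 0)
			return NULL;

		/* Served by stat_umm(): total and free heap bytes. */
		if (ioctl(fd, MEMDEV_RTIOC_STAT, &statbuf)) {
			close(fd);
			return NULL;
		}

		/* umm_mmap() rejects anything but a whole-heap mapping. */
		base = mmap(NULL, statbuf.size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
		close(fd);
		if (base == MAP_FAILED)
			return NULL;

		*size_r = statbuf.size;
		return base;
	}
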
*/ + BUG_ON(xnarch_cache_aliasing()); + + size = PAGE_ALIGN(size); + basemem = vmalloc_kernel(size, __GFP_ZERO); + if (basemem == NULL) + return -ENOMEM; + + ret = xnheap_init(&umm->heap, basemem, size); + if (ret) { + vfree(basemem); + return ret; + } + + umm->release = release; + atomic_set(&umm->refcount, 1); + smp_mb(); + + return 0; +} + +void cobalt_umm_destroy(struct cobalt_umm *umm) +{ + secondary_mode_only(); + + if (atomic_dec_and_test(&umm->refcount)) { + xnheap_destroy(&umm->heap); + vfree(xnheap_get_membase(&umm->heap)); + if (umm->release) + umm->release(umm); + } +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h new file mode 100644 index 0000000..c22417b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h @@ -0,0 +1,61 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_MEMORY_H +#define _COBALT_POSIX_MEMORY_H + +#include <cobalt/kernel/ppd.h> + +#define cobalt_umm_set_name(__umm, __fmt, __args...) \ + xnheap_set_name(&(__umm)->heap, (__fmt), ## __args) + +static inline +void *cobalt_umm_alloc(struct cobalt_umm *umm, __u32 size) +{ + return xnheap_alloc(&umm->heap, size); +} + +static inline +void *cobalt_umm_zalloc(struct cobalt_umm *umm, __u32 size) +{ + return xnheap_zalloc(&umm->heap, size); +} + +static inline +void cobalt_umm_free(struct cobalt_umm *umm, void *p) +{ + xnheap_free(&umm->heap, p); +} + +static inline +__u32 cobalt_umm_offset(struct cobalt_umm *umm, void *p) +{ + return p - xnheap_get_membase(&umm->heap); +} + +int cobalt_memdev_init(void); + +void cobalt_memdev_cleanup(void); + +int cobalt_umm_init(struct cobalt_umm *umm, u32 size, + void (*release)(struct cobalt_umm *umm)); + +void cobalt_umm_destroy(struct cobalt_umm *umm); + +#endif /* !_COBALT_POSIX_MEMORY_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c new file mode 100644 index 0000000..1e71283 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c @@ -0,0 +1,466 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
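
cobalt_umm_offset() above is the kernel half of the shared-state protocol: services such as event_init() and monitor_init() publish a byte offset into the umm heap (the shadow's state_offset field), and userland adds it to its mapping base to reach the shared state. The user-side inverse is plain pointer arithmetic (a sketch, with the base obtained as in the previous example):

	#include <stdint.h>

	static inline void *umm_deref(void *mapping_base, uint32_t state_offset)
	{
		/* inverse of cobalt_umm_offset(): base + offset -> shared state */
		return (char *)mapping_base + state_offset;
	}
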
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include "internal.h" +#include "thread.h" +#include "clock.h" +#include "monitor.h" +#include <trace/events/cobalt-posix.h> +#include <cobalt/kernel/time.h> + +/* + * The Cobalt monitor is a double-wait condition object, serializing + * accesses through a gate. It behaves like a mutex + two condition + * variables combo with extended signaling logic. Folding several + * conditions and the serialization support into a single object + * performs better on low end hw caches and allows for specific + * optimizations, compared to using separate general-purpose mutex and + * condvars. This object is used by the Copperplate interface + * internally when it runs over the Cobalt core. + * + * Threads can wait for some resource(s) to be granted (consumer + * side), or wait for the available resource(s) to drain (producer + * side). Therefore, signals are thread-directed for the grant side, + * and monitor-directed for the drain side. + * + * Typically, a consumer would wait for the GRANT condition to be + * satisfied, signaling the DRAINED condition when more resources + * could be made available if the protocol implements output + * contention (e.g. the write side of a message queue waiting for the + * consumer to release message slots). Conversely, a producer would + * wait for the DRAINED condition to be satisfied, issuing GRANT + * signals once more resources have been made available to the + * consumer. + * + * Implementation-wise, the monitor logic is shared with the Cobalt + * thread object. + */ +COBALT_SYSCALL(monitor_init, current, + (struct cobalt_monitor_shadow __user *u_mon, + clockid_t clk_id, int flags)) +{ + struct cobalt_monitor_shadow shadow; + struct cobalt_monitor_state *state; + struct cobalt_monitor *mon; + int pshared, tmode, ret; + struct cobalt_umm *umm; + unsigned long stateoff; + spl_t s; + + tmode = clock_flag(TIMER_ABSTIME, clk_id); + if (tmode < 0) + return -EINVAL; + + mon = xnmalloc(sizeof(*mon)); + if (mon == NULL) + return -ENOMEM; + + pshared = (flags & COBALT_MONITOR_SHARED) != 0; + umm = &cobalt_ppd_get(pshared)->umm; + state = cobalt_umm_alloc(umm, sizeof(*state)); + if (state == NULL) { + xnfree(mon); + return -EAGAIN; + } + + ret = xnregistry_enter_anon(mon, &mon->resnode.handle); + if (ret) { + cobalt_umm_free(umm, state); + xnfree(mon); + return ret; + } + + mon->state = state; + xnsynch_init(&mon->gate, XNSYNCH_PI, &state->owner); + xnsynch_init(&mon->drain, XNSYNCH_PRIO, NULL); + mon->flags = flags; + mon->tmode = tmode; + INIT_LIST_HEAD(&mon->waiters); + + xnlock_get_irqsave(&nklock, s); + cobalt_add_resource(&mon->resnode, monitor, pshared); + mon->magic = COBALT_MONITOR_MAGIC; + xnlock_put_irqrestore(&nklock, s); + + state->flags = 0; + stateoff = cobalt_umm_offset(umm, state); + XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff); + shadow.flags = flags; + shadow.handle = mon->resnode.handle; + shadow.state_offset = (__u32)stateoff; + + return cobalt_copy_to_user(u_mon, &shadow, sizeof(*u_mon)); +} + +/* nklock held, irqs off */ +static int monitor_enter(xnhandle_t handle, struct xnthread *curr) +{ + struct cobalt_monitor *mon; + int info; + + mon = xnregistry_lookup(handle, NULL); /* (Re)validate. 
*/ + if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) + return -EINVAL; + + info = xnsynch_acquire(&mon->gate, XN_INFINITE, XN_RELATIVE); + if (info) + /* Break or error, no timeout possible. */ + return info & XNBREAK ? -EINTR : -EINVAL; + + mon->state->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST); + + return 0; +} + +COBALT_SYSCALL(monitor_enter, primary, + (struct cobalt_monitor_shadow __user *u_mon)) +{ + struct xnthread *curr = xnthread_current(); + xnhandle_t handle; + int ret; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_mon->handle); + + xnlock_get_irqsave(&nklock, s); + ret = monitor_enter(handle, curr); + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +/* nklock held, irqs off */ +static void monitor_wakeup(struct cobalt_monitor *mon) +{ + struct cobalt_monitor_state *state = mon->state; + struct cobalt_thread *thread, *tmp; + struct xnthread *p; + int bcast; + + /* + * Having the GRANT signal pending does not necessarily mean + * that somebody is actually waiting for it, so we have to + * check both conditions below. + */ + bcast = (state->flags & COBALT_MONITOR_BROADCAST) != 0; + if ((state->flags & COBALT_MONITOR_GRANTED) == 0 || + list_empty(&mon->waiters)) + goto drain; + + /* + * Unblock waiters requesting a grant, either those who + * received it only or all of them, depending on the broadcast + * bit. + * + * We update the PENDED flag to inform userland about the + * presence of waiters, so that it may decide not to issue any + * syscall for exiting the monitor if nobody else is waiting + * at the gate. + */ + list_for_each_entry_safe(thread, tmp, &mon->waiters, monitor_link) { + p = &thread->threadbase; + /* + * A thread might receive a grant signal albeit it + * does not wait on a monitor, or it might have timed + * out before we got there, so we really have to check + * that ->wchan does match our sleep queue. + */ + if (bcast || + (p->u_window->grant_value && p->wchan == &thread->monitor_synch)) { + xnsynch_wakeup_this_sleeper(&thread->monitor_synch, p); + list_del_init(&thread->monitor_link); + } + } +drain: + /* + * Unblock threads waiting for a drain event if that signal is + * pending, either one or all, depending on the broadcast + * flag. + */ + if ((state->flags & COBALT_MONITOR_DRAINED) != 0 && + xnsynch_pended_p(&mon->drain)) { + if (bcast) + xnsynch_flush(&mon->drain, 0); + else + xnsynch_wakeup_one_sleeper(&mon->drain); + } + + if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain)) + state->flags &= ~COBALT_MONITOR_PENDED; +} + +int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon, + int event, const struct timespec64 *ts, + int __user *u_ret) +{ + struct cobalt_thread *curr = cobalt_current_thread(); + struct cobalt_monitor_state *state; + xnticks_t timeout = XN_INFINITE; + int ret = 0, opret = 0, info; + struct cobalt_monitor *mon; + struct xnsynch *synch; + xnhandle_t handle; + xntmode_t tmode; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_mon->handle); + + if (ts) { + if (!timespec64_valid(ts)) + return -EINVAL; + + timeout = ts2ns(ts) + 1; + } + + xnlock_get_irqsave(&nklock, s); + + mon = xnregistry_lookup(handle, NULL); + if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) { + ret = -EINVAL; + goto out; + } + + /* + * The current thread might have sent signals to the monitor + * it wants to sleep on: wake up satisfied waiters before + * going to sleep. 
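
Tying this back to the double-wait semantics described at the top of this file, a typical round trip has the following shape. This is C-flavored pseudocode over the raw services (the libcobalt wrappers differ in detail); COBALT_MONITOR_WAITGRANT/WAITDRAIN are the event selectors from the monitor UAPI header, and the resource predicates are application-defined:

	/* Consumer: take the gate, wait for a resource grant. */
	monitor_enter(mon);
	while (!resource_available(mon))	/* predicate checked under the gate */
		monitor_wait(mon, COBALT_MONITOR_WAITGRANT, NULL, &opret);
	consume_resource(mon);
	monitor_exit(mon);			/* triggers monitor_wakeup() if SIGNALED */

	/* Producer: publish a resource, then wait for it to drain. */
	monitor_enter(mon);
	publish_resource(mon);			/* sets GRANTED|SIGNALED in the shared state */
	monitor_wait(mon, COBALT_MONITOR_WAITDRAIN, NULL, &opret);
	monitor_exit(mon);
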
+ */ + state = mon->state; + if (state->flags & COBALT_MONITOR_SIGNALED) + monitor_wakeup(mon); + + synch = &curr->monitor_synch; + if (event & COBALT_MONITOR_WAITDRAIN) + synch = &mon->drain; + else { + curr->threadbase.u_window->grant_value = 0; + list_add_tail(&curr->monitor_link, &mon->waiters); + } + + /* + * Tell userland that somebody is now waiting for a signal, so + * that later exiting the monitor on the producer side will + * trigger a wakeup syscall. + * + * CAUTION: we must raise the PENDED flag while holding the + * gate mutex, to prevent a signal from sneaking in from a + * remote CPU without the producer issuing the corresponding + * wakeup call when dropping the gate lock. + */ + state->flags |= COBALT_MONITOR_PENDED; + + tmode = ts ? mon->tmode : XN_RELATIVE; + + /* Release the gate prior to waiting, all atomically. */ + xnsynch_release(&mon->gate, &curr->threadbase); + + info = xnsynch_sleep_on(synch, timeout, tmode); + if (info) { + if ((event & COBALT_MONITOR_WAITDRAIN) == 0 && + !list_empty(&curr->monitor_link)) + list_del_init(&curr->monitor_link); + + if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain)) + state->flags &= ~COBALT_MONITOR_PENDED; + + if (info & XNBREAK) { + opret = -EINTR; + goto out; + } + if (info & XNTIMEO) + opret = -ETIMEDOUT; + } + + ret = monitor_enter(handle, &curr->threadbase); +out: + xnlock_put_irqrestore(&nklock, s); + + __xn_put_user(opret, u_ret); + + return ret; +} + +int __cobalt_monitor_wait64(struct cobalt_monitor_shadow __user *u_mon, + int event, + const struct __kernel_timespec __user *u_ts, + int __user *u_ret) +{ + struct timespec64 ts, *tsp = NULL; + int ret; + + if (u_ts) { + tsp = &ts; + ret = cobalt_get_timespec64(&ts, u_ts); + if (ret) + return ret; + } + + return __cobalt_monitor_wait(u_mon, event, tsp, u_ret); +} + +COBALT_SYSCALL(monitor_wait, nonrestartable, + (struct cobalt_monitor_shadow __user *u_mon, + int event, const struct __user_old_timespec __user *u_ts, + int __user *u_ret)) +{ + struct timespec64 ts, *tsp = NULL; + int ret; + + if (u_ts) { + tsp = &ts; + ret = cobalt_get_u_timespec(&ts, u_ts); + if (ret) + return ret; + } + + return __cobalt_monitor_wait(u_mon, event, tsp, u_ret); +} + +COBALT_SYSCALL(monitor_wait64, nonrestartable, + (struct cobalt_monitor_shadow __user *u_mon, int event, + const struct __kernel_timespec __user *u_ts, int __user *u_ret)) +{ + return __cobalt_monitor_wait64(u_mon, event, u_ts, u_ret); +} + +COBALT_SYSCALL(monitor_sync, nonrestartable, + (struct cobalt_monitor_shadow __user *u_mon)) +{ + struct cobalt_monitor *mon; + struct xnthread *curr; + xnhandle_t handle; + int ret = 0; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_mon->handle); + curr = xnthread_current(); + + xnlock_get_irqsave(&nklock, s); + + mon = xnregistry_lookup(handle, NULL); + if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) + ret = -EINVAL; + else if (mon->state->flags & COBALT_MONITOR_SIGNALED) { + monitor_wakeup(mon); + xnsynch_release(&mon->gate, curr); + xnsched_run(); + ret = monitor_enter(handle, curr); + } + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(monitor_exit, primary, + (struct cobalt_monitor_shadow __user *u_mon)) +{ + struct cobalt_monitor *mon; + struct xnthread *curr; + xnhandle_t handle; + int ret = 0; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_mon->handle); + curr = xnthread_current(); + + xnlock_get_irqsave(&nklock, s); + + mon = xnregistry_lookup(handle, NULL); + if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) + ret 
= -EINVAL; + else { + if (mon->state->flags & COBALT_MONITOR_SIGNALED) + monitor_wakeup(mon); + + xnsynch_release(&mon->gate, curr); + xnsched_run(); + } + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(monitor_destroy, primary, + (struct cobalt_monitor_shadow __user *u_mon)) +{ + struct cobalt_monitor_state *state; + struct cobalt_monitor *mon; + struct xnthread *curr; + xnhandle_t handle; + int ret = 0; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_mon->handle); + curr = xnthread_current(); + + xnlock_get_irqsave(&nklock, s); + + mon = xnregistry_lookup(handle, NULL); + if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) { + ret = -EINVAL; + goto fail; + } + + state = mon->state; + if ((state->flags & COBALT_MONITOR_PENDED) != 0 || + xnsynch_pended_p(&mon->drain) || !list_empty(&mon->waiters)) { + ret = -EBUSY; + goto fail; + } + + /* + * A monitor must be destroyed by the thread currently holding + * its gate lock. + */ + if (xnsynch_owner_check(&mon->gate, curr)) { + ret = -EPERM; + goto fail; + } + + xnsynch_release(&mon->gate, curr); + cobalt_monitor_reclaim(&mon->resnode, s); /* drops lock */ + + xnsched_run(); + + return 0; + fail: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +void cobalt_monitor_reclaim(struct cobalt_resnode *node, spl_t s) +{ + struct cobalt_monitor *mon; + struct cobalt_umm *umm; + int pshared; + + mon = container_of(node, struct cobalt_monitor, resnode); + pshared = (mon->flags & COBALT_MONITOR_SHARED) != 0; + xnsynch_destroy(&mon->gate); + xnsynch_destroy(&mon->drain); + xnregistry_remove(node->handle); + cobalt_del_resource(node); + cobalt_mark_deleted(mon); + xnlock_put_irqrestore(&nklock, s); + + umm = &cobalt_ppd_get(pshared)->umm; + cobalt_umm_free(umm, mon->state); + xnfree(mon); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h new file mode 100644 index 0000000..bf8794e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _COBALT_POSIX_MONITOR_H +#define _COBALT_POSIX_MONITOR_H + +#include <cobalt/kernel/synch.h> +#include <cobalt/uapi/monitor.h> +#include <xenomai/posix/syscall.h> +#include <xenomai/posix/process.h> + +struct cobalt_resources; +struct cobalt_process; + +struct cobalt_monitor { + unsigned int magic; + struct xnsynch gate; + struct xnsynch drain; + struct cobalt_monitor_state *state; + struct list_head waiters; + int flags; + xntmode_t tmode; + struct cobalt_resnode resnode; +}; + +int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon, + int event, const struct timespec64 *ts, + int __user *u_ret); + +int __cobalt_monitor_wait64(struct cobalt_monitor_shadow __user *u_mon, + int event, + const struct __kernel_timespec __user *u_ts, + int __user *u_ret); + +COBALT_SYSCALL_DECL(monitor_init, + (struct cobalt_monitor_shadow __user *u_monsh, + clockid_t clk_id, + int flags)); + +COBALT_SYSCALL_DECL(monitor_enter, + (struct cobalt_monitor_shadow __user *u_monsh)); + +COBALT_SYSCALL_DECL(monitor_sync, + (struct cobalt_monitor_shadow __user *u_monsh)); + +COBALT_SYSCALL_DECL(monitor_exit, + (struct cobalt_monitor_shadow __user *u_monsh)); + +COBALT_SYSCALL_DECL(monitor_wait, + (struct cobalt_monitor_shadow __user *u_monsh, + int event, const struct __user_old_timespec __user *u_ts, + int __user *u_ret)); + +COBALT_SYSCALL_DECL(monitor_wait64, + (struct cobalt_monitor_shadow __user *u_monsh, int event, + const struct __kernel_timespec __user *u_ts, + int __user *u_ret)); + +COBALT_SYSCALL_DECL(monitor_destroy, + (struct cobalt_monitor_shadow __user *u_monsh)); + +void cobalt_monitor_reclaim(struct cobalt_resnode *node, + spl_t s); + +#endif /* !_COBALT_POSIX_MONITOR_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c new file mode 100644 index 0000000..a156af5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c @@ -0,0 +1,1093 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/stdarg.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <cobalt/kernel/select.h> +#include <rtdm/fd.h> +#include "internal.h" +#include "thread.h" +#include "signal.h" +#include "timer.h" +#include "mqueue.h" +#include "clock.h" +#include <trace/events/cobalt-posix.h> +#include <cobalt/kernel/time.h> + +#define COBALT_MSGMAX 65536 +#define COBALT_MSGSIZEMAX (16*1024*1024) +#define COBALT_MSGPRIOMAX 32768 + +struct cobalt_mq { + unsigned magic; + + struct list_head link; + + struct xnsynch receivers; + struct xnsynch senders; + size_t memsize; + char *mem; + struct list_head queued; + struct list_head avail; + int nrqueued; + + /* mq_notify */ + struct siginfo si; + mqd_t target_qd; + struct cobalt_thread *target; + + struct mq_attr attr; + + unsigned refs; + char name[COBALT_MAXNAME]; + xnhandle_t handle; + + DECLARE_XNSELECT(read_select); + DECLARE_XNSELECT(write_select); +}; + +struct cobalt_mqd { + struct cobalt_mq *mq; + struct rtdm_fd fd; +}; + +struct cobalt_msg { + struct list_head link; + unsigned int prio; + size_t len; + char data[0]; +}; + +struct cobalt_mqwait_context { + struct xnthread_wait_context wc; + struct cobalt_msg *msg; +}; + +static struct mq_attr default_attr = { + .mq_maxmsg = 10, + .mq_msgsize = 8192, +}; + +static LIST_HEAD(cobalt_mqq); + +#ifdef CONFIG_XENO_OPT_VFILE + +static int mq_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + return 0; +} + +static struct xnvfile_regular_ops mq_vfile_ops = { + .show = mq_vfile_show, +}; + +static struct xnpnode_regular __mq_pnode = { + .node = { + .dirname = "mqueue", + .root = &posix_ptree, + .ops = &xnregistry_vfreg_ops, + }, + .vfile = { + .ops = &mq_vfile_ops, + }, +}; + +#else /* !CONFIG_XENO_OPT_VFILE */ + +static struct xnpnode_link __mq_pnode = { + .node = { + .dirname = "mqueue", + } +}; + +#endif /* !CONFIG_XENO_OPT_VFILE */ + + +static inline struct cobalt_msg *mq_msg_alloc(struct cobalt_mq *mq) +{ + if (list_empty(&mq->avail)) + return NULL; + + return list_get_entry(&mq->avail, struct cobalt_msg, link); +} + +static inline void mq_msg_free(struct cobalt_mq *mq, struct cobalt_msg * msg) +{ + list_add(&msg->link, &mq->avail); /* For earliest re-use of the block. */ +} + +static inline int mq_init(struct cobalt_mq *mq, const struct mq_attr *attr) +{ + unsigned i, msgsize, memsize; + char *mem; + + if (attr == NULL) + attr = &default_attr; + else { + if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) + return -EINVAL; + if (attr->mq_maxmsg > COBALT_MSGMAX) + return -EINVAL; + if (attr->mq_msgsize > COBALT_MSGSIZEMAX) + return -EINVAL; + } + + msgsize = attr->mq_msgsize + sizeof(struct cobalt_msg); + + /* Align msgsize on natural boundary. */ + if ((msgsize % sizeof(unsigned long))) + msgsize += + sizeof(unsigned long) - (msgsize % sizeof(unsigned long)); + + memsize = msgsize * attr->mq_maxmsg; + memsize = PAGE_ALIGN(memsize); + if (get_order(memsize) > MAX_ORDER) + return -ENOSPC; + + mem = xnheap_vmalloc(memsize); + if (mem == NULL) + return -ENOSPC; + + mq->memsize = memsize; + INIT_LIST_HEAD(&mq->queued); + mq->nrqueued = 0; + xnsynch_init(&mq->receivers, XNSYNCH_PRIO, NULL); + xnsynch_init(&mq->senders, XNSYNCH_PRIO, NULL); + mq->mem = mem; + + /* Fill the pool. 
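
As a worked example of the sizing just computed, take mq_msgsize = 100 and mq_maxmsg = 10 on an LP64 kernel (sizeof(unsigned long) == 8), and assume a 32-byte struct cobalt_msg header (the exact size depends on the build):

	msgsize = 100 + 32;		/* payload + header = 132 */
	msgsize += 8 - (132 % 8);	/* round up to an 8-byte boundary -> 136 */
	memsize = 136 * 10;		/* 1360 bytes for the whole pool */
	memsize = PAGE_ALIGN(memsize);	/* -> 4096 with 4KiB pages */
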
*/ + INIT_LIST_HEAD(&mq->avail); + for (i = 0; i < attr->mq_maxmsg; i++) { + struct cobalt_msg *msg = (struct cobalt_msg *) (mem + i * msgsize); + mq_msg_free(mq, msg); + } + + mq->attr = *attr; + mq->target = NULL; + xnselect_init(&mq->read_select); + xnselect_init(&mq->write_select); + mq->magic = COBALT_MQ_MAGIC; + mq->refs = 2; + INIT_LIST_HEAD(&mq->link); + + return 0; +} + +static inline void mq_destroy(struct cobalt_mq *mq) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + xnsynch_destroy(&mq->receivers); + xnsynch_destroy(&mq->senders); + list_del(&mq->link); + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); + xnselect_destroy(&mq->read_select); /* Reschedules. */ + xnselect_destroy(&mq->write_select); /* Ditto. */ + xnregistry_remove(mq->handle); + xnheap_vfree(mq->mem); + kfree(mq); +} + +static int mq_unref_inner(struct cobalt_mq *mq, spl_t s) +{ + int destroy; + + destroy = --mq->refs == 0; + xnlock_put_irqrestore(&nklock, s); + + if (destroy) + mq_destroy(mq); + + return destroy; +} + +static int mq_unref(struct cobalt_mq *mq) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + return mq_unref_inner(mq, s); +} + +static void mqd_close(struct rtdm_fd *fd) +{ + struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd); + struct cobalt_mq *mq = mqd->mq; + + kfree(mqd); + mq_unref(mq); +} + +int +mqd_select(struct rtdm_fd *fd, struct xnselector *selector, + unsigned type, unsigned index) +{ + struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd); + struct xnselect_binding *binding; + struct cobalt_mq *mq; + int err; + spl_t s; + + if (type == XNSELECT_READ || type == XNSELECT_WRITE) { + binding = xnmalloc(sizeof(*binding)); + if (!binding) + return -ENOMEM; + } else + return -EBADF; + + xnlock_get_irqsave(&nklock, s); + mq = mqd->mq; + + switch(type) { + case XNSELECT_READ: + err = -EBADF; + if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_WRONLY) + goto unlock_and_error; + + err = xnselect_bind(&mq->read_select, binding, + selector, type, index, + !list_empty(&mq->queued)); + if (err) + goto unlock_and_error; + break; + + case XNSELECT_WRITE: + err = -EBADF; + if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_RDONLY) + goto unlock_and_error; + + err = xnselect_bind(&mq->write_select, binding, + selector, type, index, + !list_empty(&mq->avail)); + if (err) + goto unlock_and_error; + break; + } + xnlock_put_irqrestore(&nklock, s); + return 0; + + unlock_and_error: + xnlock_put_irqrestore(&nklock, s); + xnfree(binding); + return err; +} + +static struct rtdm_fd_ops mqd_ops = { + .close = mqd_close, + .select = mqd_select, +}; + +static inline int mqd_create(struct cobalt_mq *mq, unsigned long flags, int ufd) +{ + struct cobalt_mqd *mqd; + int ret; + + if (cobalt_ppd_get(0) == &cobalt_kernel_ppd) + return -EPERM; + + mqd = kmalloc(sizeof(*mqd), GFP_KERNEL); + if (mqd == NULL) + return -ENOSPC; + + mqd->fd.oflags = flags; + mqd->mq = mq; + + ret = rtdm_fd_enter(&mqd->fd, ufd, COBALT_MQD_MAGIC, &mqd_ops); + if (ret < 0) + return ret; + + return rtdm_fd_register(&mqd->fd, ufd); +} + +static int mq_open(int uqd, const char *name, int oflags, + int mode, struct mq_attr *attr) +{ + struct cobalt_mq *mq; + xnhandle_t handle; + spl_t s; + int err; + + if (name[0] != '/' || name[1] == '\0') + return -EINVAL; + + retry_bind: + err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle); + switch (err) { + case 0: + /* Found */ + if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) + return -EEXIST; + + xnlock_get_irqsave(&nklock, s); + mq = 
xnregistry_lookup(handle, NULL); + if (mq && mq->magic != COBALT_MQ_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + + if (mq) { + ++mq->refs; + xnlock_put_irqrestore(&nklock, s); + } else { + xnlock_put_irqrestore(&nklock, s); + goto retry_bind; + } + + err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK), + uqd); + if (err < 0) { + mq_unref(mq); + return err; + } + break; + + case -EWOULDBLOCK: + /* Not found */ + if ((oflags & O_CREAT) == 0) + return (mqd_t)-ENOENT; + + mq = kmalloc(sizeof(*mq), GFP_KERNEL); + if (mq == NULL) + return -ENOSPC; + + err = mq_init(mq, attr); + if (err) { + kfree(mq); + return err; + } + + snprintf(mq->name, sizeof(mq->name), "%s", &name[1]); + + err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK), + uqd); + if (err < 0) { + mq_destroy(mq); + return err; + } + + xnlock_get_irqsave(&nklock, s); + err = xnregistry_enter(mq->name, mq, &mq->handle, + &__mq_pnode.node); + if (err < 0) + --mq->refs; + else + list_add_tail(&mq->link, &cobalt_mqq); + xnlock_put_irqrestore(&nklock, s); + if (err < 0) { + rtdm_fd_close(uqd, COBALT_MQD_MAGIC); + if (err == -EEXIST) + goto retry_bind; + return err; + } + break; + + default: + return err; + } + + return 0; +} + +static inline int mq_close(mqd_t fd) +{ + int err; + + err = rtdm_fd_close(fd, COBALT_MQD_MAGIC); + return err == -EADV ? -EBADF : err; +} + +static inline int mq_unlink(const char *name) +{ + struct cobalt_mq *mq; + xnhandle_t handle; + spl_t s; + int err; + + if (name[0] != '/' || name[1] == '\0') + return -EINVAL; + + err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle); + if (err == -EWOULDBLOCK) + return -ENOENT; + if (err) + return err; + + xnlock_get_irqsave(&nklock, s); + mq = xnregistry_lookup(handle, NULL); + if (!mq) { + err = -ENOENT; + goto err_unlock; + } + if (mq->magic != COBALT_MQ_MAGIC) { + err = -EINVAL; + err_unlock: + xnlock_put_irqrestore(&nklock, s); + + return err; + } + if (mq_unref_inner(mq, s) == 0) + xnregistry_unlink(&name[1]); + return 0; +} + +static inline struct cobalt_msg * +mq_trysend(struct cobalt_mqd *mqd, size_t len) +{ + struct cobalt_msg *msg; + struct cobalt_mq *mq; + unsigned flags; + + mq = mqd->mq; + flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK; + + if (flags != O_WRONLY && flags != O_RDWR) + return ERR_PTR(-EBADF); + + if (len > mq->attr.mq_msgsize) + return ERR_PTR(-EMSGSIZE); + + msg = mq_msg_alloc(mq); + if (msg == NULL) + return ERR_PTR(-EAGAIN); + + if (list_empty(&mq->avail)) + xnselect_signal(&mq->write_select, 0); + + return msg; +} + +static inline struct cobalt_msg * +mq_tryrcv(struct cobalt_mqd *mqd, size_t len) +{ + struct cobalt_msg *msg; + unsigned int flags; + struct cobalt_mq *mq; + + mq = mqd->mq; + flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK; + + if (flags != O_RDONLY && flags != O_RDWR) + return ERR_PTR(-EBADF); + + if (len < mq->attr.mq_msgsize) + return ERR_PTR(-EMSGSIZE); + + if (list_empty(&mq->queued)) + return ERR_PTR(-EAGAIN); + + msg = list_get_entry(&mq->queued, struct cobalt_msg, link); + mq->nrqueued--; + + if (list_empty(&mq->queued)) + xnselect_signal(&mq->read_select, 0); + + return msg; +} + +static struct cobalt_msg * +mq_timedsend_inner(struct cobalt_mqd *mqd, + size_t len, const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct cobalt_mqwait_context mwc; + struct cobalt_msg *msg; + struct cobalt_mq *mq; + struct timespec64 ts; + xntmode_t tmode; + xnticks_t to; + spl_t s; + int ret; + + to = XN_INFINITE; + 
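	/*
	 * Note the lazy timeout fetch below: the user timespec is
	 * copied in only once the first non-blocking attempt has
	 * failed and the descriptor allows blocking, because the
	 * copy may fault and thus must not run under nklock;
	 * fetch_timeout is then cleared so the redo path does not
	 * copy it twice.
	 */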
tmode = XN_RELATIVE; +redo: + xnlock_get_irqsave(&nklock, s); + msg = mq_trysend(mqd, len); + if (msg != ERR_PTR(-EAGAIN)) + goto out; + + if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK) + goto out; + + if (fetch_timeout) { + xnlock_put_irqrestore(&nklock, s); + ret = fetch_timeout(&ts, u_ts); + if (ret) + return ERR_PTR(ret); + if (!timespec64_valid(&ts)) + return ERR_PTR(-EINVAL); + to = ts2ns(&ts) + 1; + tmode = XN_REALTIME; + fetch_timeout = NULL; + goto redo; + } + + mq = mqd->mq; + xnthread_prepare_wait(&mwc.wc); + ret = xnsynch_sleep_on(&mq->senders, to, tmode); + if (ret) { + if (ret & XNBREAK) + msg = ERR_PTR(-EINTR); + else if (ret & XNTIMEO) + msg = ERR_PTR(-ETIMEDOUT); + else if (ret & XNRMID) + msg = ERR_PTR(-EBADF); + } else + msg = mwc.msg; +out: + xnlock_put_irqrestore(&nklock, s); + + return msg; +} + +static void mq_release_msg(struct cobalt_mq *mq, struct cobalt_msg *msg) +{ + struct cobalt_mqwait_context *mwc; + struct xnthread_wait_context *wc; + struct xnthread *thread; + + /* + * Try passing the free message slot to a waiting sender, link + * it to the free queue otherwise. + */ + if (xnsynch_pended_p(&mq->senders)) { + thread = xnsynch_wakeup_one_sleeper(&mq->senders); + wc = xnthread_get_wait_context(thread); + mwc = container_of(wc, struct cobalt_mqwait_context, wc); + mwc->msg = msg; + xnthread_complete_wait(wc); + } else { + mq_msg_free(mq, msg); + if (list_is_singular(&mq->avail)) + xnselect_signal(&mq->write_select, 1); + } +} + +static int +mq_finish_send(struct cobalt_mqd *mqd, struct cobalt_msg *msg) +{ + struct cobalt_mqwait_context *mwc; + struct xnthread_wait_context *wc; + struct cobalt_sigpending *sigp; + struct xnthread *thread; + struct cobalt_mq *mq; + spl_t s; + + mq = mqd->mq; + + xnlock_get_irqsave(&nklock, s); + /* Can we do pipelined sending? */ + if (xnsynch_pended_p(&mq->receivers)) { + thread = xnsynch_wakeup_one_sleeper(&mq->receivers); + wc = xnthread_get_wait_context(thread); + mwc = container_of(wc, struct cobalt_mqwait_context, wc); + mwc->msg = msg; + xnthread_complete_wait(wc); + } else { + /* Nope, have to go through the queue. */ + list_add_priff(msg, &mq->queued, prio, link); + mq->nrqueued++; + + /* + * If first message and no pending reader, send a + * signal if notification was enabled via mq_notify(). 
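[Editor's note] mq_timedsend_inner() fetches the user-supplied timeout only once it is clear the caller must block, and it drops nklock around the copy because a user-space access may fault and sleep; it then jumps back to redo and revalidates the queue state from scratch. The shape of that pattern, with hypothetical helpers (try_op(), copy_in_timeout(), sleep_on_queue()) standing in for the real calls:

for (;;) {
	xnlock_get_irqsave(&nklock, s);
	ret = try_op();
	if (ret != -EAGAIN || nonblocking)
		break;				/* done, or would block */
	if (need_timeout) {
		xnlock_put_irqrestore(&nklock, s);
		copy_in_timeout(&ts);		/* may fault: lock dropped */
		need_timeout = 0;
		continue;			/* revalidate under the lock */
	}
	ret = sleep_on_queue(&ts);		/* lock released while waiting */
	break;
}
xnlock_put_irqrestore(&nklock, s);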
+ */ + if (list_is_singular(&mq->queued)) { + xnselect_signal(&mq->read_select, 1); + if (mq->target) { + sigp = cobalt_signal_alloc(); + if (sigp) { + cobalt_copy_siginfo(SI_MESGQ, &sigp->si, &mq->si); + if (cobalt_signal_send(mq->target, sigp, 0) <= 0) + cobalt_signal_free(sigp); + } + mq->target = NULL; + } + } + } + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static struct cobalt_msg * +mq_timedrcv_inner(struct cobalt_mqd *mqd, + size_t len, + const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct cobalt_mqwait_context mwc; + struct cobalt_msg *msg; + struct cobalt_mq *mq; + struct timespec64 ts; + xntmode_t tmode; + xnticks_t to; + spl_t s; + int ret; + + to = XN_INFINITE; + tmode = XN_RELATIVE; +redo: + xnlock_get_irqsave(&nklock, s); + msg = mq_tryrcv(mqd, len); + if (msg != ERR_PTR(-EAGAIN)) + goto out; + + if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK) + goto out; + + if (fetch_timeout) { + xnlock_put_irqrestore(&nklock, s); + ret = fetch_timeout(&ts, u_ts); + if (ret) + return ERR_PTR(ret); + if (!timespec64_valid(&ts)) + return ERR_PTR(-EINVAL); + to = ts2ns(&ts) + 1; + tmode = XN_REALTIME; + fetch_timeout = NULL; + goto redo; + } + + mq = mqd->mq; + xnthread_prepare_wait(&mwc.wc); + ret = xnsynch_sleep_on(&mq->receivers, to, tmode); + if (ret == 0) + msg = mwc.msg; + else if (ret & XNRMID) + msg = ERR_PTR(-EBADF); + else if (ret & XNTIMEO) + msg = ERR_PTR(-ETIMEDOUT); + else + msg = ERR_PTR(-EINTR); +out: + xnlock_put_irqrestore(&nklock, s); + + return msg; +} + +static int +mq_finish_rcv(struct cobalt_mqd *mqd, struct cobalt_msg *msg) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + mq_release_msg(mqd->mq, msg); + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static inline int mq_getattr(struct cobalt_mqd *mqd, struct mq_attr *attr) +{ + struct cobalt_mq *mq; + spl_t s; + + mq = mqd->mq; + *attr = mq->attr; + xnlock_get_irqsave(&nklock, s); + attr->mq_flags = rtdm_fd_flags(&mqd->fd); + attr->mq_curmsgs = mq->nrqueued; + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static inline int +mq_notify(struct cobalt_mqd *mqd, unsigned index, const struct sigevent *evp) +{ + struct cobalt_thread *thread = cobalt_current_thread(); + struct cobalt_mq *mq; + int err; + spl_t s; + + if (evp && ((evp->sigev_notify != SIGEV_SIGNAL && + evp->sigev_notify != SIGEV_NONE) || + (unsigned int)(evp->sigev_signo - 1) > SIGRTMAX - 1)) + return -EINVAL; + + if (xnsched_interrupt_p() || thread == NULL) + return -EPERM; + + xnlock_get_irqsave(&nklock, s); + mq = mqd->mq; + if (mq->target && mq->target != thread) { + err = -EBUSY; + goto unlock_and_error; + } + + if (evp == NULL || evp->sigev_notify == SIGEV_NONE) + /* Here, mq->target == cobalt_current_thread() or NULL. */ + mq->target = NULL; + else { + mq->target = thread; + mq->target_qd = index; + mq->si.si_signo = evp->sigev_signo; + mq->si.si_errno = 0; + mq->si.si_code = SI_MESGQ; + mq->si.si_value = evp->sigev_value; + /* + * XXX: we differ from the regular kernel here, which + * passes the sender's pid/uid data into the + * receiver's namespaces. We pass the receiver's creds + * into the init namespace instead. 
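[Editor's example] The notification armed by mq_notify() here is one-shot: it fires only when a message lands on an empty queue with no reader already waiting, and mq->target is cleared once the signal is sent. Typical userland registration through the POSIX interface this handler backs:

#include <mqueue.h>
#include <signal.h>

static int arm_notification(mqd_t mqd)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo = SIGRTMIN,
	};

	/* Must be re-armed after each delivery. */
	return mq_notify(mqd, &sev);
}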
+ */ + mq->si.si_pid = task_pid_nr(current); + mq->si.si_uid = get_current_uuid(); + } + + xnlock_put_irqrestore(&nklock, s); + return 0; + + unlock_and_error: + xnlock_put_irqrestore(&nklock, s); + return err; +} + +static inline struct cobalt_mqd *cobalt_mqd_get(mqd_t ufd) +{ + struct rtdm_fd *fd; + + fd = rtdm_fd_get(ufd, COBALT_MQD_MAGIC); + if (IS_ERR(fd)) { + int err = PTR_ERR(fd); + if (err == -EADV) + err = cobalt_current_process() ? -EBADF : -EPERM; + return ERR_PTR(err); + } + + return container_of(fd, struct cobalt_mqd, fd); +} + +static inline void cobalt_mqd_put(struct cobalt_mqd *mqd) +{ + rtdm_fd_put(&mqd->fd); +} + +int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp) +{ + struct cobalt_mqd *mqd; + int ret; + + mqd = cobalt_mqd_get(fd); + if (IS_ERR(mqd)) + ret = PTR_ERR(mqd); + else { + trace_cobalt_mq_notify(fd, evp); + ret = mq_notify(mqd, fd, evp); + cobalt_mqd_put(mqd); + } + + return ret; +} + +COBALT_SYSCALL(mq_notify, primary, + (mqd_t fd, const struct sigevent *__user evp)) +{ + struct sigevent sev; + + if (evp && cobalt_copy_from_user(&sev, evp, sizeof(sev))) + return -EFAULT; + + return __cobalt_mq_notify(fd, evp ? &sev : NULL); +} + +int __cobalt_mq_open(const char __user *u_name, int oflags, + mode_t mode, struct mq_attr *attr) +{ + char name[COBALT_MAXNAME]; + unsigned int len; + mqd_t uqd; + int ret; + + len = cobalt_strncpy_from_user(name, u_name, sizeof(name)); + if (len < 0) + return -EFAULT; + + if (len >= sizeof(name)) + return -ENAMETOOLONG; + + if (len == 0) + return -EINVAL; + + trace_cobalt_mq_open(name, oflags, mode); + + uqd = __rtdm_anon_getfd("[cobalt-mq]", oflags); + if (uqd < 0) + return uqd; + + ret = mq_open(uqd, name, oflags, mode, attr); + if (ret < 0) { + __rtdm_anon_putfd(uqd); + return ret; + } + + return uqd; +} + +COBALT_SYSCALL(mq_open, lostage, + (const char __user *u_name, int oflags, + mode_t mode, struct mq_attr __user *u_attr)) +{ + struct mq_attr _attr, *attr = &_attr; + + if ((oflags & O_CREAT) && u_attr) { + if (cobalt_copy_from_user(&_attr, u_attr, sizeof(_attr))) + return -EFAULT; + } else + attr = NULL; + + return __cobalt_mq_open(u_name, oflags, mode, attr); +} + +COBALT_SYSCALL(mq_close, lostage, (mqd_t uqd)) +{ + trace_cobalt_mq_close(uqd); + + return mq_close(uqd); +} + +COBALT_SYSCALL(mq_unlink, lostage, (const char __user *u_name)) +{ + char name[COBALT_MAXNAME]; + unsigned len; + + len = cobalt_strncpy_from_user(name, u_name, sizeof(name)); + if (len < 0) + return -EFAULT; + if (len >= sizeof(name)) + return -ENAMETOOLONG; + + trace_cobalt_mq_unlink(name); + + return mq_unlink(name); +} + +int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr) +{ + struct cobalt_mqd *mqd; + int ret; + + mqd = cobalt_mqd_get(uqd); + if (IS_ERR(mqd)) + return PTR_ERR(mqd); + + ret = mq_getattr(mqd, attr); + cobalt_mqd_put(mqd); + if (ret) + return ret; + + trace_cobalt_mq_getattr(uqd, attr); + + return 0; +} + +COBALT_SYSCALL(mq_getattr, current, + (mqd_t uqd, struct mq_attr __user *u_attr)) +{ + struct mq_attr attr; + int ret; + + ret = __cobalt_mq_getattr(uqd, &attr); + if (ret) + return ret; + + return cobalt_copy_to_user(u_attr, &attr, sizeof(attr)); +} + +static inline int mq_fetch_timeout(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts); +} + +static inline int mq_fetch_timeout64(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? 
-EFAULT : cobalt_get_timespec64(ts, u_ts); +} + +int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct cobalt_msg *msg; + struct cobalt_mqd *mqd; + int ret; + + mqd = cobalt_mqd_get(uqd); + if (IS_ERR(mqd)) + return PTR_ERR(mqd); + + if (prio >= COBALT_MSGPRIOMAX) { + ret = -EINVAL; + goto out; + } + + if (len > 0 && !access_rok(u_buf, len)) { + ret = -EFAULT; + goto out; + } + + trace_cobalt_mq_send(uqd, u_buf, len, prio); + msg = mq_timedsend_inner(mqd, len, u_ts, fetch_timeout); + if (IS_ERR(msg)) { + ret = PTR_ERR(msg); + goto out; + } + + ret = cobalt_copy_from_user(msg->data, u_buf, len); + if (ret) { + mq_finish_rcv(mqd, msg); + goto out; + } + msg->len = len; + msg->prio = prio; + ret = mq_finish_send(mqd, msg); +out: + cobalt_mqd_put(mqd); + + return ret; +} + +int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const void __user *u_ts) +{ + return __cobalt_mq_timedsend(uqd, u_buf, len, prio, u_ts, + u_ts ? mq_fetch_timeout64 : NULL); +} + +COBALT_SYSCALL(mq_timedsend, primary, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const struct __user_old_timespec __user *u_ts)) +{ + return __cobalt_mq_timedsend(uqd, u_buf, len, prio, + u_ts, u_ts ? mq_fetch_timeout : NULL); +} + +COBALT_SYSCALL(mq_timedsend64, primary, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_mq_timedsend64(uqd, u_buf, len, prio, u_ts); +} + +int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf, + ssize_t *lenp, + unsigned int __user *u_prio, + const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct cobalt_mqd *mqd; + struct cobalt_msg *msg; + unsigned int prio; + int ret; + + mqd = cobalt_mqd_get(uqd); + if (IS_ERR(mqd)) + return PTR_ERR(mqd); + + if (*lenp > 0 && !access_wok(u_buf, *lenp)) { + ret = -EFAULT; + goto fail; + } + + msg = mq_timedrcv_inner(mqd, *lenp, u_ts, fetch_timeout); + if (IS_ERR(msg)) { + ret = PTR_ERR(msg); + goto fail; + } + + ret = cobalt_copy_to_user(u_buf, msg->data, msg->len); + if (ret) { + mq_finish_rcv(mqd, msg); + goto fail; + } + + *lenp = msg->len; + prio = msg->prio; + ret = mq_finish_rcv(mqd, msg); + if (ret) + goto fail; + + cobalt_mqd_put(mqd); + + if (u_prio && __xn_put_user(prio, u_prio)) + return -EFAULT; + + return 0; +fail: + cobalt_mqd_put(mqd); + + return ret; +} + +int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf, + ssize_t *len, + unsigned int __user *u_prio, + const void __user *u_ts) +{ + return __cobalt_mq_timedreceive(uqd, u_buf, len, u_prio, u_ts, + u_ts ? mq_fetch_timeout64 : NULL); +} + +COBALT_SYSCALL(mq_timedreceive, primary, + (mqd_t uqd, void __user *u_buf, + ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __user_old_timespec __user *u_ts)) +{ + ssize_t len; + int ret; + + ret = cobalt_copy_from_user(&len, u_len, sizeof(len)); + if (ret) + return ret; + + ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio, + u_ts, u_ts ? 
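[Editor's example] Timeouts on the send path are absolute CLOCK_REALTIME deadlines (tmode becomes XN_REALTIME once the timespec is fetched), matching POSIX mq_timedsend(). Userland usage:

#include <mqueue.h>
#include <time.h>

static int send_with_deadline(mqd_t mqd, const char *buf, size_t len)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 1;		/* absolute deadline: one second from now */
	return mq_timedsend(mqd, buf, len, 0, &ts);
}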
mq_fetch_timeout : NULL); + + return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len)); +} + +COBALT_SYSCALL(mq_timedreceive64, primary, + (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __kernel_timespec __user *u_ts)) +{ + ssize_t len; + int ret; + + ret = cobalt_copy_from_user(&len, u_len, sizeof(len)); + if (ret) + return ret; + + ret = __cobalt_mq_timedreceive64(uqd, u_buf, &len, u_prio, u_ts); + + return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len)); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h new file mode 100644 index 0000000..d922386 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h @@ -0,0 +1,92 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _COBALT_POSIX_MQUEUE_H +#define _COBALT_POSIX_MQUEUE_H + +#include <linux/types.h> +#include <linux/fcntl.h> +#include <xenomai/posix/syscall.h> + +struct mq_attr { + long mq_flags; + long mq_maxmsg; + long mq_msgsize; + long mq_curmsgs; +}; + +int __cobalt_mq_open(const char __user *u_name, int oflags, + mode_t mode, struct mq_attr *attr); + +int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr); + +int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)); + +int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const void __user *u_ts); + +int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf, + ssize_t *lenp, + unsigned int __user *u_prio, + const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)); + +int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf, + ssize_t *len, + unsigned int __user *u_prio, + const void __user *u_ts); + +int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp); + +COBALT_SYSCALL_DECL(mq_open, + (const char __user *u_name, int oflags, + mode_t mode, struct mq_attr __user *u_attr)); + +COBALT_SYSCALL_DECL(mq_close, (mqd_t uqd)); + +COBALT_SYSCALL_DECL(mq_unlink, (const char __user *u_name)); + +COBALT_SYSCALL_DECL(mq_getattr, (mqd_t uqd, struct mq_attr __user *u_attr)); + +COBALT_SYSCALL_DECL(mq_timedsend, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mq_timedsend64, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mq_timedreceive, + (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __user_old_timespec __user 
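[Editor's example] The struct mq_attr declared in this header mirrors the POSIX layout: mq_maxmsg and mq_msgsize are consumed by mq_init() when the queue is created, while mq_flags and mq_curmsgs are synthesized on the fly by mq_getattr(). Creating a queue with explicit limits from userland:

#include <mqueue.h>
#include <fcntl.h>

static mqd_t create_queue(void)
{
	struct mq_attr attr = {
		.mq_maxmsg = 16,	/* preallocated message slots */
		.mq_msgsize = 128,	/* per-message payload limit */
	};

	return mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
}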
*u_ts)); + +COBALT_SYSCALL_DECL(mq_timedreceive64, + (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mq_notify, + (mqd_t fd, const struct sigevent *__user evp)); + +#endif /* !_COBALT_POSIX_MQUEUE_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c new file mode 100644 index 0000000..0f1c018 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c @@ -0,0 +1,446 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include "internal.h" +#include "thread.h" +#include "mutex.h" +#include "cond.h" +#include "clock.h" +#include <cobalt/kernel/time.h> + +static int cobalt_mutex_init_inner(struct cobalt_mutex_shadow *shadow, + struct cobalt_mutex *mutex, + struct cobalt_mutex_state *state, + const struct cobalt_mutexattr *attr) +{ + int synch_flags = XNSYNCH_PRIO | XNSYNCH_OWNER; + struct cobalt_umm *umm; + spl_t s; + int ret; + + ret = xnregistry_enter_anon(mutex, &mutex->resnode.handle); + if (ret < 0) + return ret; + + umm = &cobalt_ppd_get(attr->pshared)->umm; + shadow->handle = mutex->resnode.handle; + shadow->magic = COBALT_MUTEX_MAGIC; + shadow->lockcnt = 0; + shadow->attr = *attr; + shadow->state_offset = cobalt_umm_offset(umm, state); + + mutex->magic = COBALT_MUTEX_MAGIC; + + if (attr->protocol == PTHREAD_PRIO_PROTECT) { + state->ceiling = attr->ceiling + 1; + xnsynch_init_protect(&mutex->synchbase, synch_flags, + &state->owner, &state->ceiling); + } else { + state->ceiling = 0; + if (attr->protocol == PTHREAD_PRIO_INHERIT) + synch_flags |= XNSYNCH_PI; + xnsynch_init(&mutex->synchbase, synch_flags, &state->owner); + } + + state->flags = (attr->type == PTHREAD_MUTEX_ERRORCHECK + ? COBALT_MUTEX_ERRORCHECK : 0); + mutex->attr = *attr; + INIT_LIST_HEAD(&mutex->conds); + + xnlock_get_irqsave(&nklock, s); + cobalt_add_resource(&mutex->resnode, mutex, attr->pshared); + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +/* must be called with nklock locked, interrupts off. 
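[Editor's example] cobalt_mutex_init_inner() stores the priority ceiling off by one (attr->ceiling + 1) so that zero can serve as the "no ceiling" marker in the shared state word. A mutex reaches the PTHREAD_PRIO_PROTECT branch through the standard attribute calls; a minimal userland sketch:

#include <pthread.h>

static int init_pp_mutex(pthread_mutex_t *mx)
{
	pthread_mutexattr_t ma;

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&ma, 90);	/* kept as 91 in state->ceiling */
	return pthread_mutex_init(mx, &ma);
}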
*/ +int __cobalt_mutex_acquire_unchecked(struct xnthread *cur, + struct cobalt_mutex *mutex, + const struct timespec64 *ts) +{ + int ret; + + if (ts) { + if (!timespec64_valid(ts)) + return -EINVAL; + ret = xnsynch_acquire(&mutex->synchbase, ts2ns(ts) + 1, XN_REALTIME); + } else + ret = xnsynch_acquire(&mutex->synchbase, XN_INFINITE, XN_RELATIVE); + + if (ret) { + if (ret & XNBREAK) + return -EINTR; + if (ret & XNTIMEO) + return -ETIMEDOUT; + return -EINVAL; + } + + return 0; +} + +int cobalt_mutex_release(struct xnthread *curr, + struct cobalt_mutex *mutex) +{ /* nklock held, irqs off */ + struct cobalt_mutex_state *state; + struct cobalt_cond *cond; + unsigned long flags; + int need_resched; + + if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex)) + return -EINVAL; + + if (mutex->resnode.scope != + cobalt_current_resources(mutex->attr.pshared)) + return -EPERM; + + /* + * We are about to release a mutex which is still pending PP + * (i.e. we never got scheduled out while holding it). Clear + * the lazy handle. + */ + if (mutex->resnode.handle == curr->u_window->pp_pending) + curr->u_window->pp_pending = XN_NO_HANDLE; + + state = container_of(mutex->synchbase.fastlock, struct cobalt_mutex_state, owner); + flags = state->flags; + need_resched = 0; + if ((flags & COBALT_MUTEX_COND_SIGNAL)) { + state->flags = flags & ~COBALT_MUTEX_COND_SIGNAL; + if (!list_empty(&mutex->conds)) { + list_for_each_entry(cond, &mutex->conds, mutex_link) + need_resched |= + cobalt_cond_deferred_signals(cond); + } + } + need_resched |= xnsynch_release(&mutex->synchbase, curr); + + return need_resched; +} + +int __cobalt_mutex_timedlock_break(struct cobalt_mutex_shadow __user *u_mx, + const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct xnthread *curr = xnthread_current(); + struct timespec64 ts, *tsp = NULL; + struct cobalt_mutex *mutex; + xnhandle_t handle; + spl_t s; + int ret; + + /* We need a valid thread handle for the fast lock. */ + if (curr->handle == XN_NO_HANDLE) + return -EPERM; + + handle = cobalt_get_handle_from_user(&u_mx->handle); +redo: + xnlock_get_irqsave(&nklock, s); + + mutex = xnregistry_lookup(handle, NULL); + if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex)) { + ret = -EINVAL; + goto out; + } + + if (mutex->resnode.scope != + cobalt_current_resources(mutex->attr.pshared)) { + ret = -EPERM; + goto out; + } + + xnthread_commit_ceiling(curr); + + if (xnsynch_owner_check(&mutex->synchbase, curr)) { + /* Check if we can take the mutex immediately */ + ret = xnsynch_try_acquire(&mutex->synchbase); + if (ret != -EBUSY) + goto out; + + if (fetch_timeout) { + xnlock_put_irqrestore(&nklock, s); + ret = fetch_timeout(&ts, u_ts); + if (ret) + return ret; + + fetch_timeout = NULL; + tsp = &ts; + goto redo; /* Revalidate handle. */ + } + ret = __cobalt_mutex_acquire_unchecked(curr, mutex, tsp); + xnlock_put_irqrestore(&nklock, s); + return ret; + } + + /* We already own the mutex, something looks wrong. */ + + ret = -EBUSY; + switch(mutex->attr.type) { + case PTHREAD_MUTEX_NORMAL: + /* Attempting to relock a normal mutex, deadlock. */ + if (IS_ENABLED(XENO_OPT_DEBUG_USER)) + printk(XENO_WARNING + "thread %s deadlocks on non-recursive mutex\n", + curr->name); + /* Make the caller hang. */ + __cobalt_mutex_acquire_unchecked(curr, mutex, NULL); + break; + + case PTHREAD_MUTEX_ERRORCHECK: + case PTHREAD_MUTEX_RECURSIVE: + /* + * Recursive mutexes are handled in user-space, so + * these cases should never happen. 
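[Editor's example] The PTHREAD_MUTEX_NORMAL branch above is deliberate: POSIX leaves relocking a normal mutex by its owner undefined, and Cobalt parks the caller for good after the optional debug warning. Illustration:

static void relock_normal(pthread_mutex_t *mx)
{
	pthread_mutex_lock(mx);
	pthread_mutex_lock(mx);	/* NORMAL type: hangs here by design */
}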
+ */ + ret = -EINVAL; + break; + } +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(mutex_check_init, current, + (struct cobalt_mutex_shadow __user *u_mx)) +{ + struct cobalt_mutex *mutex; + xnhandle_t handle; + int err; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_mx->handle); + + xnlock_get_irqsave(&nklock, s); + mutex = xnregistry_lookup(handle, NULL); + if (cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex))) + /* mutex is already in a queue. */ + err = -EBUSY; + else + err = 0; + + xnlock_put_irqrestore(&nklock, s); + return err; +} + +COBALT_SYSCALL(mutex_init, current, + (struct cobalt_mutex_shadow __user *u_mx, + const struct cobalt_mutexattr __user *u_attr)) +{ + struct cobalt_mutex_state *state; + struct cobalt_mutex_shadow mx; + struct cobalt_mutexattr attr; + struct cobalt_mutex *mutex; + int ret; + + if (cobalt_copy_from_user(&mx, u_mx, sizeof(mx))) + return -EFAULT; + + if (cobalt_copy_from_user(&attr, u_attr, sizeof(attr))) + return -EFAULT; + + mutex = xnmalloc(sizeof(*mutex)); + if (mutex == NULL) + return -ENOMEM; + + state = cobalt_umm_alloc(&cobalt_ppd_get(attr.pshared)->umm, + sizeof(*state)); + if (state == NULL) { + xnfree(mutex); + return -EAGAIN; + } + + ret = cobalt_mutex_init_inner(&mx, mutex, state, &attr); + if (ret) { + xnfree(mutex); + cobalt_umm_free(&cobalt_ppd_get(attr.pshared)->umm, state); + return ret; + } + + return cobalt_copy_to_user(u_mx, &mx, sizeof(*u_mx)); +} + +COBALT_SYSCALL(mutex_destroy, current, + (struct cobalt_mutex_shadow __user *u_mx)) +{ + struct cobalt_mutex_shadow mx; + struct cobalt_mutex *mutex; + spl_t s; + int ret; + + if (cobalt_copy_from_user(&mx, u_mx, sizeof(mx))) + return -EFAULT; + + xnlock_get_irqsave(&nklock, s); + + mutex = xnregistry_lookup(mx.handle, NULL); + if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex))) { + ret = -EINVAL; + goto fail; + } + if (cobalt_current_resources(mutex->attr.pshared) != + mutex->resnode.scope) { + ret = -EPERM; + goto fail; + } + if (xnsynch_fast_owner_check(mutex->synchbase.fastlock, + XN_NO_HANDLE) != 0 || + !list_empty(&mutex->conds)) { + ret = -EBUSY; + goto fail; + } + + cobalt_mutex_reclaim(&mutex->resnode, s); /* drops lock */ + + cobalt_mark_deleted(&mx); + + return cobalt_copy_to_user(u_mx, &mx, sizeof(*u_mx)); +fail: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(mutex_trylock, primary, + (struct cobalt_mutex_shadow __user *u_mx)) +{ + struct xnthread *curr = xnthread_current(); + struct cobalt_mutex *mutex; + xnhandle_t handle; + spl_t s; + int ret; + + handle = cobalt_get_handle_from_user(&u_mx->handle); + + xnlock_get_irqsave(&nklock, s); + + mutex = xnregistry_lookup(handle, NULL); + if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex))) { + ret = -EINVAL; + goto out; + } + + xnthread_commit_ceiling(curr); + + ret = xnsynch_try_acquire(&mutex->synchbase); + +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(mutex_lock, primary, + (struct cobalt_mutex_shadow __user *u_mx)) +{ + return __cobalt_mutex_timedlock_break(u_mx, NULL, NULL); +} + +static inline int mutex_fetch_timeout(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts); +} + +static inline int mutex_fetch_timeout64(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? 
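[Editor's example] By contrast, ERRORCHECK and RECURSIVE relocks are resolved by libcobalt against the fast lock word without entering the kernel, which is why the kernel path flags such a case as -EINVAL. For example:

#include <pthread.h>

static int demo_errorcheck(void)
{
	pthread_mutexattr_t ma;
	pthread_mutex_t mx;
	int ret;

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&mx, &ma);

	pthread_mutex_lock(&mx);
	ret = pthread_mutex_lock(&mx);	/* EDEADLK, no syscall issued */
	pthread_mutex_unlock(&mx);
	pthread_mutex_destroy(&mx);
	return ret;
}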
-EFAULT : cobalt_get_timespec64(ts, u_ts); +} + +int __cobalt_mutex_timedlock64(struct cobalt_mutex_shadow __user *u_mx, + const void __user *u_ts) +{ + return __cobalt_mutex_timedlock_break(u_mx, u_ts, + mutex_fetch_timeout64); +} + +COBALT_SYSCALL(mutex_timedlock, primary, + (struct cobalt_mutex_shadow __user *u_mx, + const struct __user_old_timespec __user *u_ts)) +{ + return __cobalt_mutex_timedlock_break(u_mx, u_ts, mutex_fetch_timeout); +} + +COBALT_SYSCALL(mutex_timedlock64, primary, + (struct cobalt_mutex_shadow __user *u_mx, + const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_mutex_timedlock64(u_mx, u_ts); +} + +COBALT_SYSCALL(mutex_unlock, nonrestartable, + (struct cobalt_mutex_shadow __user *u_mx)) +{ + struct cobalt_mutex *mutex; + struct xnthread *curr; + xnhandle_t handle; + int ret; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_mx->handle); + curr = xnthread_current(); + + xnlock_get_irqsave(&nklock, s); + + mutex = xnregistry_lookup(handle, NULL); + ret = cobalt_mutex_release(curr, mutex); + if (ret > 0) { + xnsched_run(); + ret = 0; + } + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +void cobalt_mutex_reclaim(struct cobalt_resnode *node, spl_t s) +{ + struct cobalt_mutex_state *state; + struct cobalt_mutex *mutex; + int pshared; + + mutex = container_of(node, struct cobalt_mutex, resnode); + state = container_of(mutex->synchbase.fastlock, struct cobalt_mutex_state, owner); + pshared = mutex->attr.pshared; + xnregistry_remove(node->handle); + cobalt_del_resource(node); + xnsynch_destroy(&mutex->synchbase); + cobalt_mark_deleted(mutex); + xnlock_put_irqrestore(&nklock, s); + + cobalt_umm_free(&cobalt_ppd_get(pshared)->umm, state); + xnfree(mutex); +} + +struct xnsynch *lookup_lazy_pp(xnhandle_t handle) +{ /* nklock held, irqs off */ + struct cobalt_mutex *mutex; + + /* Only mutexes may be PP-enabled. */ + + mutex = xnregistry_lookup(handle, NULL); + if (mutex == NULL || + !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex) || + mutex->attr.protocol != PTHREAD_PRIO_PROTECT) + return NULL; + + return &mutex->synchbase; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h new file mode 100644 index 0000000..d7fede2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h @@ -0,0 +1,83 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
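[Editor's example] Both mutex timedlock entry points take absolute CLOCK_REALTIME deadlines; the legacy __user_old_timespec and the y2038-safe __kernel_timespec variants funnel into the same __cobalt_mutex_timedlock_break() slow path. Userland usage:

#include <pthread.h>
#include <time.h>

static int lock_with_deadline(pthread_mutex_t *mx)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 2;
	return pthread_mutex_timedlock(mx, &ts);	/* ETIMEDOUT on expiry */
}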
+ */ + +#ifndef _COBALT_POSIX_MUTEX_H +#define _COBALT_POSIX_MUTEX_H + +#include "thread.h" +#include <cobalt/uapi/mutex.h> +#include <xenomai/posix/syscall.h> +#include <xenomai/posix/process.h> + +struct cobalt_process; + +struct cobalt_mutex { + unsigned int magic; + struct xnsynch synchbase; + /** cobalt_mutexq */ + struct list_head conds; + struct cobalt_mutexattr attr; + struct cobalt_resnode resnode; +}; + +int __cobalt_mutex_timedlock_break(struct cobalt_mutex_shadow __user *u_mx, + const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)); + +int __cobalt_mutex_timedlock64(struct cobalt_mutex_shadow __user *u_mx, + const void __user *u_ts); + +int __cobalt_mutex_acquire_unchecked(struct xnthread *cur, + struct cobalt_mutex *mutex, + const struct timespec64 *ts); + +COBALT_SYSCALL_DECL(mutex_check_init, + (struct cobalt_mutex_shadow __user *u_mx)); + +COBALT_SYSCALL_DECL(mutex_init, + (struct cobalt_mutex_shadow __user *u_mx, + const struct cobalt_mutexattr __user *u_attr)); + +COBALT_SYSCALL_DECL(mutex_destroy, + (struct cobalt_mutex_shadow __user *u_mx)); + +COBALT_SYSCALL_DECL(mutex_trylock, + (struct cobalt_mutex_shadow __user *u_mx)); + +COBALT_SYSCALL_DECL(mutex_lock, + (struct cobalt_mutex_shadow __user *u_mx)); + +COBALT_SYSCALL_DECL(mutex_timedlock, + (struct cobalt_mutex_shadow __user *u_mx, + const struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mutex_timedlock64, + (struct cobalt_mutex_shadow __user *u_mx, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mutex_unlock, + (struct cobalt_mutex_shadow __user *u_mx)); + +int cobalt_mutex_release(struct xnthread *cur, + struct cobalt_mutex *mutex); + +void cobalt_mutex_reclaim(struct cobalt_resnode *node, + spl_t s); + +#endif /* !_COBALT_POSIX_MUTEX_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c new file mode 100644 index 0000000..89cf62b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/list.h> +#include <linux/err.h> +#include <cobalt/kernel/lock.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/tree.h> +#include "internal.h" +#include "sem.h" +#include "thread.h" +#include <trace/events/cobalt-posix.h> + +DEFINE_PRIVATE_XNLOCK(named_sem_lock); + +struct cobalt_named_sem { + struct cobalt_sem *sem; + struct cobalt_sem_shadow __user *usem; + unsigned int refs; + struct xnid id; +}; + +static struct cobalt_named_sem * +sem_search(struct cobalt_process *process, xnhandle_t handle) +{ + struct xnid *i; + + i = xnid_fetch(&process->usems, handle); + if (i == NULL) + return NULL; + + return container_of(i, struct cobalt_named_sem, id); +} + +static struct cobalt_sem_shadow __user * +sem_open(struct cobalt_process *process, + struct cobalt_sem_shadow __user *ushadow, + struct filename *filename, int oflags, mode_t mode, + unsigned int value) +{ + const char *name = filename->name; + struct cobalt_sem_shadow shadow; + struct cobalt_named_sem *u, *v; + struct cobalt_sem *sem; + xnhandle_t handle; + spl_t s; + int rc; + + if (name[0] != '/' || name[1] == '\0') + return ERR_PTR(-EINVAL); + + retry_bind: + rc = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle); + switch (rc) { + case 0: + /* Found */ + if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) + return ERR_PTR(-EEXIST); + + xnlock_get_irqsave(&named_sem_lock, s); + u = sem_search(process, handle); + if (u) { + ++u->refs; + xnlock_put_irqrestore(&named_sem_lock, s); + return u->usem; + } + xnlock_put_irqrestore(&named_sem_lock, s); + + xnlock_get_irqsave(&nklock, s); + sem = xnregistry_lookup(handle, NULL); + if (sem && sem->magic != COBALT_SEM_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return ERR_PTR(-EINVAL); + } + + if (sem) { + ++sem->refs; + xnlock_put_irqrestore(&nklock, s); + } else { + xnlock_put_irqrestore(&nklock, s); + goto retry_bind; + } + + __cobalt_sem_shadow_init(sem, COBALT_NAMED_SEM_MAGIC, &shadow); + break; + + case -EWOULDBLOCK: + /* Not found */ + if ((oflags & O_CREAT) == 0) + return ERR_PTR(-ENOENT); + + shadow.magic = 0; + sem = __cobalt_sem_init(&name[1], &shadow, + SEM_PSHARED | SEM_NAMED, value); + if (IS_ERR(sem)) { + rc = PTR_ERR(sem); + if (rc == -EEXIST) + goto retry_bind; + return ERR_PTR(rc); + } + + sem->pathname = filename; + handle = shadow.handle; + break; + + default: + return ERR_PTR(rc); + } + + if (cobalt_copy_to_user(ushadow, &shadow, sizeof(shadow))) { + __cobalt_sem_destroy(handle); + return ERR_PTR(-EFAULT); + } + + u = xnmalloc(sizeof(*u)); + if (u == NULL) { + __cobalt_sem_destroy(handle); + return ERR_PTR(-ENOMEM); + } + + u->sem = sem; + u->usem = ushadow; + u->refs = 1; + + xnlock_get_irqsave(&named_sem_lock, s); + v = sem_search(process, handle); + if (v) { + ++v->refs; + xnlock_put_irqrestore(&named_sem_lock, s); + xnlock_get_irqsave(&nklock, s); + --sem->refs; + xnlock_put_irqrestore(&nklock, s); + putname(filename); + xnfree(u); + u = v; + } else { + xnid_enter(&process->usems, &u->id, handle); + xnlock_put_irqrestore(&named_sem_lock, s); + } + + trace_cobalt_psem_open(name, handle, oflags, mode, value); + + return u->usem; +} + +static int sem_close(struct cobalt_process *process, xnhandle_t handle) +{ + struct cobalt_named_sem *u; + spl_t s; + int err; + + xnlock_get_irqsave(&named_sem_lock, s); + u = sem_search(process, handle); + if (u == NULL) { + err = -ENOENT; + goto err_unlock; + } + + if (--u->refs) { + err = 0; + goto err_unlock; + } + + xnid_remove(&process->usems, &u->id); + 
xnlock_put_irqrestore(&named_sem_lock, s); + + __cobalt_sem_destroy(handle); + + xnfree(u); + return 1; + + err_unlock: + xnlock_put_irqrestore(&named_sem_lock, s); + return err; +} + +struct cobalt_sem_shadow __user * +__cobalt_sem_open(struct cobalt_sem_shadow __user *usm, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value) +{ + struct cobalt_process *process; + struct filename *filename; + + process = cobalt_current_process(); + if (process == NULL) + return ERR_PTR(-EPERM); + + filename = getname(u_name); + if (IS_ERR(filename)) + return ERR_CAST(filename); + + usm = sem_open(process, usm, filename, oflags, mode, value); + if (IS_ERR(usm)) { + trace_cobalt_psem_open_failed(filename->name, oflags, mode, + value, PTR_ERR(usm)); + putname(filename); + } + + return usm; +} + +COBALT_SYSCALL(sem_open, lostage, + (struct cobalt_sem_shadow __user *__user *u_addrp, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value)) +{ + struct cobalt_sem_shadow __user *usm; + + if (__xn_get_user(usm, u_addrp)) + return -EFAULT; + + usm = __cobalt_sem_open(usm, u_name, oflags, mode, value); + if (IS_ERR(usm)) + return PTR_ERR(usm); + + return __xn_put_user(usm, u_addrp) ? -EFAULT : 0; +} + +COBALT_SYSCALL(sem_close, lostage, + (struct cobalt_sem_shadow __user *usm)) +{ + struct cobalt_process *process; + xnhandle_t handle; + + process = cobalt_current_process(); + if (process == NULL) + return -EPERM; + + handle = cobalt_get_handle_from_user(&usm->handle); + trace_cobalt_psem_close(handle); + + return sem_close(process, handle); +} + +static inline int sem_unlink(const char *name) +{ + xnhandle_t handle; + int ret; + + if (name[0] != '/') + return -EINVAL; + + ret = xnregistry_bind(name + 1, XN_NONBLOCK, XN_RELATIVE, &handle); + if (ret == -EWOULDBLOCK) + return -ENOENT; + + if (__cobalt_sem_destroy(handle) == -EBUSY) + xnregistry_unlink(xnregistry_key(handle)); + + return 0; +} + +COBALT_SYSCALL(sem_unlink, lostage, + (const char __user *u_name)) +{ + struct filename *filename; + int ret; + + filename = getname(u_name); + if (IS_ERR(filename)) + return PTR_ERR(filename); + + trace_cobalt_psem_unlink(filename->name); + ret = sem_unlink(filename->name); + putname(filename); + + return ret; +} + +static void reclaim_named_sem(void *arg, struct xnid *i) +{ + struct cobalt_process *process = arg; + struct cobalt_named_sem *u; + + u = container_of(i, struct cobalt_named_sem, id); + u->refs = 1; + sem_close(process, xnid_key(i)); +} + +void cobalt_nsem_reclaim(struct cobalt_process *process) +{ + xntree_cleanup(&process->usems, process, reclaim_named_sem); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c new file mode 100644 index 0000000..935007f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c @@ -0,0 +1,1203 @@ +/* + * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org> + * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * + * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org> + * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org> + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
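[Editor's example] The cobalt_named_sem bookkeeping gives named semaphores their POSIX reference semantics: every sem_open() in a process bumps a per-process refcount, sem_close() drops it, and sem_unlink() only removes the name, leaving live references intact. A typical userland sequence:

#include <semaphore.h>
#include <fcntl.h>

static void demo_named_sem(void)
{
	sem_t *sem = sem_open("/pool", O_CREAT, 0600, 4);

	if (sem == SEM_FAILED)
		return;
	sem_wait(sem);
	/* ... critical section ... */
	sem_post(sem);
	sem_close(sem);		/* drop this process's reference */
	sem_unlink("/pool");	/* remove the name; object dies with last ref */
}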
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/stdarg.h> +#include <linux/unistd.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/anon_inodes.h> +#include <linux/mman.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/cred.h> +#include <linux/file.h> +#include <linux/sched.h> +#include <linux/signal.h> +#include <pipeline/kevents.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/synch.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/ppd.h> +#include <cobalt/kernel/trace.h> +#include <cobalt/kernel/stat.h> +#include <cobalt/kernel/ppd.h> +#include <cobalt/kernel/vdso.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/uapi/signal.h> +#include <cobalt/uapi/syscall.h> +#include <pipeline/sched.h> +#include <trace/events/cobalt-core.h> +#include <rtdm/driver.h> +#include <asm/xenomai/features.h> +#include <asm/xenomai/syscall.h> +#include "../debug.h" +#include "internal.h" +#include "thread.h" +#include "sched.h" +#include "mutex.h" +#include "cond.h" +#include "mqueue.h" +#include "sem.h" +#include "signal.h" +#include "timer.h" +#include "monitor.h" +#include "clock.h" +#include "event.h" +#include "timerfd.h" +#include "io.h" + +static int gid_arg = -1; +module_param_named(allowed_group, gid_arg, int, 0644); + +static DEFINE_MUTEX(personality_lock); + +static struct hlist_head *process_hash; +DEFINE_PRIVATE_XNLOCK(process_hash_lock); +#define PROCESS_HASH_SIZE 13 + +struct xnthread_personality *cobalt_personalities[NR_PERSONALITIES]; + +static struct xnsynch yield_sync; + +LIST_HEAD(cobalt_global_thread_list); + +DEFINE_XNPTREE(posix_ptree, "posix"); + +struct cobalt_resources cobalt_global_resources = { + .condq = LIST_HEAD_INIT(cobalt_global_resources.condq), + .mutexq = LIST_HEAD_INIT(cobalt_global_resources.mutexq), + .semq = LIST_HEAD_INIT(cobalt_global_resources.semq), + .monitorq = LIST_HEAD_INIT(cobalt_global_resources.monitorq), + .eventq = LIST_HEAD_INIT(cobalt_global_resources.eventq), + .schedq = LIST_HEAD_INIT(cobalt_global_resources.schedq), +}; + +static unsigned __attribute__((pure)) process_hash_crunch(struct mm_struct *mm) +{ + unsigned long hash = ((unsigned long)mm - PAGE_OFFSET) / sizeof(*mm); + return hash % PROCESS_HASH_SIZE; +} + +static struct cobalt_process *__process_hash_search(struct mm_struct *mm) +{ + unsigned int bucket = process_hash_crunch(mm); + struct cobalt_process *p; + + hlist_for_each_entry(p, &process_hash[bucket], hlink) + if (p->mm == mm) + return p; + + return NULL; +} + +static int process_hash_enter(struct cobalt_process *p) +{ + struct mm_struct *mm = current->mm; + unsigned int bucket = process_hash_crunch(mm); + int err; + spl_t s; + + xnlock_get_irqsave(&process_hash_lock, s); + if (__process_hash_search(mm)) { + err = -EBUSY; + goto out; + } + + p->mm = mm; + hlist_add_head(&p->hlink, &process_hash[bucket]); + err = 0; + out: + xnlock_put_irqrestore(&process_hash_lock, s); + return err; +} + +static void process_hash_remove(struct cobalt_process *p) +{ + spl_t s; + + 
xnlock_get_irqsave(&process_hash_lock, s); + if (p->mm) + hlist_del(&p->hlink); + xnlock_put_irqrestore(&process_hash_lock, s); +} + +struct cobalt_process *cobalt_search_process(struct mm_struct *mm) +{ + struct cobalt_process *process; + spl_t s; + + xnlock_get_irqsave(&process_hash_lock, s); + process = __process_hash_search(mm); + xnlock_put_irqrestore(&process_hash_lock, s); + + return process; +} + +static void *lookup_context(int xid) +{ + struct cobalt_process *process = cobalt_current_process(); + void *priv = NULL; + spl_t s; + + xnlock_get_irqsave(&process_hash_lock, s); + /* + * First try matching the process context attached to the + * (usually main) thread which issued sc_cobalt_bind. If not + * found, try matching by mm context, which should point us + * back to the latter. If none match, then the current process + * is unbound. + */ + if (process == NULL && current->mm) + process = __process_hash_search(current->mm); + if (process) + priv = process->priv[xid]; + + xnlock_put_irqrestore(&process_hash_lock, s); + + return priv; +} + +void cobalt_remove_process(struct cobalt_process *process) +{ + struct xnthread_personality *personality; + void *priv; + int xid; + + mutex_lock(&personality_lock); + + for (xid = NR_PERSONALITIES - 1; xid >= 0; xid--) { + if (!__test_and_clear_bit(xid, &process->permap)) + continue; + personality = cobalt_personalities[xid]; + priv = process->priv[xid]; + if (priv == NULL) + continue; + /* + * CAUTION: process potentially refers to stale memory + * upon return from detach_process() for the Cobalt + * personality, so don't dereference it afterwards. + */ + if (xid) + process->priv[xid] = NULL; + __clear_bit(personality->xid, &process->permap); + personality->ops.detach_process(priv); + atomic_dec(&personality->refcnt); + XENO_WARN_ON(COBALT, atomic_read(&personality->refcnt) < 0); + if (personality->module) + module_put(personality->module); + } + + cobalt_set_process(NULL); + + mutex_unlock(&personality_lock); +} + +static void post_ppd_release(struct cobalt_umm *umm) +{ + struct cobalt_process *process; + + process = container_of(umm, struct cobalt_process, sys_ppd.umm); + kfree(process); +} + +static inline char *get_exe_path(struct task_struct *p) +{ + struct file *exe_file; + char *pathname, *buf; + struct mm_struct *mm; + struct path path; + + /* + * PATH_MAX is fairly large, and in any case won't fit on the + * caller's stack happily; since we are mapping a shadow, + * which is a heavyweight operation anyway, let's pick the + * memory from the page allocator. + */ + buf = (char *)__get_free_page(GFP_KERNEL); + if (buf == NULL) + return ERR_PTR(-ENOMEM); + + mm = get_task_mm(p); + if (mm == NULL) { + pathname = "vmlinux"; + goto copy; /* kernel thread */ + } + + exe_file = get_mm_exe_file(mm); + mmput(mm); + if (exe_file == NULL) { + pathname = ERR_PTR(-ENOENT); + goto out; /* no luck. */ + } + + path = exe_file->f_path; + path_get(&exe_file->f_path); + fput(exe_file); + pathname = d_path(&path, buf, PATH_MAX); + path_put(&path); + if (IS_ERR(pathname)) + goto out; /* mmmh... */ +copy: + /* caution: d_path() may start writing anywhere in the buffer. 
*/ + pathname = kstrdup(pathname, GFP_KERNEL); +out: + free_page((unsigned long)buf); + + return pathname; +} + +static inline int raise_cap(int cap) +{ + struct cred *new; + + new = prepare_creds(); + if (new == NULL) + return -ENOMEM; + + cap_raise(new->cap_effective, cap); + + return commit_creds(new); +} + +static int bind_personality(struct xnthread_personality *personality) +{ + struct cobalt_process *process; + void *priv; + + /* + * We also check capabilities for stacking a Cobalt extension, + * in case the process dropped the supervisor privileges after + * a successful initial binding to the Cobalt interface. + */ + if (!capable(CAP_SYS_NICE) && + (gid_arg == -1 || !in_group_p(KGIDT_INIT(gid_arg)))) + return -EPERM; + /* + * Protect from the same process binding to the same interface + * several times. + */ + priv = lookup_context(personality->xid); + if (priv) + return 0; + + priv = personality->ops.attach_process(); + if (IS_ERR(priv)) + return PTR_ERR(priv); + + process = cobalt_current_process(); + /* + * We are still covered by the personality_lock, so we may + * safely bump the module refcount after the attach handler + * has returned. + */ + if (personality->module && !try_module_get(personality->module)) { + personality->ops.detach_process(priv); + return -EAGAIN; + } + + __set_bit(personality->xid, &process->permap); + atomic_inc(&personality->refcnt); + process->priv[personality->xid] = priv; + + raise_cap(CAP_SYS_NICE); + raise_cap(CAP_IPC_LOCK); + raise_cap(CAP_SYS_RAWIO); + + return 0; +} + +int cobalt_bind_personality(unsigned int magic) +{ + struct xnthread_personality *personality; + int xid, ret = -ESRCH; + + mutex_lock(&personality_lock); + + for (xid = 1; xid < NR_PERSONALITIES; xid++) { + personality = cobalt_personalities[xid]; + if (personality && personality->magic == magic) { + ret = bind_personality(personality); + break; + } + } + + mutex_unlock(&personality_lock); + + return ret ?: xid; +} + +int cobalt_bind_core(int ufeatures) +{ + struct cobalt_process *process; + int ret; + + mutex_lock(&personality_lock); + ret = bind_personality(&cobalt_personality); + mutex_unlock(&personality_lock); + if (ret) + return ret; + + process = cobalt_current_process(); + /* Feature set userland knows about. */ + process->ufeatures = ufeatures; + + return 0; +} + +/** + * @fn int cobalt_register_personality(struct xnthread_personality *personality) + * @internal + * @brief Register a new interface personality. + * + * - personality->ops.attach_process() is called when a user-space + * process binds to the personality, on behalf of one of its + * threads. The attach_process() handler may return: + * + * . an opaque pointer, representing the context of the calling + * process for this personality; + * + * . a NULL pointer, meaning that no per-process structure should be + * attached to this process for this personality; + * + * . ERR_PTR(negative value) indicating an error, the binding + * process will then abort. + * + * - personality->ops.detach_process() is called on behalf of an + * exiting user-space process which has previously attached to the + * personality. This handler is passed a pointer to the per-process + * data received earlier from the ops->attach_process() handler. + * + * @return the personality (extension) identifier. + * + * @note cobalt_get_context() is NULL when ops.detach_process() is + * invoked for the personality the caller detaches from. 
+ * + * @coretags{secondary-only} + */ +int cobalt_register_personality(struct xnthread_personality *personality) +{ + int xid; + + mutex_lock(&personality_lock); + + for (xid = 0; xid < NR_PERSONALITIES; xid++) { + if (cobalt_personalities[xid] == NULL) { + personality->xid = xid; + atomic_set(&personality->refcnt, 0); + cobalt_personalities[xid] = personality; + goto out; + } + } + + xid = -EAGAIN; +out: + mutex_unlock(&personality_lock); + + return xid; +} +EXPORT_SYMBOL_GPL(cobalt_register_personality); + +/* + * @brief Unregister an interface personality. + * + * @coretags{secondary-only} + */ +int cobalt_unregister_personality(int xid) +{ + struct xnthread_personality *personality; + int ret = 0; + + if (xid < 0 || xid >= NR_PERSONALITIES) + return -EINVAL; + + mutex_lock(&personality_lock); + + personality = cobalt_personalities[xid]; + if (atomic_read(&personality->refcnt) > 0) + ret = -EBUSY; + else + cobalt_personalities[xid] = NULL; + + mutex_unlock(&personality_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(cobalt_unregister_personality); + +/** + * Stack a new personality over Cobalt for the current thread. + * + * This service registers the current thread as a member of the + * additional personality identified by @a xid. If the current thread + * is already assigned this personality, the call returns successfully + * with no effect. + * + * @param xid the identifier of the additional personality. + * + * @return A handle to the previous personality. The caller should + * save this handle for unstacking @a xid when applicable via a call + * to cobalt_pop_personality(). + * + * @coretags{secondary-only} + */ +struct xnthread_personality * +cobalt_push_personality(int xid) +{ + struct cobalt_threadinfo *p = pipeline_current(); + struct xnthread_personality *prev, *next; + struct xnthread *thread = p->thread; + + secondary_mode_only(); + + mutex_lock(&personality_lock); + + if (xid < 0 || xid >= NR_PERSONALITIES || + p->process == NULL || !test_bit(xid, &p->process->permap)) { + mutex_unlock(&personality_lock); + return NULL; + } + + next = cobalt_personalities[xid]; + prev = thread->personality; + if (next == prev) { + mutex_unlock(&personality_lock); + return prev; + } + + thread->personality = next; + mutex_unlock(&personality_lock); + xnthread_run_handler(thread, map_thread); + + return prev; +} +EXPORT_SYMBOL_GPL(cobalt_push_personality); + +/** + * Pop the topmost personality from the current thread. + * + * This service pops the topmost personality off the current thread. + * + * @param prev the previous personality which was returned by the + * latest call to cobalt_push_personality() for the current thread. + * + * @coretags{secondary-only} + */ +void cobalt_pop_personality(struct xnthread_personality *prev) +{ + struct cobalt_threadinfo *p = pipeline_current(); + struct xnthread *thread = p->thread; + + secondary_mode_only(); + thread->personality = prev; +} +EXPORT_SYMBOL_GPL(cobalt_pop_personality); + +/** + * Return the per-process data attached to the calling user process. + * + * This service returns the per-process data attached to the calling + * user process for the personality whose xid is @a xid. + * + * The per-process data was obtained from the ->attach_process() + * handler defined for the personality @a xid refers to. + * + * See cobalt_register_personality() documentation for information on + * the way to attach a per-process data to a process. + * + * @param xid the personality identifier. 
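[Editor's example] Putting the attach/detach contract together, a hypothetical extension (every myext_* name below is invented for illustration) would register itself from its module init code; user processes then bind to it through the matching magic value. Kernel-side sketch, assuming <linux/slab.h> and <linux/err.h>:

struct myext_ctx {
	int dummy;
};

static void *myext_attach_process(void)
{
	struct myext_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	return ctx ?: ERR_PTR(-ENOMEM);	/* NULL would mean "no per-process context" */
}

static void myext_detach_process(void *priv)
{
	kfree(priv);		/* priv came from myext_attach_process() */
}

static struct xnthread_personality myext_personality = {
	.magic = 0x4d594554,
	.module = THIS_MODULE,
	.ops = {
		.attach_process = myext_attach_process,
		.detach_process = myext_detach_process,
	},
};

static int __init myext_init(void)
{
	int xid = cobalt_register_personality(&myext_personality);

	return xid < 0 ? xid : 0;
}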
+ * + * @return the per-process data if the current context is a user-space + * process; @return NULL otherwise. As a special case, + * cobalt_get_context(0) returns the current Cobalt process + * descriptor, which is strictly identical to calling + * cobalt_current_process(). + * + * @coretags{task-unrestricted} + */ +void *cobalt_get_context(int xid) +{ + return lookup_context(xid); +} +EXPORT_SYMBOL_GPL(cobalt_get_context); + +int cobalt_yield(xnticks_t min, xnticks_t max) +{ + xnticks_t start; + int ret; + + start = xnclock_read_monotonic(&nkclock); + max += start; + min += start; + + do { + ret = xnsynch_sleep_on(&yield_sync, max, XN_ABSOLUTE); + if (ret & XNBREAK) + return -EINTR; + } while (ret == 0 && xnclock_read_monotonic(&nkclock) < min); + + return 0; +} +EXPORT_SYMBOL_GPL(cobalt_yield); + +/** + * @fn int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff) + * @internal + * @brief Create a shadow thread context over a user task. + * + * This call maps a Xenomai thread to the current regular Linux task + * running in userland. The priority and scheduling class of the + * underlying Linux task are not affected; it is assumed that the + * interface library did set them appropriately before issuing the + * shadow mapping request. + * + * @param thread The descriptor address of the new shadow thread to be + * mapped to current. This descriptor must have been previously + * initialized by a call to xnthread_init(). + * + * @param u_winoff will receive the offset of the per-thread + * "u_window" structure in the global heap associated to @a + * thread. This structure reflects thread state information visible + * from userland through a shared memory window. + * + * @return 0 is returned on success. Otherwise: + * + * - -EINVAL is returned if the thread control block does not bear the + * XNUSER bit. + * + * - -EBUSY is returned if either the current Linux task or the + * associated shadow thread is already involved in a shadow mapping. + * + * @coretags{secondary-only} + */ +int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff) +{ + struct xnthread_user_window *u_window; + struct xnthread_start_attr attr; + struct cobalt_ppd *sys_ppd; + struct cobalt_umm *umm; + int ret; + + if (!xnthread_test_state(thread, XNUSER)) + return -EINVAL; + + if (xnthread_current() || xnthread_test_state(thread, XNMAPPED)) + return -EBUSY; + + if (!access_wok(u_winoff, sizeof(*u_winoff))) + return -EFAULT; + + ret = pipeline_prepare_current(); + if (ret) + return ret; + + umm = &cobalt_kernel_ppd.umm; + u_window = cobalt_umm_zalloc(umm, sizeof(*u_window)); + if (u_window == NULL) + return -ENOMEM; + + thread->u_window = u_window; + __xn_put_user(cobalt_umm_offset(umm, u_window), u_winoff); + xnthread_pin_initial(thread); + + /* + * CAUTION: we enable the pipeline notifier only when our + * shadow TCB is consistent, so that we won't trigger false + * positive in debug code from handle_schedule_event() and + * friends. + */ + pipeline_init_shadow_tcb(thread); + xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL); + pipeline_attach_current(thread); + xnthread_set_state(thread, XNMAPPED); + xndebug_shadow_init(thread); + sys_ppd = cobalt_ppd_get(0); + atomic_inc(&sys_ppd->refcnt); + /* + * ->map_thread() handler is invoked after the TCB is fully + * built, and when we know for sure that current will go + * through our task-exit handler, because it has a shadow + * extension and I-pipe notifications will soon be enabled for + * it. 
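[Editor's example] cobalt_yield() gives kernel-side callers a bounded doze on the yield_sync object: the caller sleeps until at least min has elapsed unless forcibly unblocked, but never past max, both measured from the monotonic clock at entry. A minimal sketch, assuming nanosecond xnticks_t values:

static int nap_briefly(void)
{
	/* Block for at least 100 us but at most 1 ms. */
	return cobalt_yield(100000, 1000000);	/* -EINTR if unblocked early */
}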
+ */ + xnthread_run_handler(thread, map_thread); + pipeline_enable_kevents(); + + attr.mode = 0; + attr.entry = NULL; + attr.cookie = NULL; + ret = xnthread_start(thread, &attr); + if (ret) + return ret; + + xnthread_sync_window(thread); + + xntrace_pid(xnthread_host_pid(thread), + xnthread_current_priority(thread)); + + return 0; +} + +void cobalt_signal_yield(void) +{ + spl_t s; + + if (!xnsynch_pended_p(&yield_sync)) + return; + + xnlock_get_irqsave(&nklock, s); + if (xnsynch_pended_p(&yield_sync)) { + xnsynch_flush(&yield_sync, 0); + xnsched_run(); + } + xnlock_put_irqrestore(&nklock, s); +} + +static inline struct cobalt_process * +process_from_thread(struct xnthread *thread) +{ + return container_of(thread, struct cobalt_thread, threadbase)->process; +} + +void cobalt_stop_debugged_process(struct xnthread *thread) +{ + struct cobalt_process *process = process_from_thread(thread); + struct cobalt_thread *cth; + + if (process->debugged_threads > 0) + return; + + list_for_each_entry(cth, &process->thread_list, next) { + if (&cth->threadbase == thread) + continue; + + xnthread_suspend(&cth->threadbase, XNDBGSTOP, XN_INFINITE, + XN_RELATIVE, NULL); + } +} + +static void cobalt_resume_debugged_process(struct cobalt_process *process) +{ + struct cobalt_thread *cth; + + xnsched_lock(); + + list_for_each_entry(cth, &process->thread_list, next) + if (xnthread_test_state(&cth->threadbase, XNDBGSTOP)) + xnthread_resume(&cth->threadbase, XNDBGSTOP); + + xnsched_unlock(); +} + +/* called with nklock held */ +void cobalt_register_debugged_thread(struct xnthread *thread) +{ + struct cobalt_process *process = process_from_thread(thread); + + xnthread_set_state(thread, XNSSTEP); + + cobalt_stop_debugged_process(thread); + process->debugged_threads++; + + if (xnthread_test_state(thread, XNRELAX)) + xnthread_suspend(thread, XNDBGSTOP, XN_INFINITE, XN_RELATIVE, + NULL); +} + +/* called with nklock held */ +void cobalt_unregister_debugged_thread(struct xnthread *thread) +{ + struct cobalt_process *process = process_from_thread(thread); + + process->debugged_threads--; + xnthread_clear_state(thread, XNSSTEP); + + if (process->debugged_threads == 0) + cobalt_resume_debugged_process(process); +} + +int cobalt_handle_setaffinity_event(struct task_struct *task) +{ +#ifdef CONFIG_SMP + struct xnthread *thread; + spl_t s; + + thread = xnthread_from_task(task); + if (thread == NULL) + return KEVENT_PROPAGATE; + + /* + * Detect a Cobalt thread sleeping in primary mode which is + * required to migrate to another CPU by the host kernel. + * + * We may NOT fix up thread->sched immediately using the + * passive migration call, because that latter always has to + * take place on behalf of the target thread itself while + * running in secondary mode. Therefore, that thread needs to + * go through secondary mode first, then move back to primary + * mode, so that affinity_ok() does the fixup work. + * + * We force this by sending a SIGSHADOW signal to the migrated + * thread, asking it to switch back to primary mode from the + * handler, at which point the interrupted syscall may be + * restarted. 
+ */ + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_state(thread, XNTHREAD_BLOCK_BITS & ~XNRELAX)) + __xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HARDEN); + + xnlock_put_irqrestore(&nklock, s); +#endif /* CONFIG_SMP */ + + return KEVENT_PROPAGATE; +} + +#ifdef CONFIG_SMP +void cobalt_adjust_affinity(struct task_struct *task) /* nklocked, IRQs off */ +{ + struct xnthread *thread = xnthread_from_task(task); + struct xnsched *sched; + int cpu = task_cpu(task); + + /* + * To maintain consistency between both Cobalt and host + * schedulers, reflecting a thread migration to another CPU + * into the Cobalt scheduler state must happen from secondary + * mode only, on behalf of the migrated thread itself once it + * runs on the target CPU. + * + * This means that the Cobalt scheduler state regarding the + * CPU information lags behind the host scheduler state until + * the migrated thread switches back to primary mode + * (i.e. task_cpu(p) != xnsched_cpu(xnthread_from_task(p)->sched)). + * This is ok since Cobalt does not schedule such thread until then. + * + * check_affinity() detects when a Cobalt thread switching + * back to primary mode did move to another CPU earlier while + * in secondary mode. If so, do the fixups to reflect the + * change. + */ + if (!xnsched_threading_cpu(cpu)) { + /* + * The thread is about to switch to primary mode on a + * non-rt CPU, which is damn wrong and hopeless. + * Whine and cancel that thread. + */ + printk(XENO_WARNING "thread %s[%d] switched to non-rt CPU%d, aborted.\n", + thread->name, xnthread_host_pid(thread), cpu); + /* + * Can't call xnthread_cancel() from a migration + * point, that would break. Since we are on the wakeup + * path to hardening, just raise XNCANCELD to catch it + * in xnthread_harden(). + */ + xnthread_set_info(thread, XNCANCELD); + return; + } + + sched = xnsched_struct(cpu); + if (sched == thread->sched) + return; + + /* + * The current thread moved to a supported real-time CPU, + * which is not part of its original affinity mask + * though. Assume user wants to extend this mask. + */ + if (!cpumask_test_cpu(cpu, &thread->affinity)) + cpumask_set_cpu(cpu, &thread->affinity); + + xnthread_run_handler_stack(thread, move_thread, cpu); + xnthread_migrate_passive(thread, sched); +} +#endif /* CONFIG_SMP */ + +static void __handle_taskexit_event(struct task_struct *p) +{ + struct cobalt_ppd *sys_ppd; + struct xnthread *thread; + spl_t s; + + /* + * We are called for both kernel and user shadows over the + * root thread. 
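+	 * In the user shadow case, the u_window block and the ppd
+	 * reference taken by cobalt_map_user() are also dropped
+	 * below; releasing the last reference tears the whole
+	 * process context down via cobalt_remove_process().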
+ */ + secondary_mode_only(); + + thread = xnthread_current(); + XENO_BUG_ON(COBALT, thread == NULL); + trace_cobalt_shadow_unmap(thread); + + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_state(thread, XNSSTEP)) + cobalt_unregister_debugged_thread(thread); + + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); + + xnthread_run_handler_stack(thread, exit_thread); + + if (xnthread_test_state(thread, XNUSER)) { + cobalt_umm_free(&cobalt_kernel_ppd.umm, thread->u_window); + thread->u_window = NULL; + sys_ppd = cobalt_ppd_get(0); + if (atomic_dec_and_test(&sys_ppd->refcnt)) + cobalt_remove_process(cobalt_current_process()); + } +} + +int cobalt_handle_user_return(struct task_struct *task) +{ + struct xnthread *thread; + spl_t s; + int err; + + thread = xnthread_from_task(task); + if (thread == NULL) + return KEVENT_PROPAGATE; + + if (xnthread_test_info(thread, XNCONTHI)) { + xnlock_get_irqsave(&nklock, s); + xnthread_clear_info(thread, XNCONTHI); + xnlock_put_irqrestore(&nklock, s); + + err = xnthread_harden(); + + /* + * XNCONTHI may or may not have been re-applied if + * harden bailed out due to pending signals. Make sure + * it is set in that case. + */ + if (err == -ERESTARTSYS) { + xnlock_get_irqsave(&nklock, s); + xnthread_set_info(thread, XNCONTHI); + xnlock_put_irqrestore(&nklock, s); + } + } + + return KEVENT_PROPAGATE; +} + +static void detach_current(void) +{ + struct cobalt_threadinfo *p = pipeline_current(); + + p->thread = NULL; + p->process = NULL; +} + +int cobalt_handle_taskexit_event(struct task_struct *task) /* task == current */ +{ + __handle_taskexit_event(task); + + /* + * __xnthread_cleanup() -> ... -> finalize_thread + * handler. From that point, the TCB is dropped. Be careful of + * not treading on stale memory within @thread. + */ + __xnthread_cleanup(xnthread_current()); + + detach_current(); + + return KEVENT_PROPAGATE; +} + +int cobalt_handle_cleanup_event(struct mm_struct *mm) +{ + struct cobalt_process *old, *process; + struct cobalt_ppd *sys_ppd; + struct xnthread *curr; + + /* + * We are NOT called for exiting kernel shadows. + * cobalt_current_process() is cleared if we get there after + * handle_task_exit(), so we need to restore this context + * pointer temporarily. + */ + process = cobalt_search_process(mm); + old = cobalt_set_process(process); + sys_ppd = cobalt_ppd_get(0); + if (sys_ppd != &cobalt_kernel_ppd) { + bool running_exec; + + /* + * Detect a userland shadow running exec(), i.e. still + * attached to the current linux task (no prior + * detach_current). In this case, we emulate a task + * exit, since the Xenomai binding shall not survive + * the exec() syscall. Since the process will keep on + * running though, we have to disable the event + * notifier manually for it. + */ + curr = xnthread_current(); + running_exec = curr && (current->flags & PF_EXITING) == 0; + if (running_exec) { + __handle_taskexit_event(current); + pipeline_cleanup_process(); + } + if (atomic_dec_and_test(&sys_ppd->refcnt)) + cobalt_remove_process(process); + if (running_exec) { + __xnthread_cleanup(curr); + detach_current(); + } + } + + /* + * CAUTION: Do not override a state change caused by + * cobalt_remove_process(). 
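+	 * That call may have dropped the last process reference and
+	 * switched the current process pointer under our feet, in
+	 * which case the test below must leave the new setting
+	 * alone.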
+ */ + if (cobalt_current_process() == process) + cobalt_set_process(old); + + return KEVENT_PROPAGATE; +} + +static int attach_process(struct cobalt_process *process) +{ + struct cobalt_ppd *p = &process->sys_ppd; + char *exe_path; + int ret; + + ret = cobalt_umm_init(&p->umm, CONFIG_XENO_OPT_PRIVATE_HEAPSZ * 1024, + post_ppd_release); + if (ret) + return ret; + + cobalt_umm_set_name(&p->umm, "private heap[%d]", task_pid_nr(current)); + + ret = pipeline_attach_process(process); + if (ret) + goto fail_pipeline; + + exe_path = get_exe_path(current); + if (IS_ERR(exe_path)) { + printk(XENO_WARNING + "%s[%d] can't find exe path\n", + current->comm, task_pid_nr(current)); + exe_path = NULL; /* Not lethal, but weird. */ + } + p->exe_path = exe_path; + xntree_init(&p->fds); + atomic_set(&p->refcnt, 1); + + ret = process_hash_enter(process); + if (ret) + goto fail_hash; + + return 0; +fail_hash: + pipeline_detach_process(process); + if (p->exe_path) + kfree(p->exe_path); +fail_pipeline: + cobalt_umm_destroy(&p->umm); + + return ret; +} + +static void *cobalt_process_attach(void) +{ + struct cobalt_process *process; + int ret; + + process = kzalloc(sizeof(*process), GFP_KERNEL); + if (process == NULL) + return ERR_PTR(-ENOMEM); + + ret = attach_process(process); + if (ret) { + kfree(process); + return ERR_PTR(ret); + } + + INIT_LIST_HEAD(&process->resources.condq); + INIT_LIST_HEAD(&process->resources.mutexq); + INIT_LIST_HEAD(&process->resources.semq); + INIT_LIST_HEAD(&process->resources.monitorq); + INIT_LIST_HEAD(&process->resources.eventq); + INIT_LIST_HEAD(&process->resources.schedq); + INIT_LIST_HEAD(&process->sigwaiters); + INIT_LIST_HEAD(&process->thread_list); + xntree_init(&process->usems); + bitmap_fill(process->timers_map, CONFIG_XENO_OPT_NRTIMERS); + cobalt_set_process(process); + + return process; +} + +static void detach_process(struct cobalt_process *process) +{ + struct cobalt_ppd *p = &process->sys_ppd; + + if (p->exe_path) + kfree(p->exe_path); + + rtdm_fd_cleanup(p); + process_hash_remove(process); + /* + * CAUTION: the process descriptor might be immediately + * released as a result of calling cobalt_umm_destroy(), so we + * must do this last, not to tread on stale memory. 
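+	 * (The private heap was initialized with post_ppd_release()
+	 * as its release handler in attach_process() above, and that
+	 * handler presumably disposes of the descriptor once the
+	 * last user mapping is gone.)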
+ */ + cobalt_umm_destroy(&p->umm); +} + +static void __reclaim_resource(struct cobalt_process *process, + void (*reclaim)(struct cobalt_resnode *node, spl_t s), + struct list_head *local, + struct list_head *global) +{ + struct cobalt_resnode *node, *tmp; + LIST_HEAD(stash); + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (list_empty(global)) + goto flush_local; + + list_for_each_entry_safe(node, tmp, global, next) { + if (node->owner == process) { + list_del(&node->next); + list_add(&node->next, &stash); + } + } + + list_for_each_entry_safe(node, tmp, &stash, next) { + reclaim(node, s); + xnlock_get_irqsave(&nklock, s); + } + + XENO_BUG_ON(COBALT, !list_empty(&stash)); + +flush_local: + if (list_empty(local)) + goto out; + + list_for_each_entry_safe(node, tmp, local, next) { + reclaim(node, s); + xnlock_get_irqsave(&nklock, s); + } +out: + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); +} + +#define cobalt_reclaim_resource(__process, __reclaim, __type) \ + __reclaim_resource(__process, __reclaim, \ + &(__process)->resources.__type ## q, \ + &cobalt_global_resources.__type ## q) + +static void cobalt_process_detach(void *arg) +{ + struct cobalt_process *process = arg; + + cobalt_nsem_reclaim(process); + cobalt_timer_reclaim(process); + cobalt_sched_reclaim(process); + cobalt_reclaim_resource(process, cobalt_cond_reclaim, cond); + cobalt_reclaim_resource(process, cobalt_mutex_reclaim, mutex); + cobalt_reclaim_resource(process, cobalt_event_reclaim, event); + cobalt_reclaim_resource(process, cobalt_monitor_reclaim, monitor); + cobalt_reclaim_resource(process, cobalt_sem_reclaim, sem); + detach_process(process); + /* + * The cobalt_process descriptor release may be deferred until + * the last mapping on the private heap is gone. However, this + * is potentially stale memory already. 
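+	 * Hence @process must not be dereferenced past this point.
+	 *
+	 * For the record, each cobalt_reclaim_resource(process, fn,
+	 * sem) invocation above expands to __reclaim_resource(process,
+	 * fn, &process->resources.semq, &cobalt_global_resources.semq),
+	 * draining the per-process queue along with the matching
+	 * entries on the global one.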
+ */ +} + +struct xnthread_personality cobalt_personality = { + .name = "cobalt", + .magic = 0, + .ops = { + .attach_process = cobalt_process_attach, + .detach_process = cobalt_process_detach, + .map_thread = cobalt_thread_map, + .exit_thread = cobalt_thread_exit, + .finalize_thread = cobalt_thread_finalize, + }, +}; +EXPORT_SYMBOL_GPL(cobalt_personality); + +__init int cobalt_init(void) +{ + unsigned int i, size; + int ret; + + size = sizeof(*process_hash) * PROCESS_HASH_SIZE; + process_hash = kmalloc(size, GFP_KERNEL); + if (process_hash == NULL) { + printk(XENO_ERR "cannot allocate processes hash table\n"); + return -ENOMEM; + } + + ret = xndebug_init(); + if (ret) + goto fail_debug; + + for (i = 0; i < PROCESS_HASH_SIZE; i++) + INIT_HLIST_HEAD(&process_hash[i]); + + xnsynch_init(&yield_sync, XNSYNCH_FIFO, NULL); + + ret = cobalt_memdev_init(); + if (ret) + goto fail_memdev; + + ret = cobalt_register_personality(&cobalt_personality); + if (ret) + goto fail_register; + + ret = cobalt_signal_init(); + if (ret) + goto fail_siginit; + + ret = pipeline_trap_kevents(); + if (ret) + goto fail_kevents; + + if (gid_arg != -1) + printk(XENO_INFO "allowing access to group %d\n", gid_arg); + + return 0; +fail_kevents: + cobalt_signal_cleanup(); +fail_siginit: + cobalt_unregister_personality(0); +fail_register: + cobalt_memdev_cleanup(); +fail_memdev: + xnsynch_destroy(&yield_sync); + xndebug_cleanup(); +fail_debug: + kfree(process_hash); + + return ret; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h new file mode 100644 index 0000000..279707a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_POSIX_PROCESS_H +#define _COBALT_POSIX_PROCESS_H + +#include <linux/list.h> +#include <linux/bitmap.h> +#include <pipeline/thread.h> +#include <cobalt/kernel/ppd.h> + +#define NR_PERSONALITIES 4 +#if BITS_PER_LONG < NR_PERSONALITIES +#error "NR_PERSONALITIES overflows internal bitmap" +#endif + +struct mm_struct; +struct xnthread_personality; +struct cobalt_timer; + +struct cobalt_resources { + struct list_head condq; + struct list_head mutexq; + struct list_head semq; + struct list_head monitorq; + struct list_head eventq; + struct list_head schedq; +}; + +struct cobalt_process { + struct mm_struct *mm; + struct hlist_node hlink; + struct cobalt_ppd sys_ppd; + unsigned long permap; + struct rb_root usems; + struct list_head sigwaiters; + struct cobalt_resources resources; + struct list_head thread_list; + DECLARE_BITMAP(timers_map, CONFIG_XENO_OPT_NRTIMERS); + struct cobalt_timer *timers[CONFIG_XENO_OPT_NRTIMERS]; + void *priv[NR_PERSONALITIES]; + int ufeatures; + unsigned int debugged_threads; +}; + +struct cobalt_resnode { + struct cobalt_resources *scope; + struct cobalt_process *owner; + struct list_head next; + xnhandle_t handle; +}; + +int cobalt_register_personality(struct xnthread_personality *personality); + +int cobalt_unregister_personality(int xid); + +struct xnthread_personality *cobalt_push_personality(int xid); + +void cobalt_pop_personality(struct xnthread_personality *prev); + +int cobalt_bind_core(int ufeatures); + +int cobalt_bind_personality(unsigned int magic); + +struct cobalt_process *cobalt_search_process(struct mm_struct *mm); + +int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff); + +void *cobalt_get_context(int xid); + +int cobalt_yield(xnticks_t min, xnticks_t max); + +int cobalt_process_init(void); + +extern struct list_head cobalt_global_thread_list; + +extern struct cobalt_resources cobalt_global_resources; + +static inline struct cobalt_process *cobalt_current_process(void) +{ + return pipeline_current()->process; +} + +static inline struct cobalt_process * +cobalt_set_process(struct cobalt_process *process) +{ + struct cobalt_threadinfo *p = pipeline_current(); + struct cobalt_process *old; + + old = p->process; + p->process = process; + + return old; +} + +static inline struct cobalt_ppd *cobalt_ppd_get(int global) +{ + struct cobalt_process *process; + + if (global || (process = cobalt_current_process()) == NULL) + return &cobalt_kernel_ppd; + + return &process->sys_ppd; +} + +static inline struct cobalt_resources *cobalt_current_resources(int pshared) +{ + struct cobalt_process *process; + + if (pshared || (process = cobalt_current_process()) == NULL) + return &cobalt_global_resources; + + return &process->resources; +} + +static inline +void __cobalt_add_resource(struct cobalt_resnode *node, int pshared) +{ + node->owner = cobalt_current_process(); + node->scope = cobalt_current_resources(pshared); +} + +#define cobalt_add_resource(__node, __type, __pshared) \ + do { \ + __cobalt_add_resource(__node, __pshared); \ + list_add_tail(&(__node)->next, \ + &((__node)->scope)->__type ## q); \ + } while (0) + +static inline +void cobalt_del_resource(struct cobalt_resnode *node) +{ + list_del(&node->next); +} + +void cobalt_remove_process(struct cobalt_process *process); + +void cobalt_signal_yield(void); + +void cobalt_stop_debugged_process(struct xnthread *thread); + +void cobalt_register_debugged_thread(struct xnthread *thread); + +void cobalt_unregister_debugged_thread(struct xnthread *thread); + +extern struct 
xnthread_personality *cobalt_personalities[]; + +extern struct xnthread_personality cobalt_personality; + +int cobalt_handle_setaffinity_event(struct task_struct *task); + +#ifdef CONFIG_SMP +void cobalt_adjust_affinity(struct task_struct *task); +#else +static inline void cobalt_adjust_affinity(struct task_struct *task) { } +#endif + +int cobalt_handle_taskexit_event(struct task_struct *task); + +int cobalt_handle_cleanup_event(struct mm_struct *mm); + +int cobalt_handle_user_return(struct task_struct *task); + +#endif /* !_COBALT_POSIX_PROCESS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c new file mode 100644 index 0000000..994ee88 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c @@ -0,0 +1,853 @@ +/* + * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/types.h> +#include "internal.h" +#include "thread.h" +#include "sched.h" +#include "clock.h" +#include <trace/events/cobalt-posix.h> + +struct xnsched_class * +cobalt_sched_policy_param(union xnsched_policy_param *param, + int u_policy, const struct sched_param_ex *param_ex, + xnticks_t *tslice_r) +{ + struct xnsched_class *sched_class; + int prio, policy; + xnticks_t tslice; + + prio = param_ex->sched_priority; + tslice = XN_INFINITE; + policy = u_policy; + + /* + * NOTE: The user-defined policy may be different than ours, + * e.g. SCHED_FIFO,prio=-7 from userland would be interpreted + * as SCHED_WEAK,prio=7 in kernel space. + */ + if (prio < 0) { + prio = -prio; + policy = SCHED_WEAK; + } + sched_class = &xnsched_class_rt; + param->rt.prio = prio; + + switch (policy) { + case SCHED_NORMAL: + if (prio) + return NULL; + /* + * When the weak scheduling class is compiled in, + * SCHED_WEAK and SCHED_NORMAL threads are scheduled + * by xnsched_class_weak, at their respective priority + * levels. Otherwise, SCHED_NORMAL is scheduled by + * xnsched_class_rt at priority level #0. + */ + fallthrough; + case SCHED_WEAK: +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + if (prio < XNSCHED_WEAK_MIN_PRIO || + prio > XNSCHED_WEAK_MAX_PRIO) + return NULL; + param->weak.prio = prio; + sched_class = &xnsched_class_weak; +#else + if (prio) + return NULL; +#endif + break; + case SCHED_RR: + /* if unspecified, use current one. 
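+		 * A zero sched_rr_quantum converts to XN_INFINITE
+		 * here, in which case the thread keeps its current
+		 * round-robin time slice.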
*/
+		tslice = u_ts2ns(&param_ex->sched_rr_quantum);
+		if (tslice == XN_INFINITE && tslice_r)
+			tslice = *tslice_r;
+		fallthrough;
+	case SCHED_FIFO:
+		if (prio < XNSCHED_FIFO_MIN_PRIO ||
+		    prio > XNSCHED_FIFO_MAX_PRIO)
+			return NULL;
+		break;
+	case SCHED_COBALT:
+		if (prio < XNSCHED_CORE_MIN_PRIO ||
+		    prio > XNSCHED_CORE_MAX_PRIO)
+			return NULL;
+		break;
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	case SCHED_SPORADIC:
+		param->pss.normal_prio = param_ex->sched_priority;
+		param->pss.low_prio = param_ex->sched_ss_low_priority;
+		param->pss.current_prio = param->pss.normal_prio;
+		param->pss.init_budget = u_ts2ns(&param_ex->sched_ss_init_budget);
+		param->pss.repl_period = u_ts2ns(&param_ex->sched_ss_repl_period);
+		param->pss.max_repl = param_ex->sched_ss_max_repl;
+		sched_class = &xnsched_class_sporadic;
+		break;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	case SCHED_TP:
+		param->tp.prio = param_ex->sched_priority;
+		param->tp.ptid = param_ex->sched_tp_partition;
+		sched_class = &xnsched_class_tp;
+		break;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	case SCHED_QUOTA:
+		param->quota.prio = param_ex->sched_priority;
+		param->quota.tgid = param_ex->sched_quota_group;
+		sched_class = &xnsched_class_quota;
+		break;
+#endif
+	default:
+		return NULL;
+	}
+
+	if (tslice_r)
+		*tslice_r = tslice;
+
+	return sched_class;
+}
+
+COBALT_SYSCALL(sched_minprio, current, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+	case SCHED_SPORADIC:
+	case SCHED_TP:
+	case SCHED_QUOTA:
+		ret = XNSCHED_FIFO_MIN_PRIO;
+		break;
+	case SCHED_COBALT:
+		ret = XNSCHED_CORE_MIN_PRIO;
+		break;
+	case SCHED_NORMAL:
+	case SCHED_WEAK:
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_min_prio(policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_maxprio, current, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+	case SCHED_SPORADIC:
+	case SCHED_TP:
+	case SCHED_QUOTA:
+		ret = XNSCHED_FIFO_MAX_PRIO;
+		break;
+	case SCHED_COBALT:
+		ret = XNSCHED_CORE_MAX_PRIO;
+		break;
+	case SCHED_NORMAL:
+		ret = 0;
+		break;
+	case SCHED_WEAK:
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+		ret = XNSCHED_FIFO_MAX_PRIO;
+#else
+		ret = 0;
+#endif
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_max_prio(policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_yield, primary, (void))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+	int ret = 0;
+
+	trace_cobalt_pthread_yield(0);
+
+	/* Maybe some extension wants to handle this. */
+	if (cobalt_call_extension(sched_yield, &curr->extref, ret) && ret)
+		return ret > 0 ? 0 : ret;
+
+	xnthread_resume(&curr->threadbase, 0);
+	if (xnsched_run())
+		return 0;
+
+	/*
+	 * If the round-robin move did not beget any context switch to
+	 * a thread running in primary mode, then wait for the next
+	 * linux context switch to happen.
+	 *
+	 * Rationale: it is most probably unexpected that
+	 * sched_yield() does not cause any context switch, since this
+	 * service is commonly used for implementing a poor man's
+	 * cooperative scheduling. By waiting for a context switch to
+	 * happen in the regular kernel, we guarantee that the CPU has
+	 * been relinquished for a while.
+	 *
+	 * Typically, this behavior allows a thread running in primary
+	 * mode to effectively yield the CPU to a thread of
+	 * same/higher priority stuck in secondary mode.
+	 *
+	 * NOTE: calling cobalt_yield() with no timeout
+	 * (i.e. XN_INFINITE) is probably never a good idea.
This + * means that a SCHED_FIFO non-rt thread stuck in a tight loop + * would prevent the caller from waking up, since no + * linux-originated schedule event would happen for unblocking + * it on the current CPU. For this reason, we pass the + * arbitrary TICK_NSEC value to limit the wait time to a + * reasonable amount. + */ + return cobalt_yield(TICK_NSEC, TICK_NSEC); +} + +#ifdef CONFIG_XENO_OPT_SCHED_TP + +static inline +int set_tp_config(int cpu, union sched_config *config, size_t len) +{ + xnticks_t offset, duration, next_offset; + struct xnsched_tp_schedule *gps, *ogps; + struct xnsched_tp_window *w; + struct sched_tp_window *p; + struct xnsched *sched; + spl_t s; + int n; + + if (len < sizeof(config->tp)) + return -EINVAL; + + sched = xnsched_struct(cpu); + + switch (config->tp.op) { + case sched_tp_install: + if (config->tp.nr_windows > 0) + break; + fallthrough; + case sched_tp_uninstall: + gps = NULL; + goto set_schedule; + case sched_tp_start: + xnlock_get_irqsave(&nklock, s); + xnsched_tp_start_schedule(sched); + xnlock_put_irqrestore(&nklock, s); + return 0; + case sched_tp_stop: + xnlock_get_irqsave(&nklock, s); + xnsched_tp_stop_schedule(sched); + xnlock_put_irqrestore(&nklock, s); + return 0; + default: + return -EINVAL; + } + + /* Install a new TP schedule on CPU. */ + + gps = xnmalloc(sizeof(*gps) + config->tp.nr_windows * sizeof(*w)); + if (gps == NULL) + return -ENOMEM; + + for (n = 0, p = config->tp.windows, w = gps->pwins, next_offset = 0; + n < config->tp.nr_windows; n++, p++, w++) { + /* + * Time windows must be strictly contiguous. Holes may + * be defined using windows assigned to the pseudo + * partition #-1. + */ + offset = u_ts2ns(&p->offset); + if (offset != next_offset) + goto cleanup_and_fail; + + duration = u_ts2ns(&p->duration); + if (duration <= 0) + goto cleanup_and_fail; + + if (p->ptid < -1 || + p->ptid >= CONFIG_XENO_OPT_SCHED_TP_NRPART) + goto cleanup_and_fail; + + w->w_offset = next_offset; + w->w_part = p->ptid; + next_offset += duration; + } + + atomic_set(&gps->refcount, 1); + gps->pwin_nr = n; + gps->tf_duration = next_offset; +set_schedule: + xnlock_get_irqsave(&nklock, s); + ogps = xnsched_tp_set_schedule(sched, gps); + xnlock_put_irqrestore(&nklock, s); + + if (ogps) + xnsched_tp_put_schedule(ogps); + + return 0; + +cleanup_and_fail: + xnfree(gps); + + return -EINVAL; +} + +static inline +ssize_t get_tp_config(int cpu, void __user *u_config, size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + ssize_t (*put_config)(int policy, void __user *u_config, + size_t u_len, + const union sched_config *config, + size_t len)) +{ + struct xnsched_tp_window *pw, *w; + struct xnsched_tp_schedule *gps; + struct sched_tp_window *pp, *p; + union sched_config *config; + struct xnsched *sched; + ssize_t ret, elen; + spl_t s; + int n; + + xnlock_get_irqsave(&nklock, s); + + sched = xnsched_struct(cpu); + gps = xnsched_tp_get_schedule(sched); + if (gps == NULL) { + xnlock_put_irqrestore(&nklock, s); + return 0; + } + + xnlock_put_irqrestore(&nklock, s); + + elen = sched_tp_confsz(gps->pwin_nr); + config = xnmalloc(elen); + if (config == NULL) { + ret = -ENOMEM; + goto out; + } + + config->tp.op = sched_tp_install; + config->tp.nr_windows = gps->pwin_nr; + for (n = 0, pp = p = config->tp.windows, pw = w = gps->pwins; + n < gps->pwin_nr; pp = p, p++, pw = w, w++, n++) { + u_ns2ts(&p->offset, w->w_offset); + u_ns2ts(&pp->duration, w->w_offset - pw->w_offset); + p->ptid = w->w_part; + } + 
u_ns2ts(&pp->duration, gps->tf_duration - pw->w_offset);
+	ret = put_config(SCHED_TP, u_config, len, config, elen);
+	xnfree(config);
+out:
+	xnsched_tp_put_schedule(gps);
+
+	return ret;
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_TP */
+
+static inline int
+set_tp_config(int cpu, union sched_config *config, size_t len)
+{
+	return -EINVAL;
+}
+
+static inline ssize_t
+get_tp_config(int cpu, union sched_config __user *u_config, size_t len,
+	      union sched_config *(*fetch_config)
+	      (int policy, const void __user *u_config,
+	       size_t *len),
+	      ssize_t (*put_config)(int policy, void __user *u_config,
+				    size_t u_len,
+				    const union sched_config *config,
+				    size_t len))
+{
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_TP */
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+static inline
+int set_quota_config(int cpu, union sched_config *config, size_t len)
+{
+	struct __sched_config_quota *p = &config->quota;
+	struct __sched_quota_info *iq = &p->info;
+	struct cobalt_sched_group *group;
+	struct xnsched_quota_group *tg;
+	struct xnsched *sched;
+	int ret, quota_sum;
+	spl_t s;
+
+	if (len < sizeof(*p))
+		return -EINVAL;
+
+	switch (p->op) {
+	case sched_quota_add:
+		group = xnmalloc(sizeof(*group));
+		if (group == NULL)
+			return -ENOMEM;
+		tg = &group->quota;
+		group->pshared = p->add.pshared != 0;
+		group->scope = cobalt_current_resources(group->pshared);
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		ret = xnsched_quota_create_group(tg, sched, &quota_sum);
+		if (ret) {
+			xnlock_put_irqrestore(&nklock, s);
+			xnfree(group);
+			return ret;
+		}
+		list_add(&group->next, &group->scope->schedq);
+		xnlock_put_irqrestore(&nklock, s);
+		break;
+	case sched_quota_remove:
+	case sched_quota_force_remove:
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		tg = xnsched_quota_find_group(sched, p->remove.tgid);
+		if (tg == NULL)
+			goto bad_tgid;
+		group = container_of(tg, struct cobalt_sched_group, quota);
+		if (group->scope != cobalt_current_resources(group->pshared))
+			goto bad_tgid;
+		ret = xnsched_quota_destroy_group(tg,
+						  p->op == sched_quota_force_remove,
+						  &quota_sum);
+		if (ret) {
+			xnlock_put_irqrestore(&nklock, s);
+			return ret;
+		}
+		list_del(&group->next);
+		xnlock_put_irqrestore(&nklock, s);
+		iq->tgid = tg->tgid;
+		iq->quota = tg->quota_percent;
+		iq->quota_peak = tg->quota_peak_percent;
+		iq->quota_sum = quota_sum;
+		xnfree(group);
+		return 0;
+	case sched_quota_set:
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		tg = xnsched_quota_find_group(sched, p->set.tgid);
+		if (tg == NULL)
+			goto bad_tgid;
+		group = container_of(tg, struct cobalt_sched_group, quota);
+		if (group->scope != cobalt_current_resources(group->pshared))
+			goto bad_tgid;
+		xnsched_quota_set_limit(tg, p->set.quota, p->set.quota_peak,
+					&quota_sum);
+		xnlock_put_irqrestore(&nklock, s);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	iq->tgid = tg->tgid;
+	iq->quota = tg->quota_percent;
+	iq->quota_peak = tg->quota_peak_percent;
+	iq->quota_sum = quota_sum;
+
+	return 0;
+bad_tgid:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -ESRCH;
+}
+
+static inline
+ssize_t get_quota_config(int cpu, void __user *u_config, size_t len,
+			 union sched_config *(*fetch_config)
+			 (int policy, const void __user *u_config,
+			  size_t *len),
+			 ssize_t (*put_config)(int policy, void __user *u_config,
+					       size_t u_len,
+					       const union sched_config *config,
+					       size_t len))
+{
+	struct cobalt_sched_group *group;
+	struct xnsched_quota_group *tg;
+	union sched_config *config;
+	struct xnsched *sched;
+	ssize_t ret;
+	spl_t s;
+
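+	/*
+	 * Illustrative userland view of this query (a sketch, not
+	 * part of this patch), assuming a group id @tgid previously
+	 * returned by the sched_quota_add operation:
+	 *
+	 *	union sched_config cf;
+	 *	cf.quota.get.tgid = tgid;
+	 *	ret = sched_getconfig_np(cpu, SCHED_QUOTA,
+	 *				 &cf, sizeof(cf.quota));
+	 *
+	 * On success, cf.quota.info carries the group's current
+	 * quota, quota_peak and quota_sum percentages, as filled in
+	 * right below.
+	 */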
+ config = fetch_config(SCHED_QUOTA, u_config, &len); + if (IS_ERR(config)) + return PTR_ERR(config); + + xnlock_get_irqsave(&nklock, s); + sched = xnsched_struct(cpu); + tg = xnsched_quota_find_group(sched, config->quota.get.tgid); + if (tg == NULL) + goto bad_tgid; + + group = container_of(tg, struct cobalt_sched_group, quota); + if (group->scope != cobalt_current_resources(group->pshared)) + goto bad_tgid; + + config->quota.info.tgid = tg->tgid; + config->quota.info.quota = tg->quota_percent; + config->quota.info.quota_peak = tg->quota_peak_percent; + config->quota.info.quota_sum = xnsched_quota_sum_all(sched); + xnlock_put_irqrestore(&nklock, s); + + ret = put_config(SCHED_QUOTA, u_config, len, config, sizeof(*config)); + xnfree(config); + + return ret; +bad_tgid: + xnlock_put_irqrestore(&nklock, s); + xnfree(config); + + return -ESRCH; +} + +#else /* !CONFIG_XENO_OPT_SCHED_QUOTA */ + +static inline +int set_quota_config(int cpu, union sched_config *config, size_t len) +{ + return -EINVAL; +} + +static inline +ssize_t get_quota_config(int cpu, void __user *u_config, + size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + ssize_t (*put_config)(int policy, void __user *u_config, + size_t u_len, + const union sched_config *config, + size_t len)) +{ + return -EINVAL; +} + +#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */ + +static union sched_config * +sched_fetch_config(int policy, const void __user *u_config, size_t *len) +{ + union sched_config *buf; + int ret; + + if (u_config == NULL) + return ERR_PTR(-EFAULT); + + if (policy == SCHED_QUOTA && *len < sizeof(buf->quota)) + return ERR_PTR(-EINVAL); + + buf = xnmalloc(*len); + if (buf == NULL) + return ERR_PTR(-ENOMEM); + + ret = cobalt_copy_from_user(buf, u_config, *len); + if (ret) { + xnfree(buf); + return ERR_PTR(ret); + } + + return buf; +} + +static int sched_ack_config(int policy, const union sched_config *config, + void __user *u_config) +{ + union sched_config __user *u_p = u_config; + + if (policy != SCHED_QUOTA) + return 0; + + return u_p == NULL ? 
-EFAULT :
+		cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+				    sizeof(u_p->quota.info));
+}
+
+static ssize_t sched_put_config(int policy,
+				void __user *u_config, size_t u_len,
+				const union sched_config *config, size_t len)
+{
+	union sched_config *u_p = u_config;
+
+	if (u_config == NULL)
+		return -EFAULT;
+
+	if (policy == SCHED_QUOTA) {
+		if (u_len < sizeof(config->quota))
+			return -EINVAL;
+		return cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+					   sizeof(u_p->quota.info)) ?:
+			sizeof(u_p->quota.info);
+	}
+
+	return cobalt_copy_to_user(u_config, config, len) ?: len;
+}
+
+int __cobalt_sched_setconfig_np(int cpu, int policy,
+				void __user *u_config,
+				size_t len,
+				union sched_config *(*fetch_config)
+				(int policy, const void __user *u_config,
+				 size_t *len),
+				int (*ack_config)(int policy,
+						  const union sched_config *config,
+						  void __user *u_config))
+{
+	union sched_config *buf;
+	int ret;
+
+	trace_cobalt_sched_setconfig(cpu, policy, len);
+
+	if (cpu < 0 || cpu >= NR_CPUS || !xnsched_threading_cpu(cpu))
+		return -EINVAL;
+
+	if (len == 0)
+		return -EINVAL;
+
+	buf = fetch_config(policy, u_config, &len);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	switch (policy) {
+	case SCHED_TP:
+		ret = set_tp_config(cpu, buf, len);
+		break;
+	case SCHED_QUOTA:
+		ret = set_quota_config(cpu, buf, len);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret == 0)
+		ret = ack_config(policy, buf, u_config);
+
+	xnfree(buf);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_setconfig_np, conforming,
+	       (int cpu, int policy,
+		union sched_config __user *u_config,
+		size_t len))
+{
+	return __cobalt_sched_setconfig_np(cpu, policy, u_config, len,
+					   sched_fetch_config, sched_ack_config);
+}
+
+ssize_t __cobalt_sched_getconfig_np(int cpu, int policy,
+				    void __user *u_config,
+				    size_t len,
+				    union sched_config *(*fetch_config)
+				    (int policy, const void __user *u_config,
+				     size_t *len),
+				    ssize_t (*put_config)(int policy,
+							  void __user *u_config,
+							  size_t u_len,
+							  const union sched_config *config,
+							  size_t len))
+{
+	ssize_t ret;
+
+	switch (policy) {
+	case SCHED_TP:
+		ret = get_tp_config(cpu, u_config, len,
+				    fetch_config, put_config);
+		break;
+	case SCHED_QUOTA:
+		ret = get_quota_config(cpu, u_config, len,
+				       fetch_config, put_config);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_get_config(cpu, policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_getconfig_np, conforming,
+	       (int cpu, int policy,
+		union sched_config __user *u_config,
+		size_t len))
+{
+	return __cobalt_sched_getconfig_np(cpu, policy, u_config, len,
+					   sched_fetch_config, sched_put_config);
+}
+
+int __cobalt_sched_weightprio(int policy,
+			      const struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	int prio;
+
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, NULL);
+	if (sched_class == NULL)
+		return -EINVAL;
+
+	prio = param_ex->sched_priority;
+	if (prio < 0)
+		prio = -prio;
+
+	return prio + sched_class->weight;
+}
+
+COBALT_SYSCALL(sched_weightprio, current,
+	       (int policy, const struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return __cobalt_sched_weightprio(policy, &param_ex);
+}
+
+int cobalt_sched_setscheduler_ex(pid_t pid,
+				 int policy,
+				 const struct sched_param_ex *param_ex,
+				 __u32 __user *u_winoff,
+				 int __user *u_promoted)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret, promoted = 0;
+	spl_t s;
+
+	trace_cobalt_sched_setscheduler(pid, policy, param_ex);
+
+	if (pid) {
+		xnlock_get_irqsave(&nklock, s);
+		thread = cobalt_thread_find(pid);
+		xnlock_put_irqrestore(&nklock, s);
+	} else
+		thread = cobalt_current_thread();
+
+	if (thread == NULL) {
+		if (u_winoff == NULL || pid != task_pid_vnr(current))
+			return -ESRCH;
+
+		thread = cobalt_thread_shadow(&hkey, u_winoff);
+		if (IS_ERR(thread))
+			return PTR_ERR(thread);
+
+		promoted = 1;
+	}
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(sched_setscheduler_ex, conforming,
+	       (pid_t pid,
+		int policy,
+		const struct sched_param_ex __user *u_param,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return cobalt_sched_setscheduler_ex(pid, policy, &param_ex,
+					    u_winoff, u_promoted);
+}
+
+int cobalt_sched_getscheduler_ex(pid_t pid,
+				 int *policy_r,
+				 struct sched_param_ex *param_ex)
+{
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	trace_cobalt_sched_getscheduler(pid);
+
+	if (pid) {
+		xnlock_get_irqsave(&nklock, s);
+		thread = cobalt_thread_find(pid);
+		xnlock_put_irqrestore(&nklock, s);
+	} else
+		thread = cobalt_current_thread();
+
+	if (thread == NULL)
+		return -ESRCH;
+
+	return __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex);
+}
+
+COBALT_SYSCALL(sched_getscheduler_ex, current,
+	       (pid_t pid,
+		int __user *u_policy,
+		struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_sched_getscheduler_ex(pid, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	if (cobalt_copy_to_user(u_param, &param_ex, sizeof(param_ex)) ||
+	    cobalt_copy_to_user(u_policy, &policy, sizeof(policy)))
+		return -EFAULT;
+
+	return 0;
+}
+
+void cobalt_sched_reclaim(struct cobalt_process *process)
+{
+	struct cobalt_resources *p = &process->resources;
+	struct cobalt_sched_group *group;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	while (!list_empty(&p->schedq)) {
+		group = list_get_entry(&p->schedq, struct cobalt_sched_group, next);
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+		xnsched_quota_destroy_group(&group->quota, 1, NULL);
+#endif
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(group);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h
new file mode 100644
index 0000000..2b23be0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ +#ifndef _COBALT_POSIX_SCHED_H +#define _COBALT_POSIX_SCHED_H + +#include <linux/list.h> +#include <cobalt/kernel/sched.h> +#include <xenomai/posix/syscall.h> + +struct cobalt_resources; +struct cobalt_process; + +struct cobalt_sched_group { +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + struct xnsched_quota_group quota; +#endif + struct cobalt_resources *scope; + int pshared; + struct list_head next; +}; + +int __cobalt_sched_weightprio(int policy, + const struct sched_param_ex *param_ex); + +int __cobalt_sched_setconfig_np(int cpu, int policy, + void __user *u_config, + size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + int (*ack_config)(int policy, + const union sched_config *config, + void __user *u_config)); + +ssize_t __cobalt_sched_getconfig_np(int cpu, int policy, + void __user *u_config, + size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + ssize_t (*put_config)(int policy, + void __user *u_config, size_t u_len, + const union sched_config *config, + size_t len)); +int cobalt_sched_setscheduler_ex(pid_t pid, + int policy, + const struct sched_param_ex *param_ex, + __u32 __user *u_winoff, + int __user *u_promoted); + +int cobalt_sched_getscheduler_ex(pid_t pid, + int *policy_r, + struct sched_param_ex *param_ex); + +struct xnsched_class * +cobalt_sched_policy_param(union xnsched_policy_param *param, + int u_policy, const struct sched_param_ex *param_ex, + xnticks_t *tslice_r); + +COBALT_SYSCALL_DECL(sched_yield, (void)); + +COBALT_SYSCALL_DECL(sched_weightprio, + (int policy, const struct sched_param_ex __user *u_param)); + +COBALT_SYSCALL_DECL(sched_minprio, (int policy)); + +COBALT_SYSCALL_DECL(sched_maxprio, (int policy)); + +COBALT_SYSCALL_DECL(sched_setconfig_np, + (int cpu, + int policy, + union sched_config __user *u_config, + size_t len)); + +COBALT_SYSCALL_DECL(sched_getconfig_np, + (int cpu, int policy, + union sched_config __user *u_config, + size_t len)); + +COBALT_SYSCALL_DECL(sched_setscheduler_ex, + (pid_t pid, + int policy, + const struct sched_param_ex __user *u_param, + __u32 __user *u_winoff, + int __user *u_promoted)); + +COBALT_SYSCALL_DECL(sched_getscheduler_ex, + (pid_t pid, + int __user *u_policy, + struct sched_param_ex __user *u_param)); + +void cobalt_sched_reclaim(struct cobalt_process *process); + +#endif /* !_COBALT_POSIX_SCHED_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c new file mode 100644 index 0000000..71b8c52 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c @@ -0,0 +1,667 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * Copyright (C) 2014,2015 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/stddef.h> +#include <linux/err.h> +#include <cobalt/kernel/time.h> +#include "internal.h" +#include "thread.h" +#include "clock.h" +#include "sem.h" +#include <trace/events/cobalt-posix.h> + +#ifdef CONFIG_XENO_OPT_VFILE + +static int sem_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + return 0; +} + +static struct xnvfile_regular_ops sem_vfile_ops = { + .show = sem_vfile_show, +}; + +static struct xnpnode_regular __sem_pnode = { + .node = { + .dirname = "sem", + .root = &posix_ptree, + .ops = &xnregistry_vfreg_ops, + }, + .vfile = { + .ops = &sem_vfile_ops, + }, +}; + +#else /* !CONFIG_XENO_OPT_VFILE */ + +static struct xnpnode_link __sem_pnode = { + .node = { + .dirname = "sem", + } +}; + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +static inline struct cobalt_resources *sem_kqueue(struct cobalt_sem *sem) +{ + int pshared = !!(sem->flags & SEM_PSHARED); + return cobalt_current_resources(pshared); +} + +static inline int sem_check(struct cobalt_sem *sem) +{ + if (sem == NULL || sem->magic != COBALT_SEM_MAGIC) + return -EINVAL; + + if (sem->resnode.scope && sem->resnode.scope != sem_kqueue(sem)) + return -EPERM; + + return 0; +} + +int __cobalt_sem_destroy(xnhandle_t handle) +{ + struct cobalt_sem *sem; + int ret = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + sem = xnregistry_lookup(handle, NULL); + if (!cobalt_obj_active(sem, COBALT_SEM_MAGIC, typeof(*sem))) { + ret = -EINVAL; + goto fail; + } + + if (--sem->refs) { + ret = -EBUSY; + goto fail; + } + + cobalt_mark_deleted(sem); + if (!sem->pathname) + cobalt_del_resource(&sem->resnode); + if (xnsynch_destroy(&sem->synchbase) == XNSYNCH_RESCHED) { + xnsched_run(); + ret = 1; + } + + xnlock_put_irqrestore(&nklock, s); + + xnregistry_remove(sem->resnode.handle); + if (sem->pathname) + putname(sem->pathname); + + cobalt_umm_free(&cobalt_ppd_get(!!(sem->flags & SEM_PSHARED))->umm, + sem->state); + + xnfree(sem); + + return ret; +fail: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +struct cobalt_sem * +__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sm, + int flags, unsigned int value) +{ + struct cobalt_sem_state *state; + struct cobalt_sem *sem, *osem; + struct cobalt_ppd *sys_ppd; + int ret, sflags, pshared; + struct list_head *semq; + spl_t s; + + if ((flags & SEM_PULSE) != 0 && value > 0) { + ret = -EINVAL; + goto out; + } + + sem = xnmalloc(sizeof(*sem)); + if (sem == NULL) { + ret = -ENOMEM; + goto out; + } + + pshared = !!(flags & SEM_PSHARED); + sys_ppd = cobalt_ppd_get(pshared); + state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state)); + if (state == NULL) { + ret = -EAGAIN; + goto err_free_sem; + } + + xnlock_get_irqsave(&nklock, s); + + semq = &cobalt_current_resources(pshared)->semq; + if ((sm->magic == COBALT_SEM_MAGIC && !list_empty(semq)) || + sm->magic == COBALT_NAMED_SEM_MAGIC) { + osem = xnregistry_lookup(sm->handle, NULL); + if (cobalt_obj_active(osem, COBALT_SEM_MAGIC, typeof(*osem))) { + ret = -EBUSY; + goto err_lock_put; + } + } + + if (value > (unsigned)SEM_VALUE_MAX) { + ret = -EINVAL; + goto err_lock_put; + } + + ret = xnregistry_enter(name ?: "", sem, &sem->resnode.handle, + name ? &__sem_pnode.node : NULL); + if (ret < 0) + goto err_lock_put; + + sem->magic = COBALT_SEM_MAGIC; + if (!name) + cobalt_add_resource(&sem->resnode, sem, pshared); + else + sem->resnode.scope = NULL; + sflags = flags & SEM_FIFO ? 
0 : XNSYNCH_PRIO; + xnsynch_init(&sem->synchbase, sflags, NULL); + + sem->state = state; + atomic_set(&state->value, value); + state->flags = flags; + sem->flags = flags; + sem->refs = name ? 2 : 1; + sem->pathname = NULL; + + xnlock_put_irqrestore(&nklock, s); + + __cobalt_sem_shadow_init(sem, + name ? COBALT_NAMED_SEM_MAGIC : COBALT_SEM_MAGIC, sm); + + trace_cobalt_psem_init(name ?: "anon", + sem->resnode.handle, flags, value); + + return sem; + +err_lock_put: + xnlock_put_irqrestore(&nklock, s); + cobalt_umm_free(&sys_ppd->umm, state); +err_free_sem: + xnfree(sem); +out: + trace_cobalt_psem_init_failed(name ?: "anon", flags, value, ret); + + return ERR_PTR(ret); +} + +void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic, + struct cobalt_sem_shadow *sm) +{ + __u32 flags = sem->state->flags; + struct cobalt_ppd *sys_ppd; + + sys_ppd = cobalt_ppd_get(!!(flags & SEM_PSHARED)); + + sm->magic = magic; + sm->handle = sem->resnode.handle; + sm->state_offset = cobalt_umm_offset(&sys_ppd->umm, sem->state); + if (sem->state->flags & SEM_PSHARED) + sm->state_offset = -sm->state_offset; +} + +static int sem_destroy(struct cobalt_sem_shadow *sm) +{ + struct cobalt_sem *sem; + int warn, ret; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (sm->magic != COBALT_SEM_MAGIC) { + ret = -EINVAL; + goto fail; + } + + sem = xnregistry_lookup(sm->handle, NULL); + ret = sem_check(sem); + if (ret) + goto fail; + + if ((sem->flags & SEM_NOBUSYDEL) != 0 && + xnsynch_pended_p(&sem->synchbase)) { + ret = -EBUSY; + goto fail; + } + + warn = sem->flags & SEM_WARNDEL; + cobalt_mark_deleted(sm); + + xnlock_put_irqrestore(&nklock, s); + + ret = __cobalt_sem_destroy(sem->resnode.handle); + + return warn ? ret : 0; +fail: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +static inline int do_trywait(struct cobalt_sem *sem) +{ + int ret; + + ret = sem_check(sem); + if (ret) + return ret; + + if (atomic_sub_return(1, &sem->state->value) < 0) + return -EAGAIN; + + return 0; +} + +static int sem_wait(xnhandle_t handle) +{ + struct cobalt_sem *sem; + int ret, info; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + sem = xnregistry_lookup(handle, NULL); + ret = do_trywait(sem); + if (ret != -EAGAIN) + goto out; + + ret = 0; + info = xnsynch_sleep_on(&sem->synchbase, XN_INFINITE, XN_RELATIVE); + if (info & XNRMID) { + ret = -EINVAL; + } else if (info & XNBREAK) { + atomic_inc(&sem->state->value); /* undo do_trywait() */ + ret = -EINTR; + } +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem, + const struct timespec64 *ts) +{ + int ret, info; + bool validate_ts = true; + struct cobalt_sem *sem; + xnhandle_t handle; + xntmode_t tmode; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_timedwait(handle); + + xnlock_get_irqsave(&nklock, s); + + for (;;) { + sem = xnregistry_lookup(handle, NULL); + ret = do_trywait(sem); + if (ret != -EAGAIN) + break; + + /* + * POSIX states that the validity of the timeout spec + * _need_ not be checked if the semaphore can be + * locked immediately, we show this behavior despite + * it's actually more complex, to keep some + * applications ported to Linux happy. + */ + if (validate_ts) { + atomic_inc(&sem->state->value); + if (!ts) { + ret = -EFAULT; + break; + } + if (!timespec64_valid(ts)) { + ret = -EINVAL; + break; + } + validate_ts = false; + continue; + } + + ret = 0; + tmode = sem->flags & SEM_RAWCLOCK ? 
XN_ABSOLUTE : XN_REALTIME; + info = xnsynch_sleep_on(&sem->synchbase, ts2ns(ts) + 1, tmode); + if (info & XNRMID) + ret = -EINVAL; + else if (info & (XNBREAK|XNTIMEO)) { + ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT; + atomic_inc(&sem->state->value); + } + break; + } + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts) +{ + int ret = 1; + struct timespec64 ts64; + + if (u_ts) + ret = cobalt_get_timespec64(&ts64, u_ts); + + return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64); +} + +static int sem_post(xnhandle_t handle) +{ + struct cobalt_sem *sem; + int ret; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + sem = xnregistry_lookup(handle, NULL); + ret = sem_check(sem); + if (ret) + goto out; + + if (atomic_read(&sem->state->value) == SEM_VALUE_MAX) { + ret = -EINVAL; + goto out; + } + + if (atomic_inc_return(&sem->state->value) <= 0) { + if (xnsynch_wakeup_one_sleeper(&sem->synchbase)) + xnsched_run(); + } else if (sem->flags & SEM_PULSE) + atomic_set(&sem->state->value, 0); +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +static int sem_getvalue(xnhandle_t handle, int *value) +{ + struct cobalt_sem *sem; + int ret; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + sem = xnregistry_lookup(handle, NULL); + ret = sem_check(sem); + if (ret) { + xnlock_put_irqrestore(&nklock, s); + return ret; + } + + *value = atomic_read(&sem->state->value); + if ((sem->flags & SEM_REPORT) == 0 && *value < 0) + *value = 0; + + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +COBALT_SYSCALL(sem_init, current, + (struct cobalt_sem_shadow __user *u_sem, + int flags, unsigned int value)) +{ + struct cobalt_sem_shadow sm; + struct cobalt_sem *sem; + + if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm))) + return -EFAULT; + + if (flags & ~(SEM_FIFO|SEM_PULSE|SEM_PSHARED|SEM_REPORT|\ + SEM_WARNDEL|SEM_RAWCLOCK|SEM_NOBUSYDEL)) + return -EINVAL; + + sem = __cobalt_sem_init(NULL, &sm, flags, value); + if (IS_ERR(sem)) + return PTR_ERR(sem); + + return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem)); +} + +COBALT_SYSCALL(sem_post, current, + (struct cobalt_sem_shadow __user *u_sem)) +{ + xnhandle_t handle; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_post(handle); + + return sem_post(handle); +} + +COBALT_SYSCALL(sem_wait, primary, + (struct cobalt_sem_shadow __user *u_sem)) +{ + xnhandle_t handle; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_wait(handle); + + return sem_wait(handle); +} + +COBALT_SYSCALL(sem_timedwait, primary, + (struct cobalt_sem_shadow __user *u_sem, + const struct __user_old_timespec __user *u_ts)) +{ + int ret = 1; + struct timespec64 ts64; + + if (u_ts) + ret = cobalt_get_u_timespec(&ts64, u_ts); + + return __cobalt_sem_timedwait(u_sem, ret ? 
NULL : &ts64); +} + +COBALT_SYSCALL(sem_timedwait64, primary, + (struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_sem_timedwait64(u_sem, u_ts); +} + +COBALT_SYSCALL(sem_trywait, primary, + (struct cobalt_sem_shadow __user *u_sem)) +{ + struct cobalt_sem *sem; + xnhandle_t handle; + int ret; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_trywait(handle); + + xnlock_get_irqsave(&nklock, s); + sem = xnregistry_lookup(handle, NULL); + ret = do_trywait(sem); + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(sem_getvalue, current, + (struct cobalt_sem_shadow __user *u_sem, + int __user *u_sval)) +{ + int ret, sval = -1; + xnhandle_t handle; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + + ret = sem_getvalue(handle, &sval); + trace_cobalt_psem_getvalue(handle, sval); + if (ret) + return ret; + + return cobalt_copy_to_user(u_sval, &sval, sizeof(sval)); +} + +COBALT_SYSCALL(sem_destroy, current, + (struct cobalt_sem_shadow __user *u_sem)) +{ + struct cobalt_sem_shadow sm; + int err; + + if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm))) + return -EFAULT; + + trace_cobalt_psem_destroy(sm.handle); + + err = sem_destroy(&sm); + if (err < 0) + return err; + + return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem)) ?: err; +} + +COBALT_SYSCALL(sem_broadcast_np, current, + (struct cobalt_sem_shadow __user *u_sem)) +{ + struct cobalt_sem *sem; + xnhandle_t handle; + spl_t s; + int ret; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_broadcast(handle); + + xnlock_get_irqsave(&nklock, s); + + sem = xnregistry_lookup(handle, NULL); + ret = sem_check(sem); + if (ret == 0 && atomic_read(&sem->state->value) < 0) { + atomic_set(&sem->state->value, 0); + xnsynch_flush(&sem->synchbase, 0); + xnsched_run(); + } + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(sem_inquire, current, + (struct cobalt_sem_shadow __user *u_sem, + struct cobalt_sem_info __user *u_info, + pid_t __user *u_waitlist, + size_t waitsz)) +{ + int val = 0, nrwait = 0, nrpids, ret = 0; + unsigned long pstamp, nstamp = 0; + struct cobalt_sem_info info; + pid_t *t = NULL, fbuf[16]; + struct xnthread *thread; + struct cobalt_sem *sem; + xnhandle_t handle; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_inquire(handle); + + nrpids = waitsz / sizeof(pid_t); + + xnlock_get_irqsave(&nklock, s); + + for (;;) { + pstamp = nstamp; + sem = xnregistry_lookup(handle, &nstamp); + if (sem == NULL || sem->magic != COBALT_SEM_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + /* + * Allocate memory to return the wait list without + * holding any lock, then revalidate the handle. + */ + if (t == NULL) { + val = atomic_read(&sem->state->value); + if (val >= 0 || u_waitlist == NULL) + break; + xnlock_put_irqrestore(&nklock, s); + if (nrpids > -val) + nrpids = -val; + if (-val <= ARRAY_SIZE(fbuf)) + t = fbuf; /* Use fast buffer. */ + else { + t = xnmalloc(-val * sizeof(pid_t)); + if (t == NULL) + return -ENOMEM; + } + xnlock_get_irqsave(&nklock, s); + } else if (pstamp == nstamp) + break; + else if (val != atomic_read(&sem->state->value)) { + xnlock_put_irqrestore(&nklock, s); + if (t != fbuf) + xnfree(t); + t = NULL; + xnlock_get_irqsave(&nklock, s); + } + } + + info.flags = sem->flags; + info.value = (sem->flags & SEM_REPORT) || val >= 0 ? val : 0; + info.nrwait = val < 0 ? 
-val : 0; + + if (xnsynch_pended_p(&sem->synchbase) && u_waitlist != NULL) { + xnsynch_for_each_sleeper(thread, &sem->synchbase) { + if (nrwait >= nrpids) + break; + t[nrwait++] = xnthread_host_pid(thread); + } + } + + xnlock_put_irqrestore(&nklock, s); + + ret = cobalt_copy_to_user(u_info, &info, sizeof(info)); + if (ret == 0 && nrwait > 0) + ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t)); + + if (t && t != fbuf) + xnfree(t); + + return ret ?: nrwait; +} + +void cobalt_sem_reclaim(struct cobalt_resnode *node, spl_t s) +{ + struct cobalt_sem *sem; + xnhandle_t handle; + int named, ret; + + sem = container_of(node, struct cobalt_sem, resnode); + named = (sem->flags & SEM_NAMED) != 0; + handle = node->handle; + xnlock_put_irqrestore(&nklock, s); + ret = __cobalt_sem_destroy(handle); + if (named && ret == -EBUSY) + xnregistry_unlink(xnregistry_key(handle)); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h new file mode 100644 index 0000000..d7dbb90 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h @@ -0,0 +1,133 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_SEM_H +#define _COBALT_POSIX_SEM_H + +#include <linux/kernel.h> +#include <linux/fcntl.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/kernel/registry.h> +#include <xenomai/posix/syscall.h> +#include <xenomai/posix/process.h> + +struct cobalt_process; +struct filename; + +struct cobalt_sem { + unsigned int magic; + struct xnsynch synchbase; + struct cobalt_sem_state *state; + int flags; + unsigned int refs; + struct filename *pathname; + struct cobalt_resnode resnode; +}; + +/* Copied from Linuxthreads semaphore.h. 
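+ * These definitions are only carried here so that the kernel-side
+ * view of sem_t matches the size and layout userland links against;
+ * the Cobalt core itself never interprets these fields.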
*/ +struct _sem_fastlock +{ + long int __status; + int __spinlock; +}; + +typedef struct +{ + struct _sem_fastlock __sem_lock; + int __sem_value; + long __sem_waiting; +} sem_t; + +#include <cobalt/uapi/sem.h> + +#define SEM_VALUE_MAX (INT_MAX) +#define SEM_FAILED NULL +#define SEM_NAMED 0x80000000 + +struct cobalt_sem_shadow __user * +__cobalt_sem_open(struct cobalt_sem_shadow __user *usm, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value); + +int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem, + const struct timespec64 *ts); + +int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts); + +int __cobalt_sem_destroy(xnhandle_t handle); + +void cobalt_nsem_reclaim(struct cobalt_process *process); + +struct cobalt_sem * +__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sem, + int flags, unsigned value); + +void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic, + struct cobalt_sem_shadow *sm); + +COBALT_SYSCALL_DECL(sem_init, + (struct cobalt_sem_shadow __user *u_sem, + int flags, unsigned value)); + +COBALT_SYSCALL_DECL(sem_post, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_wait, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_timedwait, + (struct cobalt_sem_shadow __user *u_sem, + const struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(sem_timedwait64, + (struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(sem_trywait, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_getvalue, + (struct cobalt_sem_shadow __user *u_sem, + int __user *u_sval)); + +COBALT_SYSCALL_DECL(sem_destroy, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_open, + (struct cobalt_sem_shadow __user *__user *u_addrp, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value)); + +COBALT_SYSCALL_DECL(sem_close, + (struct cobalt_sem_shadow __user *usm)); + +COBALT_SYSCALL_DECL(sem_unlink, (const char __user *u_name)); + +COBALT_SYSCALL_DECL(sem_broadcast_np, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_inquire, + (struct cobalt_sem_shadow __user *u_sem, + struct cobalt_sem_info __user *u_info, + pid_t __user *u_waitlist, + size_t waitsz)); + +void cobalt_sem_reclaim(struct cobalt_resnode *node, + spl_t s); + +#endif /* !_COBALT_POSIX_SEM_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c new file mode 100644 index 0000000..5f5cb85 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c @@ -0,0 +1,638 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/sched.h> +#include <cobalt/kernel/assert.h> +#include <cobalt/kernel/compat.h> +#include <cobalt/kernel/time.h> +#include "internal.h" +#include "signal.h" +#include "thread.h" +#include "timer.h" +#include "clock.h" + +static void *sigpending_mem; + +static LIST_HEAD(sigpending_pool); + +/* + * The maximum number of signal notifications that may be pending at + * any given time, except for timers. Cobalt signals are always thread + * directed, and we assume that in practice, each signal number is + * processed by a dedicated thread. We provide for up to three + * real-time signal events to pile up, and a single notification + * pending for other signals. Timers use a fast queuing logic + * maintaining a count of overruns, and therefore do not consume any + * memory from this pool. + */ +#define __SIGPOOL_SIZE (sizeof(struct cobalt_sigpending) * \ + (_NSIG + (SIGRTMAX - SIGRTMIN) * 2)) + +static int cobalt_signal_deliver(struct cobalt_thread *thread, + struct cobalt_sigpending *sigp, + int group) +{ /* nklocked, IRQs off */ + struct cobalt_sigwait_context *swc; + struct xnthread_wait_context *wc; + struct list_head *sigwaiters; + int sig, ret; + + sig = sigp->si.si_signo; + XENO_BUG_ON(COBALT, sig < 1 || sig > _NSIG); + + /* + * Attempt to deliver the signal immediately to the initial + * target that waits for it. + */ + if (xnsynch_pended_p(&thread->sigwait)) { + wc = xnthread_get_wait_context(&thread->threadbase); + swc = container_of(wc, struct cobalt_sigwait_context, wc); + if (sigismember(swc->set, sig)) + goto deliver; + } + + /* + * If that does not work out and we are sending to a thread + * group, try to deliver to any thread from the same process + * waiting for that signal. + */ + sigwaiters = &thread->process->sigwaiters; + if (!group || list_empty(sigwaiters)) + return 0; + + list_for_each_entry(thread, sigwaiters, signext) { + wc = xnthread_get_wait_context(&thread->threadbase); + swc = container_of(wc, struct cobalt_sigwait_context, wc); + if (sigismember(swc->set, sig)) + goto deliver; + } + + return 0; +deliver: + cobalt_copy_siginfo(sigp->si.si_code, swc->si, &sigp->si); + cobalt_call_extension(signal_deliver, &thread->extref, + ret, swc->si, sigp); + xnthread_complete_wait(&swc->wc); + xnsynch_wakeup_one_sleeper(&thread->sigwait); + list_del(&thread->signext); + + /* + * This is an immediate delivery bypassing any queuing, so we + * have to release the sigpending data right away before + * leaving. + */ + cobalt_signal_free(sigp); + + return 1; +} + +int cobalt_signal_send(struct cobalt_thread *thread, + struct cobalt_sigpending *sigp, + int group) +{ /* nklocked, IRQs off */ + struct list_head *sigq; + int sig, ret; + + /* Can we deliver this signal immediately? */ + ret = cobalt_signal_deliver(thread, sigp, group); + if (ret) + return ret; /* Yep, done. */ + + /* + * Nope, attempt to queue it. We start by calling any Cobalt + * extension for queuing the signal first. + */ + if (cobalt_call_extension(signal_queue, &thread->extref, ret, sigp)) { + if (ret) + /* Queuing done remotely or error. */ + return ret; + } + + sig = sigp->si.si_signo; + sigq = thread->sigqueues + sig - 1; + if (!list_empty(sigq)) { + /* Queue non-rt signals only once. */ + if (sig < SIGRTMIN) + return 0; + /* Queue rt signal source only once (SI_TIMER). 
*/ + if (!list_empty(&sigp->next)) + return 0; + } + + sigaddset(&thread->sigpending, sig); + list_add_tail(&sigp->next, sigq); + + return 1; +} +EXPORT_SYMBOL_GPL(cobalt_signal_send); + +int cobalt_signal_send_pid(pid_t pid, struct cobalt_sigpending *sigp) +{ /* nklocked, IRQs off */ + struct cobalt_thread *thread; + + thread = cobalt_thread_find(pid); + if (thread) + return cobalt_signal_send(thread, sigp, 0); + + return -ESRCH; +} +EXPORT_SYMBOL_GPL(cobalt_signal_send_pid); + +struct cobalt_sigpending *cobalt_signal_alloc(void) +{ /* nklocked, IRQs off */ + struct cobalt_sigpending *sigp; + + if (list_empty(&sigpending_pool)) { + if (xnclock_ratelimit()) + printk(XENO_WARNING "signal bucket pool underflows\n"); + return NULL; + } + + sigp = list_get_entry(&sigpending_pool, struct cobalt_sigpending, next); + INIT_LIST_HEAD(&sigp->next); + + return sigp; +} +EXPORT_SYMBOL_GPL(cobalt_signal_alloc); + +void cobalt_signal_free(struct cobalt_sigpending *sigp) +{ /* nklocked, IRQs off */ + if ((void *)sigp >= sigpending_mem && + (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) + list_add_tail(&sigp->next, &sigpending_pool); +} +EXPORT_SYMBOL_GPL(cobalt_signal_free); + +void cobalt_signal_flush(struct cobalt_thread *thread) +{ + struct cobalt_sigpending *sigp, *tmp; + struct list_head *sigq; + spl_t s; + int n; + + /* + * TCB is not accessible from userland anymore, no locking + * required. + */ + if (sigisemptyset(&thread->sigpending)) + return; + + for (n = 0; n < _NSIG; n++) { + sigq = thread->sigqueues + n; + if (list_empty(sigq)) + continue; + /* + * sigpending blocks must be unlinked so that we + * detect this fact when deleting their respective + * owners. + */ + list_for_each_entry_safe(sigp, tmp, sigq, next) { + list_del_init(&sigp->next); + if ((void *)sigp >= sigpending_mem && + (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) { + xnlock_get_irqsave(&nklock, s); + list_add_tail(&sigp->next, &sigpending_pool); + xnlock_put_irqrestore(&nklock, s); + } + } + } + + sigemptyset(&thread->sigpending); +} + +static int signal_put_siginfo(void __user *u_si, const struct siginfo *si, + int overrun) +{ + struct siginfo __user *u_p = u_si; + int ret; + + ret = __xn_put_user(si->si_signo, &u_p->si_signo); + ret |= __xn_put_user(si->si_errno, &u_p->si_errno); + ret |= __xn_put_user(si->si_code, &u_p->si_code); + + /* + * Copy the generic/standard siginfo bits to userland. + */ + switch (si->si_code) { + case SI_TIMER: + ret |= __xn_put_user(si->si_tid, &u_p->si_tid); + ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr); + ret |= __xn_put_user(overrun, &u_p->si_overrun); + break; + case SI_QUEUE: + case SI_MESGQ: + ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr); + fallthrough; + case SI_USER: + ret |= __xn_put_user(si->si_pid, &u_p->si_pid); + ret |= __xn_put_user(si->si_uid, &u_p->si_uid); + } + + return ret; +} + +static int signal_wait(sigset_t *set, xnticks_t timeout, + void __user *u_si, bool compat) +{ + struct cobalt_sigpending *sigp = NULL; + struct cobalt_sigwait_context swc; + struct cobalt_thread *curr; + int ret, sig, n, overrun; + unsigned long *p, *t, m; + struct siginfo si, *sip; + struct list_head *sigq; + spl_t s; + + curr = cobalt_current_thread(); + XENO_BUG_ON(COBALT, curr == NULL); + + if (u_si && !access_wok(u_si, sizeof(*u_si))) + return -EFAULT; + + xnlock_get_irqsave(&nklock, s); + +check: + if (sigisemptyset(&curr->sigpending)) + /* Most common/fast path. 
*/ + goto wait; + + p = curr->sigpending.sig; /* pending */ + t = set->sig; /* tested */ + + for (n = 0, sig = 0; n < _NSIG_WORDS; ++n) { + m = *p++ & *t++; + if (m == 0) + continue; + sig = ffz(~m) + n * _NSIG_BPW + 1; + break; + } + + if (sig) { + sigq = curr->sigqueues + sig - 1; + if (list_empty(sigq)) { + sigdelset(&curr->sigpending, sig); + goto check; + } + sigp = list_get_entry(sigq, struct cobalt_sigpending, next); + INIT_LIST_HEAD(&sigp->next); /* Mark sigp as unlinked. */ + if (list_empty(sigq)) + sigdelset(&curr->sigpending, sig); + sip = &sigp->si; + ret = 0; + goto done; + } + +wait: + if (timeout == XN_NONBLOCK) { + ret = -EAGAIN; + goto fail; + } + swc.set = set; + swc.si = &si; + xnthread_prepare_wait(&swc.wc); + list_add_tail(&curr->signext, &curr->process->sigwaiters); + ret = xnsynch_sleep_on(&curr->sigwait, timeout, XN_RELATIVE); + if (ret) { + list_del(&curr->signext); + ret = ret & XNBREAK ? -EINTR : -EAGAIN; + goto fail; + } + sig = si.si_signo; + sip = &si; +done: + /* + * si_overrun raises a nasty issue: we have to collect+clear + * it atomically before we drop the lock, and we don't know in + * advance whether any extension would use it along with the + * additional si_codes it may provide; yet we must drop the + * lock before running the signal_copyinfo handler. + * + * Observing that si_overrun is likely the only "unstable" + * data from the signal information which might change under + * our feet while we copy the bits to userland, we collect it + * here from the atomic section for all unknown si_codes, + * then pass its value to the signal_copyinfo handler. + */ + switch (sip->si_code) { + case SI_TIMER: + overrun = cobalt_timer_deliver(curr, sip->si_tid); + break; + case SI_USER: + case SI_MESGQ: + case SI_QUEUE: + overrun = 0; + break; + default: + overrun = sip->si_overrun; + if (overrun) + sip->si_overrun = 0; + } + + xnlock_put_irqrestore(&nklock, s); + + if (u_si == NULL) + goto out; /* Return signo only. */ + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + ret = sys32_put_siginfo(u_si, sip, overrun); + if (!ret) + /* Allow an extended target to receive more data. */ + cobalt_call_extension(signal_copyinfo_compat, + &curr->extref, ret, u_si, sip, + overrun); + } else +#endif + { + ret = signal_put_siginfo(u_si, sip, overrun); + if (!ret) + /* Allow an extended target to receive more data. */ + cobalt_call_extension(signal_copyinfo, &curr->extref, + ret, u_si, sip, overrun); + } + +out: + /* + * If we pulled the signal information from a sigpending + * block, release it to the free pool if applicable. + */ + if (sigp && + (void *)sigp >= sigpending_mem && + (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) { + xnlock_get_irqsave(&nklock, s); + list_add_tail(&sigp->next, &sigpending_pool); + xnlock_put_irqrestore(&nklock, s); + /* no more ref. to sigp beyond this point. */ + } + + return ret ? 
-EFAULT : sig; +fail: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_sigwait(sigset_t *set) +{ + return signal_wait(set, XN_INFINITE, NULL, false); +} + +COBALT_SYSCALL(sigwait, primary, + (const sigset_t __user *u_set, int __user *u_sig)) +{ + sigset_t set; + int sig; + + if (cobalt_copy_from_user(&set, u_set, sizeof(set))) + return -EFAULT; + + sig = signal_wait(&set, XN_INFINITE, NULL, false); + if (sig < 0) + return sig; + + return cobalt_copy_to_user(u_sig, &sig, sizeof(*u_sig)); +} + +int __cobalt_sigtimedwait(sigset_t *set, + const struct timespec64 *timeout, + void __user *u_si, + bool compat) +{ + xnticks_t ticks; + + if (!timespec64_valid(timeout)) + return -EINVAL; + ticks = ts2ns(timeout); + if (ticks++ == 0) + ticks = XN_NONBLOCK; + + return signal_wait(set, ticks, u_si, compat); +} + +COBALT_SYSCALL(sigtimedwait, nonrestartable, + (const sigset_t __user *u_set, + struct siginfo __user *u_si, + const struct __user_old_timespec __user *u_timeout)) +{ + struct timespec64 timeout; + sigset_t set; + + if (cobalt_copy_from_user(&set, u_set, sizeof(set))) + return -EFAULT; + + if (cobalt_copy_from_user(&timeout, u_timeout, sizeof(timeout))) + return -EFAULT; + + return __cobalt_sigtimedwait(&set, &timeout, u_si, false); +} + +COBALT_SYSCALL(sigtimedwait64, nonrestartable, + (const sigset_t __user *u_set, + struct siginfo __user *u_si, + const struct __kernel_timespec __user *u_timeout)) +{ + struct timespec64 timeout; + sigset_t set; + + if (cobalt_copy_from_user(&set, u_set, sizeof(set))) + return -EFAULT; + + if (cobalt_get_timespec64(&timeout, u_timeout)) + return -EFAULT; + + return __cobalt_sigtimedwait(&set, &timeout, u_si, false); +} + +int __cobalt_sigwaitinfo(sigset_t *set, + void __user *u_si, + bool compat) +{ + return signal_wait(set, XN_INFINITE, u_si, compat); +} + +COBALT_SYSCALL(sigwaitinfo, nonrestartable, + (const sigset_t __user *u_set, struct siginfo __user *u_si)) +{ + sigset_t set; + + if (cobalt_copy_from_user(&set, u_set, sizeof(set))) + return -EFAULT; + + return __cobalt_sigwaitinfo(&set, u_si, false); +} + +COBALT_SYSCALL(sigpending, primary, (old_sigset_t __user *u_set)) +{ + struct cobalt_thread *curr = cobalt_current_thread(); + + return cobalt_copy_to_user(u_set, &curr->sigpending, sizeof(*u_set)); +} + +int __cobalt_kill(struct cobalt_thread *thread, int sig, int group) /* nklocked, IRQs off */ +{ + struct cobalt_sigpending *sigp; + int ret = 0; + + /* + * We have undocumented pseudo-signals to suspend/resume/unblock + * threads, force them out of primary mode or even demote them + * to the weak scheduling class/priority. Process them early, + * before anyone can notice... + */ + switch(sig) { + case 0: + /* Check for existence only. */ + break; + case SIGSUSP: + /* + * All callers shall be tagged as conforming calls, so + * self-directed suspension can only happen from + * primary mode. Yummie. + */ + xnthread_suspend(&thread->threadbase, XNSUSP, + XN_INFINITE, XN_RELATIVE, NULL); + if (&thread->threadbase == xnthread_current() && + xnthread_test_info(&thread->threadbase, XNBREAK)) + ret = -EINTR; + break; + case SIGRESM: + xnthread_resume(&thread->threadbase, XNSUSP); + goto resched; + case SIGRELS: + xnthread_unblock(&thread->threadbase); + goto resched; + case SIGKICK: + xnthread_kick(&thread->threadbase); + goto resched; + case SIGDEMT: + xnthread_demote(&thread->threadbase); + goto resched; + case 1 ... 
_NSIG: + sigp = cobalt_signal_alloc(); + if (sigp) { + sigp->si.si_signo = sig; + sigp->si.si_errno = 0; + sigp->si.si_code = SI_USER; + sigp->si.si_pid = task_pid_nr(current); + sigp->si.si_uid = get_current_uuid(); + if (cobalt_signal_send(thread, sigp, group) <= 0) + cobalt_signal_free(sigp); + } + resched: + xnsched_run(); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +COBALT_SYSCALL(kill, conforming, (pid_t pid, int sig)) +{ + struct cobalt_thread *thread; + int ret; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + thread = cobalt_thread_find(pid); + if (thread == NULL) + ret = -ESRCH; + else + ret = __cobalt_kill(thread, sig, 1); + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value) +{ + struct cobalt_sigpending *sigp; + struct cobalt_thread *thread; + int ret = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + thread = cobalt_thread_find(pid); + if (thread == NULL) { + ret = -ESRCH; + goto out; + } + + switch(sig) { + case 0: + /* Check for existence only. */ + break; + case 1 ... _NSIG: + sigp = cobalt_signal_alloc(); + if (sigp) { + sigp->si.si_signo = sig; + sigp->si.si_errno = 0; + sigp->si.si_code = SI_QUEUE; + sigp->si.si_pid = task_pid_nr(current); + sigp->si.si_uid = get_current_uuid(); + sigp->si.si_value = *value; + if (cobalt_signal_send(thread, sigp, 1) <= 0) + cobalt_signal_free(sigp); + else + xnsched_run(); + } + break; + default: + /* Cobalt pseudo-signals are never process-directed. */ + ret = __cobalt_kill(thread, sig, 0); + } +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(__cobalt_sigqueue); + +COBALT_SYSCALL(sigqueue, conforming, + (pid_t pid, int sig, const union sigval __user *u_value)) +{ + union sigval val; + int ret; + + ret = cobalt_copy_from_user(&val, u_value, sizeof(val)); + + return ret ?: __cobalt_sigqueue(pid, sig, &val); +} + +__init int cobalt_signal_init(void) +{ + struct cobalt_sigpending *sigp; + + sigpending_mem = xnheap_vmalloc(__SIGPOOL_SIZE); + if (sigpending_mem == NULL) + return -ENOMEM; + + for (sigp = sigpending_mem; + (void *)sigp < sigpending_mem + __SIGPOOL_SIZE; sigp++) + list_add_tail(&sigp->next, &sigpending_pool); + + return 0; +} + +__init void cobalt_signal_cleanup(void) +{ + xnheap_vfree(sigpending_mem); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h new file mode 100644 index 0000000..0b5d11e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_POSIX_SIGNAL_H +#define _COBALT_POSIX_SIGNAL_H + +#include <linux/signal.h> +#include <cobalt/kernel/timer.h> +#include <cobalt/kernel/list.h> +#include <cobalt/uapi/signal.h> +#include <xenomai/posix/syscall.h> + +struct cobalt_thread; + +struct cobalt_sigpending { + struct siginfo si; + struct list_head next; +}; + +static inline +void cobalt_copy_siginfo(int code, + struct siginfo *__restrict__ dst, + const struct siginfo *__restrict__ src) +{ + dst->si_signo = src->si_signo; + dst->si_errno = src->si_errno; + dst->si_code = code; + + switch (code) { + case SI_TIMER: + dst->si_tid = src->si_tid; + dst->si_overrun = src->si_overrun; + dst->si_value = src->si_value; + break; + case SI_QUEUE: + case SI_MESGQ: + dst->si_value = src->si_value; + fallthrough; + case SI_USER: + dst->si_pid = src->si_pid; + dst->si_uid = src->si_uid; + } +} + +int __cobalt_sigwait(sigset_t *set); + +int __cobalt_sigtimedwait(sigset_t *set, + const struct timespec64 *timeout, + void __user *u_si, + bool compat); + +int __cobalt_sigwaitinfo(sigset_t *set, + void __user *u_si, + bool compat); + +int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value); + +int cobalt_signal_send(struct cobalt_thread *thread, + struct cobalt_sigpending *sigp, + int group); + +int cobalt_signal_send_pid(pid_t pid, + struct cobalt_sigpending *sigp); + +struct cobalt_sigpending *cobalt_signal_alloc(void); + +void cobalt_signal_free(struct cobalt_sigpending *sigp); + +void cobalt_signal_flush(struct cobalt_thread *thread); + +int cobalt_signal_wait(sigset_t *set, struct siginfo *si, + xnticks_t timeout, xntmode_t tmode); + +int __cobalt_kill(struct cobalt_thread *thread, + int sig, int group); + +COBALT_SYSCALL_DECL(sigwait, + (const sigset_t __user *u_set, int __user *u_sig)); + +COBALT_SYSCALL_DECL(sigtimedwait, + (const sigset_t __user *u_set, + struct siginfo __user *u_si, + const struct __user_old_timespec __user *u_timeout)); + +COBALT_SYSCALL_DECL(sigtimedwait64, + (const sigset_t __user *u_set, + struct siginfo __user *u_si, + const struct __kernel_timespec __user *u_timeout)); + +COBALT_SYSCALL_DECL(sigwaitinfo, + (const sigset_t __user *u_set, + struct siginfo __user *u_si)); + +COBALT_SYSCALL_DECL(sigpending, + (old_sigset_t __user *u_set)); + +COBALT_SYSCALL_DECL(kill, (pid_t pid, int sig)); + +COBALT_SYSCALL_DECL(sigqueue, + (pid_t pid, int sig, const union sigval __user *u_value)); + +int cobalt_signal_init(void); + +void cobalt_signal_cleanup(void); + +#endif /* !_COBALT_POSIX_SIGNAL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c new file mode 100644 index 0000000..46c4998 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c @@ -0,0 +1,798 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org> + * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/types.h> +#include <linux/err.h> +#include <linux/sched.h> +#include <linux/kconfig.h> +#include <linux/unistd.h> +#include <cobalt/uapi/corectl.h> +#include <cobalt/kernel/tree.h> +#include <cobalt/kernel/vdso.h> +#include <cobalt/kernel/init.h> +#include <pipeline/kevents.h> +#include <pipeline/vdso_fallback.h> +#include <asm/syscall.h> +#include "internal.h" +#include "thread.h" +#include "sched.h" +#include "mutex.h" +#include "cond.h" +#include "mqueue.h" +#include "sem.h" +#include "signal.h" +#include "timer.h" +#include "monitor.h" +#include "clock.h" +#include "event.h" +#include "timerfd.h" +#include "io.h" +#include "corectl.h" +#include "../debug.h" +#include <trace/events/cobalt-posix.h> + +/* Syscall must run into the Linux domain. */ +#define __xn_exec_lostage 0x1 +/* Syscall must run into the Xenomai domain. */ +#define __xn_exec_histage 0x2 +/* Shadow syscall: caller must be mapped. */ +#define __xn_exec_shadow 0x4 +/* Switch back toggle; caller must return to its original mode. */ +#define __xn_exec_switchback 0x8 +/* Exec in current domain. */ +#define __xn_exec_current 0x10 +/* Exec in conforming domain, Xenomai or Linux. */ +#define __xn_exec_conforming 0x20 +/* Attempt syscall restart in the opposite domain upon -ENOSYS. */ +#define __xn_exec_adaptive 0x40 +/* Do not restart syscall upon signal receipt. */ +#define __xn_exec_norestart 0x80 +/* Shorthand for shadow init syscall. */ +#define __xn_exec_init __xn_exec_lostage +/* Shorthand for shadow syscall in Xenomai space. */ +#define __xn_exec_primary (__xn_exec_shadow|__xn_exec_histage) +/* Shorthand for shadow syscall in Linux space. */ +#define __xn_exec_secondary (__xn_exec_shadow|__xn_exec_lostage) +/* Shorthand for syscall in Linux space with switchback if shadow. */ +#define __xn_exec_downup (__xn_exec_lostage|__xn_exec_switchback) +/* Shorthand for non-restartable primary syscall. */ +#define __xn_exec_nonrestartable (__xn_exec_primary|__xn_exec_norestart) +/* Domain probing syscall starting in conforming mode. */ +#define __xn_exec_probing (__xn_exec_conforming|__xn_exec_adaptive) +/* Hand over mode selection to syscall. */ +#define __xn_exec_handover (__xn_exec_current|__xn_exec_adaptive) + +typedef long (*cobalt_syshand)(unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5); + +static void prepare_for_signal(struct task_struct *p, + struct xnthread *thread, + struct pt_regs *regs, + int sysflags) +{ + int notify = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_info(thread, XNKICKED)) { + if (signal_pending(p)) { + __xn_error_return(regs, + (sysflags & __xn_exec_norestart) ? + -EINTR : -ERESTARTSYS); + notify = !xnthread_test_state(thread, XNSSTEP); + xnthread_clear_info(thread, XNBREAK); + } + xnthread_clear_info(thread, XNKICKED); + } + + xnlock_put_irqrestore(&nklock, s); + + xnthread_test_cancel(); + + xnthread_relax(notify, SIGDEBUG_MIGRATE_SIGNAL); +} + +static COBALT_SYSCALL(migrate, current, (int domain)) +{ + struct xnthread *thread = xnthread_current(); + + if (is_secondary_domain()) { + if (domain == COBALT_PRIMARY) { + if (thread == NULL) + return -EPERM; + /* + * Paranoid: a corner case where userland + * fiddles with SIGSHADOW while the target + * thread is still waiting to be started. 
+ */ + if (xnthread_test_state(thread, XNDORMANT)) + return 0; + + return xnthread_harden() ? : 1; + } + return 0; + } + + /* We are running on the head stage, apply relax request. */ + if (domain == COBALT_SECONDARY) { + xnthread_relax(0, 0); + return 1; + } + + return 0; +} + +static COBALT_SYSCALL(trace, current, + (int op, unsigned long a1, + unsigned long a2, unsigned long a3)) +{ + int ret = -EINVAL; + + switch (op) { + case __xntrace_op_max_begin: + ret = xntrace_max_begin(a1); + break; + + case __xntrace_op_max_end: + ret = xntrace_max_end(a1); + break; + + case __xntrace_op_max_reset: + ret = xntrace_max_reset(); + break; + + case __xntrace_op_user_start: + ret = xntrace_user_start(); + break; + + case __xntrace_op_user_stop: + ret = xntrace_user_stop(a1); + break; + + case __xntrace_op_user_freeze: + ret = xntrace_user_freeze(a1, a2); + break; + + case __xntrace_op_special: + ret = xntrace_special(a1 & 0xFF, a2); + break; + + case __xntrace_op_special_u64: + ret = xntrace_special_u64(a1 & 0xFF, + (((u64) a2) << 32) | a3); + break; + + case __xntrace_op_latpeak_freeze: + xntrace_latpeak_freeze(a1); + ret = 0; + break; + + } + return ret; +} + +static COBALT_SYSCALL(ftrace_puts, current, + (const char __user *str)) +{ + char buf[256]; + ssize_t len; + + len = cobalt_strncpy_from_user(buf, str, sizeof(buf)); + if (len < 0) + return -EFAULT; + +#ifdef CONFIG_TRACING + __trace_puts(_THIS_IP_, buf, len); +#endif + + return 0; +} + +static COBALT_SYSCALL(archcall, current, + (unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5)) +{ + return xnarch_local_syscall(a1, a2, a3, a4, a5); +} + +static COBALT_SYSCALL(get_current, current, + (xnhandle_t __user *u_handle)) +{ + struct xnthread *cur = xnthread_current(); + + if (cur == NULL) + return -EPERM; + + return cobalt_copy_to_user(u_handle, &cur->handle, + sizeof(*u_handle)); +} + +static COBALT_SYSCALL(backtrace, lostage, + (int nr, unsigned long __user *u_backtrace, int reason)) +{ + unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH]; + int ret; + + /* + * In case backtrace() in userland is broken or fails, we may + * want to know about this in kernel space, for future use. + */ + if (nr <= 0) + return 0; + /* + * We may omit the older frames if we can't store the full + * backtrace. + */ + if (nr > SIGSHADOW_BACKTRACE_DEPTH) + nr = SIGSHADOW_BACKTRACE_DEPTH; + /* + * Fetch the backtrace array, filled with PC values as seen + * from the relaxing thread in user-space. + */ + ret = cobalt_copy_from_user(backtrace, u_backtrace, nr * sizeof(long)); + if (ret) + return ret; + + xndebug_trace_relax(nr, backtrace, reason); + + return 0; +} + +static COBALT_SYSCALL(serialdbg, current, + (const char __user *u_msg, int len)) +{ + char buf[128]; + int n; + + while (len > 0) { + n = len; + if (n > sizeof(buf)) + n = sizeof(buf); + if (cobalt_copy_from_user(buf, u_msg, n)) + return -EFAULT; + raw_printk("%.*s", n, buf); + u_msg += n; + len -= n; + } + + return 0; +} + +static void stringify_feature_set(unsigned long fset, char *buf, int size) +{ + unsigned long feature; + int nc, nfeat; + + *buf = '\0'; + + for (feature = 1, nc = nfeat = 0; fset != 0 && size > 0; feature <<= 1) { + if (fset & feature) { + nc = ksformat(buf, size, "%s%s", + nfeat > 0 ? 
" " : "", + get_feature_label(feature)); + nfeat++; + size -= nc; + buf += nc; + fset &= ~feature; + } + } +} + +static COBALT_SYSCALL(bind, lostage, + (struct cobalt_bindreq __user *u_breq)) +{ + unsigned long featreq, featmis; + struct cobalt_bindreq breq; + struct cobalt_featinfo *f; + int abirev; + + if (cobalt_copy_from_user(&breq, u_breq, sizeof(breq))) + return -EFAULT; + + f = &breq.feat_ret; + featreq = breq.feat_req; + if (!realtime_core_running() && (featreq & __xn_feat_control) == 0) + return -EAGAIN; + + /* + * Calculate the missing feature set: + * kernel_unavailable_set & user_mandatory_set. + */ + featmis = (~XENOMAI_FEAT_DEP & (featreq & XENOMAI_FEAT_MAN)); + abirev = breq.abi_rev; + + /* + * Pass back the supported feature set and the ABI revision + * level to user-space. + */ + f->feat_all = XENOMAI_FEAT_DEP; + stringify_feature_set(XENOMAI_FEAT_DEP, f->feat_all_s, + sizeof(f->feat_all_s)); + f->feat_man = featreq & XENOMAI_FEAT_MAN; + stringify_feature_set(f->feat_man, f->feat_man_s, + sizeof(f->feat_man_s)); + f->feat_mis = featmis; + stringify_feature_set(featmis, f->feat_mis_s, + sizeof(f->feat_mis_s)); + f->feat_req = featreq; + stringify_feature_set(featreq, f->feat_req_s, + sizeof(f->feat_req_s)); + f->feat_abirev = XENOMAI_ABI_REV; + collect_arch_features(f); + + pipeline_collect_features(f); + f->vdso_offset = cobalt_umm_offset(&cobalt_ppd_get(1)->umm, nkvdso); + + if (cobalt_copy_to_user(u_breq, &breq, sizeof(breq))) + return -EFAULT; + + /* + * If some mandatory features the user-space code relies on + * are missing at kernel level, we cannot go further. + */ + if (featmis) + return -EINVAL; + + if (!check_abi_revision(abirev)) + return -ENOEXEC; + + return cobalt_bind_core(featreq); +} + +static COBALT_SYSCALL(extend, lostage, (unsigned int magic)) +{ + return cobalt_bind_personality(magic); +} + +static int CoBaLt_ni(void) +{ + return -ENOSYS; +} + +/* + * We have a single syscall table for all ABI models, i.e. 64bit + * native + 32bit emulation) or plain 32bit. + * + * The syscall table is set up in a single step, based on three + * subsequent sources of initializers: + * + * - first, all syscall entries are defaulted to a placeholder + * returning -ENOSYS (__COBALT_CALL_NI), as the table may be sparse. + * + * - then __COBALT_CALL_ENTRY() produces a native call entry + * (e.g. pure 64bit call handler for a 64bit architecture, 32bit + * handler for a 32bit architecture), optionally followed by a set of + * 32bit syscall entries offset by an arch-specific base index, which + * default to the native calls. These nitty-gritty details are defined + * by <asm/xenomai/syscall32.h>. 32bit architectures - or 64bit ones + * for which we don't support any 32bit ABI model - will simply define + * __COBALT_CALL32_ENTRY() as an empty macro. + * + * - finally, 32bit thunk entries are generated by including + * <asm/xenomai/syscall32-table.h>, overriding the default handlers + * installed during the previous step. + * + * For instance, with CONFIG_IA32_EMULATION support enabled in an + * x86_64 kernel, sc_cobalt_mq_timedreceive would appear twice in the + * table, as: + * + * [sc_cobalt_mq_timedreceive] = CoBaLt_mq_timedreceive, + * ... + * [sc_cobalt_mq_timedreceive + __COBALT_IA32_BASE] = CoBaLt32emu_mq_timedreceive, + * + * CoBaLt32emu_mq_timedreceive() would do the required thunking for + * dealing with the 32<->64bit conversion of arguments. 
On the other + * hand, sc_cobalt_sched_yield - which does not require any thunk - + * would also appear twice, but both entries would point at the native + * syscall implementation: + * + * [sc_cobalt_sched_yield] = CoBaLt_sched_yield, + * ... + * [sc_cobalt_sched_yield + __COBALT_IA32_BASE] = CoBaLt_sched_yield, + * + * Accordingly, applications targeting the ia32 model issue syscalls + * in the range [__COBALT_IA32_BASE..__COBALT_IA32_BASE + + * __NR_COBALT_SYSCALLS-1], whilst native (32/64bit) ones issue + * syscalls in the range [0..__NR_COBALT_SYSCALLS-1]. + * + * In short, this is an incremental process where the arch-specific + * code can override the 32bit syscall entries, pointing at the thunk + * routines it may need for handing 32bit calls over to their + * respective 64bit implementation. + * + * By convention, there is NO pure 32bit syscall, which means that + * each 32bit syscall defined by a compat ABI interface MUST match a + * native (64bit) syscall. This is important as we share the call + * modes (i.e. __xn_exec_ bits) between all ABI models. + * + * --rpm + */ +#define __syshand__(__name) \ + ((cobalt_syshand)(void (*)(void))(CoBaLt_ ## __name)) + +#define __COBALT_NI __syshand__(ni) + +#define __COBALT_CALL_NI \ + [0 ... __NR_COBALT_SYSCALLS-1] = __COBALT_NI, \ + __COBALT_CALL32_INITHAND(__COBALT_NI) + +#define __COBALT_CALL_NFLAGS \ + [0 ... __NR_COBALT_SYSCALLS-1] = 0, \ + __COBALT_CALL32_INITMODE(0) + +#define __COBALT_CALL_ENTRY(__name) \ + [sc_cobalt_ ## __name] = __syshand__(__name), \ + __COBALT_CALL32_ENTRY(__name, __syshand__(__name)) + +#define __COBALT_MODE(__name, __mode) \ + [sc_cobalt_ ## __name] = __xn_exec_##__mode, + +#ifdef CONFIG_XENO_ARCH_SYS3264 +#include "syscall32.h" +#endif + +#include "syscall_entries.h" + +static const cobalt_syshand cobalt_syscalls[] = { + __COBALT_CALL_NI + __COBALT_CALL_ENTRIES +#ifdef CONFIG_XENO_ARCH_SYS3264 +#include <asm/xenomai/syscall32-table.h> +#endif +}; + +static const int cobalt_sysmodes[] = { + __COBALT_CALL_NFLAGS + __COBALT_CALL_MODES +}; + +static inline int allowed_syscall(struct cobalt_process *process, + struct xnthread *thread, + int sysflags, int nr) +{ + if (nr == sc_cobalt_bind) + return 1; + + if (process == NULL) + return 0; + + if (thread == NULL && (sysflags & __xn_exec_shadow)) + return 0; + + return cap_raised(current_cap(), CAP_SYS_NICE); +} + +int handle_head_syscall(bool caller_is_relaxed, struct pt_regs *regs) +{ + struct cobalt_process *process; + int switched, sigs, sysflags; + struct xnthread *thread; + cobalt_syshand handler; + struct task_struct *p; + unsigned long args[6]; + unsigned int nr, code; + long ret; + + if (!__xn_syscall_p(regs)) + goto linux_syscall; + + thread = xnthread_current(); + code = __xn_syscall(regs); + if (code >= ARRAY_SIZE(cobalt_syscalls)) + goto bad_syscall; + + nr = code & (__NR_COBALT_SYSCALLS - 1); + + trace_cobalt_head_sysentry(code); + + process = cobalt_current_process(); + if (process == NULL) { + process = cobalt_search_process(current->mm); + cobalt_set_process(process); + } + + handler = cobalt_syscalls[code]; + sysflags = cobalt_sysmodes[nr]; + + /* + * Executing Cobalt services requires CAP_SYS_NICE, except for + * sc_cobalt_bind which does its own checks. + */ + if (unlikely(!allowed_syscall(process, thread, sysflags, nr))) { + /* + * Exclude get_current from reporting, it is used to probe the + * execution context. 
+ */ + if (XENO_DEBUG(COBALT) && nr != sc_cobalt_get_current) + printk(XENO_WARNING + "syscall <%d> denied to %s[%d]\n", + nr, current->comm, task_pid_nr(current)); + __xn_error_return(regs, -EPERM); + goto ret_handled; + } + + if (sysflags & __xn_exec_conforming) + /* + * If the conforming exec bit is set, turn the exec + * bitmask for the syscall into the most appropriate + * setup for the caller, i.e. Xenomai domain for + * shadow threads, Linux otherwise. + */ + sysflags |= (thread ? __xn_exec_histage : __xn_exec_lostage); + + /* + * Here we have to dispatch the syscall execution properly, + * depending on: + * + * o Whether the syscall must be run into the Linux or Xenomai + * domain, or indifferently in the current Xenomai domain. + * + * o Whether the caller currently runs in the Linux or Xenomai + * domain. + */ +restart: + /* + * Process adaptive syscalls by restarting them in the + * opposite domain upon receiving -ENOSYS from the syscall + * handler. + */ + switched = 0; + if (sysflags & __xn_exec_lostage) { + /* + * The syscall must run from the Linux domain. + */ + if (!caller_is_relaxed) { + /* + * Request originates from the Xenomai domain: + * relax the caller then invoke the syscall + * handler right after. + */ + xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL); + switched = 1; + } else + /* + * Request originates from the Linux domain: + * propagate the event to our Linux-based + * handler, so that the syscall is executed + * from there. + */ + return KEVENT_PROPAGATE; + } else if (sysflags & (__xn_exec_histage | __xn_exec_current)) { + /* + * Syscall must run either from the Xenomai domain, or + * from the calling domain. + * + * If the request originates from the Linux domain, + * hand it over to our secondary-mode dispatcher. + * Otherwise, invoke the syscall handler immediately. + */ + if (caller_is_relaxed) + return KEVENT_PROPAGATE; + } + + /* + * 'thread' has to be valid from that point: all syscalls + * regular threads may call have been pipelined to the root + * handler (lostage ones), or rejected by allowed_syscall(). + */ + + p = current; + pipeline_get_syscall_args(p, regs, args); + + ret = handler(args[0], args[1], args[2], args[3], args[4]); + if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) { + if (switched) { + ret = xnthread_harden(); + if (ret) { + switched = 0; + goto done; + } + } else /* Mark the primary -> secondary transition. */ + xnthread_set_localinfo(thread, XNDESCENT); + sysflags ^= + (__xn_exec_lostage | __xn_exec_histage | + __xn_exec_adaptive); + goto restart; + } +done: + __xn_status_return(regs, ret); + sigs = 0; + if (!xnsched_root_p()) { + if (signal_pending(p) || + xnthread_test_info(thread, XNKICKED)) { + sigs = 1; + prepare_for_signal(p, thread, regs, sysflags); + } else if (xnthread_test_state(thread, XNWEAK) && + thread->res_count == 0) { + if (switched) + switched = 0; + else + xnthread_relax(0, 0); + } + } + if (!sigs && (sysflags & __xn_exec_switchback) && switched) + /* -EPERM will be trapped later if needed. */ + xnthread_harden(); + +ret_handled: + /* Update the stats and userland-visible state. 
*/ + if (thread) { + xnthread_clear_localinfo(thread, XNDESCENT); + xnstat_counter_inc(&thread->stat.xsc); + xnthread_sync_window(thread); + } + + trace_cobalt_head_sysexit(__xn_reg_rval(regs)); + + return KEVENT_STOP; + +linux_syscall: + if (xnsched_root_p()) + /* + * The call originates from the Linux domain, either + * from a relaxed shadow or from a regular Linux task; + * just propagate the event so that we will fall back + * to handle_root_syscall(). + */ + return KEVENT_PROPAGATE; + + if (!__xn_rootcall_p(regs, &code)) + goto bad_syscall; + + if (pipeline_handle_vdso_fallback(code, regs)) + return KEVENT_STOP; + + /* + * We know this is a Cobalt thread since it runs over the head + * domain, however the current syscall should be handled by + * the host kernel instead. Before this happens, we have to + * re-enter the root domain. + */ + xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL); + + return KEVENT_PROPAGATE; + +bad_syscall: + printk(XENO_WARNING "bad syscall <%#x>\n", code); + + __xn_error_return(regs, -ENOSYS); + + return KEVENT_STOP; +} + +int handle_root_syscall(struct pt_regs *regs) +{ + int sysflags, switched, sigs; + struct xnthread *thread; + cobalt_syshand handler; + struct task_struct *p; + unsigned long args[6]; + unsigned int nr, code; + long ret; + + /* + * Catch cancellation requests pending for user shadows + * running mostly in secondary mode, i.e. XNWEAK. In that + * case, we won't run prepare_for_signal() that frequently, so + * check for cancellation here. + */ + xnthread_test_cancel(); + + if (!__xn_syscall_p(regs)) + /* Fall back to Linux syscall handling. */ + return KEVENT_PROPAGATE; + + thread = xnthread_current(); + /* code has already been checked in the head domain handler. */ + code = __xn_syscall(regs); + nr = code & (__NR_COBALT_SYSCALLS - 1); + + trace_cobalt_root_sysentry(code); + + /* Processing a Xenomai syscall. */ + + handler = cobalt_syscalls[code]; + sysflags = cobalt_sysmodes[nr]; + + if (thread && (sysflags & __xn_exec_conforming)) + sysflags |= __xn_exec_histage; +restart: + /* + * Process adaptive syscalls by restarting them in the + * opposite domain upon receiving -ENOSYS from the syscall + * handler. + */ + switched = 0; + if (sysflags & __xn_exec_histage) { + /* + * This request originates from the Linux domain but + * should run into the Xenomai domain: harden the + * caller before invoking the syscall handler. + */ + ret = xnthread_harden(); + if (ret) { + __xn_error_return(regs, ret); + goto ret_handled; + } + switched = 1; + } else { + /* + * We want to run the syscall in the current Linux + * domain. This is a slow path, so proceed with any + * pending schedparam update on the fly. + */ + if (thread) + xnthread_propagate_schedparam(thread); + } + + p = current; + pipeline_get_syscall_args(p, regs, args); + + ret = handler(args[0], args[1], args[2], args[3], args[4]); + if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) { + sysflags ^= __xn_exec_histage; + if (switched) { + xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL); + sysflags &= ~__xn_exec_adaptive; + /* Mark the primary -> secondary transition. */ + xnthread_set_localinfo(thread, XNDESCENT); + } + goto restart; + } + + __xn_status_return(regs, ret); + + sigs = 0; + if (!xnsched_root_p()) { + /* + * We may have gained a shadow TCB from the syscall we + * just invoked, so make sure to fetch it. 
+ */ + thread = xnthread_current(); + if (signal_pending(p)) { + sigs = 1; + prepare_for_signal(p, thread, regs, sysflags); + } else if (xnthread_test_state(thread, XNWEAK) && + thread->res_count == 0) + sysflags |= __xn_exec_switchback; + } + if (!sigs && (sysflags & __xn_exec_switchback) + && (switched || xnsched_primary_p())) + xnthread_relax(0, 0); + +ret_handled: + /* Update the stats and userland-visible state. */ + if (thread) { + xnthread_clear_localinfo(thread, XNDESCENT|XNHICCUP); + xnstat_counter_inc(&thread->stat.xsc); + xnthread_sync_window(thread); + } + + trace_cobalt_root_sysexit(__xn_reg_rval(regs)); + + return KEVENT_STOP; +} + +long cobalt_restart_syscall_placeholder(struct restart_block *param) +{ + return -EINVAL; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h new file mode 100644 index 0000000..3a4c98d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_SYSCALL_H +#define _COBALT_POSIX_SYSCALL_H + +#include <cobalt/uapi/syscall.h> + +struct pt_regs; + +/* Regular (native) syscall handler implementation. */ +#define COBALT_SYSCALL(__name, __mode, __args) \ + long CoBaLt_ ## __name __args + +/* Regular (native) syscall handler declaration. */ +#define COBALT_SYSCALL_DECL(__name, __args) \ + long CoBaLt_ ## __name __args + +#include <asm/xenomai/syscall32.h> + +int handle_head_syscall(bool caller_is_relaxed, + struct pt_regs *regs); + +int handle_root_syscall(struct pt_regs *regs); + +#endif /* !_COBALT_POSIX_SYSCALL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c new file mode 100644 index 0000000..9be0971 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c @@ -0,0 +1,963 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/types.h> +#include <linux/err.h> +#include <cobalt/uapi/syscall.h> +#include <cobalt/kernel/time.h> +#include <xenomai/rtdm/internal.h> +#include "internal.h" +#include "syscall32.h" +#include "thread.h" +#include "mutex.h" +#include "cond.h" +#include "sem.h" +#include "sched.h" +#include "clock.h" +#include "timer.h" +#include "timerfd.h" +#include "signal.h" +#include "monitor.h" +#include "event.h" +#include "mqueue.h" +#include "io.h" +#include "../debug.h" + +COBALT_SYSCALL32emu(thread_create, init, + (compat_ulong_t pth, + int policy, + const struct compat_sched_param_ex __user *u_param_ex, + int xid, + __u32 __user *u_winoff)) +{ + struct sched_param_ex param_ex; + int ret; + + ret = sys32_get_param_ex(policy, &param_ex, u_param_ex); + if (ret) + return ret; + + return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff); +} + +COBALT_SYSCALL32emu(thread_setschedparam_ex, conforming, + (compat_ulong_t pth, + int policy, + const struct compat_sched_param_ex __user *u_param_ex, + __u32 __user *u_winoff, + int __user *u_promoted)) +{ + struct sched_param_ex param_ex; + int ret; + + ret = sys32_get_param_ex(policy, &param_ex, u_param_ex); + if (ret) + return ret; + + return cobalt_thread_setschedparam_ex(pth, policy, &param_ex, + u_winoff, u_promoted); +} + +COBALT_SYSCALL32emu(thread_getschedparam_ex, current, + (compat_ulong_t pth, + int __user *u_policy, + struct compat_sched_param_ex __user *u_param)) +{ + struct sched_param_ex param_ex; + int ret, policy; + + ret = cobalt_thread_getschedparam_ex(pth, &policy, &param_ex); + if (ret) + return ret; + + ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy)); + + return ret ?: sys32_put_param_ex(policy, u_param, &param_ex); +} + +COBALT_SYSCALL32emu(thread_setschedprio, conforming, + (compat_ulong_t pth, + int prio, + __u32 __user *u_winoff, + int __user *u_promoted)) +{ + return cobalt_thread_setschedprio(pth, prio, u_winoff, u_promoted); +} + +static inline int sys32_fetch_timeout(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? -EFAULT : + sys32_get_timespec(ts, u_ts); +} + +COBALT_SYSCALL32emu(sem_open, lostage, + (compat_uptr_t __user *u_addrp, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value)) +{ + struct cobalt_sem_shadow __user *usm; + compat_uptr_t cusm; + + if (__xn_get_user(cusm, u_addrp)) + return -EFAULT; + + usm = __cobalt_sem_open(compat_ptr(cusm), u_name, oflags, mode, value); + if (IS_ERR(usm)) + return PTR_ERR(usm); + + return __xn_put_user(ptr_to_compat(usm), u_addrp) ? -EFAULT : 0; +} + +COBALT_SYSCALL32emu(sem_timedwait, primary, + (struct cobalt_sem_shadow __user *u_sem, + const struct old_timespec32 __user *u_ts)) +{ + int ret = 1; + struct timespec64 ts64; + + if (u_ts) + ret = sys32_fetch_timeout(&ts64, u_ts); + + return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64); +} + +COBALT_SYSCALL32emu(sem_timedwait64, primary, + (struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_sem_timedwait64(u_sem, u_ts); +} + +COBALT_SYSCALL32emu(clock_getres, current, + (clockid_t clock_id, + struct old_timespec32 __user *u_ts)) +{ + struct timespec64 ts; + int ret; + + ret = __cobalt_clock_getres(clock_id, &ts); + if (ret) + return ret; + + return u_ts ? 
sys32_put_timespec(u_ts, &ts) : 0; +} + +COBALT_SYSCALL32emu(clock_getres64, current, + (clockid_t clock_id, + struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_clock_getres64(clock_id, u_ts); +} + +COBALT_SYSCALL32emu(clock_gettime, current, + (clockid_t clock_id, + struct old_timespec32 __user *u_ts)) +{ + struct timespec64 ts; + int ret; + + ret = __cobalt_clock_gettime(clock_id, &ts); + if (ret) + return ret; + + return sys32_put_timespec(u_ts, &ts); +} + +COBALT_SYSCALL32emu(clock_gettime64, current, + (clockid_t clock_id, + struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_clock_gettime64(clock_id, u_ts); +} + +COBALT_SYSCALL32emu(clock_settime, current, + (clockid_t clock_id, + const struct old_timespec32 __user *u_ts)) +{ + struct timespec64 ts; + int ret; + + ret = sys32_get_timespec(&ts, u_ts); + if (ret) + return ret; + + return __cobalt_clock_settime(clock_id, &ts); +} + +COBALT_SYSCALL32emu(clock_settime64, current, + (clockid_t clock_id, + const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_clock_settime64(clock_id, u_ts); +} + +COBALT_SYSCALL32emu(clock_adjtime, current, + (clockid_t clock_id, struct old_timex32 __user *u_tx)) +{ + struct __kernel_timex tx; + int ret; + + ret = sys32_get_timex(&tx, u_tx); + if (ret) + return ret; + + ret = __cobalt_clock_adjtime(clock_id, &tx); + if (ret) + return ret; + + return sys32_put_timex(u_tx, &tx); +} + +COBALT_SYSCALL32emu(clock_adjtime64, current, + (clockid_t clock_id, struct __kernel_timex __user *u_tx)) +{ + return __cobalt_clock_adjtime64(clock_id, u_tx); +} + + +COBALT_SYSCALL32emu(clock_nanosleep, primary, + (clockid_t clock_id, int flags, + const struct old_timespec32 __user *u_rqt, + struct old_timespec32 __user *u_rmt)) +{ + struct timespec64 rqt, rmt, *rmtp = NULL; + int ret; + + if (u_rmt) + rmtp = &rmt; + + ret = sys32_get_timespec(&rqt, u_rqt); + if (ret) + return ret; + + ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp); + if (ret == -EINTR && flags == 0 && rmtp) + ret = sys32_put_timespec(u_rmt, rmtp); + + return ret; +} + +COBALT_SYSCALL32emu(clock_nanosleep64, nonrestartable, + (clockid_t clock_id, int flags, + const struct __kernel_timespec __user *u_rqt, + struct __kernel_timespec __user *u_rmt)) +{ + return __cobalt_clock_nanosleep64(clock_id, flags, u_rqt, u_rmt); +} + + +COBALT_SYSCALL32emu(mutex_timedlock, primary, + (struct cobalt_mutex_shadow __user *u_mx, + const struct old_timespec32 __user *u_ts)) +{ + return __cobalt_mutex_timedlock_break(u_mx, u_ts, sys32_fetch_timeout); +} + +COBALT_SYSCALL32emu(mutex_timedlock64, primary, + (struct cobalt_mutex_shadow __user *u_mx, + const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_mutex_timedlock64(u_mx, u_ts); +} + +COBALT_SYSCALL32emu(cond_wait_prologue, nonrestartable, + (struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx, + int *u_err, + unsigned int timed, + struct old_timespec32 __user *u_ts)) +{ + return __cobalt_cond_wait_prologue(u_cnd, u_mx, u_err, u_ts, + timed ? 
sys32_fetch_timeout : NULL); +} + +COBALT_SYSCALL32emu(mq_open, lostage, + (const char __user *u_name, int oflags, + mode_t mode, struct compat_mq_attr __user *u_attr)) +{ + struct mq_attr _attr, *attr = &_attr; + int ret; + + if ((oflags & O_CREAT) && u_attr) { + ret = sys32_get_mqattr(&_attr, u_attr); + if (ret) + return ret; + } else + attr = NULL; + + return __cobalt_mq_open(u_name, oflags, mode, attr); +} + +COBALT_SYSCALL32emu(mq_getattr, current, + (mqd_t uqd, struct compat_mq_attr __user *u_attr)) +{ + struct mq_attr attr; + int ret; + + ret = __cobalt_mq_getattr(uqd, &attr); + if (ret) + return ret; + + return sys32_put_mqattr(u_attr, &attr); +} + +COBALT_SYSCALL32emu(mq_timedsend, primary, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, + const struct old_timespec32 __user *u_ts)) +{ + return __cobalt_mq_timedsend(uqd, u_buf, len, prio, + u_ts, u_ts ? sys32_fetch_timeout : NULL); +} + +COBALT_SYSCALL32emu(mq_timedsend64, primary, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, + const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_mq_timedsend64(uqd, u_buf, len, prio, u_ts); +} + +COBALT_SYSCALL32emu(mq_timedreceive, primary, + (mqd_t uqd, void __user *u_buf, + compat_ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct old_timespec32 __user *u_ts)) +{ + compat_ssize_t clen; + ssize_t len; + int ret; + + ret = cobalt_copy_from_user(&clen, u_len, sizeof(*u_len)); + if (ret) + return ret; + + len = clen; + ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio, + u_ts, u_ts ? sys32_fetch_timeout : NULL); + clen = len; + + return ret ?: cobalt_copy_to_user(u_len, &clen, sizeof(*u_len)); +} + +COBALT_SYSCALL32emu(mq_timedreceive64, primary, + (mqd_t uqd, void __user *u_buf, + compat_ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __kernel_timespec __user *u_ts)) +{ + compat_ssize_t clen; + ssize_t len; + int ret; + + ret = cobalt_copy_from_user(&clen, u_len, sizeof(*u_len)); + if (ret) + return ret; + + len = clen; + ret = __cobalt_mq_timedreceive64(uqd, u_buf, &len, u_prio, u_ts); + clen = len; + + return ret ?: cobalt_copy_to_user(u_len, &clen, sizeof(*u_len)); +} + +static inline int mq_fetch_timeout(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts); +} + +COBALT_SYSCALL32emu(mq_notify, primary, + (mqd_t fd, const struct compat_sigevent *__user u_cev)) +{ + struct sigevent sev; + int ret; + + if (u_cev) { + ret = sys32_get_sigevent(&sev, u_cev); + if (ret) + return ret; + } + + return __cobalt_mq_notify(fd, u_cev ? 
&sev : NULL); +} + +COBALT_SYSCALL32emu(sched_weightprio, current, + (int policy, + const struct compat_sched_param_ex __user *u_param)) +{ + struct sched_param_ex param_ex; + int ret; + + ret = sys32_get_param_ex(policy, &param_ex, u_param); + if (ret) + return ret; + + return __cobalt_sched_weightprio(policy, &param_ex); +} + +static union sched_config * +sys32_fetch_config(int policy, const void __user *u_config, size_t *len) +{ + union compat_sched_config *cbuf; + union sched_config *buf; + int ret, n; + + if (u_config == NULL) + return ERR_PTR(-EFAULT); + + if (policy == SCHED_QUOTA && *len < sizeof(cbuf->quota)) + return ERR_PTR(-EINVAL); + + cbuf = xnmalloc(*len); + if (cbuf == NULL) + return ERR_PTR(-ENOMEM); + + ret = cobalt_copy_from_user(cbuf, u_config, *len); + if (ret) { + buf = ERR_PTR(ret); + goto out; + } + + switch (policy) { + case SCHED_TP: + *len = sched_tp_confsz(cbuf->tp.nr_windows); + break; + case SCHED_QUOTA: + break; + default: + buf = ERR_PTR(-EINVAL); + goto out; + } + + buf = xnmalloc(*len); + if (buf == NULL) { + buf = ERR_PTR(-ENOMEM); + goto out; + } + + if (policy == SCHED_QUOTA) + memcpy(&buf->quota, &cbuf->quota, sizeof(cbuf->quota)); + else { + buf->tp.op = cbuf->tp.op; + buf->tp.nr_windows = cbuf->tp.nr_windows; + for (n = 0; n < buf->tp.nr_windows; n++) { + buf->tp.windows[n].ptid = cbuf->tp.windows[n].ptid; + buf->tp.windows[n].offset.tv_sec = cbuf->tp.windows[n].offset.tv_sec; + buf->tp.windows[n].offset.tv_nsec = cbuf->tp.windows[n].offset.tv_nsec; + buf->tp.windows[n].duration.tv_sec = cbuf->tp.windows[n].duration.tv_sec; + buf->tp.windows[n].duration.tv_nsec = cbuf->tp.windows[n].duration.tv_nsec; + } + } +out: + xnfree(cbuf); + + return buf; +} + +static int sys32_ack_config(int policy, const union sched_config *config, + void __user *u_config) +{ + union compat_sched_config __user *u_p = u_config; + + if (policy != SCHED_QUOTA) + return 0; + + return u_config == NULL ? 
-EFAULT : + cobalt_copy_to_user(&u_p->quota.info, &config->quota.info, + sizeof(u_p->quota.info)); +} + +static ssize_t sys32_put_config(int policy, + void __user *u_config, size_t u_len, + const union sched_config *config, size_t len) +{ + union compat_sched_config __user *u_p = u_config; + int n, ret; + + if (u_config == NULL) + return -EFAULT; + + if (policy == SCHED_QUOTA) { + if (u_len < sizeof(u_p->quota)) + return -EINVAL; + return cobalt_copy_to_user(&u_p->quota.info, &config->quota.info, + sizeof(u_p->quota.info)) ?: + sizeof(u_p->quota.info); + } + + /* SCHED_TP */ + + if (u_len < compat_sched_tp_confsz(config->tp.nr_windows)) + return -ENOSPC; + + __xn_put_user(config->tp.op, &u_p->tp.op); + __xn_put_user(config->tp.nr_windows, &u_p->tp.nr_windows); + + for (n = 0, ret = 0; n < config->tp.nr_windows; n++) { + ret |= __xn_put_user(config->tp.windows[n].ptid, + &u_p->tp.windows[n].ptid); + ret |= __xn_put_user(config->tp.windows[n].offset.tv_sec, + &u_p->tp.windows[n].offset.tv_sec); + ret |= __xn_put_user(config->tp.windows[n].offset.tv_nsec, + &u_p->tp.windows[n].offset.tv_nsec); + ret |= __xn_put_user(config->tp.windows[n].duration.tv_sec, + &u_p->tp.windows[n].duration.tv_sec); + ret |= __xn_put_user(config->tp.windows[n].duration.tv_nsec, + &u_p->tp.windows[n].duration.tv_nsec); + } + + return ret ?: u_len; +} + +COBALT_SYSCALL32emu(sched_setconfig_np, conforming, + (int cpu, int policy, + union compat_sched_config __user *u_config, + size_t len)) +{ + return __cobalt_sched_setconfig_np(cpu, policy, u_config, len, + sys32_fetch_config, sys32_ack_config); +} + +COBALT_SYSCALL32emu(sched_getconfig_np, conforming, + (int cpu, int policy, + union compat_sched_config __user *u_config, + size_t len)) +{ + return __cobalt_sched_getconfig_np(cpu, policy, u_config, len, + sys32_fetch_config, sys32_put_config); +} + +COBALT_SYSCALL32emu(sched_setscheduler_ex, conforming, + (compat_pid_t pid, + int policy, + const struct compat_sched_param_ex __user *u_param_ex, + __u32 __user *u_winoff, + int __user *u_promoted)) +{ + struct sched_param_ex param_ex; + int ret; + + ret = sys32_get_param_ex(policy, &param_ex, u_param_ex); + if (ret) + return ret; + + return cobalt_sched_setscheduler_ex(pid, policy, &param_ex, + u_winoff, u_promoted); +} + +COBALT_SYSCALL32emu(sched_getscheduler_ex, current, + (compat_pid_t pid, + int __user *u_policy, + struct compat_sched_param_ex __user *u_param)) +{ + struct sched_param_ex param_ex; + int ret, policy; + + ret = cobalt_sched_getscheduler_ex(pid, &policy, &param_ex); + if (ret) + return ret; + + ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy)); + + return ret ?: sys32_put_param_ex(policy, u_param, &param_ex); +} + +COBALT_SYSCALL32emu(timer_create, current, + (clockid_t clock, + const struct compat_sigevent __user *u_sev, + timer_t __user *u_tm)) +{ + struct sigevent sev, *evp = NULL; + int ret; + + if (u_sev) { + evp = &sev; + ret = sys32_get_sigevent(&sev, u_sev); + if (ret) + return ret; + } + + return __cobalt_timer_create(clock, evp, u_tm); +} + +COBALT_SYSCALL32emu(timer_settime, primary, + (timer_t tm, int flags, + const struct old_itimerspec32 __user *u_newval, + struct old_itimerspec32 __user *u_oldval)) +{ + struct itimerspec64 newv, oldv, *oldvp = &oldv; + int ret; + + if (u_oldval == NULL) + oldvp = NULL; + + ret = sys32_get_itimerspec(&newv, u_newval); + if (ret) + return ret; + + ret = __cobalt_timer_settime(tm, flags, &newv, oldvp); + if (ret) + return ret; + + if (oldvp) { + ret = sys32_put_itimerspec(u_oldval, oldvp); + if (ret) +
__cobalt_timer_settime(tm, flags, oldvp, NULL); + } + + return ret; +} + +COBALT_SYSCALL32emu(timer_gettime, current, + (timer_t tm, struct old_itimerspec32 __user *u_val)) +{ + struct itimerspec64 val; + int ret; + + ret = __cobalt_timer_gettime(tm, &val); + + return ret ?: sys32_put_itimerspec(u_val, &val); +} + +COBALT_SYSCALL32emu(timerfd_settime, primary, + (int fd, int flags, + const struct old_itimerspec32 __user *new_value, + struct old_itimerspec32 __user *old_value)) +{ + struct itimerspec64 ovalue, value; + int ret; + + ret = sys32_get_itimerspec(&value, new_value); + if (ret) + return ret; + + ret = __cobalt_timerfd_settime(fd, flags, &value, &ovalue); + if (ret) + return ret; + + if (old_value) { + ret = sys32_put_itimerspec(old_value, &ovalue); + value.it_value.tv_sec = 0; + value.it_value.tv_nsec = 0; + __cobalt_timerfd_settime(fd, flags, &value, NULL); + } + + return ret; +} + +COBALT_SYSCALL32emu(timerfd_gettime, current, + (int fd, struct old_itimerspec32 __user *curr_value)) +{ + struct itimerspec64 value; + int ret; + + ret = __cobalt_timerfd_gettime(fd, &value); + + return ret ?: sys32_put_itimerspec(curr_value, &value); +} + +COBALT_SYSCALL32emu(sigwait, primary, + (const compat_sigset_t __user *u_set, + int __user *u_sig)) +{ + sigset_t set; + int ret, sig; + + ret = sys32_get_sigset(&set, u_set); + if (ret) + return ret; + + sig = __cobalt_sigwait(&set); + if (sig < 0) + return sig; + + return cobalt_copy_to_user(u_sig, &sig, sizeof(*u_sig)); +} + +COBALT_SYSCALL32emu(sigtimedwait, nonrestartable, + (const compat_sigset_t __user *u_set, + struct compat_siginfo __user *u_si, + const struct old_timespec32 __user *u_timeout)) +{ + struct timespec64 timeout; + sigset_t set; + int ret; + + ret = sys32_get_sigset(&set, u_set); + if (ret) + return ret; + + ret = sys32_get_timespec(&timeout, u_timeout); + if (ret) + return ret; + + return __cobalt_sigtimedwait(&set, &timeout, u_si, true); +} + +COBALT_SYSCALL32emu(sigtimedwait64, nonrestartable, + (const compat_sigset_t __user *u_set, + struct compat_siginfo __user *u_si, + const struct __kernel_timespec __user *u_timeout)) +{ + struct timespec64 timeout; + sigset_t set; + int ret; + + ret = sys32_get_sigset(&set, u_set); + if (ret) + return ret; + + ret = cobalt_get_timespec64(&timeout, u_timeout); + if (ret) + return ret; + + return __cobalt_sigtimedwait(&set, &timeout, u_si, true); +} + +COBALT_SYSCALL32emu(sigwaitinfo, nonrestartable, + (const compat_sigset_t __user *u_set, + struct compat_siginfo __user *u_si)) +{ + sigset_t set; + int ret; + + ret = sys32_get_sigset(&set, u_set); + if (ret) + return ret; + + return __cobalt_sigwaitinfo(&set, u_si, true); +} + +COBALT_SYSCALL32emu(sigpending, primary, (compat_old_sigset_t __user *u_set)) +{ + struct cobalt_thread *curr = cobalt_current_thread(); + + return sys32_put_sigset((compat_sigset_t *)u_set, &curr->sigpending); +} + +COBALT_SYSCALL32emu(sigqueue, conforming, + (pid_t pid, int sig, + const union compat_sigval __user *u_value)) +{ + union sigval val; + int ret; + + ret = sys32_get_sigval(&val, u_value); + + return ret ?: __cobalt_sigqueue(pid, sig, &val); +} + +COBALT_SYSCALL32emu(monitor_wait, nonrestartable, + (struct cobalt_monitor_shadow __user *u_mon, + int event, const struct old_timespec32 __user *u_ts, + int __user *u_ret)) +{ + struct timespec64 ts, *tsp = NULL; + int ret; + + if (u_ts) { + tsp = &ts; + ret = sys32_get_timespec(&ts, u_ts); + if (ret) + return ret; + } + + return __cobalt_monitor_wait(u_mon, event, tsp, u_ret); +} + 
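All of the timed wrappers above share one convention: the 32-bit timeout is never converted up front. The raw user pointer travels together with an ABI-specific fetch callback ("u_ts ? sys32_fetch_timeout : NULL"), so the core service only reads user memory on the path that actually has to block. What follows is a minimal stand-alone sketch of that callback pattern, not the kernel code itself: the names fetch_timeout32 and core_timedop are hypothetical, and plain pointers stand in for __user pointers so the snippet builds in user space.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel types (illustration only). */
struct timespec64 { int64_t tv_sec; long tv_nsec; };
struct old_timespec32 { int32_t tv_sec; int32_t tv_nsec; };

/* ABI-specific fetcher, playing the role of sys32_fetch_timeout():
 * widens a 32-bit timespec into the 64-bit form the core works on. */
static int fetch_timeout32(struct timespec64 *ts, const void *u_ts)
{
	const struct old_timespec32 *ts32 = u_ts;

	if (u_ts == NULL)
		return -1; /* -EFAULT in the real code */

	ts->tv_sec = ts32->tv_sec;
	ts->tv_nsec = ts32->tv_nsec;

	return 0;
}

/* Hypothetical core-side helper: dereferences the user pointer,
 * through the callback, only when the operation must block. */
static int core_timedop(int must_block, const void *u_ts,
			int (*fetch_timeout)(struct timespec64 *ts,
					     const void *u_ts))
{
	struct timespec64 ts;

	if (!must_block || fetch_timeout == NULL)
		return 0; /* fast path: user memory never touched */

	if (fetch_timeout(&ts, u_ts))
		return -1;

	printf("blocking for %lld.%09lds\n",
	       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

int main(void)
{
	struct old_timespec32 ts32 = { .tv_sec = 1, .tv_nsec = 500000000 };

	/* Mirrors the "u_ts ? sys32_fetch_timeout : NULL" convention. */
	return core_timedop(1, &ts32, fetch_timeout32);
}

This split is what lets a single core implementation serve the native, compat and time64 ABIs: only the fetch callback differs.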
+COBALT_SYSCALL32emu(monitor_wait64, nonrestartable, + (struct cobalt_monitor_shadow __user *u_mon, int event, + const struct __kernel_timespec __user *u_ts, + int __user *u_ret)) +{ + return __cobalt_monitor_wait64(u_mon, event, u_ts, u_ret); +} + +COBALT_SYSCALL32emu(event_wait, primary, + (struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct old_timespec32 __user *u_ts)) +{ + struct timespec64 ts, *tsp = NULL; + int ret; + + if (u_ts) { + tsp = &ts; + ret = sys32_get_timespec(&ts, u_ts); + if (ret) + return ret; + } + + return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp); +} + +COBALT_SYSCALL32emu(event_wait64, primary, + (struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_event_wait64(u_event, bits, u_bits_r, mode, u_ts); +} + +COBALT_SYSCALL32emu(select, primary, + (int nfds, + compat_fd_set __user *u_rfds, + compat_fd_set __user *u_wfds, + compat_fd_set __user *u_xfds, + struct old_timeval32 __user *u_tv)) +{ + return __cobalt_select(nfds, u_rfds, u_wfds, u_xfds, u_tv, true); +} + +COBALT_SYSCALL32emu(recvmsg, handover, + (int fd, struct compat_msghdr __user *umsg, + int flags)) +{ + struct user_msghdr m; + ssize_t ret; + + ret = sys32_get_msghdr(&m, umsg); + if (ret) + return ret; + + ret = rtdm_fd_recvmsg(fd, &m, flags); + if (ret < 0) + return ret; + + return sys32_put_msghdr(umsg, &m) ?: ret; +} + +static int get_timespec32(struct timespec64 *ts, + const void __user *u_ts) +{ + return sys32_get_timespec(ts, u_ts); +} + +static int get_mmsg32(struct mmsghdr *mmsg, void __user *u_mmsg) +{ + return sys32_get_mmsghdr(mmsg, u_mmsg); +} + +static int put_mmsg32(void __user **u_mmsg_p, const struct mmsghdr *mmsg) +{ + struct compat_mmsghdr __user **p = (struct compat_mmsghdr **)u_mmsg_p, + *q __user = (*p)++; + + return sys32_put_mmsghdr(q, mmsg); +} + +COBALT_SYSCALL32emu(recvmmsg, primary, + (int ufd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen, + unsigned int flags, struct old_timespec32 *u_timeout)) +{ + return __rtdm_fd_recvmmsg(ufd, u_msgvec, vlen, flags, u_timeout, + get_mmsg32, put_mmsg32, + get_timespec32); +} + +COBALT_SYSCALL32emu(recvmmsg64, primary, + (int ufd, struct compat_mmsghdr __user *u_msgvec, + unsigned int vlen, unsigned int flags, + struct __kernel_timespec *u_timeout)) +{ + return __rtdm_fd_recvmmsg64(ufd, u_msgvec, vlen, flags, u_timeout, + get_mmsg32, put_mmsg32); +} + +COBALT_SYSCALL32emu(sendmsg, handover, + (int fd, struct compat_msghdr __user *umsg, int flags)) +{ + struct user_msghdr m; + int ret; + + ret = sys32_get_msghdr(&m, umsg); + + return ret ?: rtdm_fd_sendmsg(fd, &m, flags); +} + +static int put_mmsglen32(void __user **u_mmsg_p, const struct mmsghdr *mmsg) +{ + struct compat_mmsghdr __user **p = (struct compat_mmsghdr **)u_mmsg_p, + *q __user = (*p)++; + + return __xn_put_user(mmsg->msg_len, &q->msg_len); +} + +COBALT_SYSCALL32emu(sendmmsg, primary, + (int fd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen, + unsigned int flags)) +{ + return __rtdm_fd_sendmmsg(fd, u_msgvec, vlen, flags, + get_mmsg32, put_mmsglen32); +} + +COBALT_SYSCALL32emu(mmap, lostage, + (int fd, struct compat_rtdm_mmap_request __user *u_crma, + compat_uptr_t __user *u_caddrp)) +{ + struct _rtdm_mmap_request rma; + compat_uptr_t u_caddr; + void *u_addr = NULL; + int ret; + + if (u_crma == NULL || + !access_rok(u_crma, sizeof(*u_crma)) || + 
__xn_get_user(rma.length, &u_crma->length) || + __xn_get_user(rma.offset, &u_crma->offset) || + __xn_get_user(rma.prot, &u_crma->prot) || + __xn_get_user(rma.flags, &u_crma->flags)) + return -EFAULT; + + ret = rtdm_fd_mmap(fd, &rma, &u_addr); + if (ret) + return ret; + + u_caddr = ptr_to_compat(u_addr); + + return cobalt_copy_to_user(u_caddrp, &u_caddr, sizeof(u_caddr)); +} + +COBALT_SYSCALL32emu(backtrace, current, + (int nr, compat_ulong_t __user *u_backtrace, + int reason)) +{ + compat_ulong_t cbacktrace[SIGSHADOW_BACKTRACE_DEPTH]; + unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH]; + int ret, n; + + if (nr <= 0) + return 0; + + if (nr > SIGSHADOW_BACKTRACE_DEPTH) + nr = SIGSHADOW_BACKTRACE_DEPTH; + + ret = cobalt_copy_from_user(cbacktrace, u_backtrace, + nr * sizeof(compat_ulong_t)); + if (ret) + return ret; + + for (n = 0; n < nr; n++) + backtrace[n] = cbacktrace[n]; + + xndebug_trace_relax(nr, backtrace, reason); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h new file mode 100644 index 0000000..37f58ef --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h @@ -0,0 +1,293 @@ +/* + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ +#ifndef _COBALT_POSIX_SYSCALL32_H +#define _COBALT_POSIX_SYSCALL32_H + +#include <cobalt/kernel/compat.h> + +struct cobalt_mutex_shadow; +struct cobalt_event_shadow; +struct cobalt_cond_shadow; +struct cobalt_sem_shadow; +struct cobalt_monitor_shadow; + +COBALT_SYSCALL32emu_DECL(thread_create, + (compat_ulong_t pth, + int policy, + const struct compat_sched_param_ex __user *u_param_ex, + int xid, + __u32 __user *u_winoff)); + +COBALT_SYSCALL32emu_DECL(thread_setschedparam_ex, + (compat_ulong_t pth, + int policy, + const struct compat_sched_param_ex __user *u_param, + __u32 __user *u_winoff, + int __user *u_promoted)); + +COBALT_SYSCALL32emu_DECL(thread_getschedparam_ex, + (compat_ulong_t pth, + int __user *u_policy, + struct compat_sched_param_ex __user *u_param)); + +COBALT_SYSCALL32emu_DECL(thread_setschedprio, + (compat_ulong_t pth, + int prio, + __u32 __user *u_winoff, + int __user *u_promoted)); + +COBALT_SYSCALL32emu_DECL(clock_getres, + (clockid_t clock_id, + struct old_timespec32 __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(clock_getres64, + (clockid_t clock_id, + struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(clock_gettime, + (clockid_t clock_id, + struct old_timespec32 __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(clock_gettime64, + (clockid_t clock_id, + struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(clock_settime, + (clockid_t clock_id, + const struct old_timespec32 __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(clock_settime64, + (clockid_t clock_id, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(clock_adjtime, + (clockid_t clock_id, + struct old_timex32 __user *u_tx)); + +COBALT_SYSCALL32emu_DECL(clock_adjtime64, + (clockid_t clock_id, + struct __kernel_timex __user *u_tx)); + +COBALT_SYSCALL32emu_DECL(clock_nanosleep, + (clockid_t clock_id, int flags, + const struct old_timespec32 __user *u_rqt, + struct old_timespec32 __user *u_rmt)); + +COBALT_SYSCALL32emu_DECL(clock_nanosleep64, + (clockid_t clock_id, int flags, + const struct __kernel_timespec __user *u_rqt, + struct __kernel_timespec __user *u_rmt)); + + +COBALT_SYSCALL32emu_DECL(mutex_timedlock, + (struct cobalt_mutex_shadow __user *u_mx, + const struct old_timespec32 __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(mutex_timedlock64, + (struct cobalt_mutex_shadow __user *u_mx, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(cond_wait_prologue, + (struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx, + int *u_err, + unsigned int timed, + struct old_timespec32 __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(mq_open, + (const char __user *u_name, int oflags, + mode_t mode, struct compat_mq_attr __user *u_attr)); + +COBALT_SYSCALL32emu_DECL(mq_getattr, + (mqd_t uqd, struct compat_mq_attr __user *u_attr)); + +COBALT_SYSCALL32emu_DECL(mq_timedsend, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, + const struct old_timespec32 __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(mq_timedsend64, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(mq_timedreceive, + (mqd_t uqd, void __user *u_buf, + compat_ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct old_timespec32 __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(mq_timedreceive64, + (mqd_t uqd, void __user *u_buf, + compat_ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __kernel_timespec __user *u_ts)); 
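Note how each timed service is declared twice above: the legacy form takes old_timespec32, whose signed 32-bit tv_sec overflows in January 2038, while the *64 variant takes __kernel_timespec with a 64-bit seconds field. A quick stand-alone check of that rollover (illustration only, no kernel headers involved):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 2038-01-19T03:14:07Z is the last instant representable in a
	 * signed 32-bit seconds counter. */
	int64_t last = INT32_MAX;

	int32_t narrow = (int32_t)(last + 1); /* like old_timespec32.tv_sec */
	int64_t wide = last + 1;              /* like __kernel_timespec.tv_sec */

	/* On the usual two's-complement targets the narrow counter
	 * wraps negative, i.e. back to 1901. */
	printf("32-bit tv_sec: %d, 64-bit tv_sec: %lld\n",
	       narrow, (long long)wide);
	return 0;
}

That overflow is why every blocking service grows a *64 twin here rather than silently widening the old layout.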
+ +COBALT_SYSCALL32emu_DECL(mq_notify, + (mqd_t fd, const struct compat_sigevent *__user u_cev)); + +COBALT_SYSCALL32emu_DECL(sched_weightprio, + (int policy, + const struct compat_sched_param_ex __user *u_param)); + +COBALT_SYSCALL32emu_DECL(sched_setconfig_np, + (int cpu, int policy, + union compat_sched_config __user *u_config, + size_t len)); + +COBALT_SYSCALL32emu_DECL(sched_getconfig_np, + (int cpu, int policy, + union compat_sched_config __user *u_config, + size_t len)); + +COBALT_SYSCALL32emu_DECL(sched_setscheduler_ex, + (compat_pid_t pid, + int policy, + const struct compat_sched_param_ex __user *u_param, + __u32 __user *u_winoff, + int __user *u_promoted)); + +COBALT_SYSCALL32emu_DECL(sched_getscheduler_ex, + (compat_pid_t pid, + int __user *u_policy, + struct compat_sched_param_ex __user *u_param)); + +COBALT_SYSCALL32emu_DECL(timer_create, + (clockid_t clock, + const struct compat_sigevent __user *u_sev, + timer_t __user *u_tm)); + +COBALT_SYSCALL32emu_DECL(timer_settime, + (timer_t tm, int flags, + const struct old_itimerspec32 __user *u_newval, + struct old_itimerspec32 __user *u_oldval)); + +COBALT_SYSCALL32emu_DECL(timer_gettime, + (timer_t tm, + struct old_itimerspec32 __user *u_val)); + +COBALT_SYSCALL32emu_DECL(timerfd_settime, + (int fd, int flags, + const struct old_itimerspec32 __user *new_value, + struct old_itimerspec32 __user *old_value)); + +COBALT_SYSCALL32emu_DECL(timerfd_gettime, + (int fd, struct old_itimerspec32 __user *value)); + +COBALT_SYSCALL32emu_DECL(sigwait, + (const compat_sigset_t __user *u_set, + int __user *u_sig)); + +COBALT_SYSCALL32emu_DECL(sigtimedwait, + (const compat_sigset_t __user *u_set, + struct compat_siginfo __user *u_si, + const struct old_timespec32 __user *u_timeout)); + +COBALT_SYSCALL32emu_DECL(sigtimedwait64, + (const compat_sigset_t __user *u_set, + struct compat_siginfo __user *u_si, + const struct __kernel_timespec __user *u_timeout)); + +COBALT_SYSCALL32emu_DECL(sigwaitinfo, + (const compat_sigset_t __user *u_set, + struct compat_siginfo __user *u_si)); + +COBALT_SYSCALL32emu_DECL(sigpending, + (compat_old_sigset_t __user *u_set)); + +COBALT_SYSCALL32emu_DECL(sigqueue, + (pid_t pid, int sig, + const union compat_sigval __user *u_value)); + +COBALT_SYSCALL32emu_DECL(monitor_wait, + (struct cobalt_monitor_shadow __user *u_mon, + int event, const struct old_timespec32 __user *u_ts, + int __user *u_ret)); + +COBALT_SYSCALL32emu_DECL(monitor_wait64, + (struct cobalt_monitor_shadow __user *u_mon, + int event, + const struct __kernel_timespec __user *u_ts, + int __user *u_ret)); + +COBALT_SYSCALL32emu_DECL(event_wait, + (struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct old_timespec32 __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(event_wait64, + (struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(select, + (int nfds, + compat_fd_set __user *u_rfds, + compat_fd_set __user *u_wfds, + compat_fd_set __user *u_xfds, + struct old_timeval32 __user *u_tv)); + +COBALT_SYSCALL32emu_DECL(recvmsg, + (int fd, struct compat_msghdr __user *umsg, + int flags)); + +COBALT_SYSCALL32emu_DECL(recvmmsg, + (int fd, struct compat_mmsghdr __user *u_msgvec, + unsigned int vlen, + unsigned int flags, struct old_timespec32 *u_timeout)); + +COBALT_SYSCALL32emu_DECL(recvmmsg64, + (int fd, struct compat_mmsghdr __user *u_msgvec, + unsigned int vlen, + 
unsigned int flags, + struct __kernel_timespec *u_timeout)); + +COBALT_SYSCALL32emu_DECL(sendmsg, + (int fd, struct compat_msghdr __user *umsg, + int flags)); + +COBALT_SYSCALL32emu_DECL(sendmmsg, + (int fd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen, + unsigned int flags)); + +COBALT_SYSCALL32emu_DECL(mmap, + (int fd, + struct compat_rtdm_mmap_request __user *u_rma, + compat_uptr_t __user *u_addrp)); + +COBALT_SYSCALL32emu_DECL(backtrace, + (int nr, compat_ulong_t __user *u_backtrace, + int reason)); + +COBALT_SYSCALL32emu_DECL(sem_open, + (compat_uptr_t __user *u_addrp, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value)); + +COBALT_SYSCALL32emu_DECL(sem_timedwait, + (struct cobalt_sem_shadow __user *u_sem, + const struct old_timespec32 __user *u_ts)); + +COBALT_SYSCALL32emu_DECL(sem_timedwait64, + (struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts)); + +#endif /* !_COBALT_POSIX_SYSCALL32_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c new file mode 100644 index 0000000..94a6e39 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c @@ -0,0 +1,954 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/types.h> +#include <linux/cred.h> +#include <linux/jhash.h> +#include <linux/signal.h> +#include <linux/jiffies.h> +#include <linux/err.h> +#include "internal.h" +#include "thread.h" +#include "sched.h" +#include "signal.h" +#include "timer.h" +#include "clock.h" +#include "sem.h" +#define CREATE_TRACE_POINTS +#include <trace/events/cobalt-posix.h> + +xnticks_t cobalt_time_slice = CONFIG_XENO_OPT_RR_QUANTUM * 1000; + +#define PTHREAD_HSLOTS (1 << 8) /* Must be a power of 2 */ + +/* Process-local index, pthread_t x mm_struct (cobalt_local_hkey). */ +struct local_thread_hash { + pid_t pid; + struct cobalt_thread *thread; + struct cobalt_local_hkey hkey; + struct local_thread_hash *next; +}; + +/* System-wide index on task_pid_nr(). 
*/ +struct global_thread_hash { + pid_t pid; + struct cobalt_thread *thread; + struct global_thread_hash *next; +}; + +static struct local_thread_hash *local_index[PTHREAD_HSLOTS]; + +static struct global_thread_hash *global_index[PTHREAD_HSLOTS]; + +static inline struct local_thread_hash * +thread_hash(const struct cobalt_local_hkey *hkey, + struct cobalt_thread *thread, pid_t pid) +{ + struct global_thread_hash **ghead, *gslot; + struct local_thread_hash **lhead, *lslot; + u32 hash; + void *p; + spl_t s; + + p = xnmalloc(sizeof(*lslot) + sizeof(*gslot)); + if (p == NULL) + return NULL; + + lslot = p; + lslot->hkey = *hkey; + lslot->thread = thread; + lslot->pid = pid; + hash = jhash2((u32 *)&lslot->hkey, + sizeof(lslot->hkey) / sizeof(u32), 0); + lhead = &local_index[hash & (PTHREAD_HSLOTS - 1)]; + + gslot = p + sizeof(*lslot); + gslot->pid = pid; + gslot->thread = thread; + hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0); + ghead = &global_index[hash & (PTHREAD_HSLOTS - 1)]; + + xnlock_get_irqsave(&nklock, s); + lslot->next = *lhead; + *lhead = lslot; + gslot->next = *ghead; + *ghead = gslot; + xnlock_put_irqrestore(&nklock, s); + + return lslot; +} + +static inline void thread_unhash(const struct cobalt_local_hkey *hkey) +{ + struct global_thread_hash **gtail, *gslot; + struct local_thread_hash **ltail, *lslot; + pid_t pid; + u32 hash; + spl_t s; + + hash = jhash2((u32 *) hkey, sizeof(*hkey) / sizeof(u32), 0); + ltail = &local_index[hash & (PTHREAD_HSLOTS - 1)]; + + xnlock_get_irqsave(&nklock, s); + + lslot = *ltail; + while (lslot && + (lslot->hkey.u_pth != hkey->u_pth || + lslot->hkey.mm != hkey->mm)) { + ltail = &lslot->next; + lslot = *ltail; + } + + if (lslot == NULL) { + xnlock_put_irqrestore(&nklock, s); + return; + } + + *ltail = lslot->next; + pid = lslot->pid; + hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0); + gtail = &global_index[hash & (PTHREAD_HSLOTS - 1)]; + gslot = *gtail; + while (gslot && gslot->pid != pid) { + gtail = &gslot->next; + gslot = *gtail; + } + /* gslot must be found here. */ + XENO_BUG_ON(COBALT, !(gslot && gtail)); + *gtail = gslot->next; + + xnlock_put_irqrestore(&nklock, s); + + xnfree(lslot); +} + +static struct cobalt_thread * +thread_lookup(const struct cobalt_local_hkey *hkey) +{ + struct local_thread_hash *lslot; + struct cobalt_thread *thread; + u32 hash; + spl_t s; + + hash = jhash2((u32 *)hkey, sizeof(*hkey) / sizeof(u32), 0); + lslot = local_index[hash & (PTHREAD_HSLOTS - 1)]; + + xnlock_get_irqsave(&nklock, s); + + while (lslot != NULL && + (lslot->hkey.u_pth != hkey->u_pth || lslot->hkey.mm != hkey->mm)) + lslot = lslot->next; + + thread = lslot ? lslot->thread : NULL; + + xnlock_put_irqrestore(&nklock, s); + + return thread; +} + +struct cobalt_thread *cobalt_thread_find(pid_t pid) /* nklocked, IRQs off */ +{ + struct global_thread_hash *gslot; + u32 hash; + + hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0); + + gslot = global_index[hash & (PTHREAD_HSLOTS - 1)]; + while (gslot && gslot->pid != pid) + gslot = gslot->next; + + return gslot ? 
gslot->thread : NULL; +} +EXPORT_SYMBOL_GPL(cobalt_thread_find); + +struct cobalt_thread *cobalt_thread_find_local(pid_t pid) /* nklocked, IRQs off */ +{ + struct cobalt_thread *thread; + + thread = cobalt_thread_find(pid); + if (thread == NULL || thread->hkey.mm != current->mm) + return NULL; + + return thread; +} +EXPORT_SYMBOL_GPL(cobalt_thread_find_local); + +struct cobalt_thread *cobalt_thread_lookup(unsigned long pth) /* nklocked, IRQs off */ +{ + struct cobalt_local_hkey hkey; + + hkey.u_pth = pth; + hkey.mm = current->mm; + return thread_lookup(&hkey); +} +EXPORT_SYMBOL_GPL(cobalt_thread_lookup); + +void cobalt_thread_map(struct xnthread *curr) +{ + struct cobalt_thread *thread; + + thread = container_of(curr, struct cobalt_thread, threadbase); + thread->process = cobalt_current_process(); + XENO_BUG_ON(COBALT, thread->process == NULL); +} + +struct xnthread_personality *cobalt_thread_exit(struct xnthread *curr) +{ + struct cobalt_thread *thread; + spl_t s; + + thread = container_of(curr, struct cobalt_thread, threadbase); + /* + * Unhash first, to prevent further access to the TCB from + * userland. + */ + thread_unhash(&thread->hkey); + xnlock_get_irqsave(&nklock, s); + cobalt_mark_deleted(thread); + list_del(&thread->next); + xnlock_put_irqrestore(&nklock, s); + cobalt_signal_flush(thread); + xnsynch_destroy(&thread->monitor_synch); + xnsynch_destroy(&thread->sigwait); + + return NULL; +} + +struct xnthread_personality *cobalt_thread_finalize(struct xnthread *zombie) +{ + struct cobalt_thread *thread; + + thread = container_of(zombie, struct cobalt_thread, threadbase); + xnfree(thread); + + return NULL; +} + +int __cobalt_thread_setschedparam_ex(struct cobalt_thread *thread, int policy, + const struct sched_param_ex *param_ex) +{ + struct xnsched_class *sched_class; + union xnsched_policy_param param; + xnticks_t tslice; + int ret = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC, + struct cobalt_thread)) { + ret = -ESRCH; + goto out; + } + + tslice = thread->threadbase.rrperiod; + sched_class = cobalt_sched_policy_param(&param, policy, + param_ex, &tslice); + if (sched_class == NULL) { + ret = -EINVAL; + goto out; + } + xnthread_set_slice(&thread->threadbase, tslice); + if (cobalt_call_extension(thread_setsched, &thread->extref, ret, + sched_class, &param) && ret) + goto out; + ret = xnthread_set_schedparam(&thread->threadbase, + sched_class, &param); + xnsched_run(); +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_thread_getschedparam_ex(struct cobalt_thread *thread, + int *policy_r, + struct sched_param_ex *param_ex) +{ + struct xnsched_class *base_class; + struct xnthread *base_thread; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC, + struct cobalt_thread)) { + xnlock_put_irqrestore(&nklock, s); + return -ESRCH; + } + + base_thread = &thread->threadbase; + base_class = base_thread->base_class; + *policy_r = base_class->policy; + + param_ex->sched_priority = xnthread_base_priority(base_thread); + if (param_ex->sched_priority == 0) /* SCHED_FIFO/SCHED_WEAK */ + *policy_r = SCHED_NORMAL; + + if (base_class == &xnsched_class_rt) { + if (xnthread_test_state(base_thread, XNRRB)) { + u_ns2ts(&param_ex->sched_rr_quantum, base_thread->rrperiod); + *policy_r = SCHED_RR; + } + goto out; + } + +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + if (base_class == &xnsched_class_weak) { + if (*policy_r != SCHED_WEAK) + param_ex->sched_priority = -param_ex->sched_priority; + goto
out; + } +#endif +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + if (base_class == &xnsched_class_sporadic) { + param_ex->sched_ss_low_priority = base_thread->pss->param.low_prio; + u_ns2ts(&param_ex->sched_ss_repl_period, base_thread->pss->param.repl_period); + u_ns2ts(&param_ex->sched_ss_init_budget, base_thread->pss->param.init_budget); + param_ex->sched_ss_max_repl = base_thread->pss->param.max_repl; + goto out; + } +#endif +#ifdef CONFIG_XENO_OPT_SCHED_TP + if (base_class == &xnsched_class_tp) { + param_ex->sched_tp_partition = + base_thread->tps - base_thread->sched->tp.partitions; + goto out; + } +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + if (base_class == &xnsched_class_quota) { + param_ex->sched_quota_group = base_thread->quota->tgid; + goto out; + } +#endif + +out: + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static int pthread_create(struct cobalt_thread **thread_p, + int policy, + const struct sched_param_ex *param_ex, + struct task_struct *task) +{ + struct cobalt_process *process = cobalt_current_process(); + struct xnsched_class *sched_class; + union xnsched_policy_param param; + struct xnthread_init_attr iattr; + struct cobalt_thread *thread; + xnticks_t tslice; + int ret, n; + spl_t s; + + thread = xnmalloc(sizeof(*thread)); + if (thread == NULL) + return -EAGAIN; + + tslice = cobalt_time_slice; + sched_class = cobalt_sched_policy_param(&param, policy, + param_ex, &tslice); + if (sched_class == NULL) { + xnfree(thread); + return -EINVAL; + } + + iattr.name = task->comm; + iattr.flags = XNUSER|XNFPU; + iattr.personality = &cobalt_personality; + iattr.affinity = CPU_MASK_ALL; + ret = xnthread_init(&thread->threadbase, &iattr, sched_class, &param); + if (ret) { + xnfree(thread); + return ret; + } + + thread->magic = COBALT_THREAD_MAGIC; + xnsynch_init(&thread->monitor_synch, XNSYNCH_FIFO, NULL); + + xnsynch_init(&thread->sigwait, XNSYNCH_FIFO, NULL); + sigemptyset(&thread->sigpending); + for (n = 0; n < _NSIG; n++) + INIT_LIST_HEAD(thread->sigqueues + n); + + xnthread_set_slice(&thread->threadbase, tslice); + cobalt_set_extref(&thread->extref, NULL, NULL); + + /* + * We need an anonymous registry entry to obtain a handle for + * fast mutex locking. + */ + ret = xnthread_register(&thread->threadbase, ""); + if (ret) { + xnsynch_destroy(&thread->monitor_synch); + xnsynch_destroy(&thread->sigwait); + __xnthread_discard(&thread->threadbase); + xnfree(thread); + return ret; + } + + xnlock_get_irqsave(&nklock, s); + list_add_tail(&thread->next, process ? &process->thread_list + : &cobalt_global_thread_list); + xnlock_put_irqrestore(&nklock, s); + + thread->hkey.u_pth = 0; + thread->hkey.mm = NULL; + + *thread_p = thread; + + return 0; +} + +static void pthread_discard(struct cobalt_thread *thread) +{ + spl_t s; + + xnsynch_destroy(&thread->monitor_synch); + xnsynch_destroy(&thread->sigwait); + + xnlock_get_irqsave(&nklock, s); + list_del(&thread->next); + xnlock_put_irqrestore(&nklock, s); + __xnthread_discard(&thread->threadbase); + xnfree(thread); +} + +static inline int pthread_setmode_np(int clrmask, int setmask, int *mode_r) +{ + const int valid_flags = XNLOCK|XNWARN|XNTRAPLB; + int old; + + /* + * The conforming mode bit is actually zero, since jumping to + * this code entailed switching to primary mode already. + */ + if ((clrmask & ~valid_flags) != 0 || (setmask & ~valid_flags) != 0) + return -EINVAL; + + old = xnthread_set_mode(clrmask, setmask); + if (mode_r) + *mode_r = old; + + if ((clrmask & ~setmask) & XNLOCK) + /* Reschedule if the scheduler has been unlocked.
*/ + xnsched_run(); + + return 0; +} + +static struct cobalt_thread *thread_lookup_or_shadow(unsigned long pth, + __u32 __user *u_winoff, + int *promoted_r) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + + *promoted_r = 0; + + hkey.u_pth = pth; + hkey.mm = current->mm; + + thread = thread_lookup(&hkey); + if (thread == NULL) { + if (u_winoff == NULL) + return ERR_PTR(-ESRCH); + + thread = cobalt_thread_shadow(&hkey, u_winoff); + if (!IS_ERR(thread)) + *promoted_r = 1; + } + + return thread; +} + +int cobalt_thread_setschedparam_ex(unsigned long pth, + int policy, + const struct sched_param_ex *param_ex, + __u32 __user *u_winoff, + int __user *u_promoted) +{ + struct cobalt_thread *thread; + int ret, promoted; + + trace_cobalt_pthread_setschedparam(pth, policy, param_ex); + + thread = thread_lookup_or_shadow(pth, u_winoff, &promoted); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex); + if (ret) + return ret; + + return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted)); +} + +COBALT_SYSCALL(thread_setschedparam_ex, conforming, + (unsigned long pth, + int policy, + const struct sched_param_ex __user *u_param, + __u32 __user *u_winoff, + int __user *u_promoted)) +{ + struct sched_param_ex param_ex; + + if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex))) + return -EFAULT; + + return cobalt_thread_setschedparam_ex(pth, policy, &param_ex, + u_winoff, u_promoted); +} + +int cobalt_thread_getschedparam_ex(unsigned long pth, + int *policy_r, + struct sched_param_ex *param_ex) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + int ret; + + hkey.u_pth = pth; + hkey.mm = current->mm; + thread = thread_lookup(&hkey); + if (thread == NULL) + return -ESRCH; + + ret = __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex); + if (ret) + return ret; + + trace_cobalt_pthread_getschedparam(pth, *policy_r, param_ex); + + return 0; +} + +COBALT_SYSCALL(thread_getschedparam_ex, current, + (unsigned long pth, + int __user *u_policy, + struct sched_param_ex __user *u_param)) +{ + struct sched_param_ex param_ex; + int ret, policy; + + ret = cobalt_thread_getschedparam_ex(pth, &policy, &param_ex); + if (ret) + return ret; + + ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy)); + if (ret) + return ret; + + return cobalt_copy_to_user(u_param, &param_ex, sizeof(param_ex)); +} + +int cobalt_thread_setschedprio(unsigned long pth, + int prio, + __u32 __user *u_winoff, + int __user *u_promoted) +{ + struct sched_param_ex param_ex; + struct cobalt_thread *thread; + int ret, policy, promoted; + + trace_cobalt_pthread_setschedprio(pth, prio); + + thread = thread_lookup_or_shadow(pth, u_winoff, &promoted); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + ret = __cobalt_thread_getschedparam_ex(thread, &policy, &param_ex); + if (ret) + return ret; + + param_ex.sched_priority = prio; + + ret = __cobalt_thread_setschedparam_ex(thread, policy, &param_ex); + if (ret) + return ret; + + return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted)); +} + +COBALT_SYSCALL(thread_setschedprio, conforming, + (unsigned long pth, + int prio, + __u32 __user *u_winoff, + int __user *u_promoted)) +{ + return cobalt_thread_setschedprio(pth, prio, u_winoff, u_promoted); +} + +int __cobalt_thread_create(unsigned long pth, int policy, + struct sched_param_ex *param_ex, + int xid, __u32 __user *u_winoff) +{ + struct cobalt_thread *thread = NULL; + struct task_struct *p = current; + struct cobalt_local_hkey hkey; + int
ret; + + trace_cobalt_pthread_create(pth, policy, param_ex); + + /* + * We have been passed the pthread_t identifier the user-space + * Cobalt library has assigned to our caller; we'll index our + * internal pthread_t descriptor in kernel space on it. + */ + hkey.u_pth = pth; + hkey.mm = p->mm; + + ret = pthread_create(&thread, policy, param_ex, p); + if (ret) + return ret; + + ret = cobalt_map_user(&thread->threadbase, u_winoff); + if (ret) { + pthread_discard(thread); + return ret; + } + + if (!thread_hash(&hkey, thread, task_pid_vnr(p))) { + ret = -EAGAIN; + goto fail; + } + + thread->hkey = hkey; + + if (xid > 0 && cobalt_push_personality(xid) == NULL) { + ret = -EINVAL; + goto fail; + } + + return xnthread_harden(); +fail: + xnthread_cancel(&thread->threadbase); + + return ret; +} + +COBALT_SYSCALL(thread_create, init, + (unsigned long pth, int policy, + struct sched_param_ex __user *u_param, + int xid, + __u32 __user *u_winoff)) +{ + struct sched_param_ex param_ex; + int ret; + + ret = cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)); + if (ret) + return ret; + + return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff); +} + +struct cobalt_thread * +cobalt_thread_shadow(struct cobalt_local_hkey *hkey, + __u32 __user *u_winoff) +{ + struct cobalt_thread *thread = NULL; + struct sched_param_ex param_ex; + int ret; + + if (xnthread_current()) + return ERR_PTR(-EBUSY); + + param_ex.sched_priority = 0; + trace_cobalt_pthread_create(hkey->u_pth, SCHED_NORMAL, &param_ex); + ret = pthread_create(&thread, SCHED_NORMAL, &param_ex, current); + if (ret) + return ERR_PTR(ret); + + ret = cobalt_map_user(&thread->threadbase, u_winoff); + if (ret) { + pthread_discard(thread); + return ERR_PTR(ret); + } + + if (!thread_hash(hkey, thread, task_pid_vnr(current))) { + ret = -EAGAIN; + goto fail; + } + + thread->hkey = *hkey; + + xnthread_harden(); + + return thread; +fail: + xnthread_cancel(&thread->threadbase); + + return ERR_PTR(ret); +} + +COBALT_SYSCALL(thread_setmode, primary, + (int clrmask, int setmask, int __user *u_mode_r)) +{ + int ret, old; + + trace_cobalt_pthread_setmode(clrmask, setmask); + + ret = pthread_setmode_np(clrmask, setmask, &old); + if (ret) + return ret; + + if (u_mode_r && cobalt_copy_to_user(u_mode_r, &old, sizeof(old))) + return -EFAULT; + + return 0; +} + +COBALT_SYSCALL(thread_setname, current, + (unsigned long pth, const char __user *u_name)) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + char name[XNOBJECT_NAME_LEN]; + struct task_struct *p; + spl_t s; + + if (cobalt_strncpy_from_user(name, u_name, + sizeof(name) - 1) < 0) + return -EFAULT; + + name[sizeof(name) - 1] = '\0'; + hkey.u_pth = pth; + hkey.mm = current->mm; + + trace_cobalt_pthread_setname(pth, name); + + xnlock_get_irqsave(&nklock, s); + + thread = thread_lookup(&hkey); + if (thread == NULL) { + xnlock_put_irqrestore(&nklock, s); + return -ESRCH; + } + + ksformat(thread->threadbase.name, + XNOBJECT_NAME_LEN - 1, "%s", name); + p = xnthread_host_task(&thread->threadbase); + get_task_struct(p); + + xnlock_put_irqrestore(&nklock, s); + + knamecpy(p->comm, name); + put_task_struct(p); + + return 0; +} + +COBALT_SYSCALL(thread_kill, conforming, + (unsigned long pth, int sig)) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + int ret; + spl_t s; + + trace_cobalt_pthread_kill(pth, sig); + + xnlock_get_irqsave(&nklock, s); + + hkey.u_pth = pth; + hkey.mm = current->mm; + thread = thread_lookup(&hkey); + if (thread == NULL) + ret = -ESRCH; + else + ret =
__cobalt_kill(thread, sig, 0); + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(thread_join, primary, (unsigned long pth)) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + spl_t s; + + trace_cobalt_pthread_join(pth); + + xnlock_get_irqsave(&nklock, s); + + hkey.u_pth = pth; + hkey.mm = current->mm; + thread = thread_lookup(&hkey); + + xnlock_put_irqrestore(&nklock, s); + + if (thread == NULL) + return -ESRCH; + + return xnthread_join(&thread->threadbase, false); +} + +COBALT_SYSCALL(thread_getpid, current, (unsigned long pth)) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + pid_t pid; + spl_t s; + + trace_cobalt_pthread_pid(pth); + + xnlock_get_irqsave(&nklock, s); + + hkey.u_pth = pth; + hkey.mm = current->mm; + thread = thread_lookup(&hkey); + if (thread == NULL) + pid = -ESRCH; + else + pid = xnthread_host_pid(&thread->threadbase); + + xnlock_put_irqrestore(&nklock, s); + + return pid; +} + +COBALT_SYSCALL(thread_getstat, current, + (pid_t pid, struct cobalt_threadstat __user *u_stat)) +{ + struct cobalt_threadstat stat; + struct cobalt_thread *p; + struct xnthread *thread; + xnticks_t xtime; + spl_t s; + + trace_cobalt_pthread_stat(pid); + + if (pid == 0) { + thread = xnthread_current(); + if (thread == NULL) + return -EPERM; + xnlock_get_irqsave(&nklock, s); + } else { + xnlock_get_irqsave(&nklock, s); + p = cobalt_thread_find(pid); + if (p == NULL) { + xnlock_put_irqrestore(&nklock, s); + return -ESRCH; + } + thread = &p->threadbase; + } + + /* We have to hold the nklock to keep most values consistent. */ + stat.cpu = xnsched_cpu(thread->sched); + stat.cprio = xnthread_current_priority(thread); + xtime = xnstat_exectime_get_total(&thread->stat.account); + if (thread->sched->curr == thread) + xtime += xnstat_exectime_now() - + xnstat_exectime_get_last_switch(thread->sched); + stat.xtime = xnclock_ticks_to_ns(&nkclock, xtime); + stat.msw = xnstat_counter_get(&thread->stat.ssw); + stat.csw = xnstat_counter_get(&thread->stat.csw); + stat.xsc = xnstat_counter_get(&thread->stat.xsc); + stat.pf = xnstat_counter_get(&thread->stat.pf); + stat.status = xnthread_get_state(thread); + if (thread->lock_count > 0) + stat.status |= XNLOCK; + stat.timeout = xnthread_get_timeout(thread, + xnclock_read_monotonic(&nkclock)); + strcpy(stat.name, thread->name); + strcpy(stat.personality, thread->personality->name); + xnlock_put_irqrestore(&nklock, s); + + return cobalt_copy_to_user(u_stat, &stat, sizeof(stat)); +} + +#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION + +int cobalt_thread_extend(struct cobalt_extension *ext, + void *priv) +{ + struct cobalt_thread *thread = cobalt_current_thread(); + struct xnthread_personality *prev; + + trace_cobalt_pthread_extend(thread->hkey.u_pth, ext->core.name); + + prev = cobalt_push_personality(ext->core.xid); + if (prev == NULL) + return -EINVAL; + + cobalt_set_extref(&thread->extref, ext, priv); + + return 0; +} +EXPORT_SYMBOL_GPL(cobalt_thread_extend); + +void cobalt_thread_restrict(void) +{ + struct cobalt_thread *thread = cobalt_current_thread(); + + trace_cobalt_pthread_restrict(thread->hkey.u_pth, + thread->threadbase.personality->name); + cobalt_pop_personality(&cobalt_personality); + cobalt_set_extref(&thread->extref, NULL, NULL); +} +EXPORT_SYMBOL_GPL(cobalt_thread_restrict); + +#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */ + +const char *cobalt_trace_parse_sched_params(struct trace_seq *p, int policy, + struct sched_param_ex *params) +{ + const char *ret = trace_seq_buffer_ptr(p); + + switch 
(policy) { + case SCHED_QUOTA: + trace_seq_printf(p, "priority=%d, group=%d", + params->sched_priority, + params->sched_quota_group); + break; + case SCHED_TP: + trace_seq_printf(p, "priority=%d, partition=%d", + params->sched_priority, + params->sched_tp_partition); + break; + case SCHED_NORMAL: + break; + case SCHED_SPORADIC: + trace_seq_printf(p, "priority=%d, low_priority=%d, " + "budget=(%ld.%09ld), period=(%ld.%09ld), " + "maxrepl=%d", + params->sched_priority, + params->sched_ss_low_priority, + params->sched_ss_init_budget.tv_sec, + params->sched_ss_init_budget.tv_nsec, + params->sched_ss_repl_period.tv_sec, + params->sched_ss_repl_period.tv_nsec, + params->sched_ss_max_repl); + break; + case SCHED_RR: + case SCHED_FIFO: + case SCHED_COBALT: + case SCHED_WEAK: + default: + trace_seq_printf(p, "priority=%d", params->sched_priority); + break; + } + trace_seq_putc(p, '\0'); + + return ret; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h new file mode 100644 index 0000000..0959ff6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h @@ -0,0 +1,228 @@ +/* + * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_THREAD_H +#define _COBALT_POSIX_THREAD_H + +#include <linux/stdarg.h> +#include <linux/types.h> +#include <linux/time.h> +#include <linux/signal.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/uapi/thread.h> +#include <cobalt/uapi/sched.h> +/* CAUTION: rtdm/cobalt.h reads this header. */ +#include <xenomai/posix/syscall.h> +#include <xenomai/posix/extension.h> + +#define PTHREAD_PROCESS_PRIVATE 0 +#define PTHREAD_PROCESS_SHARED 1 + +#define PTHREAD_CREATE_JOINABLE 0 +#define PTHREAD_CREATE_DETACHED 1 + +#define PTHREAD_INHERIT_SCHED 0 +#define PTHREAD_EXPLICIT_SCHED 1 + +#define PTHREAD_MUTEX_NORMAL 0 +#define PTHREAD_MUTEX_RECURSIVE 1 +#define PTHREAD_MUTEX_ERRORCHECK 2 +#define PTHREAD_MUTEX_DEFAULT 0 + +struct cobalt_thread; +struct cobalt_threadstat; + +/* + * pthread_mutexattr_t and pthread_condattr_t fit on 32 bits, for + * compatibility with libc. + */ + +/* The following definitions are copied from linuxthread pthreadtypes.h. */ +struct _pthread_fastlock { + long int __status; + int __spinlock; +}; + +typedef struct { + struct _pthread_fastlock __c_lock; + long __c_waiting; + char __padding[48 - sizeof (struct _pthread_fastlock) + - sizeof (long) - sizeof (long long)]; + long long __align; +} pthread_cond_t; + +enum { + PTHREAD_PRIO_NONE, + PTHREAD_PRIO_INHERIT, + PTHREAD_PRIO_PROTECT +}; + +typedef struct { + int __m_reserved; + int __m_count; + long __m_owner; + int __m_kind; + struct _pthread_fastlock __m_lock; +} pthread_mutex_t; + +struct cobalt_local_hkey { + /** pthread_t from userland. 
*/ + unsigned long u_pth; + /** kernel mm context. */ + struct mm_struct *mm; +}; + +struct cobalt_thread { + unsigned int magic; + struct xnthread threadbase; + struct cobalt_extref extref; + struct cobalt_process *process; + struct list_head next; /* in global/process thread_list */ + + /** Signal management. */ + sigset_t sigpending; + struct list_head sigqueues[_NSIG]; /* in cobalt_sigpending */ + struct xnsynch sigwait; + struct list_head signext; + + /** Monitor wait object and link holder. */ + struct xnsynch monitor_synch; + struct list_head monitor_link; + + struct cobalt_local_hkey hkey; +}; + +struct cobalt_sigwait_context { + struct xnthread_wait_context wc; + sigset_t *set; + struct siginfo *si; +}; + +static inline struct cobalt_thread *cobalt_current_thread(void) +{ + struct xnthread *curr = xnthread_current(); + return curr ? container_of(curr, struct cobalt_thread, threadbase) : NULL; +} + +int __cobalt_thread_create(unsigned long pth, int policy, + struct sched_param_ex __user *u_param, + int xid, __u32 __user *u_winoff); + +int __cobalt_thread_setschedparam_ex(struct cobalt_thread *thread, int policy, + const struct sched_param_ex *param_ex); + +int cobalt_thread_setschedparam_ex(unsigned long pth, + int policy, + const struct sched_param_ex *param_ex, + __u32 __user *u_winoff, + int __user *u_promoted); + +int cobalt_thread_getschedparam_ex(unsigned long pth, + int *policy_r, + struct sched_param_ex *param_ex); + +int __cobalt_thread_getschedparam_ex(struct cobalt_thread *thread, + int *policy_r, + struct sched_param_ex *param_ex); + +int cobalt_thread_setschedprio(unsigned long pth, + int prio, + __u32 __user *u_winoff, + int __user *u_promoted); + +struct cobalt_thread *cobalt_thread_find(pid_t pid); + +struct cobalt_thread *cobalt_thread_find_local(pid_t pid); + +struct cobalt_thread *cobalt_thread_lookup(unsigned long pth); + +COBALT_SYSCALL_DECL(thread_create, + (unsigned long pth, int policy, + struct sched_param_ex __user *u_param, + int xid, __u32 __user *u_winoff)); + +struct cobalt_thread * +cobalt_thread_shadow(struct cobalt_local_hkey *lhkey, + __u32 __user *u_winoff); + +COBALT_SYSCALL_DECL(thread_setmode, + (int clrmask, int setmask, int __user *u_mode_r)); + +COBALT_SYSCALL_DECL(thread_setname, + (unsigned long pth, const char __user *u_name)); + +COBALT_SYSCALL_DECL(thread_kill, (unsigned long pth, int sig)); + +COBALT_SYSCALL_DECL(thread_join, (unsigned long pth)); + +COBALT_SYSCALL_DECL(thread_getpid, (unsigned long pth)); + +COBALT_SYSCALL_DECL(thread_getstat, + (pid_t pid, struct cobalt_threadstat __user *u_stat)); + +COBALT_SYSCALL_DECL(thread_setschedparam_ex, + (unsigned long pth, + int policy, + const struct sched_param_ex __user *u_param, + __u32 __user *u_winoff, + int __user *u_promoted)); + +COBALT_SYSCALL_DECL(thread_getschedparam_ex, + (unsigned long pth, + int __user *u_policy, + struct sched_param_ex __user *u_param)); + +COBALT_SYSCALL_DECL(thread_setschedprio, + (unsigned long pth, + int prio, + __u32 __user *u_winoff, + int __user *u_promoted)); + +void cobalt_thread_map(struct xnthread *curr); + +struct xnthread_personality *cobalt_thread_exit(struct xnthread *curr); + +struct xnthread_personality *cobalt_thread_finalize(struct xnthread *zombie); + +#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION + +int cobalt_thread_extend(struct cobalt_extension *ext, + void *priv); + +void cobalt_thread_restrict(void); + +static inline +int cobalt_thread_extended_p(const struct cobalt_thread *thread, + const struct cobalt_extension *ext) +{ + return 
thread->extref.extension == ext; +} + +#else /* !CONFIG_XENO_OPT_COBALT_EXTENSION */ + +static inline +int cobalt_thread_extended_p(const struct cobalt_thread *thread, + const struct cobalt_extension *ext) +{ + return 0; +} + +#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */ + +extern xnticks_t cobalt_time_slice; + +#endif /* !_COBALT_POSIX_THREAD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c new file mode 100644 index 0000000..a58ea99 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c @@ -0,0 +1,588 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/module.h> +#include <linux/cred.h> +#include <linux/err.h> +#include "internal.h" +#include "thread.h" +#include "timer.h" +#include "clock.h" +#include "signal.h" + +void cobalt_timer_handler(struct xntimer *xntimer) +{ + struct cobalt_timer *timer; + /* + * Deliver the timer notification via a signal (unless + * SIGEV_NONE was given). If we can't do this because the + * target thread disappeared, then stop the timer. It will go + * away when timer_delete() is called, or the owner's process + * exits, whichever comes first. + */ + timer = container_of(xntimer, struct cobalt_timer, timerbase); + if (timer->sigp.si.si_signo && + cobalt_signal_send_pid(timer->target, &timer->sigp) == -ESRCH) + xntimer_stop(&timer->timerbase); +} +EXPORT_SYMBOL_GPL(cobalt_timer_handler); + +static inline struct cobalt_thread * +timer_init(struct cobalt_timer *timer, + const struct sigevent *__restrict__ evp) /* nklocked, IRQs off. */ +{ + struct cobalt_thread *owner = cobalt_current_thread(), *target = NULL; + struct xnclock *clock; + + /* + * First, try to offload this operation to the extended + * personality the current thread might originate from. + */ + if (cobalt_initcall_extension(timer_init, &timer->extref, + owner, target, evp) && target) + return target; + + /* + * Ok, we have no extension available, or we do but it does + * not want to overload the standard behavior: handle this + * timer the pure Cobalt way then. + */ + if (evp == NULL || evp->sigev_notify == SIGEV_NONE) { + target = owner; /* Assume SIGEV_THREAD_ID. */ + goto init; + } + + if (evp->sigev_notify != SIGEV_THREAD_ID) + return ERR_PTR(-EINVAL); + + /* + * Recipient thread must be a Xenomai shadow in user-space, + * living in the same process as our caller.
+ */ + target = cobalt_thread_find_local(evp->sigev_notify_thread_id); + if (target == NULL) + return ERR_PTR(-EINVAL); +init: + clock = cobalt_clock_find(timer->clockid); + if (IS_ERR(clock)) + return ERR_PTR(PTR_ERR(clock)); + + xntimer_init(&timer->timerbase, clock, cobalt_timer_handler, + target->threadbase.sched, XNTIMER_UGRAVITY); + + return target; +} + +static inline int timer_alloc_id(struct cobalt_process *cc) +{ + int id; + + id = find_first_bit(cc->timers_map, CONFIG_XENO_OPT_NRTIMERS); + if (id == CONFIG_XENO_OPT_NRTIMERS) + return -EAGAIN; + + __clear_bit(id, cc->timers_map); + + return id; +} + +static inline void timer_free_id(struct cobalt_process *cc, int id) +{ + __set_bit(id, cc->timers_map); +} + +struct cobalt_timer * +cobalt_timer_by_id(struct cobalt_process *cc, timer_t timer_id) +{ + if (timer_id < 0 || timer_id >= CONFIG_XENO_OPT_NRTIMERS) + return NULL; + + if (test_bit(timer_id, cc->timers_map)) + return NULL; + + return cc->timers[timer_id]; +} + +static inline int timer_create(clockid_t clockid, + const struct sigevent *__restrict__ evp, + timer_t * __restrict__ timerid) +{ + struct cobalt_process *cc; + struct cobalt_thread *target; + struct cobalt_timer *timer; + int signo, ret = -EINVAL; + timer_t timer_id; + spl_t s; + + cc = cobalt_current_process(); + if (cc == NULL) + return -EPERM; + + timer = xnmalloc(sizeof(*timer)); + if (timer == NULL) + return -ENOMEM; + + timer->sigp.si.si_errno = 0; + timer->sigp.si.si_code = SI_TIMER; + timer->sigp.si.si_overrun = 0; + INIT_LIST_HEAD(&timer->sigp.next); + timer->clockid = clockid; + timer->overruns = 0; + + xnlock_get_irqsave(&nklock, s); + + ret = timer_alloc_id(cc); + if (ret < 0) + goto out; + + timer_id = ret; + + if (evp == NULL) { + timer->sigp.si.si_int = timer_id; + signo = SIGALRM; + } else { + if (evp->sigev_notify == SIGEV_NONE) + signo = 0; /* Don't notify. */ + else { + signo = evp->sigev_signo; + if (signo < 1 || signo > _NSIG) { + ret = -EINVAL; + goto fail; + } + timer->sigp.si.si_value = evp->sigev_value; + } + } + + timer->sigp.si.si_signo = signo; + timer->sigp.si.si_tid = timer_id; + timer->id = timer_id; + + target = timer_init(timer, evp); + if (target == NULL) { + ret = -EPERM; + goto fail; + } + + if (IS_ERR(target)) { + ret = PTR_ERR(target); + goto fail; + } + + timer->target = xnthread_host_pid(&target->threadbase); + cc->timers[timer_id] = timer; + + xnlock_put_irqrestore(&nklock, s); + + *timerid = timer_id; + + return 0; +fail: + timer_free_id(cc, timer_id); +out: + xnlock_put_irqrestore(&nklock, s); + + xnfree(timer); + + return ret; +} + +static void timer_cleanup(struct cobalt_process *p, struct cobalt_timer *timer) +{ + xntimer_destroy(&timer->timerbase); + + if (!list_empty(&timer->sigp.next)) + list_del(&timer->sigp.next); + + timer_free_id(p, cobalt_timer_id(timer)); + p->timers[cobalt_timer_id(timer)] = NULL; +} + +static inline int +timer_delete(timer_t timerid) +{ + struct cobalt_process *cc; + struct cobalt_timer *timer; + int ret = 0; + spl_t s; + + cc = cobalt_current_process(); + if (cc == NULL) + return -EPERM; + + xnlock_get_irqsave(&nklock, s); + + timer = cobalt_timer_by_id(cc, timerid); + if (timer == NULL) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + /* + * If an extension runs and actually handles the deletion, we + * should not call the timer_cleanup extension handler for + * this timer, but we shall destroy the core timer. If the + * handler returns on error, the whole deletion process is + * aborted, leaving the timer untouched. 
In all other cases, + * we do the core timer cleanup work, firing the timer_cleanup + * extension handler if defined. + */ + if (cobalt_call_extension(timer_delete, &timer->extref, ret) && ret < 0) + goto out; + + if (ret == 0) + cobalt_call_extension(timer_cleanup, &timer->extref, ret); + else + ret = 0; + + timer_cleanup(cc, timer); + xnlock_put_irqrestore(&nklock, s); + xnfree(timer); + + return ret; + +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +void __cobalt_timer_getval(struct xntimer *__restrict__ timer, + struct itimerspec64 *__restrict__ value) +{ + ns2ts(&value->it_interval, xntimer_interval(timer)); + + if (!xntimer_running_p(timer)) { + value->it_value.tv_sec = 0; + value->it_value.tv_nsec = 0; + } else { + ns2ts(&value->it_value, xntimer_get_timeout(timer)); + } +} + +static inline void +timer_gettimeout(struct cobalt_timer *__restrict__ timer, + struct itimerspec64 *__restrict__ value) +{ + int ret = 0; + + if (cobalt_call_extension(timer_gettime, &timer->extref, + ret, value) && ret != 0) + return; + + __cobalt_timer_getval(&timer->timerbase, value); +} + +int __cobalt_timer_setval(struct xntimer *__restrict__ timer, int clock_flag, + const struct itimerspec64 *__restrict__ value) +{ + xnticks_t start, period; + + if (value->it_value.tv_nsec == 0 && value->it_value.tv_sec == 0) { + xntimer_stop(timer); + return 0; + } + + if ((unsigned long)value->it_value.tv_nsec >= ONE_BILLION || + ((unsigned long)value->it_interval.tv_nsec >= ONE_BILLION && + (value->it_value.tv_sec != 0 || value->it_value.tv_nsec != 0))) + return -EINVAL; + + start = ts2ns(&value->it_value) + 1; + period = ts2ns(&value->it_interval); + + /* + * Now start the timer. If the timeout data has already + * passed, the caller will handle the case. + */ + return xntimer_start(timer, start, period, clock_flag); +} + +static inline int timer_set(struct cobalt_timer *timer, int flags, + const struct itimerspec64 *__restrict__ value) +{ /* nklocked, IRQs off. */ + struct cobalt_thread *thread; + int ret = 0; + + /* First, try offloading the work to an extension. */ + + if (cobalt_call_extension(timer_settime, &timer->extref, + ret, value, flags) && ret != 0) + return ret < 0 ? ret : 0; + + /* + * No extension, or operation not handled. Default to plain + * POSIX behavior. + * + * If the target thread vanished, just don't start the timer. + */ + thread = cobalt_thread_find(timer->target); + if (thread == NULL) + return 0; + + /* + * Make the timer affine to the CPU running the thread to be + * signaled if possible. + */ + xntimer_set_affinity(&timer->timerbase, thread->threadbase.sched); + + return __cobalt_timer_setval(&timer->timerbase, + clock_flag(flags, timer->clockid), value); +} + +static inline void +timer_deliver_late(struct cobalt_process *cc, timer_t timerid) +{ + struct cobalt_timer *timer; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + /* + * We dropped the lock shortly, revalidate the timer handle in + * case a deletion slipped in. 
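[Editor's note] Seen from user space, the checks in __cobalt_timer_setval() above match the standard timer_settime() contract: a zero it_value disarms the timer, the tv_nsec fields must stay below one billion, and a non-zero it_interval makes the timer periodic. Continuing the make_thread_timer() sketch from earlier (fragment, standard POSIX API; id is the timer created there):

	struct itimerspec its;

	its.it_value.tv_sec = 0;
	its.it_value.tv_nsec = 500000;		/* first expiry in 500 us */
	its.it_interval.tv_sec = 0;
	its.it_interval.tv_nsec = 1000000;	/* then every 1 ms */

	if (timer_settime(id, 0, &its, NULL))	/* 0: relative start; TIMER_ABSTIME: absolute */
		perror("timer_settime");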
+ */ + timer = cobalt_timer_by_id(cc, timerid); + if (timer) + cobalt_timer_handler(&timer->timerbase); + + xnlock_put_irqrestore(&nklock, s); +} + +int __cobalt_timer_settime(timer_t timerid, int flags, + const struct itimerspec64 *__restrict__ value, + struct itimerspec64 *__restrict__ ovalue) +{ + struct cobalt_timer *timer; + struct cobalt_process *cc; + int ret; + spl_t s; + + cc = cobalt_current_process(); + XENO_BUG_ON(COBALT, cc == NULL); + + xnlock_get_irqsave(&nklock, s); + + timer = cobalt_timer_by_id(cc, timerid); + if (timer == NULL) { + ret = -EINVAL; + goto out; + } + + if (ovalue) + timer_gettimeout(timer, ovalue); + + ret = timer_set(timer, flags, value); + if (ret == -ETIMEDOUT) { + /* + * Time has already passed, deliver a notification + * immediately. Since we are about to dive into the + * signal machinery for this, let's drop the nklock to + * break the atomic section temporarily. + */ + xnlock_put_irqrestore(&nklock, s); + timer_deliver_late(cc, timerid); + return 0; + } +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_timer_gettime(timer_t timerid, struct itimerspec64 *value) +{ + struct cobalt_timer *timer; + struct cobalt_process *cc; + spl_t s; + + cc = cobalt_current_process(); + if (cc == NULL) + return -EPERM; + + xnlock_get_irqsave(&nklock, s); + + timer = cobalt_timer_by_id(cc, timerid); + if (timer == NULL) + goto fail; + + timer_gettimeout(timer, value); + + xnlock_put_irqrestore(&nklock, s); + + return 0; +fail: + xnlock_put_irqrestore(&nklock, s); + + return -EINVAL; +} + +COBALT_SYSCALL(timer_delete, current, (timer_t timerid)) +{ + return timer_delete(timerid); +} + +int __cobalt_timer_create(clockid_t clock, + const struct sigevent *sev, + timer_t __user *u_tm) +{ + timer_t timerid = 0; + int ret; + + ret = timer_create(clock, sev, &timerid); + if (ret) + return ret; + + if (cobalt_copy_to_user(u_tm, &timerid, sizeof(timerid))) { + timer_delete(timerid); + return -EFAULT; + } + + return 0; +} + +COBALT_SYSCALL(timer_create, current, + (clockid_t clock, + const struct sigevent __user *u_sev, + timer_t __user *u_tm)) +{ + struct sigevent sev, *evp = NULL; + + if (u_sev) { + evp = &sev; + if (cobalt_copy_from_user(&sev, u_sev, sizeof(sev))) + return -EFAULT; + } + + return __cobalt_timer_create(clock, evp, u_tm); +} + +COBALT_SYSCALL(timer_settime, primary, + (timer_t tm, int flags, + const struct __user_old_itimerspec __user *u_newval, + struct __user_old_itimerspec __user *u_oldval)) +{ + struct itimerspec64 newv, oldv, *oldvp = &oldv; + int ret; + + if (u_oldval == NULL) + oldvp = NULL; + + if (cobalt_get_u_itimerspec(&newv, u_newval)) + return -EFAULT; + + ret = __cobalt_timer_settime(tm, flags, &newv, oldvp); + if (ret) + return ret; + + if (oldvp && cobalt_put_u_itimerspec(u_oldval, oldvp)) { + __cobalt_timer_settime(tm, flags, oldvp, NULL); + return -EFAULT; + } + + return 0; +} + +COBALT_SYSCALL(timer_gettime, current, + (timer_t tm, struct __user_old_itimerspec __user *u_val)) +{ + struct itimerspec64 val; + int ret; + + ret = __cobalt_timer_gettime(tm, &val); + if (ret) + return ret; + + return cobalt_put_u_itimerspec(u_val, &val); +} + +COBALT_SYSCALL(timer_getoverrun, current, (timer_t timerid)) +{ + struct cobalt_timer *timer; + struct cobalt_process *cc; + int overruns; + spl_t s; + + cc = cobalt_current_process(); + if (cc == NULL) + return -EPERM; + + xnlock_get_irqsave(&nklock, s); + + timer = cobalt_timer_by_id(cc, timerid); + if (timer == NULL) + goto fail; + + overruns = timer->overruns; + + 
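[Editor's note] For completeness: the count returned by this syscall is the one computed by cobalt_timer_deliver() further down, already clamped to COBALT_DELAYMAX. A user-space consumer would typically poll it right after handling the notification (fragment, standard POSIX call; id as in the sketches above):

	int missed = timer_getoverrun(id);

	if (missed > 0)
		fprintf(stderr, "timer lagged: %d expiries coalesced\n", missed);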
xnlock_put_irqrestore(&nklock, s); + + return overruns; +fail: + xnlock_put_irqrestore(&nklock, s); + + return -EINVAL; +} + +int cobalt_timer_deliver(struct cobalt_thread *waiter, timer_t timerid) /* nklocked, IRQs off. */ +{ + struct cobalt_timer *timer; + xnticks_t now; + + timer = cobalt_timer_by_id(cobalt_current_process(), timerid); + if (timer == NULL) + /* Killed before ultimate delivery, who cares then? */ + return 0; + + if (!xntimer_periodic_p(&timer->timerbase)) + timer->overruns = 0; + else { + now = xnclock_read_raw(xntimer_clock(&timer->timerbase)); + timer->overruns = xntimer_get_overruns(&timer->timerbase, + &waiter->threadbase, now); + if ((unsigned int)timer->overruns > COBALT_DELAYMAX) + timer->overruns = COBALT_DELAYMAX; + } + + return timer->overruns; +} + +void cobalt_timer_reclaim(struct cobalt_process *p) +{ + struct cobalt_timer *timer; + unsigned id; + spl_t s; + int ret; + + xnlock_get_irqsave(&nklock, s); + + if (find_first_zero_bit(p->timers_map, CONFIG_XENO_OPT_NRTIMERS) == + CONFIG_XENO_OPT_NRTIMERS) + goto out; + + for (id = 0; id < ARRAY_SIZE(p->timers); id++) { + timer = cobalt_timer_by_id(p, id); + if (timer == NULL) + continue; + + cobalt_call_extension(timer_cleanup, &timer->extref, ret); + timer_cleanup(p, timer); + xnlock_put_irqrestore(&nklock, s); + xnfree(timer); + xnlock_get_irqsave(&nklock, s); + } +out: + xnlock_put_irqrestore(&nklock, s); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h new file mode 100644 index 0000000..3b580d4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_POSIX_TIMER_H +#define _COBALT_POSIX_TIMER_H + +#include <linux/types.h> +#include <linux/time.h> +#include <linux/list.h> +#include <cobalt/kernel/timer.h> +#include <xenomai/posix/signal.h> +#include <xenomai/posix/syscall.h> + +struct cobalt_timer { + struct xntimer timerbase; + timer_t id; + int overruns; + clockid_t clockid; + pid_t target; + struct cobalt_sigpending sigp; + struct cobalt_extref extref; +}; + +int cobalt_timer_deliver(struct cobalt_thread *waiter, + timer_t timerid); + +void cobalt_timer_reclaim(struct cobalt_process *p); + +static inline timer_t cobalt_timer_id(const struct cobalt_timer *timer) +{ + return timer->id; +} + +struct cobalt_timer * +cobalt_timer_by_id(struct cobalt_process *p, timer_t timer_id); + +void cobalt_timer_handler(struct xntimer *xntimer); + +void __cobalt_timer_getval(struct xntimer *__restrict__ timer, + struct itimerspec64 *__restrict__ value); + +int __cobalt_timer_setval(struct xntimer *__restrict__ timer, int clock_flag, + const struct itimerspec64 *__restrict__ value); + +int __cobalt_timer_create(clockid_t clock, + const struct sigevent *sev, + timer_t __user *u_tm); + +int __cobalt_timer_settime(timer_t timerid, int flags, + const struct itimerspec64 *__restrict__ value, + struct itimerspec64 *__restrict__ ovalue); + +int __cobalt_timer_gettime(timer_t timerid, struct itimerspec64 *value); + +COBALT_SYSCALL_DECL(timer_create, + (clockid_t clock, + const struct sigevent __user *u_sev, + timer_t __user *u_tm)); + +COBALT_SYSCALL_DECL(timer_delete, (timer_t tm)); + +COBALT_SYSCALL_DECL(timer_settime, + (timer_t tm, int flags, + const struct __user_old_itimerspec __user *u_newval, + struct __user_old_itimerspec __user *u_oldval)); + +COBALT_SYSCALL_DECL(timer_gettime, + (timer_t tm, struct __user_old_itimerspec __user *u_val)); + +COBALT_SYSCALL_DECL(timer_getoverrun, (timer_t tm)); + +#endif /* !_COBALT_POSIX_TIMER_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c new file mode 100644 index 0000000..472c4cb --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c @@ -0,0 +1,334 @@ +/* + * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/timerfd.h> +#include <linux/err.h> +#include <cobalt/kernel/timer.h> +#include <cobalt/kernel/select.h> +#include <rtdm/fd.h> +#include "internal.h" +#include "clock.h" +#include "timer.h" +#include "timerfd.h" + +struct cobalt_tfd { + int flags; + clockid_t clockid; + struct rtdm_fd fd; + struct xntimer timer; + DECLARE_XNSELECT(read_select); + struct itimerspec64 value; + struct xnsynch readers; + struct xnthread *target; +}; + +#define COBALT_TFD_TICKED (1 << 2) + +#define COBALT_TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_WAKEUP) + +static ssize_t timerfd_read(struct rtdm_fd *fd, void __user *buf, size_t size) +{ + struct cobalt_tfd *tfd; + __u64 __user *u_ticks; + __u64 ticks = 0; + bool aligned; + spl_t s; + int err; + + if (size < sizeof(ticks)) + return -EINVAL; + + u_ticks = buf; + if (!access_wok(u_ticks, sizeof(*u_ticks))) + return -EFAULT; + + aligned = (((unsigned long)buf) & (sizeof(ticks) - 1)) == 0; + + tfd = container_of(fd, struct cobalt_tfd, fd); + + xnlock_get_irqsave(&nklock, s); + if (tfd->flags & COBALT_TFD_TICKED) { + err = 0; + goto out; + } + if (rtdm_fd_flags(fd) & O_NONBLOCK) { + err = -EAGAIN; + goto out; + } + + do { + err = xnsynch_sleep_on(&tfd->readers, XN_INFINITE, XN_RELATIVE); + } while (err == 0 && (tfd->flags & COBALT_TFD_TICKED) == 0); + + if (err & XNBREAK) + err = -EINTR; + out: + if (err == 0) { + xnticks_t now; + + if (xntimer_periodic_p(&tfd->timer)) { + now = xnclock_read_raw(xntimer_clock(&tfd->timer)); + ticks = 1 + xntimer_get_overruns(&tfd->timer, + xnthread_current(), now); + } else + ticks = 1; + + tfd->flags &= ~COBALT_TFD_TICKED; + xnselect_signal(&tfd->read_select, 0); + } + xnlock_put_irqrestore(&nklock, s); + + if (err == 0) { + err = aligned ? __xn_put_user(ticks, u_ticks) : + __xn_copy_to_user(buf, &ticks, sizeof(ticks)); + if (err) + err =-EFAULT; + } + + return err ?: sizeof(ticks); +} + +static int +timerfd_select(struct rtdm_fd *fd, struct xnselector *selector, + unsigned type, unsigned index) +{ + struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd); + struct xnselect_binding *binding; + spl_t s; + int err; + + if (type != XNSELECT_READ) + return -EBADF; + + binding = xnmalloc(sizeof(*binding)); + if (binding == NULL) + return -ENOMEM; + + xnlock_get_irqsave(&nklock, s); + xntimer_set_affinity(&tfd->timer, xnthread_current()->sched); + err = xnselect_bind(&tfd->read_select, binding, selector, type, + index, tfd->flags & COBALT_TFD_TICKED); + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +static void timerfd_close(struct rtdm_fd *fd) +{ + struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd); + spl_t s; + + xnlock_get_irqsave(&nklock, s); + xntimer_destroy(&tfd->timer); + xnsynch_destroy(&tfd->readers); + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); + xnselect_destroy(&tfd->read_select); /* Reschedules. 
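[Editor's note] The read-side contract of timerfd_read() above mirrors regular Linux timerfd semantics: the buffer must hold at least a __u64, the call blocks unless O_NONBLOCK is set, and the tick count exceeds 1 when expiries were coalesced. A hedged user-space sketch (standard timerfd calls, which a Cobalt-enabled libc wrapper such as libcobalt would route to these handlers):

	#include <sys/timerfd.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	static void timerfd_demo(void)
	{
		struct itimerspec its = {
			.it_value    = { .tv_sec = 1, .tv_nsec = 0 },
			.it_interval = { .tv_sec = 0, .tv_nsec = 100000000 }, /* 100 ms */
		};
		uint64_t ticks;
		int fd;

		fd = timerfd_create(CLOCK_MONOTONIC, 0);
		if (fd < 0)
			return;

		timerfd_settime(fd, 0, &its, NULL);

		/* ticks >= 1 on success; values > 1 report coalesced expiries,
		 * as computed via xntimer_get_overruns() in timerfd_read(). */
		if (read(fd, &ticks, sizeof(ticks)) == sizeof(ticks))
			printf("expired %llu time(s)\n", (unsigned long long)ticks);
	}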
*/ + xnfree(tfd); +} + +static struct rtdm_fd_ops timerfd_ops = { + .read_rt = timerfd_read, + .select = timerfd_select, + .close = timerfd_close, +}; + +static void timerfd_handler(struct xntimer *xntimer) +{ + struct cobalt_tfd *tfd; + + tfd = container_of(xntimer, struct cobalt_tfd, timer); + tfd->flags |= COBALT_TFD_TICKED; + xnselect_signal(&tfd->read_select, 1); + xnsynch_wakeup_one_sleeper(&tfd->readers); + if (tfd->target) + xnthread_unblock(tfd->target); +} + +COBALT_SYSCALL(timerfd_create, lostage, (int clockid, int flags)) +{ + struct cobalt_tfd *tfd; + struct xnthread *curr; + struct xnclock *clock; + int ret, ufd; + + if (flags & ~TFD_CREATE_FLAGS) + return -EINVAL; + + clock = cobalt_clock_find(clockid); + if (IS_ERR(clock)) + return PTR_ERR(clock); + + tfd = xnmalloc(sizeof(*tfd)); + if (tfd == NULL) + return -ENOMEM; + + ufd = __rtdm_anon_getfd("[cobalt-timerfd]", + O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); + if (ufd < 0) { + ret = ufd; + goto fail_getfd; + } + + tfd->flags = flags & ~TFD_NONBLOCK; + tfd->fd.oflags = (flags & TFD_NONBLOCK) ? O_NONBLOCK : 0; + tfd->clockid = clockid; + curr = xnthread_current(); + xntimer_init(&tfd->timer, clock, timerfd_handler, + curr ? curr->sched : NULL, XNTIMER_UGRAVITY); + xnsynch_init(&tfd->readers, XNSYNCH_PRIO, NULL); + xnselect_init(&tfd->read_select); + tfd->target = NULL; + + ret = rtdm_fd_enter(&tfd->fd, ufd, COBALT_TIMERFD_MAGIC, &timerfd_ops); + if (ret < 0) + goto fail; + + ret = rtdm_fd_register(&tfd->fd, ufd); + if (ret < 0) + goto fail; + + return ufd; +fail: + xnselect_destroy(&tfd->read_select); + xnsynch_destroy(&tfd->readers); + xntimer_destroy(&tfd->timer); + __rtdm_anon_putfd(ufd); +fail_getfd: + xnfree(tfd); + + return ret; +} + +static inline struct cobalt_tfd *tfd_get(int ufd) +{ + struct rtdm_fd *fd; + + fd = rtdm_fd_get(ufd, COBALT_TIMERFD_MAGIC); + if (IS_ERR(fd)) { + int err = PTR_ERR(fd); + if (err == -EBADF && cobalt_current_process() == NULL) + err = -EPERM; + return ERR_PTR(err); + } + + return container_of(fd, struct cobalt_tfd, fd); +} + +static inline void tfd_put(struct cobalt_tfd *tfd) +{ + rtdm_fd_put(&tfd->fd); +} + +int __cobalt_timerfd_settime(int fd, int flags, + const struct itimerspec64 *value, + struct itimerspec64 *ovalue) +{ + struct cobalt_tfd *tfd; + int cflag, ret; + spl_t s; + + if (flags & ~COBALT_TFD_SETTIME_FLAGS) + return -EINVAL; + + tfd = tfd_get(fd); + if (IS_ERR(tfd)) + return PTR_ERR(tfd); + + cflag = (flags & TFD_TIMER_ABSTIME) ? 
TIMER_ABSTIME : 0; + + xnlock_get_irqsave(&nklock, s); + + tfd->target = NULL; + if (flags & TFD_WAKEUP) { + tfd->target = xnthread_current(); + if (tfd->target == NULL) { + ret = -EPERM; + goto out; + } + } + + if (ovalue) + __cobalt_timer_getval(&tfd->timer, ovalue); + + xntimer_set_affinity(&tfd->timer, xnthread_current()->sched); + + ret = __cobalt_timer_setval(&tfd->timer, + clock_flag(cflag, tfd->clockid), value); +out: + xnlock_put_irqrestore(&nklock, s); + + tfd_put(tfd); + + return ret; +} + +COBALT_SYSCALL(timerfd_settime, primary, + (int fd, int flags, + const struct __user_old_itimerspec __user *new_value, + struct __user_old_itimerspec __user *old_value)) +{ + struct itimerspec64 ovalue, value; + int ret; + + ret = cobalt_get_u_itimerspec(&value, new_value); + if (ret) + return ret; + + ret = __cobalt_timerfd_settime(fd, flags, &value, &ovalue); + if (ret) + return ret; + + if (old_value) { + ret = cobalt_copy_to_user(old_value, &ovalue, sizeof(ovalue)); + value.it_value.tv_sec = 0; + value.it_value.tv_nsec = 0; + __cobalt_timerfd_settime(fd, flags, &value, NULL); + } + + return ret; +} + +int __cobalt_timerfd_gettime(int fd, struct itimerspec64 *value) +{ + struct cobalt_tfd *tfd; + spl_t s; + + tfd = tfd_get(fd); + if (IS_ERR(tfd)) + return PTR_ERR(tfd); + + xnlock_get_irqsave(&nklock, s); + __cobalt_timer_getval(&tfd->timer, value); + xnlock_put_irqrestore(&nklock, s); + + tfd_put(tfd); + + return 0; +} + +COBALT_SYSCALL(timerfd_gettime, current, + (int fd, struct __user_old_itimerspec __user *curr_value)) +{ + struct itimerspec64 value; + int ret; + + ret = __cobalt_timerfd_gettime(fd, &value); + + return ret ?: cobalt_put_u_itimerspec(curr_value, &value); +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h new file mode 100644 index 0000000..245b869 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef TIMERFD_H +#define TIMERFD_H + +#include <linux/time.h> +#include <xenomai/posix/syscall.h> + +int __cobalt_timerfd_settime(int fd, int flags, + const struct itimerspec64 *new_value, + struct itimerspec64 *old_value); + +int __cobalt_timerfd_gettime(int fd, + struct itimerspec64 *value); + +COBALT_SYSCALL_DECL(timerfd_create, + (int clockid, int flags)); + +COBALT_SYSCALL_DECL(timerfd_settime, + (int fd, int flags, + const struct __user_old_itimerspec __user *new_value, + struct __user_old_itimerspec __user *old_value)); + +COBALT_SYSCALL_DECL(timerfd_gettime, + (int fd, struct __user_old_itimerspec __user *curr_value)); + +#endif /* TIMERFD_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c b/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c new file mode 100644 index 0000000..0aaf691 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <cobalt/kernel/lock.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/vfile.h> +#include <cobalt/kernel/intr.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/timer.h> +#include <cobalt/kernel/sched.h> +#include <xenomai/version.h> +#include "debug.h" + +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING + +static int lock_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + struct xnlockinfo lockinfo; + spl_t s; + int cpu; + + for_each_realtime_cpu(cpu) { + xnlock_get_irqsave(&nklock, s); + lockinfo = per_cpu(xnlock_stats, cpu); + xnlock_put_irqrestore(&nklock, s); + + if (cpu > 0) + xnvfile_printf(it, "\n"); + + xnvfile_printf(it, "CPU%d:\n", cpu); + + xnvfile_printf(it, + " longest locked section: %llu ns\n" + " spinning time: %llu ns\n" + " section entry: %s:%d (%s)\n", + xnclock_ticks_to_ns(&nkclock, lockinfo.lock_time), + xnclock_ticks_to_ns(&nkclock, lockinfo.spin_time), + lockinfo.file, lockinfo.line, lockinfo.function); + } + + return 0; +} + +static ssize_t lock_vfile_store(struct xnvfile_input *input) +{ + ssize_t ret; + spl_t s; + int cpu; + + long val; + + ret = xnvfile_get_integer(input, &val); + if (ret < 0) + return ret; + + if (val != 0) + return -EINVAL; + + for_each_realtime_cpu(cpu) { + xnlock_get_irqsave(&nklock, s); + memset(&per_cpu(xnlock_stats, cpu), '\0', sizeof(struct xnlockinfo)); + xnlock_put_irqrestore(&nklock, s); + } + + return ret; +} + +static struct xnvfile_regular_ops lock_vfile_ops = { + .show = lock_vfile_show, + .store = lock_vfile_store, +}; + +static struct xnvfile_regular lock_vfile = { + .ops = &lock_vfile_ops, +}; + +#endif /* CONFIG_XENO_OPT_DEBUG_LOCKING */ + +static int latency_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + xnvfile_printf(it, "%Lu\n", + xnclock_ticks_to_ns(&nkclock, nkclock.gravity.user)); + + return 0; +} + +static 
ssize_t latency_vfile_store(struct xnvfile_input *input) +{ + ssize_t ret; + long val; + + ret = xnvfile_get_integer(input, &val); + if (ret < 0) + return ret; + + nkclock.gravity.user = xnclock_ns_to_ticks(&nkclock, val); + + return ret; +} + +static struct xnvfile_regular_ops latency_vfile_ops = { + .show = latency_vfile_show, + .store = latency_vfile_store, +}; + +static struct xnvfile_regular latency_vfile = { + .ops = &latency_vfile_ops, +}; + +static int version_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + xnvfile_printf(it, "%s\n", XENO_VERSION_STRING); + + return 0; +} + +static struct xnvfile_regular_ops version_vfile_ops = { + .show = version_vfile_show, +}; + +static struct xnvfile_regular version_vfile = { + .ops = &version_vfile_ops, +}; + +static int faults_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + int cpu, trap; + + xnvfile_puts(it, "TRAP "); + + for_each_realtime_cpu(cpu) + xnvfile_printf(it, " CPU%d", cpu); + + for (trap = 0; cobalt_machine.fault_labels[trap]; trap++) { + if (*cobalt_machine.fault_labels[trap] == '\0') + continue; + + xnvfile_printf(it, "\n%3d: ", trap); + + for_each_realtime_cpu(cpu) + xnvfile_printf(it, "%12u", + per_cpu(cobalt_machine_cpudata, cpu).faults[trap]); + + xnvfile_printf(it, " (%s)", + cobalt_machine.fault_labels[trap]); + } + + xnvfile_putc(it, '\n'); + + return 0; +} + +static struct xnvfile_regular_ops faults_vfile_ops = { + .show = faults_vfile_show, +}; + +static struct xnvfile_regular faults_vfile = { + .ops = &faults_vfile_ops, +}; + +void xnprocfs_cleanup_tree(void) +{ +#ifdef CONFIG_XENO_OPT_DEBUG +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING + xnvfile_destroy_regular(&lock_vfile); +#endif + xnvfile_destroy_dir(&cobalt_debug_vfroot); +#endif /* XENO_OPT_DEBUG */ + xnvfile_destroy_regular(&faults_vfile); + xnvfile_destroy_regular(&version_vfile); + xnvfile_destroy_regular(&latency_vfile); + xnintr_cleanup_proc(); + xnheap_cleanup_proc(); + xnclock_cleanup_proc(); + xnsched_cleanup_proc(); + xnvfile_destroy_root(); +} + +int __init xnprocfs_init_tree(void) +{ + int ret; + + ret = xnvfile_init_root(); + if (ret) + return ret; + + ret = xnsched_init_proc(); + if (ret) + return ret; + + xnclock_init_proc(); + xnheap_init_proc(); + xnintr_init_proc(); + xnvfile_init_regular("latency", &latency_vfile, &cobalt_vfroot); + xnvfile_init_regular("version", &version_vfile, &cobalt_vfroot); + xnvfile_init_regular("faults", &faults_vfile, &cobalt_vfroot); +#ifdef CONFIG_XENO_OPT_DEBUG + xnvfile_init_dir("debug", &cobalt_debug_vfroot, &cobalt_vfroot); +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING + xnvfile_init_regular("lock", &lock_vfile, &cobalt_debug_vfroot); +#endif +#endif + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h b/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h new file mode 100644 index 0000000..75304fe --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _KERNEL_COBALT_PROCFS_H +#define _KERNEL_COBALT_PROCFS_H + +#ifdef CONFIG_XENO_OPT_VFILE +int xnprocfs_init_tree(void); +void xnprocfs_cleanup_tree(void); +#else +static inline int xnprocfs_init_tree(void) { return 0; } +static inline void xnprocfs_cleanup_tree(void) { } +#endif /* !CONFIG_XENO_OPT_VFILE */ + +#endif /* !_KERNEL_COBALT_PROCFS_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/registry.c b/kernel/xenomai-v3.2.4/kernel/cobalt/registry.c new file mode 100644 index 0000000..211e0f7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/registry.c @@ -0,0 +1,954 @@ +/* + * Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/slab.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/registry.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/kernel/assert.h> +#include <pipeline/sirq.h> +#include <trace/events/cobalt-core.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_registry Registry services + * + * The registry provides a mean to index object descriptors on unique + * alphanumeric keys. When labeled this way, an object is globally + * exported; it can be searched for, and its descriptor returned to + * the caller for further use; the latter operation is called a + * "binding". When no object has been registered under the given name + * yet, the registry can be asked to set up a rendez-vous, blocking + * the caller until the object is eventually registered. + * + *@{ + */ + +struct xnobject *registry_obj_slots; +EXPORT_SYMBOL_GPL(registry_obj_slots); + +static LIST_HEAD(free_object_list); /* Free objects. */ + +static LIST_HEAD(busy_object_list); /* Active and exported objects. */ + +static unsigned int nr_active_objects; + +static unsigned long next_object_stamp; + +static struct hlist_head *object_index; + +static int nr_object_entries; + +static struct xnsynch register_synch; + +#ifdef CONFIG_XENO_OPT_VFILE + +#include <linux/workqueue.h> + +static void proc_callback(struct work_struct *work); + +static irqreturn_t registry_proc_schedule(int virq, void *dev_id); + +static LIST_HEAD(proc_object_list); /* Objects waiting for /proc handling. 
*/ + +static DECLARE_WORK(registry_proc_work, proc_callback); + +static int proc_virq; + +static struct xnvfile_directory registry_vfroot; + +static int usage_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + xnvfile_printf(it, "%u/%u\n", + nr_active_objects, + CONFIG_XENO_OPT_REGISTRY_NRSLOTS); + return 0; +} + +static struct xnvfile_regular_ops usage_vfile_ops = { + .show = usage_vfile_show, +}; + +static struct xnvfile_regular usage_vfile = { + .ops = &usage_vfile_ops, +}; + +#endif /* CONFIG_XENO_OPT_VFILE */ + +unsigned xnregistry_hash_size(void) +{ + static const int primes[] = { + 101, 211, 307, 401, 503, 601, + 701, 809, 907, 1009, 1103 + }; + +#define obj_hash_max(n) \ +((n) < sizeof(primes) / sizeof(int) ? \ + (n) : sizeof(primes) / sizeof(int) - 1) + + return primes[obj_hash_max(CONFIG_XENO_OPT_REGISTRY_NRSLOTS / 100)]; +} + +int xnregistry_init(void) +{ + int n, ret __maybe_unused; + + registry_obj_slots = kmalloc(CONFIG_XENO_OPT_REGISTRY_NRSLOTS * + sizeof(struct xnobject), GFP_KERNEL); + if (registry_obj_slots == NULL) + return -ENOMEM; + +#ifdef CONFIG_XENO_OPT_VFILE + ret = xnvfile_init_dir("registry", ®istry_vfroot, &cobalt_vfroot); + if (ret) + return ret; + + ret = xnvfile_init_regular("usage", &usage_vfile, ®istry_vfroot); + if (ret) { + xnvfile_destroy_dir(®istry_vfroot); + return ret; + } + + proc_virq = pipeline_create_inband_sirq(registry_proc_schedule); + if (proc_virq < 0) { + xnvfile_destroy_regular(&usage_vfile); + xnvfile_destroy_dir(®istry_vfroot); + return proc_virq; + } +#endif /* CONFIG_XENO_OPT_VFILE */ + + next_object_stamp = 0; + + for (n = 0; n < CONFIG_XENO_OPT_REGISTRY_NRSLOTS; n++) { + registry_obj_slots[n].objaddr = NULL; + list_add_tail(®istry_obj_slots[n].link, &free_object_list); + } + + /* Slot #0 is reserved/invalid. 
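[Editor's note] Worked example for the sizing rule implemented by xnregistry_hash_size() above, assuming CONFIG_XENO_OPT_REGISTRY_NRSLOTS is 512: 512 / 100 = 5 and primes[5] = 601, so the name index is spread across 601 hash buckets; obj_hash_max() merely clamps the subscript to the last entry (1103) for very large slot counts.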
*/ + list_get_entry(&free_object_list, struct xnobject, link); + nr_active_objects = 1; + + nr_object_entries = xnregistry_hash_size(); + object_index = kmalloc(sizeof(*object_index) * + nr_object_entries, GFP_KERNEL); + + if (object_index == NULL) { +#ifdef CONFIG_XENO_OPT_VFILE + xnvfile_destroy_regular(&usage_vfile); + xnvfile_destroy_dir(®istry_vfroot); + pipeline_delete_inband_sirq(proc_virq); +#endif /* CONFIG_XENO_OPT_VFILE */ + return -ENOMEM; + } + + for (n = 0; n < nr_object_entries; n++) + INIT_HLIST_HEAD(&object_index[n]); + + xnsynch_init(®ister_synch, XNSYNCH_FIFO, NULL); + + return 0; +} + +void xnregistry_cleanup(void) +{ +#ifdef CONFIG_XENO_OPT_VFILE + struct hlist_node *enext; + struct xnobject *ecurr; + struct xnpnode *pnode; + int n; + + flush_scheduled_work(); + + for (n = 0; n < nr_object_entries; n++) + hlist_for_each_entry_safe(ecurr, enext, + &object_index[n], hlink) { + pnode = ecurr->pnode; + if (pnode == NULL) + continue; + + pnode->ops->unexport(ecurr, pnode); + + if (--pnode->entries > 0) + continue; + + xnvfile_destroy_dir(&pnode->vdir); + + if (--pnode->root->entries == 0) + xnvfile_destroy_dir(&pnode->root->vdir); + } +#endif /* CONFIG_XENO_OPT_VFILE */ + + kfree(object_index); + xnsynch_destroy(®ister_synch); + +#ifdef CONFIG_XENO_OPT_VFILE + pipeline_delete_inband_sirq(proc_virq); + flush_scheduled_work(); + xnvfile_destroy_regular(&usage_vfile); + xnvfile_destroy_dir(®istry_vfroot); +#endif /* CONFIG_XENO_OPT_VFILE */ + + kfree(registry_obj_slots); +} + +#ifdef CONFIG_XENO_OPT_VFILE + +static DEFINE_SEMAPHORE(export_mutex); + +/* + * The following stuff implements the mechanism for delegating + * export/unexport requests to/from the /proc interface from the + * Xenomai domain to the Linux kernel (i.e. the "lower stage"). This + * ends up being a bit complex due to the fact that such requests + * might lag enough before being processed by the Linux kernel so that + * subsequent requests might just contradict former ones before they + * even had a chance to be applied (e.g. export -> unexport in the + * Xenomai domain for short-lived objects). This situation and the + * like are hopefully properly handled due to a careful + * synchronization of operations across domains. + */ +static void proc_callback(struct work_struct *work) +{ + struct xnvfile_directory *rdir, *dir; + const char *rname, *type; + struct xnobject *object; + struct xnpnode *pnode; + int ret; + spl_t s; + + down(&export_mutex); + + xnlock_get_irqsave(&nklock, s); + + while (!list_empty(&proc_object_list)) { + object = list_get_entry(&proc_object_list, + struct xnobject, link); + pnode = object->pnode; + type = pnode->dirname; + dir = &pnode->vdir; + rdir = &pnode->root->vdir; + rname = pnode->root->dirname; + + if (object->vfilp != XNOBJECT_EXPORT_SCHEDULED) + goto unexport; + + object->vfilp = XNOBJECT_EXPORT_INPROGRESS; + list_add_tail(&object->link, &busy_object_list); + + xnlock_put_irqrestore(&nklock, s); + + if (pnode->entries++ == 0) { + if (pnode->root->entries++ == 0) { + /* Create the root directory on the fly. */ + ret = xnvfile_init_dir(rname, rdir, ®istry_vfroot); + if (ret) { + xnlock_get_irqsave(&nklock, s); + object->pnode = NULL; + pnode->root->entries = 0; + pnode->entries = 0; + continue; + } + } + /* Create the class directory on the fly. 
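[Editor's note] The three-stage hop this machinery implements is worth spelling out (descriptive sketch of the control flow only, no additional code):

	out-of-band:	registry_export_pnode() (below) -> pipeline_post_sirq(proc_virq)
	inband IRQ:	registry_proc_schedule() -> schedule_work(&registry_proc_work)
	workqueue:	proc_callback() -> xnvfile_init_dir() / pnode->ops->export() / ...

Each stage runs in a progressively more permissive context, which is what allows the final stage to allocate memory and touch /proc safely.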
*/ + ret = xnvfile_init_dir(type, dir, rdir); + if (ret) { + if (pnode->root->entries == 1) { + pnode->root->entries = 0; + xnvfile_destroy_dir(rdir); + } + xnlock_get_irqsave(&nklock, s); + object->pnode = NULL; + pnode->entries = 0; + continue; + } + } + + ret = pnode->ops->export(object, pnode); + if (ret && --pnode->entries == 0) { + xnvfile_destroy_dir(dir); + if (--pnode->root->entries == 0) + xnvfile_destroy_dir(rdir); + xnlock_get_irqsave(&nklock, s); + object->pnode = NULL; + } else + xnlock_get_irqsave(&nklock, s); + + continue; + + unexport: + object->vfilp = NULL; + object->pnode = NULL; + + if (object->vfilp == XNOBJECT_EXPORT_ABORTED) + object->objaddr = NULL; + + if (object->objaddr) + list_add_tail(&object->link, &busy_object_list); + else { + /* + * Trap the case where we are unexporting an + * already unregistered object. + */ + list_add_tail(&object->link, &free_object_list); + nr_active_objects--; + } + + xnlock_put_irqrestore(&nklock, s); + + pnode->ops->unexport(object, pnode); + + if (--pnode->entries == 0) { + xnvfile_destroy_dir(dir); + if (--pnode->root->entries == 0) + xnvfile_destroy_dir(rdir); + } + + xnlock_get_irqsave(&nklock, s); + } + + xnlock_put_irqrestore(&nklock, s); + + up(&export_mutex); +} + +static irqreturn_t registry_proc_schedule(int virq, void *dev_id) +{ + /* + * schedule_work() will check for us if the work has already + * been scheduled, so just be lazy and submit blindly. + */ + schedule_work(®istry_proc_work); + + return IRQ_HANDLED; +} + +static int registry_export_vfsnap(struct xnobject *object, + struct xnpnode *pnode) +{ + struct xnpnode_snapshot *p; + int ret; + + /* + * Make sure to initialize _all_ mandatory vfile fields; most + * of the time we are using sane NULL defaults based on static + * storage for the vfile struct, but here we are building up a + * vfile object explicitly. + */ + p = container_of(pnode, struct xnpnode_snapshot, node); + object->vfile_u.vfsnap.file.datasz = p->vfile.datasz; + object->vfile_u.vfsnap.file.privsz = p->vfile.privsz; + /* + * Make the vfile refer to the provided tag struct if any, + * otherwise use our default tag space. In the latter case, + * each object family has its own private revision tag. + */ + object->vfile_u.vfsnap.file.tag = p->vfile.tag ?: + &object->vfile_u.vfsnap.tag; + object->vfile_u.vfsnap.file.ops = p->vfile.ops; + object->vfile_u.vfsnap.file.entry.lockops = p->vfile.lockops; + + ret = xnvfile_init_snapshot(object->key, &object->vfile_u.vfsnap.file, + &pnode->vdir); + if (ret) + return ret; + + object->vfilp = &object->vfile_u.vfsnap.file.entry; + object->vfilp->private = object->objaddr; + + return 0; +} + +static void registry_unexport_vfsnap(struct xnobject *object, + struct xnpnode *pnode) +{ + xnvfile_destroy_snapshot(&object->vfile_u.vfsnap.file); +} + +static void registry_touch_vfsnap(struct xnobject *object) +{ + xnvfile_touch(&object->vfile_u.vfsnap.file); +} + +struct xnpnode_ops xnregistry_vfsnap_ops = { + .export = registry_export_vfsnap, + .unexport = registry_unexport_vfsnap, + .touch = registry_touch_vfsnap, +}; +EXPORT_SYMBOL_GPL(xnregistry_vfsnap_ops); + +static int registry_export_vfreg(struct xnobject *object, + struct xnpnode *pnode) +{ + struct xnpnode_regular *p; + int ret; + + /* See registry_export_vfsnap() for hints. 
*/ + p = container_of(pnode, struct xnpnode_regular, node); + object->vfile_u.vfreg.privsz = p->vfile.privsz; + object->vfile_u.vfreg.ops = p->vfile.ops; + object->vfile_u.vfreg.entry.lockops = p->vfile.lockops; + object->vfile_u.vfreg.entry.refcnt = 0; + + ret = xnvfile_init_regular(object->key, &object->vfile_u.vfreg, + &pnode->vdir); + if (ret) + return ret; + + object->vfilp = &object->vfile_u.vfreg.entry; + object->vfilp->private = object->objaddr; + + return 0; +} + +static void registry_unexport_vfreg(struct xnobject *object, + struct xnpnode *pnode) +{ + xnvfile_destroy_regular(&object->vfile_u.vfreg); +} + +struct xnpnode_ops xnregistry_vfreg_ops = { + .export = registry_export_vfreg, + .unexport = registry_unexport_vfreg, +}; +EXPORT_SYMBOL_GPL(xnregistry_vfreg_ops); + +static int registry_export_vlink(struct xnobject *object, + struct xnpnode *pnode) +{ + struct xnpnode_link *link_desc; + char *link_target; + int ret; + + link_desc = container_of(pnode, struct xnpnode_link, node); + link_target = link_desc->target(object->objaddr); + if (link_target == NULL) + return -ENOMEM; + + ret = xnvfile_init_link(object->key, link_target, + &object->vfile_u.link, &pnode->vdir); + kfree(link_target); + if (ret) + return ret; + + object->vfilp = &object->vfile_u.link.entry; + object->vfilp->private = object->objaddr; + + return 0; +} + +static void registry_unexport_vlink(struct xnobject *object, + struct xnpnode *pnode) +{ + xnvfile_destroy_link(&object->vfile_u.link); +} + +struct xnpnode_ops xnregistry_vlink_ops = { + .export = registry_export_vlink, + .unexport = registry_unexport_vlink, +}; +EXPORT_SYMBOL_GPL(xnregistry_vlink_ops); + +static inline void registry_export_pnode(struct xnobject *object, + struct xnpnode *pnode) +{ + object->vfilp = XNOBJECT_EXPORT_SCHEDULED; + object->pnode = pnode; + list_del(&object->link); + list_add_tail(&object->link, &proc_object_list); + pipeline_post_sirq(proc_virq); +} + +static inline void registry_unexport_pnode(struct xnobject *object) +{ + if (object->vfilp != XNOBJECT_EXPORT_SCHEDULED) { + /* + * We might have preempted a v-file read op, so bump + * the object's revtag to make sure the data + * collection is aborted next, if we end up deleting + * the object being read. + */ + if (object->pnode->ops->touch) + object->pnode->ops->touch(object); + list_del(&object->link); + list_add_tail(&object->link, &proc_object_list); + pipeline_post_sirq(proc_virq); + } else { + /* + * Unexporting before the lower stage has had a chance + * to export. Move back the object to the busyq just + * like if no export had been requested. 
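[Editor's note] The string hash used for the name index (registry_hash_crunch(), defined just below) is the classic PJW/ELF hash; a standalone equivalent for experimentation outside the kernel (assumes 32-bit unsigned int, as the original does via its 0xf0000000 mask):

	static unsigned int pjw_hash(const char *key, unsigned int nbuckets)
	{
		unsigned int h = 0, g;

		while (*key) {
			h = (h << 4) + (unsigned char)*key++;
			g = h & 0xf0000000u;		/* HBYTE: higher nibble */
			if (g)
				h = (h ^ (g >> 24)) ^ g;	/* HQON: fold it back */
		}

		return h % nbuckets;
	}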
+ */ + list_del(&object->link); + list_add_tail(&object->link, &busy_object_list); + object->pnode = NULL; + object->vfilp = NULL; + } +} + +#endif /* CONFIG_XENO_OPT_VFILE */ + +static unsigned registry_hash_crunch(const char *key) +{ + unsigned int h = 0, g; + +#define HQON 24 /* Higher byte position */ +#define HBYTE 0xf0000000 /* Higher nibble on */ + + while (*key) { + h = (h << 4) + *key++; + if ((g = (h & HBYTE)) != 0) + h = (h ^ (g >> HQON)) ^ g; + } + + return h % nr_object_entries; +} + +static inline int registry_hash_enter(const char *key, struct xnobject *object) +{ + struct xnobject *ecurr; + unsigned s; + + object->key = key; + s = registry_hash_crunch(key); + + hlist_for_each_entry(ecurr, &object_index[s], hlink) + if (ecurr == object || strcmp(key, ecurr->key) == 0) + return -EEXIST; + + hlist_add_head(&object->hlink, &object_index[s]); + + return 0; +} + +static inline int registry_hash_remove(struct xnobject *object) +{ + unsigned int s = registry_hash_crunch(object->key); + struct xnobject *ecurr; + + hlist_for_each_entry(ecurr, &object_index[s], hlink) + if (ecurr == object) { + hlist_del(&ecurr->hlink); + return 0; + } + + return -ESRCH; +} + +static struct xnobject *registry_hash_find(const char *key) +{ + struct xnobject *ecurr; + + hlist_for_each_entry(ecurr, + &object_index[registry_hash_crunch(key)], hlink) + if (strcmp(key, ecurr->key) == 0) + return ecurr; + + return NULL; +} + +struct registry_wait_context { + struct xnthread_wait_context wc; + const char *key; +}; + +static inline int registry_wakeup_sleepers(const char *key) +{ + struct registry_wait_context *rwc; + struct xnthread_wait_context *wc; + struct xnthread *sleeper, *tmp; + int cnt = 0; + + xnsynch_for_each_sleeper_safe(sleeper, tmp, ®ister_synch) { + wc = xnthread_get_wait_context(sleeper); + rwc = container_of(wc, struct registry_wait_context, wc); + if (*key == *rwc->key && strcmp(key, rwc->key) == 0) { + xnsynch_wakeup_this_sleeper(®ister_synch, sleeper); + ++cnt; + } + } + + return cnt; +} + +/** + * @fn int xnregistry_enter(const char *key,void *objaddr,xnhandle_t *phandle,struct xnpnode *pnode) + * @brief Register a real-time object. + * + * This service allocates a new registry slot for an associated + * object, and indexes it by an alphanumeric key for later retrieval. + * + * @param key A valid NULL-terminated string by which the object will + * be indexed and later retrieved in the registry. Since it is assumed + * that such key is stored into the registered object, it will *not* + * be copied but only kept by reference in the registry. Pass an empty + * or NULL string if the object shall only occupy a registry slot for + * handle-based lookups. The slash character is not accepted in @a key + * if @a pnode is non-NULL. + * + * @param objaddr An opaque pointer to the object to index by @a + * key. + * + * @param phandle A pointer to a generic handle defined by the + * registry which will uniquely identify the indexed object, until the + * latter is unregistered using the xnregistry_remove() service. + * + * @param pnode A pointer to an optional /proc node class + * descriptor. This structure provides the information needed to + * export all objects from the given class through the /proc + * filesystem, under the /proc/xenomai/registry entry. Passing NULL + * indicates that no /proc support is available for the newly + * registered object. + * + * @return 0 is returned upon success. Otherwise: + * + * - -EINVAL is returned if @a objaddr is NULL. 
+ * + * - -EINVAL if @a pnode is non-NULL, and @a key points to a valid + * string containing a '/' character. + * + * - -ENOMEM is returned if the system fails to get enough dynamic + * memory from the global real-time heap in order to register the + * object. + * + * - -EEXIST is returned if the @a key is already in use. + * + * @coretags{unrestricted, might-switch, atomic-entry} + */ +int xnregistry_enter(const char *key, void *objaddr, + xnhandle_t *phandle, struct xnpnode *pnode) +{ + struct xnobject *object; + spl_t s; + int ret; + + if (objaddr == NULL || + (pnode != NULL && key != NULL && strchr(key, '/'))) + return -EINVAL; + + xnlock_get_irqsave(&nklock, s); + + if (list_empty(&free_object_list)) { + ret = -EAGAIN; + goto unlock_and_exit; + } + + object = list_get_entry(&free_object_list, struct xnobject, link); + nr_active_objects++; + object->objaddr = objaddr; + object->cstamp = ++next_object_stamp; + trace_cobalt_registry_enter(key, objaddr); +#ifdef CONFIG_XENO_OPT_VFILE + object->pnode = NULL; +#endif + if (key == NULL || *key == '\0') { + object->key = NULL; + *phandle = object - registry_obj_slots; + ret = 0; + goto unlock_and_exit; + } + + ret = registry_hash_enter(key, object); + if (ret) { + nr_active_objects--; + list_add_tail(&object->link, &free_object_list); + goto unlock_and_exit; + } + + list_add_tail(&object->link, &busy_object_list); + + /* + * <!> Make sure the handle is written back before the + * rescheduling takes place. + */ + *phandle = object - registry_obj_slots; + +#ifdef CONFIG_XENO_OPT_VFILE + if (pnode) + registry_export_pnode(object, pnode); +#endif /* CONFIG_XENO_OPT_VFILE */ + + if (registry_wakeup_sleepers(key)) + xnsched_run(); + +unlock_and_exit: + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnregistry_enter); + +/** + * @fn int xnregistry_bind(const char *key,xnticks_t timeout,int timeout_mode,xnhandle_t *phandle) + * @brief Bind to a real-time object. + * + * This service retrieves the registry handle of a given object + * identified by its key. Unless otherwise specified, this service + * will block the caller if the object is not registered yet, waiting + * for such registration to occur. + * + * @param key A valid NULL-terminated string which identifies the + * object to bind to. + * + * @param timeout The timeout which may be used to limit the time the + * thread wait for the object to be registered. This value is a wait + * time given as a count of nanoseconds. It can either be relative, + * absolute monotonic (XN_ABSOLUTE), or absolute adjustable + * (XN_REALTIME) depending on @a timeout_mode. Passing XN_INFINITE @b + * and setting @a timeout_mode to XN_RELATIVE specifies an unbounded + * wait. Passing XN_NONBLOCK causes the service to return immediately + * without waiting if the object is not registered on entry. All other + * values are used as a wait limit. + * + * @param timeout_mode The mode of the @a timeout parameter. It can + * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also + * xntimer_start()). + * + * @param phandle A pointer to a memory location which will be written + * upon success with the generic handle defined by the registry for + * the retrieved object. Contents of this memory is undefined upon + * failure. + * + * @return 0 is returned upon success. Otherwise: + * + * - -EINVAL is returned if @a key is NULL. + * + * - -EINTR is returned if xnthread_unblock() has been called for the + * waiting thread before the retrieval has completed. 
+ * + * - -EWOULDBLOCK is returned if @a timeout is equal to XN_NONBLOCK + * and the searched object is not registered on entry. As a special + * exception, this error is also returned if this service should + * block, but was called from a context which cannot sleep + * (e.g. interrupt, non-realtime or scheduler locked). + * + * - -ETIMEDOUT is returned if the object cannot be retrieved within + * the specified amount of time. + * + * @coretags{primary-only, might-switch} + * + * @note xnregistry_bind() only returns the index portion of a handle, + * which might include other fixed bits to be complete + * (e.g. XNSYNCH_PSHARED). The caller is responsible for completing + * the handle returned with those bits if applicable, depending on the + * context. + */ +int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode, + xnhandle_t *phandle) +{ + struct registry_wait_context rwc; + struct xnobject *object; + int ret = 0, info; + spl_t s; + + if (key == NULL) + return -EINVAL; + + xnlock_get_irqsave(&nklock, s); + + if (timeout_mode == XN_RELATIVE && + timeout != XN_INFINITE && timeout != XN_NONBLOCK) { + timeout_mode = XN_ABSOLUTE; + timeout += xnclock_read_monotonic(&nkclock); + } + + for (;;) { + object = registry_hash_find(key); + if (object) { + *phandle = object - registry_obj_slots; + goto unlock_and_exit; + } + + if ((timeout_mode == XN_RELATIVE && timeout == XN_NONBLOCK) || + xnsched_unblockable_p()) { + ret = -EWOULDBLOCK; + goto unlock_and_exit; + } + + rwc.key = key; + xnthread_prepare_wait(&rwc.wc); + info = xnsynch_sleep_on(®ister_synch, timeout, timeout_mode); + if (info & XNTIMEO) { + ret = -ETIMEDOUT; + goto unlock_and_exit; + } + if (info & XNBREAK) { + ret = -EINTR; + goto unlock_and_exit; + } + } + +unlock_and_exit: + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnregistry_bind); + +/** + * @fn int xnregistry_remove(xnhandle_t handle) + * @brief Forcibly unregister a real-time object. + * + * This service forcibly removes an object from the registry. The + * removal is performed regardless of the current object's locking + * status. + * + * @param handle The generic handle of the object to remove. + * + * @return 0 is returned upon success. Otherwise: + * + * - -ESRCH is returned if @a handle does not reference a registered + * object. + * + * @coretags{unrestricted} + */ +int xnregistry_remove(xnhandle_t handle) +{ + struct xnobject *object; + void *objaddr; + int ret = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + object = xnregistry_validate(handle); + if (object == NULL) { + ret = -ESRCH; + goto unlock_and_exit; + } + + trace_cobalt_registry_remove(object->key, object->objaddr); + + objaddr = object->objaddr; + object->objaddr = NULL; + object->cstamp = 0; + + if (object->key) { + registry_hash_remove(object); + +#ifdef CONFIG_XENO_OPT_VFILE + if (object->pnode) { + if (object->vfilp == XNOBJECT_EXPORT_INPROGRESS) { + object->vfilp = XNOBJECT_EXPORT_ABORTED; + object->objaddr = objaddr; + } + + registry_unexport_pnode(object); + /* + * Leave the update of the object queues to + * the work callback if it has been kicked. 
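[Editor's note] Putting xnregistry_enter(), xnregistry_bind() and xnregistry_remove() together, a hypothetical in-kernel caller might look as follows (sketch only; struct my_obj and the key string are invented for illustration):

	static int export_my_obj(struct my_obj *obj)
	{
		xnhandle_t h;
		int ret;

		ret = xnregistry_enter("my_obj:0", obj, &h, NULL); /* NULL: no /proc export */
		if (ret)
			return ret;

		/* Another context may now block until the key shows up... */
		ret = xnregistry_bind("my_obj:0", XN_INFINITE, XN_RELATIVE, &h);
		if (ret == 0)
			ret = xnregistry_remove(h); /* ...and drop the slot again */

		return ret;
	}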
+ */ + if (object->pnode) { + xnlock_put_irqrestore(&nklock, s); + if (is_secondary_domain()) + flush_work(®istry_proc_work); + return 0; + } + } +#endif /* CONFIG_XENO_OPT_VFILE */ + + list_del(&object->link); + } + + if (!IS_ENABLED(CONFIG_XENO_OPT_VFILE) || !object->objaddr) { + list_add_tail(&object->link, &free_object_list); + nr_active_objects--; + } + +unlock_and_exit: + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnregistry_remove); + +/** + * Turn a named object into an anonymous object + * + * @coretags{unrestricted} + */ +int xnregistry_unlink(const char *key) +{ + struct xnobject *object; + int ret = 0; + spl_t s; + + if (key == NULL) + return -EINVAL; + + xnlock_get_irqsave(&nklock, s); + + object = registry_hash_find(key); + if (object == NULL) { + ret = -ESRCH; + goto unlock_and_exit; + } + + trace_cobalt_registry_unlink(object->key, object->objaddr); + + ret = registry_hash_remove(object); + if (ret < 0) + goto unlock_and_exit; + +#ifdef CONFIG_XENO_OPT_VFILE + if (object->pnode) { + registry_unexport_pnode(object); + /* + * Leave the update of the object queues to + * the work callback if it has been kicked. + */ + if (object->pnode) + goto unlock_and_exit; + } +#endif /* CONFIG_XENO_OPT_VFILE */ + + list_del(&object->link); + + object->key = NULL; + +unlock_and_exit: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +/** + * @fn void *xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r) + * @brief Find a real-time object into the registry. + * + * This service retrieves an object from its handle into the registry + * and returns the memory address of its descriptor. Optionally, it + * also copies back the object's creation stamp which is unique across + * object registration calls. + * + * @param handle The generic handle of the object to fetch. + * + * @param cstamp_r If not-NULL, the object's creation stamp will be + * copied to this memory area. + * + * @return The memory address of the object's descriptor is returned + * on success. Otherwise, NULL is returned if @a handle does not + * reference a registered object. + * + * @coretags{unrestricted} + */ + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/API.CHANGES b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/API.CHANGES new file mode 100644 index 0000000..978799f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/API.CHANGES @@ -0,0 +1,53 @@ +Scheduled modifications (unsorted): + o Packet pool management (generalised variant of RTnet's and RT-Firerwire's + buffer pools). + o Threaded IRQ handlers. + o Support for deferring IRQ line re-enabling from handler to thread context. + +Revision 9: + +See http://xenomai.org/migrating-from-xenomai-2-x-to-3-x/#RTDM_interface_changes. + +Revision 8: + o Added rtdm_rt_capable. + o Added rtdm_context_put as logic match to rtdm_context_get + +Revision 7: + o Added callbacks and services to enable select support. + +Revision 6: + o Added profile_version field to rtdm_device. + o Requested IRQ lines are now enabled on return of rtdm_irq_request. + o Converted request argument in IOCTL handler to unsigned int to fix issues + on 64-bit architectures. + o Added custom argument to rtdm_nrtsig handler. + o Introduced Timer API. + o Introduced monotonic time base: + - obtainable via rtdm_clock_read_monotonic + - usable via new rtdm_task_sleep_abs or the timer API + o Deprecated rtdm_task_sleep_until, users shall migrate to + rtdm_task_sleep_abs(..., RTDM_TIMERMODE_REALTIME). 
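[Editor's note] Migration sketch for the revision 6 deprecation above (hypothetical driver fragment; rtdm_clock_read() returns the current time on the realtime base):

	nanosecs_abs_t wakeup = rtdm_clock_read() + 1000000; /* one millisecond ahead */

	/* before: rtdm_task_sleep_until(wakeup); */
	rtdm_task_sleep_abs(wakeup, RTDM_TIMERMODE_REALTIME);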
+ +Revision 5: + o Introduced generic time types nanosecs_abs_t and nanosecs_rel_t. + o Switched the following functions parameters from unsigned to signed + (uint64_t -> nanosecs_rel_t) and adopted their semantics: + - period in rtdm_task_init, period 0 means non-periodic + - period in rtdm_task_set_period, period 0 means non-periodic + - delay in rtdm_task_sleep, now clearly specified: delay = 0 means + infinite delay, delay < 0 means no delay at all + - delay in rtdm_task_busy_sleep, same semantics as before (delay <= 0 + means no delay) + o Added rtdm_safe_copy_to/from_user. + o Added rtdm_iomap_to_user. + +Revision 4: + o Dropped RTDM_IRQ_PROPAGATE ISR return flag. Generic deterministic RTDM + drivers should not interact with standard Linux in this way. + o Merged RTDM_IRQ_ENABLE into RTDM_IRQ_HANDLED return code. An explicit + request to leave the IRQ line disabled upon return from ISR will be + provided in later versions via rtdm_irq_disable. + o Added RTDM_IRQTYPE_SHARED and RTDM_IRQTYPE_EDGE flags which indicate + specific handling sharable level- and edge-triggered IRQs + o Added rtdm_mmap_to_user and rtdm_munmap. Intended usage is the mapping of + driver memory like DMA buffers into the address range of a user task. diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/COPYING b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/COPYING new file mode 100644 index 0000000..66b9f24 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/COPYING @@ -0,0 +1,281 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place - Suite 330, Boston, MA + 02111-1307, USA. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. 
+ + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile new file mode 100644 index 0000000..4f5a6ca --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile @@ -0,0 +1,10 @@ + +obj-$(CONFIG_XENOMAI) += xenomai.o + +xenomai-y := core.o \ + device.o \ + drvlib.o \ + fd.o \ + wrappers.o + +ccflags-y += -I$(srctree)/$(src)/.. 
-I$(srctree)/kernel diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c new file mode 100644 index 0000000..dcced04 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c @@ -0,0 +1,1373 @@ +/* + * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de> + * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net> + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/workqueue.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/fdtable.h> +#include <linux/anon_inodes.h> +#include <cobalt/kernel/ppd.h> +#include <cobalt/kernel/heap.h> +#include "rtdm/internal.h" +#define CREATE_TRACE_POINTS +#include <trace/events/cobalt-rtdm.h> +#include "posix/process.h" + +/** + * @ingroup rtdm + * @defgroup rtdm_driver_interface Driver programming interface + * RTDM driver programming interface + * @{ + */ + +static void cleanup_instance(struct rtdm_device *dev, + struct rtdm_dev_context *context) +{ + if (context) + kfree(context); + + __rtdm_put_device(dev); +} + +void __rtdm_dev_close(struct rtdm_fd *fd) +{ + struct rtdm_dev_context *context = rtdm_fd_to_context(fd); + struct rtdm_device *dev = context->device; + struct rtdm_driver *drv = dev->driver; + + if (!fd->stale && drv->ops.close) + drv->ops.close(fd); + + cleanup_instance(dev, context); +} + +int __rtdm_anon_getfd(const char *name, int flags) +{ + return anon_inode_getfd(name, &rtdm_dumb_fops, NULL, flags); +} + +void __rtdm_anon_putfd(int ufd) +{ + close_fd(ufd); +} + +static int create_instance(int ufd, struct rtdm_device *dev, + struct rtdm_dev_context **context_ptr) +{ + struct rtdm_driver *drv = dev->driver; + struct rtdm_dev_context *context; + + /* + * Reset to NULL so that we can always use cleanup_files/instance to + * revert also partially successful allocations. 
+ */ + *context_ptr = NULL; + + if ((drv->device_flags & RTDM_EXCLUSIVE) != 0 && + atomic_read(&dev->refcount) > 1) + return -EBUSY; + + context = kzalloc(sizeof(struct rtdm_dev_context) + + drv->context_size, GFP_KERNEL); + if (unlikely(context == NULL)) + return -ENOMEM; + + context->device = dev; + *context_ptr = context; + + return rtdm_fd_enter(&context->fd, ufd, RTDM_FD_MAGIC, &dev->ops); +} + +#ifdef CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE + +static inline struct file * +open_devnode(struct rtdm_device *dev, const char *path, int oflag) +{ + struct file *filp; + char *filename; + + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY) && + strncmp(path, "/dev/rtdm/", 10)) + printk(XENO_WARNING + "%s[%d] opens obsolete device path: %s\n", + current->comm, task_pid_nr(current), path); + + filename = kasprintf(GFP_KERNEL, "/dev/rtdm/%s", dev->name); + if (filename == NULL) + return ERR_PTR(-ENOMEM); + + filp = filp_open(filename, oflag, 0); + kfree(filename); + + return filp; +} + +#else /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */ + +static inline struct file * +open_devnode(struct rtdm_device *dev, const char *path, int oflag) +{ + return filp_open(path, oflag, 0); +} + +#endif /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */ + +int __rtdm_dev_open(const char *path, int oflag) +{ + struct rtdm_dev_context *context; + struct rtdm_device *dev; + struct file *filp; + int ufd, ret; + + secondary_mode_only(); + + /* + * CAUTION: we do want a lookup into the registry to happen + * before any attempt is made to open the devnode, so that we + * don't inadvertently open a regular (i.e. non-RTDM) device. + * Reason is that opening, then closing a device - because we + * don't manage it - may incur side-effects we don't want, + * e.g. opening then closing one end of a pipe would cause the + * other side to read the EOF condition. This is basically + * why we keep a RTDM registry for named devices, so that we + * can figure out whether an open() request is going to be + * valid, without having to open the devnode yet. 
+ */ + dev = __rtdm_get_namedev(path); + if (dev == NULL) + return -EADV; + + ufd = get_unused_fd_flags(oflag); + if (ufd < 0) { + ret = ufd; + goto fail_fd; + } + + filp = open_devnode(dev, path, oflag); + if (IS_ERR(filp)) { + ret = PTR_ERR(filp); + goto fail_fopen; + } + + ret = create_instance(ufd, dev, &context); + if (ret < 0) + goto fail_create; + + context->fd.minor = dev->minor; + context->fd.oflags = oflag; + + trace_cobalt_fd_open(current, &context->fd, ufd, oflag); + + if (dev->ops.open) { + ret = dev->ops.open(&context->fd, oflag); + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + if (ret < 0) + goto fail_open; + } + + ret = rtdm_device_new_fd(&context->fd, ufd, context->device); + if (ret < 0) + goto fail_open; + + fd_install(ufd, filp); + + return ufd; + +fail_open: + cleanup_instance(dev, context); +fail_create: + filp_close(filp, current->files); +fail_fopen: + put_unused_fd(ufd); +fail_fd: + __rtdm_put_device(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(__rtdm_dev_open); + +int __rtdm_dev_socket(int protocol_family, int socket_type, + int protocol) +{ + struct rtdm_dev_context *context; + struct rtdm_device *dev; + int ufd, ret; + + secondary_mode_only(); + + dev = __rtdm_get_protodev(protocol_family, socket_type); + if (dev == NULL) + return -EAFNOSUPPORT; + + ufd = __rtdm_anon_getfd("[rtdm-socket]", O_RDWR); + if (ufd < 0) { + ret = ufd; + goto fail_getfd; + } + + ret = create_instance(ufd, dev, &context); + if (ret < 0) + goto fail_create; + + trace_cobalt_fd_socket(current, &context->fd, ufd, protocol_family); + + if (dev->ops.socket) { + ret = dev->ops.socket(&context->fd, protocol); + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + if (ret < 0) + goto fail_socket; + } + + ret = rtdm_device_new_fd(&context->fd, ufd, context->device); + if (ret < 0) + goto fail_socket; + + return ufd; + +fail_socket: + cleanup_instance(dev, context); +fail_create: + close_fd(ufd); +fail_getfd: + __rtdm_put_device(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(__rtdm_dev_socket); + +int __rtdm_dev_ioctl_core(struct rtdm_fd *fd, unsigned int request, + void __user *arg) +{ + struct rtdm_device *dev = rtdm_fd_device(fd); + struct rtdm_driver *drv; + struct rtdm_device_info dev_info; + + if (fd->magic != RTDM_FD_MAGIC || request != RTIOC_DEVICE_INFO) + return -EADV; + + drv = dev->driver; + dev_info.device_flags = drv->device_flags; + dev_info.device_class = drv->profile_info.class_id; + dev_info.device_sub_class = drv->profile_info.subclass_id; + dev_info.profile_version = drv->profile_info.version; + + return rtdm_safe_copy_to_user(fd, arg, &dev_info, sizeof(dev_info)); +} + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ + +/** + * @addtogroup rtdm_sync + *@{ + */ + +/** + * @fn void rtdm_waitqueue_init(struct rtdm_waitqueue *wq) + * @brief Initialize a RTDM wait queue + * + * Sets up a wait queue structure for further use. + * + * @param wq waitqueue to initialize. + * + * @coretags{task-unrestricted} + */ +void rtdm_waitqueue_init(struct rtdm_waitqueue *wq); + +/** + * @fn void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq) + * @brief Deletes a RTDM wait queue + * + * Dismantles a wait queue structure, releasing all resources attached + * to it. + * + * @param wq waitqueue to delete. 
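+ *
+ * A minimal lifecycle sketch (editorial illustration; the mydrv_state
+ * structure and function names are hypothetical):
+ *
+ * @code
+ * struct mydrv_state {
+ *	struct rtdm_waitqueue wq;
+ * };
+ *
+ * static void mydrv_setup(struct mydrv_state *st)
+ * {
+ *	rtdm_waitqueue_init(&st->wq);
+ * }
+ *
+ * static void mydrv_cleanup(struct mydrv_state *st)
+ * {
+ *	rtdm_waitqueue_destroy(&st->wq);
+ * }
+ * @endcode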
+ * + * @coretags{task-unrestricted} + */ +void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq); + +/** + * @fn rtdm_timedwait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq) + * @brief Timed sleep on a locked waitqueue until a condition gets true + * + * The calling task is put to sleep until @a condition evaluates to + * true or a timeout occurs. The condition is checked each time the + * waitqueue @a wq is signaled. + * + * The waitqueue must have been locked by a call to + * rtdm_waitqueue_lock() prior to calling this service. + * + * @param wq locked waitqueue to wait on. The waitqueue lock is + * dropped when sleeping, then reacquired before this service returns + * to the caller. + * + * @param condition C expression for the event to wait for. + * + * @param timeout relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values. + * + * @param[in,out] toseq handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has received a Linux signal or + * has been forcibly unblocked by a call to rtdm_task_unblock(). + * + * - -ETIMEDOUT is returned if the request has not been satisfied + * within the specified amount of time. + * + * @note rtdm_waitqueue_signal() has to be called after changing any + * variable that could change the result of the wait condition. + * + * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for + * such service, and might cause unexpected behavior. + * + * @coretags{primary-only, might-switch} + */ +rtdm_timedwait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq); + +/** + * @fn rtdm_wait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition) + * @brief Sleep on a locked waitqueue until a condition gets true + * + * The calling task is put to sleep until @a condition evaluates to + * true. The condition is checked each time the waitqueue @a wq is + * signaled. + * + * The waitqueue must have been locked by a call to + * rtdm_waitqueue_lock() prior to calling this service. + * + * @param wq locked waitqueue to wait on. The waitqueue lock is + * dropped when sleeping, then reacquired before this service returns + * to the caller. + * + * @param condition C expression for the event to wait for. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has received a Linux signal or + * has been forcibly unblocked by a call to rtdm_task_unblock(). + * + * @note rtdm_waitqueue_signal() has to be called after changing any + * variable that could change the result of the wait condition. + * + * @coretags{primary-only, might-switch} + */ +rtdm_wait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition); + +/** + * @fn rtdm_timedwait_condition(struct rtdm_wait_queue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq) + * @brief Timed sleep on a waitqueue until a condition gets true + * + * The calling task is put to sleep until @a condition evaluates to + * true or a timeout occurs. The condition is checked each time the + * waitqueue @a wq is signaled. + * + * @param wq waitqueue to wait on. + * + * @param condition C expression for the event to wait for. + * + * @param timeout relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values.
+ * + * @param[in,out] toseq handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has received a Linux signal or + * has been forcibly unblocked by a call to rtdm_task_unblock(). + * + * - -ETIMEDOUT is returned if the request has not been satisfied + * within the specified amount of time. + * + * @note rtdm_waitqueue_signal() has to be called after changing any + * variable that could change the result of the wait condition. + * + * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for + * such service, and might cause unexpected behavior. + * + * @coretags{primary-only, might-switch} + */ +rtdm_timedwait_condition(struct rtdm_wait_queue *wq, C_expr condition, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq); + +/** + * @fn void rtdm_timedwait(struct rtdm_wait_queue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq) + * @brief Timed sleep on a waitqueue unconditionally + * + * The calling task is put to sleep until the waitqueue is signaled by + * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or + * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs. + * + * @param wq waitqueue to wait on. + * + * @param timeout relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values. + * + * @param[in,out] toseq handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if the waitqueue has been flushed, or the + * calling task has received a Linux signal or has been forcibly + * unblocked by a call to rtdm_task_unblock(). + * + * - -ETIMEDOUT is returned if the request has not been satisfied + * within the specified amount of time. + * + * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for + * such service, and might cause unexpected behavior. + * + * @coretags{primary-only, might-switch} + */ +void rtdm_timedwait(struct rtdm_wait_queue *wq, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq); + +/** + * @fn void rtdm_timedwait_locked(struct rtdm_wait_queue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq) + * @brief Timed sleep on a locked waitqueue unconditionally + * + * The calling task is put to sleep until the waitqueue is signaled by + * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or + * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs. + * + * The waitqueue must have been locked by a call to + * rtdm_waitqueue_lock() prior to calling this service. + * + * @param wq locked waitqueue to wait on. The waitqueue lock is + * dropped when sleeping, then reacquired before this service returns + * to the caller. + * + * @param timeout relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values. + * + * @param[in,out] toseq handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if the waitqueue has been flushed, or the + * calling task has received a Linux signal or has been forcibly + * unblocked by a call to rtdm_task_unblock(). + * + * - -ETIMEDOUT is returned if the request has not been satisfied + * within the specified amount of time. + * + * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for + * such service, and might cause unexpected behavior.
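+ *
+ * A bounded wait sketch (editorial illustration; wq, st->data_ready and
+ * the surrounding locking scheme are hypothetical driver-side names):
+ *
+ * @code
+ * rtdm_lockctx_t ctx;
+ * rtdm_toseq_t toseq;
+ * int ret = 0;
+ *
+ * rtdm_toseq_init(&toseq, timeout);
+ * rtdm_waitqueue_lock(&wq, ctx);
+ * while (!st->data_ready && ret == 0)
+ *	ret = rtdm_timedwait_locked(&wq, timeout, &toseq);
+ * rtdm_waitqueue_unlock(&wq, ctx);
+ * @endcode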
+ * + * @coretags{primary-only, might-switch} + */ +void rtdm_timedwait_locked(struct rtdm_wait_queue *wq, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq); + +/** + * @fn rtdm_wait_condition(struct rtdm_wait_queue *wq, C_expr condition) + * @brief Sleep on a waitqueue until a condition gets true + * + * The calling task is put to sleep until @a condition evaluates to + * true. The condition is checked each time the waitqueue @a wq is + * signaled. + * + * @param wq waitqueue to wait on + * + * @param condition C expression for the event to wait for. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has received a Linux signal or + * has been forcibly unblocked by a call to rtdm_task_unblock(). + * + * @note rtdm_waitqueue_signal() has to be called after changing any + * variable that could change the result of the wait condition. + * + * @coretags{primary-only, might-switch} + */ +rtdm_wait_condition(struct rtdm_wait_queue *wq, C_expr condition); + +/** + * @fn void rtdm_wait(struct rtdm_wait_queue *wq) + * @brief Sleep on a waitqueue unconditionally + * + * The calling task is put to sleep until the waitqueue is signaled by + * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or + * flushed by a call to rtdm_waitqueue_flush(). + * + * @param wq waitqueue to wait on. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if the waitqueue has been flushed, or the + * calling task has received a Linux signal or has been forcibly + * unblocked by a call to rtdm_task_unblock(). + * + * @coretags{primary-only, might-switch} + */ +void rtdm_wait(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_wait_locked(struct rtdm_wait_queue *wq) + * @brief Sleep on a locked waitqueue unconditionally + * + * The calling task is put to sleep until the waitqueue is signaled by + * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or + * flushed by a call to rtdm_waitqueue_flush(). + * + * The waitqueue must have been locked by a call to + * rtdm_waitqueue_lock() prior to calling this service. + * + * @param wq locked waitqueue to wait on. The waitqueue lock is + * dropped when sleeping, then reacquired before this service returns + * to the caller. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if the waitqueue has been flushed, or the + * calling task has received a Linux signal or has been forcibly + * unblocked by a call to rtdm_task_unblock(). + * + * @coretags{primary-only, might-switch} + */ +void rtdm_wait_locked(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_waitqueue_lock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context) + * @brief Lock a waitqueue + * + * Acquires the lock on the waitqueue @a wq. + * + * @param wq waitqueue to lock. + * + * @param context name of local variable to store the context in. + * + * @note Recursive locking might lead to unexpected behavior, + * including lock up. + * + * @coretags{unrestricted} + */ +void rtdm_waitqueue_lock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context); + +/** + * @fn void rtdm_waitqueue_unlock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context) + * @brief Unlock a waitqueue + * + * Releases the lock on the waitqueue @a wq. + * + * @param wq waitqueue to unlock. + * + * @param context name of local variable to retrieve the context from. 
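+ *
+ * Typical pairing with rtdm_waitqueue_lock() (editorial sketch; wq and
+ * the st->data_ready flag are hypothetical):
+ *
+ * @code
+ * rtdm_lockctx_t ctx;
+ *
+ * rtdm_waitqueue_lock(&wq, ctx);
+ * st->data_ready = 1;	// update the condition checked by waiters
+ * rtdm_waitqueue_unlock(&wq, ctx);
+ * rtdm_waitqueue_signal(&wq);
+ * @endcode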
+ * + * @coretags{unrestricted} + */ +void rtdm_waitqueue_unlock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context); + +/** + * @fn void rtdm_waitqueue_signal(struct rtdm_wait_queue *wq) + * @brief Signal a waitqueue + * + * Signals the waitqueue @a wq, waking up a single waiter (if + * any). + * + * @param wq waitqueue to signal. + * + * @return non-zero if a task has been readied as a result of this + * call, zero otherwise. + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_waitqueue_signal(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_waitqueue_broadcast(struct rtdm_wait_queue *wq) + * @brief Broadcast a waitqueue + * + * Broadcast the waitqueue @a wq, waking up all waiters. Each + * readied task may assume to have received the wake up event. + * + * @param wq waitqueue to broadcast. + * + * @return non-zero if at least one task has been readied as a result + * of this call, zero otherwise. + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_waitqueue_broadcast(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_waitqueue_flush(struct rtdm_wait_queue *wq) + * @brief Flush a waitqueue + * + * Flushes the waitqueue @a wq, unblocking all waiters with an error + * status (-EINTR). + * + * @param wq waitqueue to flush. + * + * @return non-zero if at least one task has been readied as a result + * of this call, zero otherwise. + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_waitqueue_flush(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_waitqueue_wakeup(struct rtdm_wait_queue *wq, rtdm_task_t waiter) + * @brief Signal a particular waiter on a waitqueue + * + * Signals the waitqueue @a wq, waking up waiter @a waiter only, + * which must be currently sleeping on the waitqueue. + * + * @param wq waitqueue to signal. + * + * @param waiter RTDM task to wake up. + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_waitqueue_wakeup(struct rtdm_wait_queue *wq, rtdm_task_t waiter); + +/** + * @fn rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_wait_queue *wq) + * @brief Simple iterator for waitqueues + * + * This construct traverses the wait list of a given waitqueue + * @a wq, assigning each RTDM task pointer to the cursor variable + * @a pos, which must be of type rtdm_task_t. + * + * @a wq must have been locked by a call to rtdm_waitqueue_lock() + * prior to traversing its wait list. + * + * @param pos cursor variable holding a pointer to the RTDM task + * being fetched. + * + * @param wq waitqueue to scan. + * + * @note The waitqueue should not be signaled, broadcast or flushed + * during the traversal, unless the loop is aborted immediately + * after. Should multiple waiters be readied while iterating, the safe + * form rtdm_for_each_waiter_safe() must be used for traversal + * instead. + * + * @coretags{unrestricted} + */ +rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_wait_queue *wq); + +/** + * @fn rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_wait_queue *wq) + * @brief Safe iterator for waitqueues + * + * This construct traverses the wait list of a given waitqueue + * @a wq, assigning each RTDM task pointer to the cursor variable + * @a pos, which must be of type rtdm_task_t. + * + * Unlike with rtdm_for_each_waiter(), the waitqueue may be signaled, + * broadcast or flushed during the traversal. + * + * @a wq must have been locked by a call to rtdm_waitqueue_lock() + * prior to traversing its wait list. + * + * @param pos cursor variable holding a pointer to the RTDM task + * being fetched. 
+ * + * @param tmp temporary cursor variable. + * + * @param wq waitqueue to scan. + * + * @coretags{unrestricted} + */ +rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_wait_queue *wq); + +/** @} rtdm_sync */ + +/** + * @defgroup rtdm_interdriver_api Driver to driver services + * Inter-driver interface + *@{ + */ + +/** + * @brief Open a device + * + * Refer to rtdm_open() for parameters and return values + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_open(const char *path, int oflag, ...); + +/** + * @brief Create a socket + * + * Refer to rtdm_socket() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_socket(int protocol_family, int socket_type, int protocol); + +/** + * @brief Close a device or socket + * + * Refer to rtdm_close() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_close(int fd); + +/** + * @brief Issue an IOCTL + * + * Refer to rtdm_ioctl() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_ioctl(int fd, int request, ...); + +/** + * @brief Read from device + * + * Refer to rtdm_read() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_read(int fd, void *buf, size_t nbyte); + +/** + * @brief Write to device + * + * Refer to rtdm_write() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_write(int fd, const void *buf, size_t nbyte); + +/** + * @brief Receive message from socket + * + * Refer to rtdm_recvmsg() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags); + +/** + * @brief Receive message from socket + * + * Refer to rtdm_recvfrom() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen); + +/** + * @brief Receive message from socket + * + * Refer to rtdm_recv() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags); + +/** + * @brief Transmit message to socket + * + * Refer to rtdm_sendmsg() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags); + +/** + * @brief Transmit message to socket + * + * Refer to rtdm_sendto() for parameters and return values. 
Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags, + const struct sockaddr *to, socklen_t tolen); + +/** + * @brief Transmit message to socket + * + * Refer to rtdm_send() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags); + +/** + * @brief Bind to local address + * + * Refer to rtdm_bind() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen); + +/** + * @brief Connect to remote address + * + * Refer to rtdm_connect() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_connect(int fd, const struct sockaddr *serv_addr, socklen_t addrlen); + +/** + * @brief Listen to incoming connection requests + * + * Refer to rtdm_listen() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_listen(int fd, int backlog); + +/** + * @brief Accept a connection request + * + * Refer to rtdm_accept() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen); + +/** + * @brief Shut down parts of a connection + * + * Refer to rtdm_shutdown() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_shutdown(int fd, int how); + +/** + * @brief Get socket option + * + * Refer to rtdm_getsockopt() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getsockopt(int fd, int level, int optname, void *optval, + socklen_t *optlen); + +/** + * @brief Set socket option + * + * Refer to rtdm_setsockopt() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_setsockopt(int fd, int level, int optname, const void *optval, + socklen_t optlen); + +/** + * @brief Get local socket address + * + * Refer to rtdm_getsockname() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen); + +/** + * @brief Get socket destination address + * + * Refer to rtdm_getpeername() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". 
+ * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen); + +/** @} Inter-driver calls */ + +/** @} */ + +/*! + * @addtogroup rtdm_user_api + * @{ + */ + +/** + * @brief Open a device + * + * @param[in] path Device name + * @param[in] oflag Open flags + * @param ... Further parameters will be ignored. + * + * @return Positive file descriptor value on success, otherwise a negative + * error code. + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c open() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_open(const char *path, int oflag, ...); + +/** + * @brief Create a socket + * + * @param[in] protocol_family Protocol family (@c PF_xxx) + * @param[in] socket_type Socket type (@c SOCK_xxx) + * @param[in] protocol Protocol ID, 0 for default + * + * @return Positive file descriptor value on success, otherwise a negative + * error code. + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c socket() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_socket(int protocol_family, int socket_type, int protocol); + +/** + * @brief Close a device or socket + * + * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket() + * + * @return 0 on success, otherwise a negative error code. + * + * @note If the matching rtdm_open() or rtdm_socket() call took place in + * non-real-time context, rtdm_close() must be issued within non-real-time + * as well. Otherwise, the call will fail. + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c close() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_close(int fd); + +/** + * @brief Issue an IOCTL + * + * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket() + * @param[in] request IOCTL code + * @param ... Optional third argument, depending on IOCTL function + * (@c void @c * or @c unsigned @c long) + * + * @return Positive value on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c ioctl() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_ioctl(int fd, int request, ...); + +/** + * @brief Read from device + * + * @param[in] fd File descriptor as returned by rtdm_open() + * @param[out] buf Input buffer + * @param[in] nbyte Number of bytes to read + * + * @return Number of bytes read, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c read() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_read(int fd, void *buf, size_t nbyte); + +/** + * @brief Write to device + * + * @param[in] fd File descriptor as returned by rtdm_open() + * @param[in] buf Output buffer + * @param[in] nbyte Number of bytes to write + * + * @return Number of bytes written, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles".
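+ *
+ * A combined open/write/read sketch (editorial illustration; the device
+ * path and message contents are hypothetical, error handling trimmed):
+ *
+ * @code
+ * char buf[16];
+ * ssize_t n;
+ * int fd;
+ *
+ * fd = rtdm_open("/dev/rtdm/mydev", O_RDWR);
+ * if (fd >= 0) {
+ *	n = rtdm_write(fd, "ping", 4);
+ *	if (n == 4)
+ *		n = rtdm_read(fd, buf, sizeof(buf));
+ *	rtdm_close(fd);
+ * }
+ * @endcode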
+ * + * @see @c write() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_write(int fd, const void *buf, size_t nbyte); + +/** + * @brief Receive message from socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in,out] msg Message descriptor + * @param[in] flags Message flags + * + * @return Number of bytes received, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c recvmsg() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags); + +/** + * @brief Receive message from socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] buf Message buffer + * @param[in] len Message buffer size + * @param[in] flags Message flags + * @param[out] from Buffer for message sender address + * @param[in,out] fromlen Address buffer size + * + * @return Number of bytes received, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c recvfrom() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen); + +/** + * @brief Receive message from socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] buf Message buffer + * @param[in] len Message buffer size + * @param[in] flags Message flags + * + * @return Number of bytes received, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c recv() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags); + +/** + * @brief Transmit message to socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] msg Message descriptor + * @param[in] flags Message flags + * + * @return Number of bytes sent, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c sendmsg() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags); + +/** + * @brief Transmit message to socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] buf Message buffer + * @param[in] len Message buffer size + * @param[in] flags Message flags + * @param[in] to Buffer for message destination address + * @param[in] tolen Address buffer size + * + * @return Number of bytes sent, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". 
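+ *
+ * For example (editorial sketch; fd, the frame buffer and the IPv4
+ * destination are hypothetical):
+ *
+ * @code
+ * struct sockaddr_in dst = {
+ *	.sin_family = AF_INET,
+ *	.sin_port = htons(7777),
+ * };
+ * ssize_t n;
+ *
+ * n = rtdm_sendto(fd, frame, sizeof(frame), 0,
+ *		   (struct sockaddr *)&dst, sizeof(dst));
+ * @endcode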
+ * + * @see @c sendto() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags, + const struct sockaddr *to, socklen_t tolen); + +/** + * @brief Transmit message to socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] buf Message buffer + * @param[in] len Message buffer size + * @param[in] flags Message flags + * + * @return Number of bytes sent, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c send() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags); + +/** + * @brief Bind to local address + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] my_addr Address buffer + * @param[in] addrlen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c bind() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen); + +/** + * @brief Connect to remote address + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] serv_addr Address buffer + * @param[in] addrlen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c connect() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_connect(int fd, const struct sockaddr *serv_addr, + socklen_t addrlen); + +/** + * @brief Listen for incoming connection requests + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] backlog Maximum queue length + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c listen() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_listen(int fd, int backlog); + +/** + * @brief Accept connection requests + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] addr Buffer for remote address + * @param[in,out] addrlen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c accept() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen); + +/** + * @brief Shut down parts of a connection + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] how Specifies the part to be shut down (@c SHUT_xxx) + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles".
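+ *
+ * For instance, to stop both directions of a connected socket before
+ * closing it (editorial sketch; fd is hypothetical):
+ *
+ * @code
+ * rtdm_shutdown(fd, SHUT_RDWR);
+ * rtdm_close(fd);
+ * @endcode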
+ * + * @see @c shutdown() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_shutdown(int fd, int how); + +/** + * @brief Get socket option + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] level Addressed stack level + * @param[in] optname Option name ID + * @param[out] optval Value buffer + * @param[in,out] optlen Value buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c getsockopt() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getsockopt(int fd, int level, int optname, void *optval, + socklen_t *optlen); + +/** + * @brief Set socket option + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] level Addressed stack level + * @param[in] optname Option name ID + * @param[in] optval Value buffer + * @param[in] optlen Value buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c setsockopt() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_setsockopt(int fd, int level, int optname, const void *optval, + socklen_t optlen); + +/** + * @brief Get local socket address + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] name Address buffer + * @param[in,out] namelen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c getsockname() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen); + +/** + * @brief Get socket destination address + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] name Address buffer + * @param[in,out] namelen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c getpeername() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen); + +#endif /* DOXYGEN_CPP */ + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c new file mode 100644 index 0000000..1215515 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c @@ -0,0 +1,651 @@ +/* + * Real-Time Driver Model for Xenomai, device management + * + * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de> + * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net> + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/notifier.h> +#include "rtdm/internal.h" +#include <cobalt/kernel/init.h> +#include <trace/events/cobalt-rtdm.h> + +/** + * @ingroup rtdm + * @defgroup rtdm_profiles Device Profiles + * + * Pre-defined classes of real-time devices + * + * Device profiles define which operation handlers a driver of a + * certain class of devices has to implement, which name or protocol + * it has to register, which IOCTLs it has to provide, and further + * details. Sub-classes can be defined in order to extend a device + * profile with more hardware-specific functions. + */ + +/** + * @addtogroup rtdm_driver_interface + * @{ + */ + +#define RTDM_DEVICE_MAGIC 0x82846877 + +static struct rb_root protocol_devices; + +static DEFINE_MUTEX(register_lock); +static DECLARE_BITMAP(protocol_devices_minor_map, RTDM_MAX_MINOR); + +static struct class *rtdm_class; + +static int enosys(void) +{ + return -ENOSYS; +} + +void __rtdm_put_device(struct rtdm_device *dev) +{ + secondary_mode_only(); + + if (atomic_dec_and_test(&dev->refcount)) + wake_up(&dev->putwq); +} + +static inline xnkey_t get_proto_id(int pf, int type) +{ + xnkey_t llpf = (unsigned int)pf; + return (llpf << 32) | (unsigned int)type; +} + +struct rtdm_device *__rtdm_get_namedev(const char *path) +{ + struct rtdm_device *dev; + xnhandle_t handle; + int ret; + + secondary_mode_only(); + + /* skip common /dev prefix */ + if (strncmp(path, "/dev/", 5) == 0) + path += 5; + + /* skip RTDM devnode root */ + if (strncmp(path, "rtdm/", 5) == 0) + path += 5; + + ret = xnregistry_bind(path, XN_NONBLOCK, XN_RELATIVE, &handle); + if (ret) + return NULL; + + mutex_lock(&register_lock); + + dev = xnregistry_lookup(handle, NULL); + if (dev && dev->magic == RTDM_DEVICE_MAGIC) + __rtdm_get_device(dev); + else + dev = NULL; + + mutex_unlock(&register_lock); + + return dev; +} + +struct rtdm_device *__rtdm_get_protodev(int protocol_family, int socket_type) +{ + struct rtdm_device *dev = NULL; + struct xnid *xnid; + xnkey_t id; + + secondary_mode_only(); + + id = get_proto_id(protocol_family, socket_type); + + mutex_lock(&register_lock); + + xnid = xnid_fetch(&protocol_devices, id); + if (xnid) { + dev = container_of(xnid, struct rtdm_device, proto.id); + __rtdm_get_device(dev); + } + + mutex_unlock(&register_lock); + + return dev; +} + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_device_register Device Registration Services + * @{ + */ + +static char *rtdm_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "rtdm/%s", dev_name(dev)); +} + +static ssize_t profile_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct rtdm_device *dev = dev_get_drvdata(kdev); + + return sprintf(buf, "%d,%d\n", + dev->driver->profile_info.class_id, + dev->driver->profile_info.subclass_id); +} + +static ssize_t refcount_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct rtdm_device *dev = dev_get_drvdata(kdev); +
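+	/* Report the current device reference count to sysfs readers. */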
+ return sprintf(buf, "%d\n", atomic_read(&dev->refcount)); +} + +#define cat_count(__buf, __str) \ + ({ \ + int __ret = sizeof(__str) - 1; \ + strcat(__buf, __str); \ + __ret; \ + }) + +static ssize_t flags_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct rtdm_device *dev = dev_get_drvdata(kdev); + struct rtdm_driver *drv = dev->driver; + + return sprintf(buf, "%#x\n", drv->device_flags); + +} + +static ssize_t type_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct rtdm_device *dev = dev_get_drvdata(kdev); + struct rtdm_driver *drv = dev->driver; + int ret; + + if (drv->device_flags & RTDM_NAMED_DEVICE) + ret = cat_count(buf, "named\n"); + else + ret = cat_count(buf, "protocol\n"); + + return ret; + +} + +#ifdef ATTRIBUTE_GROUPS + +static DEVICE_ATTR_RO(profile); +static DEVICE_ATTR_RO(refcount); +static DEVICE_ATTR_RO(flags); +static DEVICE_ATTR_RO(type); + +static struct attribute *rtdm_attrs[] = { + &dev_attr_profile.attr, + &dev_attr_refcount.attr, + &dev_attr_flags.attr, + &dev_attr_type.attr, + NULL, +}; +ATTRIBUTE_GROUPS(rtdm); + +#else /* !ATTRIBUTE_GROUPS */ + +/* + * Cope with legacy sysfs attributes. Scheduled for removal when 3.10 + * is at EOL for us. + */ +static struct device_attribute rtdm_attrs[] = { + DEVICE_ATTR_RO(profile), + DEVICE_ATTR_RO(refcount), + DEVICE_ATTR_RO(flags), + DEVICE_ATTR_RO(type), + __ATTR_NULL +}; + +#define dev_groups dev_attrs +#define rtdm_groups rtdm_attrs + +#endif /* !ATTRIBUTE_GROUPS */ + +static int state_change_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct rtdm_driver *drv; + int ret; + + drv = container_of(nb, struct rtdm_driver, nb_statechange); + + switch (action) { + case COBALT_STATE_WARMUP: + if (drv->smops.start == NULL) + return NOTIFY_DONE; + ret = drv->smops.start(drv); + if (ret) + printk(XENO_WARNING + "failed starting driver %s (%d)\n", + drv->profile_info.name, ret); + break; + case COBALT_STATE_TEARDOWN: + if (drv->smops.stop == NULL) + return NOTIFY_DONE; + ret = drv->smops.stop(drv); + if (ret) + printk(XENO_WARNING + "failed stopping driver %s (%d)\n", + drv->profile_info.name, ret); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static int register_driver(struct rtdm_driver *drv) +{ + dev_t rdev; + int ret; + + if (drv->profile_info.magic == RTDM_CLASS_MAGIC) { + atomic_inc(&drv->refcount); + return 0; + } + + if (drv->profile_info.magic != ~RTDM_CLASS_MAGIC) { + XENO_WARN_ON_ONCE(COBALT, 1); + return -EINVAL; + } + + switch (drv->device_flags & RTDM_DEVICE_TYPE_MASK) { + case RTDM_NAMED_DEVICE: + case RTDM_PROTOCOL_DEVICE: + break; + default: + printk(XENO_WARNING "%s has invalid device type (%#x)\n", + drv->profile_info.name, + drv->device_flags & RTDM_DEVICE_TYPE_MASK); + return -EINVAL; + } + + if (drv->device_count <= 0 || + drv->device_count > RTDM_MAX_MINOR) { + printk(XENO_WARNING "%s has invalid device count (%d)\n", + drv->profile_info.name, drv->device_count); + return -EINVAL; + } + + if ((drv->device_flags & RTDM_NAMED_DEVICE) == 0) + goto done; + + if (drv->base_minor < 0 || + drv->base_minor >= RTDM_MAX_MINOR) { + printk(XENO_WARNING "%s has invalid base minor (%d)\n", + drv->profile_info.name, drv->base_minor); + return -EINVAL; + } + + ret = alloc_chrdev_region(&rdev, drv->base_minor, drv->device_count, + drv->profile_info.name); + if (ret) { + printk(XENO_WARNING "cannot allocate chrdev region %s[%d..%d]\n", + drv->profile_info.name, drv->base_minor, + drv->base_minor + 
drv->device_count - 1); + return ret; + } + + cdev_init(&drv->named.cdev, &rtdm_dumb_fops); + ret = cdev_add(&drv->named.cdev, rdev, drv->device_count); + if (ret) { + printk(XENO_WARNING "cannot create cdev series for %s\n", + drv->profile_info.name); + goto fail_cdev; + } + + drv->named.major = MAJOR(rdev); + bitmap_zero(drv->minor_map, RTDM_MAX_MINOR); + +done: + atomic_set(&drv->refcount, 1); + drv->nb_statechange.notifier_call = state_change_notifier; + drv->nb_statechange.priority = 0; + cobalt_add_state_chain(&drv->nb_statechange); + drv->profile_info.magic = RTDM_CLASS_MAGIC; + + return 0; + +fail_cdev: + unregister_chrdev_region(rdev, drv->device_count); + + return ret; +} + +static void unregister_driver(struct rtdm_driver *drv) +{ + XENO_BUG_ON(COBALT, drv->profile_info.magic != RTDM_CLASS_MAGIC); + + if (!atomic_dec_and_test(&drv->refcount)) + return; + + cobalt_remove_state_chain(&drv->nb_statechange); + + drv->profile_info.magic = ~RTDM_CLASS_MAGIC; + + if (drv->device_flags & RTDM_NAMED_DEVICE) { + cdev_del(&drv->named.cdev); + unregister_chrdev_region(MKDEV(drv->named.major, drv->base_minor), + drv->device_count); + } +} + +/** + * @brief Register an RTDM device + * + * Registers a device in the RTDM namespace. + * + * @param[in] dev Device descriptor. + * + * @return 0 is returned upon success. Otherwise: + * + * - -EINVAL is returned if the descriptor contains invalid + * entries. RTDM_PROFILE_INFO() must appear in the list of + * initializers for the driver properties. + * + * - -EEXIST is returned if the specified device name or protocol ID is + * already in use. + * + * - -ENOMEM is returned if a memory allocation failed in the process + * of registering the device. + * + * - -EAGAIN is returned if no registry slot is available (check/raise + * CONFIG_XENO_OPT_REGISTRY_NRSLOTS). + * + * - -ENOSYS is returned if the real-time core is disabled. + * + * - -ENXIO is returned if no valid minor could be assigned. + * + * @coretags{secondary-only} + */ +int rtdm_dev_register(struct rtdm_device *dev) +{ + struct class *kdev_class = rtdm_class; + struct device *kdev = NULL; + struct rtdm_driver *drv; + int ret, major, minor; + xnkey_t id; + dev_t rdev; + const char *dev_name; + + secondary_mode_only(); + + if (!realtime_core_enabled()) + return -ENOSYS; + + mutex_lock(&register_lock); + + dev->name = NULL; + drv = dev->driver; + ret = register_driver(drv); + if (ret) { + mutex_unlock(&register_lock); + return ret; + } + + dev->ops = drv->ops; + if (drv->device_flags & RTDM_NAMED_DEVICE) { + dev->ops.socket = + (typeof(dev->ops.socket))(void (*)(void))enosys; + } else { + dev->ops.open = (typeof(dev->ops.open))(void (*)(void))enosys; + } + + INIT_LIST_HEAD(&dev->openfd_list); + init_waitqueue_head(&dev->putwq); + dev->ops.close = __rtdm_dev_close; /* Interpose on driver's handler.
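+ The core-side handler lets the core run its own cleanup whenever an open fd on this device goes away.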
*/ + atomic_set(&dev->refcount, 0); + + if (drv->profile_info.kdev_class) + kdev_class = drv->profile_info.kdev_class; + + if (drv->device_flags & RTDM_NAMED_DEVICE) { + if (drv->device_flags & RTDM_FIXED_MINOR) { + minor = dev->minor; + if (minor < 0 || + minor >= drv->base_minor + drv->device_count) { + ret = -ENXIO; + goto fail; + } + } else { + minor = find_first_zero_bit(drv->minor_map, RTDM_MAX_MINOR); + if (minor >= RTDM_MAX_MINOR) { + ret = -ENXIO; + goto fail; + } + dev->minor = minor; + } + + major = drv->named.major; + dev->name = kasformat(dev->label, minor); + if (dev->name == NULL) { + ret = -ENOMEM; + goto fail; + } + if (dev->name[0] == '/') { + dev_name = dev->name+1; + } else { + dev_name = dev->name; + } + ret = xnregistry_enter(dev_name, dev, + &dev->named.handle, NULL); + if (ret) + goto fail; + + rdev = MKDEV(major, minor); + kdev = device_create(kdev_class, NULL, rdev, + dev, kbasename(dev->label), minor); + if (IS_ERR(kdev)) { + xnregistry_remove(dev->named.handle); + ret = PTR_ERR(kdev); + goto fail2; + } + __set_bit(minor, drv->minor_map); + } else { + minor = find_first_zero_bit(protocol_devices_minor_map, + RTDM_MAX_MINOR); + if (minor >= RTDM_MAX_MINOR) { + ret = -ENXIO; + goto fail; + } + dev->minor = minor; + + dev->name = kstrdup(dev->label, GFP_KERNEL); + if (dev->name == NULL) { + ret = -ENOMEM; + goto fail; + } + + rdev = MKDEV(0, minor); + kdev = device_create(kdev_class, NULL, rdev, + dev, dev->name); + if (IS_ERR(kdev)) { + ret = PTR_ERR(kdev); + goto fail2; + } + + id = get_proto_id(drv->protocol_family, drv->socket_type); + ret = xnid_enter(&protocol_devices, &dev->proto.id, id); + if (ret < 0) + goto fail; + __set_bit(minor, protocol_devices_minor_map); + } + + dev->rdev = rdev; + dev->kdev = kdev; + dev->magic = RTDM_DEVICE_MAGIC; + dev->kdev_class = kdev_class; + + mutex_unlock(&register_lock); + + trace_cobalt_device_register(dev); + + return 0; +fail: + if (kdev) + device_destroy(kdev_class, rdev); +fail2: + unregister_driver(drv); + + mutex_unlock(&register_lock); + + if (dev->name) + kfree(dev->name); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_dev_register); + +/** + * @brief Unregister an RTDM device + * + * Removes the device from the RTDM namespace. This routine first + * attempts to tear down all active connections to the @a device prior + * to unregistering. + * + * @param[in] dev Device descriptor. + * + * @coretags{secondary-only} + */ +void rtdm_dev_unregister(struct rtdm_device *dev) +{ + struct rtdm_driver *drv = dev->driver; + + secondary_mode_only(); + + trace_cobalt_device_unregister(dev); + + /* Lock out any further connection. */ + dev->magic = ~RTDM_DEVICE_MAGIC; + + /* Flush all fds from this device. */ + rtdm_device_flush_fds(dev); + + /* Then wait for the ongoing connections to finish. */ + wait_event(dev->putwq, + atomic_read(&dev->refcount) == 0); + + mutex_lock(&register_lock); + + if (drv->device_flags & RTDM_NAMED_DEVICE) { + xnregistry_remove(dev->named.handle); + __clear_bit(dev->minor, drv->minor_map); + } else { + xnid_remove(&protocol_devices, &dev->proto.id); + __clear_bit(dev->minor, protocol_devices_minor_map); + } + + device_destroy(dev->kdev_class, dev->rdev); + + unregister_driver(drv); + + mutex_unlock(&register_lock); + + kfree(dev->name); +} +EXPORT_SYMBOL_GPL(rtdm_dev_unregister); + +/** + * @brief Set the kernel device class of an RTDM driver. + * + * Set the kernel device class assigned to the RTDM driver.
By + * default, RTDM drivers belong to Linux's "rtdm" device class, + * creating a device node hierarchy rooted at /dev/rtdm, and sysfs + * nodes under /sys/class/rtdm. + * + * This call assigns a user-defined kernel device class to the RTDM + * driver, so that its devices are created into a different system + * hierarchy. + * + * rtdm_drv_set_sysclass() is meaningful only before the first device + * which is attached to @a drv is registered by a call to + * rtdm_dev_register(). + * + * @param[in] drv Address of the RTDM driver descriptor. + * + * @param[in] cls Pointer to the kernel device class. NULL is allowed + * to clear a previous setting, switching back to the default "rtdm" + * device class. + * + * @return 0 on success, otherwise: + * + * - -EBUSY is returned if the kernel device class has already been + * set for @a drv, or some device(s) attached to @a drv are currently + * registered. + * + * @coretags{task-unrestricted} + * + * @attention The kernel device class set by this call is not related to + * the RTDM class identification as defined by the @ref rtdm_profiles + * "RTDM profiles" in any way. This is strictly related to the Linux + * kernel device hierarchy. + */ +int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls) +{ + if ((cls && drv->profile_info.kdev_class) || + atomic_read(&drv->refcount)) + return -EBUSY; + + drv->profile_info.kdev_class = cls; + + return 0; +} +EXPORT_SYMBOL_GPL(rtdm_drv_set_sysclass); + +/** @} */ + +int __init rtdm_init(void) +{ + xntree_init(&protocol_devices); + + rtdm_class = class_create(THIS_MODULE, "rtdm"); + if (IS_ERR(rtdm_class)) { + printk(XENO_ERR "cannot create RTDM sysfs class\n"); + return PTR_ERR(rtdm_class); + } + rtdm_class->dev_groups = rtdm_groups; + rtdm_class->devnode = rtdm_devnode; + + bitmap_zero(protocol_devices_minor_map, RTDM_MAX_MINOR); + + return 0; +} + +void rtdm_cleanup(void) +{ + class_destroy(rtdm_class); + /* + * NOTE: no need to flush the cleanup_queue as no device is + * allowed to unregister as long as there are references. + */ +} + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c new file mode 100644 index 0000000..99d54f5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c @@ -0,0 +1,2493 @@ +/* + * Real-Time Driver Model for Xenomai, driver library + * + * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de> + * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net> + * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/mman.h> +#include <asm/page.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <linux/highmem.h> +#include <linux/err.h> +#include <linux/anon_inodes.h> +#include <rtdm/driver.h> +#include <rtdm/compat.h> +#include <pipeline/inband_work.h> +#include "internal.h" +#include <trace/events/cobalt-rtdm.h> + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_clock Clock Services + * @{ + */ + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ + +/** + * @brief Get system time + * + * @return The system time in nanoseconds is returned + * + * @note The resolution of this service depends on the system timer. In + * particular, if the system timer is running in periodic mode, the return + * value will be limited to multiples of the timer tick period. + * + * @note The system timer may have to be started to obtain valid results. + * Whether this happens automatically (as on Xenomai) or is controlled by the + * application depends on the RTDM host environment. + * + * @coretags{unrestricted} + */ +nanosecs_abs_t rtdm_clock_read(void); + +/** + * @brief Get monotonic time + * + * @return The monotonic time in nanoseconds is returned + * + * @note The resolution of this service depends on the system timer. In + * particular, if the system timer is running in periodic mode, the return + * value will be limited to multiples of the timer tick period. + * + * @note The system timer may have to be started to obtain valid results. + * Whether this happens automatically (as on Xenomai) or is controlled by the + * application depends on the RTDM host environment. + * + * @coretags{unrestricted} + */ +nanosecs_abs_t rtdm_clock_read_monotonic(void); +#endif /* DOXYGEN_CPP */ +/** @} */ + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_task Task Services + * @{ + */ + +/** + * @brief Initialise and start a real-time task + * + * After initialising a task, the task handle remains valid and can be + * passed to RTDM services until either rtdm_task_destroy() or + * rtdm_task_join() has been invoked. + * + * @param[in,out] task Task handle + * @param[in] name Optional task name + * @param[in] task_proc Procedure to be executed by the task + * @param[in] arg Custom argument passed to @c task_proc() on entry + * @param[in] priority Priority of the task, see also + * @ref rtdmtaskprio "Task Priority Range" + * @param[in] period Period in nanoseconds of a cyclic task, 0 for non-cyclic + * mode. Waiting for the first and subsequent periodic events is + * done using rtdm_task_wait_period(). + * + * @return 0 on success, otherwise negative error code + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_task_init(rtdm_task_t *task, const char *name, + rtdm_task_proc_t task_proc, void *arg, + int priority, nanosecs_rel_t period) +{ + union xnsched_policy_param param; + struct xnthread_start_attr sattr; + struct xnthread_init_attr iattr; + int err; + + if (!realtime_core_enabled()) + return -ENOSYS; + + iattr.name = name; + iattr.flags = 0; + iattr.personality = &xenomai_personality; + iattr.affinity = CPU_MASK_ALL; + param.rt.prio = priority; + + err = xnthread_init(task, &iattr, &xnsched_class_rt, &param); + if (err) + return err; + + /* We need an anonymous registry entry to obtain a handle for fast + mutex locking.
*/ + err = xnthread_register(task, ""); + if (err) + goto cleanup_out; + + if (period > 0) { + err = xnthread_set_periodic(task, XN_INFINITE, + XN_RELATIVE, period); + if (err) + goto cleanup_out; + } + + sattr.mode = 0; + sattr.entry = task_proc; + sattr.cookie = arg; + err = xnthread_start(task, &sattr); + if (err) + goto cleanup_out; + + return 0; + + cleanup_out: + xnthread_cancel(task); + return err; +} + +EXPORT_SYMBOL_GPL(rtdm_task_init); + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ +/** + * @brief Destroy a real-time task + * + * This call sends a termination request to @a task, then waits for it + * to exit. All RTDM task should check for pending termination + * requests by calling rtdm_task_should_stop() from their work loop. + * + * If @a task is current, rtdm_task_destroy() terminates the current + * context, and does not return to the caller. + * + * @param[in,out] task Task handle as returned by rtdm_task_init() + * + * @note Passing the same task handle to RTDM services after the completion of + * this function is not allowed. + * + * @coretags{secondary-only, might-switch} + */ +void rtdm_task_destroy(rtdm_task_t *task); + +/** + * @brief Check for pending termination request + * + * Check whether a termination request was received by the current + * RTDM task. Termination requests are sent by calling + * rtdm_task_destroy(). + * + * @return Non-zero indicates that a termination request is pending, + * in which case the caller should wrap up and exit. + * + * @coretags{rtdm-task, might-switch} + */ +int rtdm_task_should_stop(void); + +/** + * @brief Adjust real-time task priority + * + * @param[in,out] task Task handle as returned by rtdm_task_init() + * @param[in] priority New priority of the task, see also + * @ref rtdmtaskprio "Task Priority Range" + * + * @coretags{task-unrestricted, might-switch} + */ +void rtdm_task_set_priority(rtdm_task_t *task, int priority); + +/** + * @brief Adjust real-time task period + * + * @param[in,out] task Task handle as returned by rtdm_task_init(), or + * NULL for referring to the current RTDM task or Cobalt thread. + * + * @param[in] start_date The initial (absolute) date of the first + * release point, expressed in nanoseconds. @a task will be delayed + * by the first call to rtdm_task_wait_period() until this point is + * reached. If @a start_date is zero, the first release point is set + * to @a period nanoseconds after the current date. + + * @param[in] period New period in nanoseconds of a cyclic task, zero + * to disable cyclic mode for @a task. + * + * @coretags{task-unrestricted} + */ +int rtdm_task_set_period(rtdm_task_t *task, nanosecs_abs_t start_date, + nanosecs_rel_t period); + +/** + * @brief Wait on next real-time task period + * + * @param[in] overruns_r Address of a long word receiving the count of + * overruns if -ETIMEDOUT is returned, or NULL if the caller don't + * need that information. + * + * @return 0 on success, otherwise: + * + * - -EINVAL is returned if calling task is not in periodic mode. + * + * - -ETIMEDOUT is returned if a timer overrun occurred, which indicates + * that a previous release point has been missed by the calling task. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_wait_period(unsigned long *overruns_r); + +/** + * @brief Activate a blocked real-time task + * + * @return Non-zero is returned if the task was actually unblocked from a + * pending wait state, 0 otherwise. 
+ * + * @coretags{unrestricted, might-switch} + */ +int rtdm_task_unblock(rtdm_task_t *task); + +/** + * @brief Get current real-time task + * + * @return Pointer to task handle + * + * @coretags{mode-unrestricted} + */ +rtdm_task_t *rtdm_task_current(void); + +/** + * @brief Sleep a specified amount of time + * + * @param[in] delay Delay in nanoseconds, see @ref RTDM_TIMEOUT_xxx for + * special values. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has been unblocked by a signal or + * explicitly via rtdm_task_unblock(). + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_sleep(nanosecs_rel_t delay); + +/** + * @brief Sleep until a specified absolute time + * + * @deprecated Use rtdm_task_sleep_abs instead! + * + * @param[in] wakeup_time Absolute timeout in nanoseconds + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has been unblocked by a signal or + * explicitly via rtdm_task_unblock(). + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_sleep_until(nanosecs_abs_t wakeup_time); + +/** + * @brief Sleep until a specified absolute time + * + * @param[in] wakeup_time Absolute timeout in nanoseconds + * @param[in] mode Selects the timer mode, see RTDM_TIMERMODE_xxx for details + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has been unblocked by a signal or + * explicitly via rtdm_task_unblock(). + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * - -EINVAL is returned if an invalid parameter was passed. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_sleep_abs(nanosecs_abs_t wakeup_time, enum rtdm_timer_mode mode); + +/** + * @brief Safe busy waiting + * + * This service alternates active spinning and sleeping within a wait + * loop, until a condition is satisfied. While sleeping, a task is + * scheduled out and does not consume any CPU time. + * + * rtdm_task_busy_wait() is particularly useful for waiting for a + * state change by reading an I/O register, which usually happens shortly + * after the wait starts, without incurring the adverse effects of + * long busy waiting if it doesn't. + * + * @param[in] condition The C expression to be tested for detecting + * completion. + * @param[in] spin_ns The time to spin on @a condition before + * sleeping, expressed as a count of nanoseconds. + * @param[in] sleep_ns The time to sleep for before spinning again, + * expressed as a count of nanoseconds. + * + * @return 0 on success if @a condition is satisfied, otherwise: + * + * - -EINTR is returned if the calling task has been unblocked by a + * Linux signal or explicitly via rtdm_task_unblock(). + * + * - -EPERM may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_busy_wait(bool condition, nanosecs_rel_t spin_ns, + nanosecs_rel_t sleep_ns); + +/** + * @brief Register wait context + * + * rtdm_wait_prepare() registers a wait context structure for the + * caller, which can be later retrieved by a call to + * rtdm_wait_get_context(). This call is normally issued before the + * current task blocks on a wait object, waiting for some (producer) + * code to wake it up.
Arbitrary data can be exchanged between both + * sites via the wait context structure, which is allocated by the + * waiter (consumer) side. + * + * @a wc is the address of an anchor object which is commonly embedded + * into a larger structure with arbitrary contents, which needs to be + * shared between the consumer (waiter) and the producer for + * implementing the wait code. + * + * A typical implementation pattern for the wait side is: + * + * @code + * struct rtdm_waitqueue wq; + * struct some_wait_context { + * int input_value; + * int output_value; + * struct rtdm_wait_context wc; + * } wait_context; + * + * wait_context.input_value = 42; + * rtdm_wait_prepare(&wait_context); + * ret = rtdm_wait_condition(&wq, rtdm_wait_is_completed(&wait_context)); + * if (ret) + * goto wait_failed; + * handle_event(wait_context.output_value); + * @endcode + * + * On the producer side, the implementation would look like: + * + * @code + * struct rtdm_waitqueue wq; + * struct some_wait_context { + * int input_value; + * int output_value; + * struct rtdm_wait_context wc; + * } *wait_context_ptr; + * struct rtdm_wait_context *wc; + * rtdm_task_t *task; + * + * rtdm_for_each_waiter(task, &wq) { + * wc = rtdm_wait_get_context(task); + * wait_context_ptr = container_of(wc, struct some_wait_context, wc); + * wait_context_ptr->output_value = 12; + * } + * rtdm_waitqueue_broadcast(&wq); + * @endcode + * + * @param wc Wait context to register. + */ +void rtdm_wait_prepare(struct rtdm_wait_context *wc); + +/** + * @brief Mark completion for a wait context + * + * rtdm_wait_complete() marks a wait context as completed, so that + * rtdm_wait_is_completed() returns true for such context. + * + * @param wc Wait context to complete. + */ +void rtdm_wait_complete(struct rtdm_wait_context *wc); + +/** + * @brief Test completion of a wait context + * + * rtdm_wait_is_completed() returns true if rtdm_wait_complete() was + * called for @a wc. The completion mark is reset each time + * rtdm_wait_prepare() is called for a wait context. + * + * @param wc Wait context to check for completion. + * + * @return non-zero/true if rtdm_wait_complete() was called for @a wc, + * zero otherwise. + */ +int rtdm_wait_is_completed(struct rtdm_wait_context *wc); + +#endif /* DOXYGEN_CPP */ + +int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode) +{ + struct xnthread *thread; + + if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p())) + return -EPERM; + + thread = xnthread_current(); + xnthread_suspend(thread, XNDELAY, timeout, mode, NULL); + + return xnthread_test_info(thread, XNBREAK) ? -EINTR : 0; +} + +EXPORT_SYMBOL_GPL(__rtdm_task_sleep); + +/** + * @brief Wait on a real-time task to terminate + * + * @param[in,out] task Task handle as returned by rtdm_task_init() + * + * @note Passing the same task handle to RTDM services after the + * completion of this function is not allowed. + * + * @note This service does not trigger the termination of the targeted + * task. The user has to take care of this, otherwise rtdm_task_join() + * will never return. + * + * @coretags{mode-unrestricted} + */ +void rtdm_task_join(rtdm_task_t *task) +{ + trace_cobalt_driver_task_join(task); + + xnthread_join(task, true); +} + +EXPORT_SYMBOL_GPL(rtdm_task_join); + +/** + * @brief Busy-wait a specified amount of time + * + * This service does not schedule out the caller, but rather spins in + * a tight loop, burning CPU cycles until the timeout elapses. + * + * @param[in] delay Delay in nanoseconds.
Note that a zero delay does @b not + * have the meaning of @c RTDM_TIMEOUT_INFINITE here. + * + * @note The caller must not be migratable to different CPUs while executing + * this service. Otherwise, the actual delay will be undefined. + * + * @coretags{unrestricted} + */ +void rtdm_task_busy_sleep(nanosecs_rel_t delay) +{ + xnticks_t wakeup; + + wakeup = xnclock_read_raw(&nkclock) + + xnclock_ns_to_ticks(&nkclock, delay); + + while ((xnsticks_t)(xnclock_read_raw(&nkclock) - wakeup) < 0) + cpu_relax(); +} + +EXPORT_SYMBOL_GPL(rtdm_task_busy_sleep); +/** @} */ + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_timer Timer Services + * @{ + */ + +/** + * @brief Initialise a timer + * + * @param[in,out] timer Timer handle + * @param[in] handler Handler to be called on timer expiry + * @param[in] name Optional timer name + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + */ +int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler, + const char *name) +{ + if (!realtime_core_enabled()) + return -ENOSYS; + + xntimer_init((timer), &nkclock, handler, NULL, XNTIMER_IGRAVITY); + xntimer_set_name((timer), (name)); + return 0; +} + +EXPORT_SYMBOL_GPL(rtdm_timer_init); + +/** + * @brief Destroy a timer + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * + * @coretags{task-unrestricted} + */ +void rtdm_timer_destroy(rtdm_timer_t *timer) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + xntimer_destroy(timer); + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_timer_destroy); + +/** + * @brief Start a timer + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * @param[in] expiry Firing time of the timer, @c mode defines if relative or + * absolute + * @param[in] interval Relative reload value, > 0 if the timer shall work in + * periodic mode with the specific interval, 0 for one-shot timers + * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for + * possible values + * + * @return 0 on success, otherwise: + * + * - -ETIMEDOUT is returned if @c expiry describes an absolute date in + * the past. In such an event, the timer is nevertheless armed for the + * next shot in the timeline if @a interval is non-zero. 
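+ * + * A minimal sketch (illustrative only; my_timer and my_handler are hypothetical names declared elsewhere): + * @code + * rtdm_timer_init(&my_timer, my_handler, "my-timer"); + * // Fire 1 ms from now, then periodically every 1 ms: + * rtdm_timer_start(&my_timer, 1000000ULL, 1000000ULL, + * RTDM_TIMERMODE_RELATIVE); + * @endcode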
+ * + * @coretags{unrestricted} + */ +int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry, + nanosecs_rel_t interval, enum rtdm_timer_mode mode) +{ + spl_t s; + int err; + + xnlock_get_irqsave(&nklock, s); + err = xntimer_start(timer, expiry, interval, (xntmode_t)mode); + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +EXPORT_SYMBOL_GPL(rtdm_timer_start); + +/** + * @brief Stop a timer + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * + * @coretags{unrestricted} + */ +void rtdm_timer_stop(rtdm_timer_t *timer) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + xntimer_stop(timer); + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_timer_stop); + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ +/** + * @brief Start a timer from inside a timer handler + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * @param[in] expiry Firing time of the timer, @c mode defines if relative or + * absolute + * @param[in] interval Relative reload value, > 0 if the timer shall work in + * periodic mode with the specific interval, 0 for one-shot timers + * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for + * possible values + * + * @return 0 on success, otherwise: + * + * - -ETIMEDOUT is returned if @c expiry describes an absolute date in the + * past. + * + * @coretags{coreirq-only} + */ +int rtdm_timer_start_in_handler(rtdm_timer_t *timer, nanosecs_abs_t expiry, + nanosecs_rel_t interval, + enum rtdm_timer_mode mode); + +/** + * @brief Stop a timer from inside a timer handler + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * + * @coretags{coreirq-only} + */ +void rtdm_timer_stop_in_handler(rtdm_timer_t *timer); +#endif /* DOXYGEN_CPP */ +/** @} */ + +/* --- IPC cleanup helper --- */ + +#define RTDM_SYNCH_DELETED XNSYNCH_SPARE0 + +void __rtdm_synch_flush(struct xnsynch *synch, unsigned long reason) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (reason == XNRMID) + xnsynch_set_status(synch, RTDM_SYNCH_DELETED); + + if (likely(xnsynch_flush(synch, reason) == XNSYNCH_RESCHED)) + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(__rtdm_synch_flush); + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_sync Synchronisation Services + * @{ + */ + +/*! + * @name Timeout Sequence Management + * @{ + */ + +/** + * @brief Initialise a timeout sequence + * + * This service initialises a timeout sequence handle according to the given + * timeout value. Timeout sequences allow maintaining a continuous @a timeout + * across multiple calls of blocking synchronisation services. A typical + * application scenario is given below. + * + * @param[in,out] timeout_seq Timeout sequence handle + * @param[in] timeout Relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values + * + * Application Scenario: + * @code +int device_service_routine(...) +{ + rtdm_toseq_t timeout_seq; + ... + + rtdm_toseq_init(&timeout_seq, timeout); + ... + while (received < requested) { + ret = rtdm_event_timedwait(&data_available, timeout, &timeout_seq); + if (ret < 0) // including -ETIMEDOUT + break; + + // receive some data + ... + } + ... +} + * @endcode + * Using a timeout sequence in such a scenario prevents the user-provided + * relative @c timeout from being restarted on every call to rtdm_event_timedwait(), + * which could otherwise cause an overall delay larger than specified by + * @c timeout.
Moreover, all functions supporting timeout sequences also + * interpret special timeout values (infinite and non-blocking), + * disburdening the driver developer from handling them separately. + * + * @coretags{task-unrestricted} + */ +void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout) +{ + XENO_WARN_ON(COBALT, xnsched_unblockable_p()); /* only warn here */ + + *timeout_seq = xnclock_read_monotonic(&nkclock) + timeout; +} + +EXPORT_SYMBOL_GPL(rtdm_toseq_init); + +/** @} */ + +/** + * @ingroup rtdm_sync + * @defgroup rtdm_sync_event Event Services + * @{ + */ + +/** + * @brief Initialise an event + * + * @param[in,out] event Event handle + * @param[in] pending Non-zero if event shall be initialised as set, 0 otherwise + * + * @coretags{task-unrestricted} + */ +void rtdm_event_init(rtdm_event_t *event, unsigned long pending) +{ + spl_t s; + + trace_cobalt_driver_event_init(event, pending); + + /* Make atomic for re-initialisation support */ + xnlock_get_irqsave(&nklock, s); + + xnsynch_init(&event->synch_base, XNSYNCH_PRIO, NULL); + if (pending) + xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING); + xnselect_init(&event->select_block); + + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_event_init); + +/** + * @brief Destroy an event + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * + * @coretags{task-unrestricted, might-switch} + */ +void rtdm_event_destroy(rtdm_event_t *event) +{ + trace_cobalt_driver_event_destroy(event); + if (realtime_core_enabled()) { + __rtdm_synch_flush(&event->synch_base, XNRMID); + xnselect_destroy(&event->select_block); + } +} +EXPORT_SYMBOL_GPL(rtdm_event_destroy); + +/** + * @brief Signal an event occurrence to currently listening waiters + * + * This function wakes up all current waiters of the given event, but it does + * not change the event state. Subsequent callers of rtdm_event_wait() or + * rtdm_event_timedwait() will therefore be blocked first. + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_event_pulse(rtdm_event_t *event) +{ + trace_cobalt_driver_event_pulse(event); + __rtdm_synch_flush(&event->synch_base, 0); +} +EXPORT_SYMBOL_GPL(rtdm_event_pulse); + +/** + * @brief Signal an event occurrence + * + * This function sets the given event and wakes up all current waiters. If no + * waiter is presently registered, the next call to rtdm_event_wait() or + * rtdm_event_timedwait() will return immediately. + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_event_signal(rtdm_event_t *event) +{ + int resched = 0; + spl_t s; + + trace_cobalt_driver_event_signal(event); + + xnlock_get_irqsave(&nklock, s); + + xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING); + if (xnsynch_flush(&event->synch_base, 0)) + resched = 1; + if (xnselect_signal(&event->select_block, 1)) + resched = 1; + if (resched) + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_event_signal); + +/** + * @brief Wait on event occurrence + * + * This is the light-weight version of rtdm_event_timedwait(), implying an + * infinite timeout. + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has been unblocked by a signal or + * explicitly via rtdm_task_unblock().
+ * + * - -EIDRM is returned if @a event has been destroyed. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_event_wait(rtdm_event_t *event) +{ + return rtdm_event_timedwait(event, 0, NULL); +} + +EXPORT_SYMBOL_GPL(rtdm_event_wait); + +/** + * @brief Wait on event occurrence with timeout + * + * This function waits or tests for the occurrence of the given event, taking + * the provided timeout into account. On successful return, the event is + * reset. + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * @param[in] timeout Relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values + * @param[in,out] timeout_seq Handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL + * + * @return 0 on success, otherwise: + * + * - -ETIMEDOUT is returned if the request has not been satisfied + * within the specified amount of time. + * + * - -EINTR is returned if calling task has been unblocked by a signal or + * explicitly via rtdm_task_unblock(). + * + * - -EIDRM is returned if @a event has been destroyed. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * - -EWOULDBLOCK is returned if a negative @a timeout (i.e., non-blocking + * operation) has been specified. + * + * @coretags{primary-timed, might-switch} + */ +int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq) +{ + struct xnthread *thread; + int err = 0, ret; + spl_t s; + + if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p())) + return -EPERM; + + trace_cobalt_driver_event_wait(event, xnthread_current()); + + xnlock_get_irqsave(&nklock, s); + + if (unlikely(event->synch_base.status & RTDM_SYNCH_DELETED)) + err = -EIDRM; + else if (likely(event->synch_base.status & RTDM_EVENT_PENDING)) { + xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING); + xnselect_signal(&event->select_block, 0); + } else { + /* non-blocking mode */ + if (timeout < 0) { + err = -EWOULDBLOCK; + goto unlock_out; + } + + thread = xnthread_current(); + + if (timeout_seq && (timeout > 0)) + /* timeout sequence */ + ret = xnsynch_sleep_on(&event->synch_base, *timeout_seq, + XN_ABSOLUTE); + else + /* infinite or relative timeout */ + ret = xnsynch_sleep_on(&event->synch_base, timeout, XN_RELATIVE); + + if (likely(ret == 0)) { + xnsynch_clear_status(&event->synch_base, + RTDM_EVENT_PENDING); + xnselect_signal(&event->select_block, 0); + } else if (ret & XNTIMEO) + err = -ETIMEDOUT; + else if (ret & XNRMID) + err = -EIDRM; + else /* XNBREAK */ + err = -EINTR; + } + +unlock_out: + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +EXPORT_SYMBOL_GPL(rtdm_event_timedwait); + +/** + * @brief Clear event state + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * + * @coretags{unrestricted} + */ +void rtdm_event_clear(rtdm_event_t *event) +{ + spl_t s; + + trace_cobalt_driver_event_clear(event); + + xnlock_get_irqsave(&nklock, s); + + xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING); + xnselect_signal(&event->select_block, 0); + + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_event_clear); + +/** + * @brief Bind a selector to an event + * + * This function binds the given selector to an event so that the former is + * notified when the event state changes. Typically the select binding handler + * will invoke this service.
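+ * + * A minimal sketch of such a handler (illustrative only; my_context and rx_event are hypothetical names, and the handler signature follows the usual RTDM select binding convention): + * @code + * static int my_select(struct rtdm_fd *fd, struct xnselector *selector, + * unsigned int type, unsigned int fd_index) + * { + * struct my_context *ctx = rtdm_fd_to_private(fd); + * + * return rtdm_event_select(&ctx->rx_event, selector, + * type, fd_index); + * } + * @endcode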
+ * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * @param[in,out] selector Selector as passed to the select binding handler + * @param[in] type Type of the bound event as passed to the select binding handler + * @param[in] fd_index File descriptor index as passed to the select binding + * handler + * + * @return 0 on success, otherwise: + * + * - -ENOMEM is returned if there is insufficient memory to establish the + * dynamic binding. + * + * - -EINVAL is returned if @a type or @a fd_index are invalid. + * + * @coretags{task-unrestricted} + */ +int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned int fd_index) +{ + struct xnselect_binding *binding; + int err; + spl_t s; + + binding = xnmalloc(sizeof(*binding)); + if (!binding) + return -ENOMEM; + + xnlock_get_irqsave(&nklock, s); + err = xnselect_bind(&event->select_block, + binding, selector, type, fd_index, + event->synch_base.status & (RTDM_SYNCH_DELETED | + RTDM_EVENT_PENDING)); + xnlock_put_irqrestore(&nklock, s); + + if (err) + xnfree(binding); + + return err; +} +EXPORT_SYMBOL_GPL(rtdm_event_select); + +/** @} */ + +/** + * @ingroup rtdm_sync + * @defgroup rtdm_sync_sem Semaphore Services + * @{ + */ + +/** + * @brief Initialise a semaphore + * + * @param[in,out] sem Semaphore handle + * @param[in] value Initial value of the semaphore + * + * @coretags{task-unrestricted} + */ +void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value) +{ + spl_t s; + + trace_cobalt_driver_sem_init(sem, value); + + /* Make atomic for re-initialisation support */ + xnlock_get_irqsave(&nklock, s); + + sem->value = value; + xnsynch_init(&sem->synch_base, XNSYNCH_PRIO, NULL); + xnselect_init(&sem->select_block); + + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_sem_init); + +/** + * @brief Destroy a semaphore + * + * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init() + * + * @coretags{task-unrestricted, might-switch} + */ +void rtdm_sem_destroy(rtdm_sem_t *sem) +{ + trace_cobalt_driver_sem_destroy(sem); + if (realtime_core_enabled()) { + __rtdm_synch_flush(&sem->synch_base, XNRMID); + xnselect_destroy(&sem->select_block); + } +} +EXPORT_SYMBOL_GPL(rtdm_sem_destroy); + +/** + * @brief Decrement a semaphore + * + * This is the light-weight version of rtdm_sem_timeddown(), implying an + * infinite timeout. + * + * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init() + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has been unblocked by a signal or + * explicitly via rtdm_task_unblock(). + * + * - -EIDRM is returned if @a sem has been destroyed. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_sem_down(rtdm_sem_t *sem) +{ + return rtdm_sem_timeddown(sem, 0, NULL); +} + +EXPORT_SYMBOL_GPL(rtdm_sem_down); + +/** + * @brief Decrement a semaphore with timeout + * + * This function tries to decrement the given semaphore's value if it is + * positive on entry. If not, the caller is blocked unless non-blocking + * operation was selected.
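+ * + * A minimal sketch combining this service with a timeout sequence, in the spirit of the rtdm_toseq_init() example (rx_sem, total_timeout and consume_chunk() are illustrative names): + * @code + * rtdm_toseq_t toseq; + * + * rtdm_toseq_init(&toseq, total_timeout); + * while (bytes_left > 0) { + * ret = rtdm_sem_timeddown(&rx_sem, total_timeout, &toseq); + * if (ret) // -ETIMEDOUT, -EINTR, -EIDRM, ... + * break; + * consume_chunk(); + * } + * @endcode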
+ * + * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init() + * @param[in] timeout Relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values + * @param[in,out] timeout_seq Handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL + * + * @return 0 on success, otherwise: + * + * - -ETIMEDOUT is returned if the request has not been satisfied + * within the specified amount of time. + * + * - -EWOULDBLOCK is returned if @a timeout is negative and the semaphore + * value is currently not positive. + * + * - -EINTR is returned if calling task has been unblocked by a signal or + * explicitly via rtdm_task_unblock(). + * + * - -EIDRM is returned if @a sem has been destroyed. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-timed, might-switch} + */ +int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq) +{ + struct xnthread *thread; + int err = 0, ret; + spl_t s; + + if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p())) + return -EPERM; + + trace_cobalt_driver_sem_wait(sem, xnthread_current()); + + xnlock_get_irqsave(&nklock, s); + + if (unlikely(sem->synch_base.status & RTDM_SYNCH_DELETED)) + err = -EIDRM; + else if (sem->value > 0) { + if (!--sem->value) + xnselect_signal(&sem->select_block, 0); + } else if (timeout < 0) /* non-blocking mode */ + err = -EWOULDBLOCK; + else { + thread = xnthread_current(); + + if (timeout_seq && timeout > 0) + /* timeout sequence */ + ret = xnsynch_sleep_on(&sem->synch_base, *timeout_seq, + XN_ABSOLUTE); + else + /* infinite or relative timeout */ + ret = xnsynch_sleep_on(&sem->synch_base, timeout, XN_RELATIVE); + + if (ret) { + if (ret & XNTIMEO) + err = -ETIMEDOUT; + else if (ret & XNRMID) + err = -EIDRM; + else /* XNBREAK */ + err = -EINTR; + } + } + + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +EXPORT_SYMBOL_GPL(rtdm_sem_timeddown); + +/** + * @brief Increment a semaphore + * + * This function increments the given semaphore's value, waking up a potential + * waiter which was blocked upon rtdm_sem_down(). + * + * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init() + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_sem_up(rtdm_sem_t *sem) +{ + spl_t s; + + trace_cobalt_driver_sem_up(sem); + + xnlock_get_irqsave(&nklock, s); + + if (xnsynch_wakeup_one_sleeper(&sem->synch_base)) + xnsched_run(); + else + if (sem->value++ == 0 + && xnselect_signal(&sem->select_block, 1)) + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_sem_up); + +/** + * @brief Bind a selector to a semaphore + * + * This function binds the given selector to the semaphore so that the former + * is notified when the semaphore state changes. Typically the select binding + * handler will invoke this service. + * + * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init() + * @param[in,out] selector Selector as passed to the select binding handler + * @param[in] type Type of the bound event as passed to the select binding handler + * @param[in] fd_index File descriptor index as passed to the select binding + * handler + * + * @return 0 on success, otherwise: + * + * - -ENOMEM is returned if there is insufficient memory to establish the + * dynamic binding. + * + * - -EINVAL is returned if @a type or @a fd_index are invalid.
+ * + * @coretags{task-unrestricted} + */ +int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned int fd_index) +{ + struct xnselect_binding *binding; + int err; + spl_t s; + + binding = xnmalloc(sizeof(*binding)); + if (!binding) + return -ENOMEM; + + xnlock_get_irqsave(&nklock, s); + err = xnselect_bind(&sem->select_block, binding, selector, + type, fd_index, + (sem->value > 0) || + sem->synch_base.status & RTDM_SYNCH_DELETED); + xnlock_put_irqrestore(&nklock, s); + + if (err) + xnfree(binding); + + return err; +} +EXPORT_SYMBOL_GPL(rtdm_sem_select); + +/** @} */ + +/** + * @ingroup rtdm_sync + * @defgroup rtdm_sync_mutex Mutex services + * @{ + */ + +/** + * @brief Initialise a mutex + * + * This function initialises a basic mutex with priority inversion protection. + * "Basic", as it does not allow a mutex owner to recursively lock the same + * mutex again. + * + * @param[in,out] mutex Mutex handle + * + * @coretags{task-unrestricted} + */ +void rtdm_mutex_init(rtdm_mutex_t *mutex) +{ + spl_t s; + + /* Make atomic for re-initialisation support */ + xnlock_get_irqsave(&nklock, s); + xnsynch_init(&mutex->synch_base, XNSYNCH_PI, &mutex->fastlock); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(rtdm_mutex_init); + +/** + * @brief Destroy a mutex + * + * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init() + * + * @coretags{task-unrestricted, might-switch} + */ +void rtdm_mutex_destroy(rtdm_mutex_t *mutex) +{ + trace_cobalt_driver_mutex_destroy(mutex); + + if (realtime_core_enabled()) + __rtdm_synch_flush(&mutex->synch_base, XNRMID); +} +EXPORT_SYMBOL_GPL(rtdm_mutex_destroy); + +/** + * @brief Release a mutex + * + * This function releases the given mutex, waking up a potential waiter which + * was blocked upon rtdm_mutex_lock() or rtdm_mutex_timedlock(). + * + * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init() + * + * @coretags{primary-only, might-switch} + */ +void rtdm_mutex_unlock(rtdm_mutex_t *mutex) +{ + if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p())) + return; + + trace_cobalt_driver_mutex_release(mutex); + + if (unlikely(xnsynch_release(&mutex->synch_base, + xnsched_current_thread()))) + xnsched_run(); +} +EXPORT_SYMBOL_GPL(rtdm_mutex_unlock); + +/** + * @brief Request a mutex + * + * This is the light-weight version of rtdm_mutex_timedlock(), implying an + * infinite timeout. + * + * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init() + * + * @return 0 on success, otherwise: + * + * - -EIDRM is returned if @a mutex has been destroyed. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_mutex_lock(rtdm_mutex_t *mutex) +{ + return rtdm_mutex_timedlock(mutex, 0, NULL); +} + +EXPORT_SYMBOL_GPL(rtdm_mutex_lock); + +/** + * @brief Request a mutex with timeout + * + * This function tries to acquire the given mutex. If it is not available, the + * caller is blocked unless non-blocking operation was selected. + * + * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init() + * @param[in] timeout Relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values + * @param[in,out] timeout_seq Handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL + * + * @return 0 on success, otherwise: + * + * - -ETIMEDOUT is returned if the request has not been satisfied + * within the specified amount of time.
+ * + * - -EWOULDBLOCK is returned if @a timeout is negative and the mutex + * is not immediately available. + * + * - -EIDRM is returned if @a mutex has been destroyed. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq) +{ + struct xnthread *curr; + int ret; + spl_t s; + + if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p())) + return -EPERM; + + curr = xnthread_current(); + trace_cobalt_driver_mutex_wait(mutex, curr); + + xnlock_get_irqsave(&nklock, s); + + if (unlikely(mutex->synch_base.status & RTDM_SYNCH_DELETED)) { + ret = -EIDRM; + goto out; + } + + ret = xnsynch_try_acquire(&mutex->synch_base); + if (ret != -EBUSY) + goto out; + + if (timeout < 0) { + ret = -EWOULDBLOCK; + goto out; + } + + for (;;) { + if (timeout_seq && timeout > 0) /* timeout sequence */ + ret = xnsynch_acquire(&mutex->synch_base, *timeout_seq, + XN_ABSOLUTE); + else /* infinite or relative timeout */ + ret = xnsynch_acquire(&mutex->synch_base, timeout, + XN_RELATIVE); + if (ret == 0) + break; + if (ret & XNBREAK) + continue; + ret = ret & XNTIMEO ? -ETIMEDOUT : -EIDRM; + break; + } +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +EXPORT_SYMBOL_GPL(rtdm_mutex_timedlock); +/** @} */ + +/** @} Synchronisation services */ + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_irq Interrupt Management Services + * @{ + */ + +/** + * @brief Register an interrupt handler + * + * This function registers the provided handler with an IRQ line and enables + * the line. + * + * @param[in,out] irq_handle IRQ handle + * @param[in] irq_no Line number of the addressed IRQ + * @param[in] handler Interrupt handler + * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details + * @param[in] device_name Device name to show up in real-time IRQ lists + * @param[in] arg Pointer to be passed to the interrupt handler on invocation + * + * @return 0 on success, otherwise: + * + * - -EINVAL is returned if an invalid parameter was passed. + * + * - -EBUSY is returned if the specified IRQ line is already in use. + * + * - -ENOSYS is returned if the real-time core is disabled. + * + * @coretags{secondary-only} + */ +int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no, + rtdm_irq_handler_t handler, unsigned long flags, + const char *device_name, void *arg) +{ + return rtdm_irq_request_affine(irq_handle, irq_no, handler, flags, + device_name, arg, NULL); +} + +EXPORT_SYMBOL_GPL(rtdm_irq_request); + +/** + * @brief Register an interrupt handler with CPU affinity + * + * This function registers the provided handler with an IRQ line and enables + * the line. + * + * @param[in,out] irq_handle IRQ handle + * @param[in] irq_no Line number of the addressed IRQ + * @param[in] handler Interrupt handler + * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details + * @param[in] device_name Device name to show up in real-time IRQ lists + * @param[in] arg Pointer to be passed to the interrupt handler on invocation + * @param[in] cpumask CPU affinity of the interrupt + * + * @return 0 on success, otherwise: + * + * - -EINVAL is returned if an invalid parameter was passed. + * + * - -EBUSY is returned if the specified IRQ line is already in use. + * + * - -ENOSYS is returned if the real-time core is disabled.
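+ * + * A minimal sketch pinning a device interrupt to CPU 0 (illustrative only; my_dev, my_handler and my_dev->irq are hypothetical): + * @code + * cpumask_t mask = *cpumask_of(0); + * int ret; + * + * ret = rtdm_irq_request_affine(&my_dev->irq_handle, my_dev->irq, + * my_handler, 0, "my-driver", + * my_dev, &mask); + * @endcode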
+ * + * @coretags{secondary-only} + */ +int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no, + rtdm_irq_handler_t handler, unsigned long flags, + const char *device_name, void *arg, + const cpumask_t *cpumask) +{ + int err; + + if (!realtime_core_enabled()) + return -ENOSYS; + + if (!XENO_ASSERT(COBALT, xnsched_root_p())) + return -EPERM; + + err = xnintr_init(irq_handle, device_name, irq_no, handler, NULL, flags); + if (err) + return err; + + err = xnintr_attach(irq_handle, arg, cpumask); + if (err) { + xnintr_destroy(irq_handle); + return err; + } + + xnintr_enable(irq_handle); + + return 0; +} + +EXPORT_SYMBOL_GPL(rtdm_irq_request_affine); + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ +/** + * @brief Release an interrupt handler + * + * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request() + * + * @return 0 on success, otherwise negative error code + * + * @note The caller is responsible for shutting down the IRQ source at device + * level before invoking this service. In turn, rtdm_irq_free ensures that any + * pending event on the given IRQ line is fully processed on return from this + * service. + * + * @coretags{secondary-only} + */ +int rtdm_irq_free(rtdm_irq_t *irq_handle); + +/** + * @brief Enable interrupt line + * + * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request() + * + * @return 0 on success, otherwise negative error code + * + * @note This service is for exceptional use only. Drivers should + * always prefer interrupt masking at device level (via corresponding + * control registers etc.) over masking at line level. Keep in mind + * that the latter is incompatible with IRQ line sharing and can also + * be more costly as interrupt controller access requires broader + * synchronization. Also, such service is solely available from + * secondary mode. The caller is responsible for excluding such + * conflicts. + * + * @coretags{secondary-only} + */ +int rtdm_irq_enable(rtdm_irq_t *irq_handle); + +/** + * @brief Disable interrupt line + * + * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request() + * + * @return 0 on success, otherwise negative error code + * + * @note This service is for exceptional use only. Drivers should + * always prefer interrupt masking at device level (via corresponding + * control registers etc.) over masking at line level. Keep in mind + * that the latter is incompatible with IRQ line sharing and can also + * be more costly as interrupt controller access requires broader + * synchronization. Also, such service is solely available from + * secondary mode. The caller is responsible for excluding such + * conflicts. + * + * @coretags{secondary-only} + */ +int rtdm_irq_disable(rtdm_irq_t *irq_handle); + +/** + * @brief Set interrupt affinity + * + * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request() + * + * @param[in] cpumask The new CPU affinity of the interrupt + * + * @return 0 on success, otherwise negative error code + * + * @coretags{secondary-only} + */ +int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle, const cpumask_t *cpumask); +#endif /* DOXYGEN_CPP */ + +/** @} Interrupt Management Services */ + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_nrtsignal Non-Real-Time Signalling Services + * + * These services provide a mechanism to request the execution of a specified + * handler in non-real-time context. The triggering can safely be performed in + * real-time context without suffering from unknown delays. 
The handler
+ * execution will be deferred until the next time the real-time subsystem
+ * releases the CPU to the non-real-time part.
+ * @{
+ */
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @brief Register a non-real-time signal handler
+ *
+ * @param[in,out] nrt_sig Signal handle
+ * @param[in] handler Non-real-time signal handler
+ * @param[in] arg Custom argument passed to @c handler() on each invocation
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EAGAIN is returned if no free signal slot is available.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig, rtdm_nrtsig_handler_t handler,
+		     void *arg);
+
+/**
+ * @brief Release a non-real-time signal handler
+ *
+ * @param[in,out] nrt_sig Signal handle
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig);
+#endif /* DOXYGEN_CPP */
+
+void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work)
+{
+	struct rtdm_nrtsig *nrt_sig;
+
+	nrt_sig = container_of(inband_work, typeof(*nrt_sig), inband_work);
+	nrt_sig->handler(nrt_sig, nrt_sig->arg);
+}
+EXPORT_SYMBOL_GPL(__rtdm_nrtsig_execute);
+
+/**
+ * Trigger non-real-time signal
+ *
+ * @param[in,out] nrt_sig Signal handle
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig)
+{
+	pipeline_post_inband_work(nrt_sig);
+}
+EXPORT_SYMBOL_GPL(rtdm_nrtsig_pend);
+
+static LIST_HEAD(nrt_work_list);
+DEFINE_PRIVATE_XNLOCK(nrt_work_lock);
+
+static void lostage_schedule_work(struct pipeline_inband_work *inband_work)
+{
+	struct work_struct *lostage_work;
+	spl_t s;
+
+	xnlock_get_irqsave(&nrt_work_lock, s);
+
+	while (!list_empty(&nrt_work_list)) {
+		lostage_work = list_first_entry(&nrt_work_list,
+						struct work_struct, entry);
+		list_del_init(&lostage_work->entry);
+
+		xnlock_put_irqrestore(&nrt_work_lock, s);
+
+		schedule_work(lostage_work);
+
+		xnlock_get_irqsave(&nrt_work_lock, s);
+	}
+
+	xnlock_put_irqrestore(&nrt_work_lock, s);
+}
+
+static struct lostage_trigger_work {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+} nrt_work = {
+	.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(nrt_work,
+					lostage_schedule_work),
+};
+
+/**
+ * Schedule a work item for execution on the regular (non-real-time)
+ * Linux global workqueue from primary mode.
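+ *
+ * A minimal sketch, assuming a work item set up at driver init time
+ * (the demo_* names are illustrative):
+ *
+ * @code
+ * static void demo_work_handler(struct work_struct *work)
+ * {
+ *	// Runs later, in the regular Linux workqueue context.
+ * }
+ * static DECLARE_WORK(demo_work, demo_work_handler);
+ *
+ * // Safe from primary mode, e.g. from an interrupt handler:
+ * rtdm_schedule_nrt_work(&demo_work);
+ * @endcode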
+ * + * @param lostage_work + */ +void rtdm_schedule_nrt_work(struct work_struct *lostage_work) +{ + spl_t s; + + if (is_secondary_domain()) { + schedule_work(lostage_work); + return; + } + + xnlock_get_irqsave(&nrt_work_lock, s); + + list_add_tail(&lostage_work->entry, &nrt_work_list); + pipeline_post_inband_work(&nrt_work); + + xnlock_put_irqrestore(&nrt_work_lock, s); +} +EXPORT_SYMBOL_GPL(rtdm_schedule_nrt_work); + +/** @} Non-Real-Time Signalling Services */ + + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_util Utility Services + * @{ + */ + +struct mmap_tramp_data { + struct rtdm_fd *fd; + struct file_operations *fops; + int (*mmap_handler)(struct rtdm_fd *fd, + struct vm_area_struct *vma); +}; + +struct mmap_helper_data { + void *src_vaddr; + phys_addr_t src_paddr; + struct vm_operations_struct *vm_ops; + void *vm_private_data; + struct mmap_tramp_data tramp_data; +}; + +static int mmap_kmem_helper(struct vm_area_struct *vma, void *va) +{ + unsigned long addr, len, pfn, to; + int ret = 0; + + to = (unsigned long)va; + addr = vma->vm_start; + len = vma->vm_end - vma->vm_start; + + if (to != PAGE_ALIGN(to) || (len & ~PAGE_MASK) != 0) + return -EINVAL; + +#ifndef CONFIG_MMU + pfn = __pa(to) >> PAGE_SHIFT; + ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED); +#else + if (to < VMALLOC_START || to >= VMALLOC_END) { + /* logical address. */ + pfn = __pa(to) >> PAGE_SHIFT; + ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED); + if (ret) + return ret; + } else { + /* vmalloc memory. */ + while (len > 0) { + struct page *page = vmalloc_to_page((void *)to); + if (vm_insert_page(vma, addr, page)) + return -EAGAIN; + addr += PAGE_SIZE; + to += PAGE_SIZE; + len -= PAGE_SIZE; + } + } + + if (cobalt_machine.prefault) + cobalt_machine.prefault(vma); +#endif + + return ret; +} + +static int mmap_iomem_helper(struct vm_area_struct *vma, phys_addr_t pa) +{ + pgprot_t prot = PAGE_SHARED; + unsigned long len; + + len = vma->vm_end - vma->vm_start; +#ifndef CONFIG_MMU + vma->vm_pgoff = pa >> PAGE_SHIFT; +#endif /* CONFIG_MMU */ + +#ifdef __HAVE_PHYS_MEM_ACCESS_PROT + if (vma->vm_file) + prot = phys_mem_access_prot(vma->vm_file, pa >> PAGE_SHIFT, + len, prot); +#endif + vma->vm_page_prot = pgprot_noncached(prot); + + return remap_pfn_range(vma, vma->vm_start, pa >> PAGE_SHIFT, + len, vma->vm_page_prot); +} + +static int mmap_buffer_helper(struct rtdm_fd *fd, struct vm_area_struct *vma) +{ + struct mmap_tramp_data *tramp_data = vma->vm_private_data; + struct mmap_helper_data *helper_data; + int ret; + + helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data); + vma->vm_ops = helper_data->vm_ops; + vma->vm_private_data = helper_data->vm_private_data; + + if (helper_data->src_paddr) + ret = mmap_iomem_helper(vma, helper_data->src_paddr); + else + ret = mmap_kmem_helper(vma, helper_data->src_vaddr); + + return ret; +} + +static int mmap_trampoline(struct file *filp, struct vm_area_struct *vma) +{ + struct mmap_tramp_data *tramp_data = filp->private_data; + int ret; + + vma->vm_private_data = tramp_data; + + ret = tramp_data->mmap_handler(tramp_data->fd, vma); + if (ret) + return ret; + + return 0; +} + +#ifndef CONFIG_MMU + +static unsigned long +internal_get_unmapped_area(struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct mmap_tramp_data *tramp_data = filp->private_data; + struct mmap_helper_data *helper_data; + unsigned long pa; + + helper_data = container_of(tramp_data, struct mmap_helper_data, 
tramp_data);
+	pa = helper_data->src_paddr;
+	if (pa)
+		return (unsigned long)__va(pa);
+
+	return (unsigned long)helper_data->src_vaddr;
+}
+
+static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data,
+			size_t len, off_t offset, int prot, int flags,
+			void **pptr)
+{
+	const struct file_operations *old_fops;
+	unsigned long u_addr;
+	struct file *filp;
+
+	filp = filp_open("/dev/mem", O_RDWR, 0);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	old_fops = filp->f_op;
+	filp->f_op = tramp_data->fops;
+	filp->private_data = tramp_data;
+	u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset);
+	filp->f_op = old_fops;	/* Restore before dropping our reference. */
+	filp_close(filp, current->files);
+
+	if (IS_ERR_VALUE(u_addr))
+		return (int)u_addr;
+
+	*pptr = (void *)u_addr;
+
+	return 0;
+}
+
+#else /* CONFIG_MMU */
+
+static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data,
+			size_t len, off_t offset, int prot, int flags,
+			void **pptr)
+{
+	unsigned long u_addr;
+	struct file *filp;
+
+	filp = anon_inode_getfile("[rtdm]", tramp_data->fops, tramp_data, O_RDWR);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset);
+	filp_close(filp, current->files);
+
+	if (IS_ERR_VALUE(u_addr))
+		return (int)u_addr;
+
+	*pptr = (void *)u_addr;
+
+	return 0;
+}
+
+#define internal_get_unmapped_area NULL
+
+#endif /* CONFIG_MMU */
+
+static struct file_operations internal_mmap_fops = {
+	.mmap = mmap_trampoline,
+	.get_unmapped_area = internal_get_unmapped_area
+};
+
+static unsigned long
+driver_get_unmapped_area(struct file *filp,
+			 unsigned long addr, unsigned long len,
+			 unsigned long pgoff, unsigned long flags)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	struct rtdm_fd *fd = tramp_data->fd;
+
+	if (fd->ops->get_unmapped_area)
+		return fd->ops->get_unmapped_area(fd, len, pgoff, flags);
+
+#ifdef CONFIG_MMU
+	/* Run default handler. */
+	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+#else
+	return -ENODEV;
+#endif
+}
+
+static struct file_operations driver_mmap_fops = {
+	.mmap = mmap_trampoline,
+	.get_unmapped_area = driver_get_unmapped_area
+};
+
+int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset,
+			  int prot, int flags, void **pptr)
+{
+	struct mmap_tramp_data tramp_data = {
+		.fd = fd,
+		.fops = &driver_mmap_fops,
+		.mmap_handler = fd->ops->mmap,
+	};
+
+#ifndef CONFIG_MMU
+	/*
+	 * XXX: A .get_unmapped_area handler must be provided in the
+	 * nommu case. We use this to force the memory management code
+	 * not to share VM regions for distinct areas to map to, as it
+	 * would otherwise do since all requests currently apply to
+	 * the same file (i.e. from /dev/mem, see do_mmap_pgoff() in
+	 * the nommu case).
+	 */
+	if (fd->ops->get_unmapped_area)
+		offset = fd->ops->get_unmapped_area(fd, len, 0, flags);
+#endif
+
+	return do_rtdm_mmap(&tramp_data, len, offset, prot, flags, pptr);
+}
+
+/**
+ * Map a kernel memory range into the address space of the user.
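+ *
+ * A sketch of a typical call from an .ioctl handler, exporting a
+ * kmalloc'ed buffer (the ctx fields are illustrative):
+ *
+ * @code
+ * void *uptr = NULL;
+ * int ret;
+ *
+ * ret = rtdm_mmap_to_user(fd, ctx->buf, ctx->buflen,
+ *			   PROT_READ | PROT_WRITE, &uptr, NULL, NULL);
+ * if (ret)
+ *	return ret;
+ *
+ * // On success, uptr holds the user-space address of the mapping.
+ * @endcode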
+ * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] src_addr Kernel virtual address to be mapped + * @param[in] len Length of the memory range + * @param[in] prot Protection flags for the user's memory range, typically + * either PROT_READ or PROT_READ|PROT_WRITE + * @param[in,out] pptr Address of a pointer containing the desired user + * address or NULL on entry and the finally assigned address on return + * @param[in] vm_ops vm_operations to be executed on the vm_area of the + * user memory range or NULL + * @param[in] vm_private_data Private data to be stored in the vm_area, + * primarily useful for vm_operation handlers + * + * @return 0 on success, otherwise (most common values): + * + * - -EINVAL is returned if an invalid start address, size, or destination + * address was passed. + * + * - -ENOMEM is returned if there is insufficient free memory or the limit of + * memory mapping for the user process was reached. + * + * - -EAGAIN is returned if too much memory has been already locked by the + * user process. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @note This service only works on memory regions allocated via kmalloc() or + * vmalloc(). To map physical I/O memory to user-space use + * rtdm_iomap_to_user() instead. + * + * @note This service is provided only for use in .ioctl operation handlers. + * Otherwise RTDM drivers implementing a .mmap operation should use + * rtdm_mmap_kmem(), rtdm_mmap_vmem(), or rtdm_mmap_iomem(). + * + * @note RTDM supports two models for unmapping the memory area: + * - manual unmapping via rtdm_munmap(), which may be issued from a + * driver in response to an IOCTL call, or by a call to the regular + * munmap() call from the application. + * - automatic unmapping, triggered by the termination of the process + * which owns the mapping. + * To track the number of references pending on the resource mapped, + * the driver can pass the address of a close handler for the vm_area + * considered, in the @a vm_ops descriptor. See the relevant Linux + * kernel programming documentation (e.g. Linux Device Drivers book) + * on virtual memory management for details. + * + * @coretags{secondary-only} + */ +int rtdm_mmap_to_user(struct rtdm_fd *fd, + void *src_addr, size_t len, + int prot, void **pptr, + struct vm_operations_struct *vm_ops, + void *vm_private_data) +{ + struct mmap_helper_data helper_data = { + .tramp_data = { + .fd = fd, + .fops = &internal_mmap_fops, + .mmap_handler = mmap_buffer_helper, + }, + .src_vaddr = src_addr, + .src_paddr = 0, + .vm_ops = vm_ops, + .vm_private_data = vm_private_data + }; + + if (!XENO_ASSERT(COBALT, xnsched_root_p())) + return -EPERM; + + return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr); +} +EXPORT_SYMBOL_GPL(rtdm_mmap_to_user); + +/** + * Map an I/O memory range into the address space of the user. 
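+ *
+ * For instance, a driver may export a register window discovered at
+ * probe time (the ctx fields are illustrative):
+ *
+ * @code
+ * void *uptr = NULL;
+ * int ret;
+ *
+ * ret = rtdm_iomap_to_user(fd, ctx->regs_paddr, ctx->regs_len,
+ *			    PROT_READ | PROT_WRITE, &uptr, NULL, NULL);
+ * @endcode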
+ * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] src_addr physical I/O address to be mapped + * @param[in] len Length of the memory range + * @param[in] prot Protection flags for the user's memory range, typically + * either PROT_READ or PROT_READ|PROT_WRITE + * @param[in,out] pptr Address of a pointer containing the desired user + * address or NULL on entry and the finally assigned address on return + * @param[in] vm_ops vm_operations to be executed on the vm_area of the + * user memory range or NULL + * @param[in] vm_private_data Private data to be stored in the vm_area, + * primarily useful for vm_operation handlers + * + * @return 0 on success, otherwise (most common values): + * + * - -EINVAL is returned if an invalid start address, size, or destination + * address was passed. + * + * - -ENOMEM is returned if there is insufficient free memory or the limit of + * memory mapping for the user process was reached. + * + * - -EAGAIN is returned if too much memory has been already locked by the + * user process. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @note RTDM supports two models for unmapping the memory area: + * - manual unmapping via rtdm_munmap(), which may be issued from a + * driver in response to an IOCTL call, or by a call to the regular + * munmap() call from the application. + * - automatic unmapping, triggered by the termination of the process + * which owns the mapping. + * To track the number of references pending on the resource mapped, + * the driver can pass the address of a close handler for the vm_area + * considered, in the @a vm_ops descriptor. See the relevant Linux + * kernel programming documentation (e.g. Linux Device Drivers book) + * on virtual memory management for details. + * + * @coretags{secondary-only} + */ +int rtdm_iomap_to_user(struct rtdm_fd *fd, + phys_addr_t src_addr, size_t len, + int prot, void **pptr, + struct vm_operations_struct *vm_ops, + void *vm_private_data) +{ + struct mmap_helper_data helper_data = { + .tramp_data = { + .fd = fd, + .fops = &internal_mmap_fops, + .mmap_handler = mmap_buffer_helper, + }, + .src_vaddr = NULL, + .src_paddr = src_addr, + .vm_ops = vm_ops, + .vm_private_data = vm_private_data + }; + + if (!XENO_ASSERT(COBALT, xnsched_root_p())) + return -EPERM; + + return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr); +} +EXPORT_SYMBOL_GPL(rtdm_iomap_to_user); + +/** + * Map a kernel logical memory range to a virtual user area. + * + * This routine is commonly used from a .mmap operation handler of a RTDM + * driver, for mapping a virtual memory area with a direct physical + * mapping over the user address space referred to by @a vma. + * + * @param[in] vma The VMA descriptor to receive the mapping. + * @param[in] va The kernel logical address to be mapped. + * + * @return 0 on success, otherwise a negated error code is returned. + * + * @note This service works on memory regions allocated via + * kmalloc(). To map a chunk of virtual space with no direct physical + * mapping, or a physical I/O memory to a VMA, call rtdm_mmap_vmem() + * or rtdm_mmap_iomem() respectively instead. + * + * @coretags{secondary-only} + */ +int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va) +{ + return mmap_kmem_helper(vma, va); +} +EXPORT_SYMBOL_GPL(rtdm_mmap_kmem); + +/** + * Map a kernel virtual memory range to a virtual user area. 
+ *
+ * This routine is commonly used from a .mmap operation handler of a RTDM
+ * driver, for mapping a kernel virtual memory area over the user
+ * address space referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] va The virtual address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note This service works on memory regions allocated via
+ * vmalloc(). To map a chunk of logical space obtained from kmalloc(),
+ * or a physical I/O memory to a VMA, call rtdm_mmap_kmem() or
+ * rtdm_mmap_iomem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va)
+{
+	/*
+	 * Our helper handles both directly mapped (kmalloc) and purely
+	 * virtual (vmalloc) memory ranges.
+	 */
+	return mmap_kmem_helper(vma, va);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_vmem);
+
+/**
+ * Map an I/O memory range to a virtual user area.
+ *
+ * This routine is commonly used from a .mmap operation handler of a RTDM
+ * driver, for mapping an I/O memory area over the user address space
+ * referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] pa The physical I/O address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note To map a chunk of logical space obtained from kmalloc(), or a
+ * purely virtual area with no direct physical mapping to a VMA, call
+ * rtdm_mmap_kmem() or rtdm_mmap_vmem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa)
+{
+	return mmap_iomem_helper(vma, pa);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_iomem);
+
+/**
+ * Unmap a user memory range.
+ *
+ * @param[in] ptr User address of the memory range
+ * @param[in] len Length of the memory range
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid address or size was passed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_munmap(void *ptr, size_t len)
+{
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return vm_munmap((unsigned long)ptr, len);
+}
+EXPORT_SYMBOL_GPL(rtdm_munmap);
+
+int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iovp,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast)
+{
+	size_t len = sizeof(struct iovec) * msg->msg_iovlen;
+	struct iovec *iov = iov_fast;
+
+	/*
+	 * If the I/O vector doesn't fit in the fast memory, allocate
+	 * a chunk from the system heap which is large enough to hold
+	 * it.
+	 */
+	if (msg->msg_iovlen > RTDM_IOV_FASTMAX) {
+		iov = xnmalloc(len);
+		if (iov == NULL)
+			return -ENOMEM;
+	}
+
+	*iovp = iov;
+
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(iov, msg->msg_iov, len);
+		return 0;
+	}
+
+	return rtdm_fd_get_iovec(fd, iov, msg, false);
+}
+EXPORT_SYMBOL_GPL(rtdm_get_iovec);
+
+int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast)
+{
+	size_t len = sizeof(iov[0]) * msg->msg_iovlen;
+	int ret;
+
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(msg->msg_iov, iov, len);
+		ret = 0;
+	} else
+		ret = rtdm_fd_put_iovec(fd, iov, msg);
+
+	if (iov != iov_fast)
+		xnfree(iov);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_put_iovec);
+
+ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen)
+{
+	ssize_t len;
+	int nvec;
+
+	/* Return the flattened vector length.
*/
+	for (len = 0, nvec = 0; nvec < iovlen; nvec++) {
+		ssize_t l = iov[nvec].iov_len;
+		if (l < 0 || len + l < len) /* SuS wants this. */
+			return -EINVAL;
+		len += l;
+	}
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(rtdm_get_iov_flatlen);
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * Real-time safe rate-limited message printing on kernel console
+ *
+ * @param[in] format Format string (conforming standard @c printf())
+ * @param ... Arguments referred by @a format
+ *
+ * @return On success, this service returns the number of characters printed.
+ * Otherwise, a negative error code is returned.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_printk_ratelimited(const char *format, ...);
+
+/**
+ * Real-time safe message printing on kernel console
+ *
+ * @param[in] format Format string (conforming standard @c printf())
+ * @param ... Arguments referred by @a format
+ *
+ * @return On success, this service returns the number of characters printed.
+ * Otherwise, a negative error code is returned.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_printk(const char *format, ...);
+
+/**
+ * Allocate memory block
+ *
+ * @param[in] size Requested size of the memory block
+ *
+ * @return The pointer to the allocated block is returned on success, NULL
+ * otherwise.
+ *
+ * @coretags{unrestricted}
+ */
+void *rtdm_malloc(size_t size);
+
+/**
+ * Release real-time memory block
+ *
+ * @param[in] ptr Pointer to memory block as returned by rtdm_malloc()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_free(void *ptr);
+
+/**
+ * Check if read access to user-space memory block is safe
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] ptr Address of the user-provided memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return Non-zero is returned when it is safe to read from the specified
+ * memory block, 0 otherwise.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_read_user_ok(struct rtdm_fd *fd, const void __user *ptr,
+		      size_t size);
+
+/**
+ * Check if read/write access to user-space memory block is safe
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] ptr Address of the user-provided memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return Non-zero is returned when it is safe to read from or write to the
+ * specified memory block, 0 otherwise.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_rw_user_ok(struct rtdm_fd *fd, const void __user *ptr,
+		    size_t size);
+
+/**
+ * Copy user-space memory block to specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note Before invoking this service, verify via rtdm_read_user_ok() that the
+ * provided user-space address can securely be accessed.
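+ *
+ * The canonical check-then-copy sequence looks like this (a sketch
+ * with illustrative names):
+ *
+ * @code
+ * struct demo_config cfg;
+ *
+ * if (!rtdm_read_user_ok(fd, arg, sizeof(cfg)) ||
+ *     rtdm_copy_from_user(fd, &cfg, arg, sizeof(cfg)))
+ *	return -EFAULT;
+ * @endcode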
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_copy_from_user(struct rtdm_fd *fd, void *dst,
+			const void __user *src, size_t size);
+
+/**
+ * Check if read access to user-space memory block is safe and copy it to
+ * specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service is a combination of rtdm_read_user_ok() and
+ * rtdm_copy_from_user().
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_safe_copy_from_user(struct rtdm_fd *fd, void *dst,
+			     const void __user *src, size_t size);
+
+/**
+ * Copy specified buffer to user-space memory block
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Address of the user-space memory block
+ * @param[in] src Source buffer address
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note Before invoking this service, verify via rtdm_rw_user_ok() that the
+ * provided user-space address can securely be accessed.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_copy_to_user(struct rtdm_fd *fd, void __user *dst,
+		      const void *src, size_t size);
+
+/**
+ * Check if read/write access to user-space memory block is safe and copy
+ * specified buffer to it
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Address of the user-space memory block
+ * @param[in] src Source buffer address
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service is a combination of rtdm_rw_user_ok() and
+ * rtdm_copy_to_user().
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_safe_copy_to_user(struct rtdm_fd *fd, void __user *dst,
+			   const void *src, size_t size);
+
+/**
+ * Copy user-space string to specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space string
+ * @param[in] count Maximum number of bytes to copy, including the trailing
+ * '0'
+ *
+ * @return Length of the string on success (not including the trailing '0'),
+ * otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service already includes a check of the source address;
+ * calling rtdm_read_user_ok() for @a src explicitly is not required.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_strncpy_from_user(struct rtdm_fd *fd, char *dst,
+			   const char __user *src, size_t count);
+
+/**
+ * Test if running in a real-time task
+ *
+ * @return Non-zero is returned if the caller resides in real-time context, 0
+ * otherwise.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_in_rt_context(void);
+
+/**
+ * Test if the caller is capable of running in real-time context
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ *
+ * @return Non-zero is returned if the caller is able to execute in real-time
+ * context (independent of its current execution mode), 0 otherwise.
+ *
+ * @note This function can be used by drivers that provide different
+ * implementations for the same service depending on the execution mode of
+ * the caller. If a caller requests such a service in non-real-time context
+ * but is capable of running in real-time as well, it might be appropriate
+ * for the driver to reject the request via -ENOSYS so that RTDM can switch
+ * the caller and restart the request in real-time context.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_rt_capable(struct rtdm_fd *fd);
+
+/**
+ * Test if the real-time core is available
+ *
+ * @return True if the real-time core is available, false if it is disabled
+ * or in error state.
+ *
+ * @note Drivers should query the core state during initialization if they
+ * perform hardware setup operations or interact with RTDM services such as
+ * locks prior to calling an RTDM service that has a built-in state check of
+ * the real-time core (e.g. rtdm_dev_register() or rtdm_task_init()).
+ *
+ * @coretags{unrestricted}
+ */
+bool rtdm_available(void);
+
+#endif /* DOXYGEN_CPP */
+
+/** @} Utility Services */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c
new file mode 100644
index 0000000..c7ceb76
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c
@@ -0,0 +1,1192 @@
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2013,2014 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ +#include <linux/list.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/poll.h> +#include <linux/kthread.h> +#include <linux/fdtable.h> +#include <cobalt/kernel/registry.h> +#include <cobalt/kernel/lock.h> +#include <cobalt/kernel/ppd.h> +#include <cobalt/kernel/time.h> +#include <pipeline/inband_work.h> +#include <trace/events/cobalt-rtdm.h> +#include <rtdm/compat.h> +#include <rtdm/fd.h> +#include "internal.h" +#include "posix/process.h" +#include "posix/syscall.h" +#include "posix/clock.h" + +#define RTDM_SETFL_MASK (O_NONBLOCK) + +DEFINE_PRIVATE_XNLOCK(fdtree_lock); +static LIST_HEAD(rtdm_fd_cleanup_queue); +static struct semaphore rtdm_fd_cleanup_sem; + +struct rtdm_fd_index { + struct xnid id; + struct rtdm_fd *fd; +}; + +static int enosys(void) +{ + return -ENOSYS; +} + +static int enotty(void) +{ + return -ENOTTY; +} + +static int ebadf(void) +{ + return -EBADF; +} + +static int enodev(void) +{ + return -ENODEV; +} + +static inline struct rtdm_fd_index * +fetch_fd_index(struct cobalt_ppd *p, int ufd) +{ + struct xnid *id = xnid_fetch(&p->fds, ufd); + if (id == NULL) + return NULL; + + return container_of(id, struct rtdm_fd_index, id); +} + +static struct rtdm_fd *fetch_fd(struct cobalt_ppd *p, int ufd) +{ + struct rtdm_fd_index *idx = fetch_fd_index(p, ufd); + if (idx == NULL) + return NULL; + + return idx->fd; +} + +#define assign_invalid_handler(__handler, __invalid) \ + do \ + (__handler) = (typeof(__handler))(void (*)(void))__invalid; \ + while (0) + +/* Calling this handler should beget ENOSYS if not implemented. */ +#define assign_switch_handler(__handler) \ + do \ + if ((__handler) == NULL) \ + assign_invalid_handler(__handler, enosys); \ + while (0) + +#define assign_default_handler(__handler, __invalid) \ + do \ + if ((__handler) == NULL) \ + assign_invalid_handler(__handler, __invalid); \ + while (0) + +#define __rt(__handler) __handler ## _rt +#define __nrt(__handler) __handler ## _nrt + +/* + * Install a placeholder returning EADV if none of the dual handlers + * are implemented, ENOSYS otherwise for NULL handlers to trigger the + * adaptive switch. 
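+ *
+ * E.g. a driver implementing only read_rt() gets read_nrt wired to the
+ * ENOSYS placeholder, so callers are bounced to the converse mode; a
+ * driver implementing neither handler gets the per-operation error
+ * placeholder (enotty for ioctl, ebadf for I/O) on both entry points.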
+ */
+#define assign_default_dual_handlers(__handler, __invalid_handler)	\
+	do								\
+		if (__rt(__handler) || __nrt(__handler)) {		\
+			assign_switch_handler(__rt(__handler));		\
+			assign_switch_handler(__nrt(__handler));	\
+		} else {						\
+			assign_invalid_handler(__rt(__handler),		\
+					       __invalid_handler);	\
+			assign_invalid_handler(__nrt(__handler),	\
+					       __invalid_handler);	\
+		}							\
+	while (0)
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+static inline void set_compat_bit(struct rtdm_fd *fd)
+{
+	struct pt_regs *regs;
+
+	if (cobalt_ppd_get(0) == &cobalt_kernel_ppd)
+		fd->compat = 0;
+	else {
+		regs = task_pt_regs(current);
+		XENO_BUG_ON(COBALT, !__xn_syscall_p(regs));
+		fd->compat = __COBALT_CALL_COMPAT(__xn_reg_sys(regs));
+	}
+}
+
+#else /* !CONFIG_XENO_ARCH_SYS3264 */
+
+static inline void set_compat_bit(struct rtdm_fd *fd)
+{
+}
+
+#endif /* !CONFIG_XENO_ARCH_SYS3264 */
+
+int rtdm_fd_enter(struct rtdm_fd *fd, int ufd, unsigned int magic,
+		  struct rtdm_fd_ops *ops)
+{
+	struct cobalt_ppd *ppd;
+
+	secondary_mode_only();
+
+	if (magic == 0)
+		return -EINVAL;
+
+	assign_default_dual_handlers(ops->ioctl, enotty);
+	assign_default_dual_handlers(ops->read, ebadf);
+	assign_default_dual_handlers(ops->write, ebadf);
+	assign_default_dual_handlers(ops->recvmsg, ebadf);
+	assign_default_dual_handlers(ops->sendmsg, ebadf);
+	assign_default_handler(ops->select, ebadf);
+	assign_default_handler(ops->mmap, enodev);
+
+	ppd = cobalt_ppd_get(0);
+	fd->magic = magic;
+	fd->ops = ops;
+	fd->owner = ppd;
+	fd->ufd = ufd;
+	fd->refs = 1;
+	fd->stale = false;
+	set_compat_bit(fd);
+	INIT_LIST_HEAD(&fd->next);
+
+	return 0;
+}
+
+int rtdm_fd_register(struct rtdm_fd *fd, int ufd)
+{
+	struct rtdm_fd_index *idx;
+	struct cobalt_ppd *ppd;
+	spl_t s;
+	int ret = 0;
+
+	ppd = cobalt_ppd_get(0);
+	idx = kmalloc(sizeof(*idx), GFP_KERNEL);
+	if (idx == NULL)
+		return -ENOMEM;
+
+	idx->fd = fd;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	ret = xnid_enter(&ppd->fds, &idx->id, ufd);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+	if (ret < 0) {
+		kfree(idx);
+		ret = -EBUSY;
+	}
+
+	return ret;
+}
+
+int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd,
+		       struct rtdm_device *device)
+{
+	spl_t s;
+	int ret;
+
+	ret = rtdm_fd_register(fd, ufd);
+	if (ret < 0)
+		return ret;
+
+	trace_cobalt_fd_created(fd, ufd);
+	xnlock_get_irqsave(&fdtree_lock, s);
+	list_add(&fd->next, &device->openfd_list);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return 0;
+}
+
+/**
+ * @brief Retrieve and lock a RTDM file descriptor
+ *
+ * @param[in] ufd User-side file descriptor
+ * @param[in] magic Magic word for lookup validation
+ *
+ * @return Pointer to the RTDM file descriptor matching @a
+ * ufd. Otherwise:
+ *
+ * - ERR_PTR(-EADV) if the user-space handle is either invalid, or not
+ * managed by RTDM.
+ *
+ * - ERR_PTR(-EBADF) if the underlying device is being torn down at
+ * the time of the call.
+ *
+ * @note The file descriptor returned must be later released by a call
+ * to rtdm_fd_put().
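+ *
+ * A typical lookup/release pair (a sketch; DEMO_MAGIC stands for the
+ * driver's own magic word):
+ *
+ * @code
+ * struct rtdm_fd *fd = rtdm_fd_get(ufd, DEMO_MAGIC);
+ *
+ * if (IS_ERR(fd))
+ *	return PTR_ERR(fd);
+ *
+ * // ... operate on fd ...
+ *
+ * rtdm_fd_put(fd);
+ * @endcode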
+ * + * @coretags{unrestricted} + */ +struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic) +{ + struct cobalt_ppd *p = cobalt_ppd_get(0); + struct rtdm_fd *fd; + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + fd = fetch_fd(p, ufd); + if (fd == NULL || (magic != 0 && fd->magic != magic)) { + fd = ERR_PTR(-EADV); + goto out; + } + + if (fd->stale) { + fd = ERR_PTR(-EBADF); + goto out; + } + + ++fd->refs; +out: + xnlock_put_irqrestore(&fdtree_lock, s); + + return fd; +} +EXPORT_SYMBOL_GPL(rtdm_fd_get); + +static int fd_cleanup_thread(void *data) +{ + struct rtdm_fd *fd; + int err; + spl_t s; + + for (;;) { + set_cpus_allowed_ptr(current, cpu_online_mask); + + do { + err = down_interruptible(&rtdm_fd_cleanup_sem); + if (kthread_should_stop()) + return 0; + } while (err); + + xnlock_get_irqsave(&fdtree_lock, s); + fd = list_first_entry(&rtdm_fd_cleanup_queue, + struct rtdm_fd, cleanup); + list_del(&fd->cleanup); + xnlock_put_irqrestore(&fdtree_lock, s); + + fd->ops->close(fd); + } + + return 0; +} + +static void lostage_trigger_close(struct pipeline_inband_work *inband_work) +{ + up(&rtdm_fd_cleanup_sem); +} + +static struct lostage_trigger_close { + struct pipeline_inband_work inband_work; /* Must be first. */ +} fd_closework = { + .inband_work = PIPELINE_INBAND_WORK_INITIALIZER(fd_closework, + lostage_trigger_close), +}; + +static void __put_fd(struct rtdm_fd *fd, spl_t s) +{ + bool destroy, trigger; + + XENO_WARN_ON(COBALT, fd->refs <= 0); + destroy = --fd->refs == 0; + if (destroy && !list_empty(&fd->next)) + list_del_init(&fd->next); + + xnlock_put_irqrestore(&fdtree_lock, s); + + if (!destroy) + return; + + if (is_secondary_domain()) + fd->ops->close(fd); + else { + xnlock_get_irqsave(&fdtree_lock, s); + trigger = list_empty(&rtdm_fd_cleanup_queue); + list_add_tail(&fd->cleanup, &rtdm_fd_cleanup_queue); + xnlock_put_irqrestore(&fdtree_lock, s); + + if (trigger) + pipeline_post_inband_work(&fd_closework); + } +} + +void rtdm_device_flush_fds(struct rtdm_device *dev) +{ + struct rtdm_driver *drv = dev->driver; + struct rtdm_fd *fd; + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + + while (!list_empty(&dev->openfd_list)) { + fd = list_get_entry_init(&dev->openfd_list, struct rtdm_fd, next); + fd->stale = true; + if (drv->ops.close) { + rtdm_fd_get_light(fd); + xnlock_put_irqrestore(&fdtree_lock, s); + drv->ops.close(fd); + rtdm_fd_put(fd); + xnlock_get_irqsave(&fdtree_lock, s); + } + } + + xnlock_put_irqrestore(&fdtree_lock, s); +} + +/** + * @brief Release a RTDM file descriptor obtained via rtdm_fd_get() + * + * @param[in] fd RTDM file descriptor to release + * + * @note Every call to rtdm_fd_get() must be matched by a call to + * rtdm_fd_put(). + * + * @coretags{unrestricted} + */ +void rtdm_fd_put(struct rtdm_fd *fd) +{ + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + __put_fd(fd, s); +} +EXPORT_SYMBOL_GPL(rtdm_fd_put); + +/** + * @brief Hold a reference on a RTDM file descriptor + * + * @param[in] fd Target file descriptor + * + * @note rtdm_fd_lock() increments the reference counter of @a fd. You + * only need to call this function in special scenarios, e.g. when + * keeping additional references to the file descriptor that have + * different lifetimes. Only use rtdm_fd_lock() on descriptors that + * are currently locked via an earlier rtdm_fd_get()/rtdm_fd_lock() or + * while running a device operation handler. 
+ * + * @coretags{unrestricted} + */ +int rtdm_fd_lock(struct rtdm_fd *fd) +{ + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + if (fd->refs == 0) { + xnlock_put_irqrestore(&fdtree_lock, s); + return -EIDRM; + } + ++fd->refs; + xnlock_put_irqrestore(&fdtree_lock, s); + + return 0; +} +EXPORT_SYMBOL_GPL(rtdm_fd_lock); + +/** + * @brief Drop a reference on a RTDM file descriptor + * + * @param[in] fd Target file descriptor + * + * @note Every call to rtdm_fd_lock() must be matched by a call to + * rtdm_fd_unlock(). + * + * @coretags{unrestricted} + */ +void rtdm_fd_unlock(struct rtdm_fd *fd) +{ + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + __put_fd(fd, s); +} +EXPORT_SYMBOL_GPL(rtdm_fd_unlock); + +int rtdm_fd_fcntl(int ufd, int cmd, ...) +{ + struct rtdm_fd *fd; + va_list ap; + long arg; + int ret; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) + return PTR_ERR(fd); + + va_start(ap, cmd); + arg = va_arg(ap, long); + va_end(ap); + + switch (cmd) { + case F_GETFL: + ret = fd->oflags; + break; + case F_SETFL: + fd->oflags = (fd->oflags & ~RTDM_SETFL_MASK) | + (arg & RTDM_SETFL_MASK); + ret = 0; + break; + default: + ret = -EINVAL; + } + + rtdm_fd_put(fd); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_fcntl); + +static struct rtdm_fd *get_fd_fixup_mode(int ufd) +{ + struct xnthread *thread; + struct rtdm_fd *fd; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) + return fd; + + /* + * Mode is selected according to the following convention: + * + * - Cobalt threads must try running the syscall from primary + * mode as a first attempt, regardless of their scheduling + * class. The driver handler may ask for demoting the caller + * to secondary mode by returning -ENOSYS. + * + * - Regular threads (i.e. not bound to Cobalt) may only run + * the syscall from secondary mode. + */ + thread = xnthread_current(); + if (unlikely(is_secondary_domain())) { + if (thread == NULL || + xnthread_test_localinfo(thread, XNDESCENT)) + return fd; + } else if (likely(thread)) + return fd; + + /* + * We need to switch to the converse mode. Since all callers + * bear the "adaptive" tag, we just pass -ENOSYS back to the + * syscall dispatcher to get switched to the next mode. + */ + rtdm_fd_put(fd); + + return ERR_PTR(-ENOSYS); +} + +int rtdm_fd_ioctl(int ufd, unsigned int request, ...) 
+{ + struct rtdm_fd *fd; + void __user *arg; + va_list args; + int err, ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + err = PTR_ERR(fd); + goto out; + } + + va_start(args, request); + arg = va_arg(args, void __user *); + va_end(args); + + set_compat_bit(fd); + + trace_cobalt_fd_ioctl(current, fd, ufd, request); + + if (is_secondary_domain()) + err = fd->ops->ioctl_nrt(fd, request, arg); + else + err = fd->ops->ioctl_rt(fd, request, arg); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + if (err < 0) { + ret = __rtdm_dev_ioctl_core(fd, request, arg); + if (ret != -EADV) + err = ret; + } + + rtdm_fd_put(fd); + out: + if (err < 0) + trace_cobalt_fd_ioctl_status(current, fd, ufd, err); + + return err; +} +EXPORT_SYMBOL_GPL(rtdm_fd_ioctl); + +ssize_t +rtdm_fd_read(int ufd, void __user *buf, size_t size) +{ + struct rtdm_fd *fd; + ssize_t ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_read(current, fd, ufd, size); + + if (is_secondary_domain()) + ret = fd->ops->read_nrt(fd, buf, size); + else + ret = fd->ops->read_rt(fd, buf, size); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); + + out: + if (ret < 0) + trace_cobalt_fd_read_status(current, fd, ufd, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_read); + +ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size) +{ + struct rtdm_fd *fd; + ssize_t ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_write(current, fd, ufd, size); + + if (is_secondary_domain()) + ret = fd->ops->write_nrt(fd, buf, size); + else + ret = fd->ops->write_rt(fd, buf, size); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); + + out: + if (ret < 0) + trace_cobalt_fd_write_status(current, fd, ufd, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_write); + +ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags) +{ + struct rtdm_fd *fd; + ssize_t ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_recvmsg(current, fd, ufd, flags); + + if (fd->oflags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + + if (is_secondary_domain()) + ret = fd->ops->recvmsg_nrt(fd, msg, flags); + else + ret = fd->ops->recvmsg_rt(fd, msg, flags); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); +out: + if (ret < 0) + trace_cobalt_fd_recvmsg_status(current, fd, ufd, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_recvmsg); + +struct cobalt_recvmmsg_timer { + struct xntimer timer; + struct xnthread *waiter; +}; + +static void recvmmsg_timeout_handler(struct xntimer *timer) +{ + struct cobalt_recvmmsg_timer *rq; + + rq = container_of(timer, struct cobalt_recvmmsg_timer, timer); + xnthread_set_info(rq->waiter, XNTIMEO); + xnthread_resume(rq->waiter, XNDELAY); +} + +int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, void __user *u_timeout, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg), + int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts)) +{ + struct cobalt_recvmmsg_timer rq; + xntmode_t tmode = XN_RELATIVE; + struct timespec64 ts = { 0 }; + int ret = 0, datagrams = 0; + xnticks_t timeout = 0; + struct mmsghdr mmsg; + struct rtdm_fd *fd; + void __user *u_p; + ssize_t 
len; + spl_t s; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_recvmmsg(current, fd, ufd, flags); + + if (u_timeout) { + ret = get_timespec(&ts, u_timeout); + if (ret) + goto fail; + + if (!timespec64_valid(&ts)) { + ret = -EINVAL; + goto fail; + } + + tmode = XN_ABSOLUTE; + timeout = ts2ns(&ts); + if (timeout == 0) + flags |= MSG_DONTWAIT; + else { + timeout += xnclock_read_monotonic(&nkclock); + rq.waiter = xnthread_current(); + xntimer_init(&rq.timer, &nkclock, + recvmmsg_timeout_handler, + NULL, XNTIMER_IGRAVITY); + xnlock_get_irqsave(&nklock, s); + ret = xntimer_start(&rq.timer, timeout, + XN_INFINITE, tmode); + xnlock_put_irqrestore(&nklock, s); + } + } + + if (fd->oflags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + + for (u_p = u_msgvec; vlen > 0; vlen--) { + ret = get_mmsg(&mmsg, u_p); + if (ret) + break; + len = fd->ops->recvmsg_rt(fd, &mmsg.msg_hdr, flags); + if (len < 0) { + ret = len; + break; + } + mmsg.msg_len = (unsigned int)len; + ret = put_mmsg(&u_p, &mmsg); + if (ret) + break; + datagrams++; + /* OOB data requires immediate handling. */ + if (mmsg.msg_hdr.msg_flags & MSG_OOB) + break; + if (flags & MSG_WAITFORONE) + flags |= MSG_DONTWAIT; + } + + if (timeout) { + xnlock_get_irqsave(&nklock, s); + xntimer_destroy(&rq.timer); + xnlock_put_irqrestore(&nklock, s); + } + +fail: + rtdm_fd_put(fd); + + if (datagrams > 0) + ret = datagrams; + +out: + trace_cobalt_fd_recvmmsg_status(current, fd, ufd, ret); + + return ret; +} + +static inline int __rtdm_fetch_timeout64(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts); +} + +int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, void __user *u_timeout, + int (*get_mmsg)(struct mmsghdr *mmsg, + void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, + const struct mmsghdr *mmsg)) +{ + return __rtdm_fd_recvmmsg(ufd, u_msgvec, vlen, flags, u_timeout, + get_mmsg, put_mmsg, __rtdm_fetch_timeout64); +} + + +ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg, int flags) +{ + struct rtdm_fd *fd; + ssize_t ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_sendmsg(current, fd, ufd, flags); + + if (fd->oflags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + + if (is_secondary_domain()) + ret = fd->ops->sendmsg_nrt(fd, msg, flags); + else + ret = fd->ops->sendmsg_rt(fd, msg, flags); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); +out: + if (ret < 0) + trace_cobalt_fd_sendmsg_status(current, fd, ufd, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_sendmsg); + +int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg)) +{ + int ret = 0, datagrams = 0; + struct mmsghdr mmsg; + struct rtdm_fd *fd; + void __user *u_p; + ssize_t len; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_sendmmsg(current, fd, ufd, flags); + + if (fd->oflags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + + for (u_p = u_msgvec; vlen > 0; vlen--) { + ret = get_mmsg(&mmsg, u_p); + if (ret) + break; + len = fd->ops->sendmsg_rt(fd, &mmsg.msg_hdr, flags); + if (len < 0) { + ret = len; + break; + } + mmsg.msg_len = 
(unsigned int)len; + ret = put_mmsg(&u_p, &mmsg); + if (ret) + break; + datagrams++; + } + + rtdm_fd_put(fd); + + if (datagrams > 0) + ret = datagrams; + +out: + trace_cobalt_fd_sendmmsg_status(current, fd, ufd, ret); + + return ret; +} + +static void +__fd_close(struct cobalt_ppd *p, struct rtdm_fd_index *idx, spl_t s) +{ + xnid_remove(&p->fds, &idx->id); + __put_fd(idx->fd, s); + + kfree(idx); +} + +int rtdm_fd_close(int ufd, unsigned int magic) +{ + struct rtdm_fd_index *idx; + struct cobalt_ppd *ppd; + struct rtdm_fd *fd; + spl_t s; + + secondary_mode_only(); + + ppd = cobalt_ppd_get(0); + + xnlock_get_irqsave(&fdtree_lock, s); + idx = fetch_fd_index(ppd, ufd); + if (idx == NULL) + goto eadv; + + fd = idx->fd; + if (magic != 0 && fd->magic != magic) { +eadv: + xnlock_put_irqrestore(&fdtree_lock, s); + return -EADV; + } + + set_compat_bit(fd); + + trace_cobalt_fd_close(current, fd, ufd, fd->refs); + + /* + * In dual kernel mode, the linux-side fdtable and the RTDM + * ->close() handler are asynchronously managed, i.e. the + * handler execution may be deferred after the regular file + * descriptor was removed from the fdtable if some refs on + * rtdm_fd are still pending. + */ + __fd_close(ppd, idx, s); + close_fd(ufd); + + return 0; +} +EXPORT_SYMBOL_GPL(rtdm_fd_close); + +int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma, + void **u_addrp) +{ + struct rtdm_fd *fd; + int ret; + + secondary_mode_only(); + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_mmap(current, fd, ufd, rma); + + if (rma->flags & (MAP_FIXED|MAP_ANONYMOUS)) { + ret = -EADV; + goto unlock; + } + + ret = __rtdm_mmap_from_fdop(fd, rma->length, rma->offset, + rma->prot, rma->flags, u_addrp); +unlock: + rtdm_fd_put(fd); +out: + if (ret) + trace_cobalt_fd_mmap_status(current, fd, ufd, ret); + + return ret; +} + +int rtdm_fd_valid_p(int ufd) +{ + struct rtdm_fd *fd; + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + fd = fetch_fd(cobalt_ppd_get(0), ufd); + xnlock_put_irqrestore(&fdtree_lock, s); + + return fd != NULL; +} + +/** + * @brief Bind a selector to specified event types of a given file descriptor + * @internal + * + * This function is invoked by higher RTOS layers implementing select-like + * services. It shall not be called directly by RTDM drivers. + * + * @param[in] ufd User-side file descriptor to bind to + * @param[in,out] selector Selector object that shall be bound to the given + * event + * @param[in] type Event type the caller is interested in + * + * @return 0 on success, otherwise: + * + * - -EADV is returned if the file descriptor @a ufd cannot be resolved. + * + * - -EBADF is returned if the underlying device is being torn down at the time + * of the call. + * + * - -EINVAL is returned if @a type is invalid. 
+ * + * @coretags{task-unrestricted} + */ +int rtdm_fd_select(int ufd, struct xnselector *selector, + unsigned int type) +{ + struct rtdm_fd *fd; + int ret; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) + return PTR_ERR(fd); + + set_compat_bit(fd); + + ret = fd->ops->select(fd, selector, type, ufd); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); + + return ret; +} + +int rtdm_fd_get_setsockaddr_args(struct rtdm_fd *fd, + struct _rtdm_setsockaddr_args *dst, + const void *src) +{ + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + struct compat_rtdm_setsockaddr_args cargs; + int ret; + + if (!rtdm_read_user_ok(fd, src, sizeof(cargs))) + return -EFAULT; + + ret = rtdm_copy_from_user(fd, &cargs, src, sizeof(cargs)); + if (ret) + return ret; + + dst->addr = compat_ptr(cargs.addr); + dst->addrlen = cargs.addrlen; + + return 0; + } +#endif + + if (!rtdm_read_user_ok(fd, src, sizeof(*dst))) + return -EFAULT; + + return rtdm_copy_from_user(fd, dst, src, sizeof(*dst)); +} +EXPORT_SYMBOL_GPL(rtdm_fd_get_setsockaddr_args); + +int rtdm_fd_get_setsockopt_args(struct rtdm_fd *fd, + struct _rtdm_setsockopt_args *dst, + const void *src) +{ + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + struct compat_rtdm_setsockopt_args cargs; + int ret; + + if (!rtdm_read_user_ok(fd, src, sizeof(cargs))) + return -EFAULT; + + ret = rtdm_copy_from_user(fd, &cargs, src, sizeof(cargs)); + if (ret) + return ret; + + dst->optlen = cargs.optlen; + dst->optval = compat_ptr(cargs.optval); + dst->optname = cargs.optname; + dst->level = cargs.level; + + return 0; + } +#endif + + if (!rtdm_read_user_ok(fd, src, sizeof(*dst))) + return -EFAULT; + + return rtdm_copy_from_user(fd, dst, src, sizeof(*dst)); +} +EXPORT_SYMBOL_GPL(rtdm_fd_get_setsockopt_args); + +int rtdm_fd_get_iovec(struct rtdm_fd *fd, struct iovec *iov, + const struct user_msghdr *msg, bool rw) +{ + size_t sz; + +#ifdef CONFIG_XENO_ARCH_SYS3264 + sz = rtdm_fd_is_compat(fd) ? sizeof(struct compat_iovec) : sizeof(*iov); +#else + sz = sizeof(*iov); +#endif + + sz *= msg->msg_iovlen; + + if (!rw && !rtdm_read_user_ok(fd, msg->msg_iov, sz)) + return -EFAULT; + + if (rw && !rtdm_rw_user_ok(fd, msg->msg_iov, sz)) + return -EFAULT; + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) + return sys32_get_iovec( + iov, (struct compat_iovec __user *)msg->msg_iov, + (int)msg->msg_iovlen); +#endif + + return rtdm_copy_from_user(fd, iov, msg->msg_iov, sz); +} +EXPORT_SYMBOL_GPL(rtdm_fd_get_iovec); + +int rtdm_fd_put_iovec(struct rtdm_fd *fd, const struct iovec *iov, + const struct user_msghdr *msg) +{ + size_t sz; + +#ifdef CONFIG_XENO_ARCH_SYS3264 + sz = rtdm_fd_is_compat(fd) ? 
sizeof(struct compat_iovec) : sizeof(*iov); +#else + sz = sizeof(*iov); +#endif + + sz *= msg->msg_iovlen; + + if (!rtdm_rw_user_ok(fd, msg->msg_iov, sz)) + return -EFAULT; + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) + return sys32_put_iovec( + (struct compat_iovec __user *)msg->msg_iov, iov, + (int)msg->msg_iovlen); +#endif + + return rtdm_copy_to_user(fd, msg->msg_iov, iov, sz); +} +EXPORT_SYMBOL_GPL(rtdm_fd_put_iovec); + +static void destroy_fd(void *cookie, struct xnid *id) +{ + struct cobalt_ppd *p = cookie; + struct rtdm_fd_index *idx; + spl_t s; + + idx = container_of(id, struct rtdm_fd_index, id); + xnlock_get_irqsave(&fdtree_lock, s); + __fd_close(p, idx, 0); +} + +void rtdm_fd_cleanup(struct cobalt_ppd *p) +{ + /* + * This is called on behalf of a (userland) task exit handler, + * so we don't have to deal with the regular file descriptors, + * we only have to empty our own index. + */ + xntree_cleanup(&p->fds, p, destroy_fd); +} + +void rtdm_fd_init(void) +{ + sema_init(&rtdm_fd_cleanup_sem, 0); + kthread_run(fd_cleanup_thread, NULL, "rtdm_fd"); +} + +static inline void warn_user(struct file *file, const char *call) +{ + struct dentry *dentry = file->f_path.dentry; + + printk(XENO_WARNING + "%s[%d] called regular %s() on /dev/rtdm/%s\n", + current->comm, task_pid_nr(current), call + 5, dentry->d_name.name); +} + +static ssize_t dumb_read(struct file *file, char __user *buf, + size_t count, loff_t __user *ppos) +{ + warn_user(file, __func__); + return -EINVAL; +} + +static ssize_t dumb_write(struct file *file, const char __user *buf, + size_t count, loff_t __user *ppos) +{ + warn_user(file, __func__); + return -EINVAL; +} + +static unsigned int dumb_poll(struct file *file, poll_table *pt) +{ + warn_user(file, __func__); + return -EINVAL; +} + +static long dumb_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + warn_user(file, __func__); + return -EINVAL; +} + +const struct file_operations rtdm_dumb_fops = { + .read = dumb_read, + .write = dumb_write, + .poll = dumb_poll, + .unlocked_ioctl = dumb_ioctl, +}; diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h new file mode 100644 index 0000000..99488e0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>. + * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _RTDM_INTERNAL_H +#define _RTDM_INTERNAL_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/sem.h> +#include <linux/file.h> +#include <linux/atomic.h> +#include <cobalt/kernel/tree.h> +#include <cobalt/kernel/lock.h> +#include <rtdm/driver.h> + +static inline void __rtdm_get_device(struct rtdm_device *device) +{ + atomic_inc(&device->refcount); +} + +void __rtdm_put_device(struct rtdm_device *device); + +struct rtdm_device *__rtdm_get_namedev(const char *path); + +struct rtdm_device *__rtdm_get_protodev(int protocol_family, + int socket_type); + +void __rtdm_dev_close(struct rtdm_fd *fd); + +int __rtdm_dev_ioctl_core(struct rtdm_fd *fd, + unsigned int request, void __user *arg); + +int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset, + int prot, int flags, void **pptr); + +/* nklock held, irqs off. */ +static inline void rtdm_fd_get_light(struct rtdm_fd *fd) +{ + ++fd->refs; +} + +int rtdm_init(void); + +void rtdm_cleanup(void); + +extern const struct file_operations rtdm_dumb_fops; + +#endif /* _RTDM_INTERNAL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c new file mode 100644 index 0000000..d35bb3b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2013 Hauke Mehrtens <hauke@hauke-m.de> + * Copyright (c) 2013 Hannes Frederic Sowa <hannes@stressinduktion.org> + * Copyright (c) 2014 Luis R. Rodriguez <mcgrof@do-not-panic.com> + * + * Backport functionality introduced in Linux 3.13. + * + * Copyright (c) 2014 Hauke Mehrtens <hauke@hauke-m.de> + * + * Backport functionality introduced in Linux 3.14. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/version.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/hwmon.h> +#include <asm/xenomai/wrappers.h> + +/* + * Same rules as kernel/cobalt/include/asm-generic/xenomai/wrappers.h + * apply to reduce #ifdefery. 
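+ *
+ * In practice this means each backport below is guarded by a
+ * LINUX_VERSION_CODE check and compiles away entirely on kernels
+ * which already provide the wrapped API.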
+ */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) +#ifdef CONFIG_PCI_MSI +int pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +EXPORT_SYMBOL(pci_enable_msix_range); +#endif +#endif /* < 3.14 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) +#ifdef CONFIG_HWMON +struct device* +hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups) +{ + struct device *hwdev; + + hwdev = hwmon_device_register(dev); + hwdev->groups = groups; + dev_set_drvdata(hwdev, drvdata); + return hwdev; +} + +static void devm_hwmon_release(struct device *dev, void *res) +{ + struct device *hwdev = *(struct device **)res; + + hwmon_device_unregister(hwdev); +} + +struct device * +devm_hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups) +{ + struct device **ptr, *hwdev; + + if (!dev) + return ERR_PTR(-EINVAL); + + ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups); + if (IS_ERR(hwdev)) + goto error; + + *ptr = hwdev; + devres_add(dev, ptr); + return hwdev; + +error: + devres_free(ptr); + return hwdev; +} +EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups); +#endif +#endif /* < 3.13 */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c new file mode 100644 index 0000000..e679982 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#include <cobalt/kernel/sched.h> + +static struct xnthread *xnsched_idle_pick(struct xnsched *sched) +{ + return &sched->rootcb; +} + +static bool xnsched_idle_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + return __xnsched_idle_setparam(thread, p); +} + +static void xnsched_idle_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + __xnsched_idle_getparam(thread, p); +} + +static void xnsched_idle_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + __xnsched_idle_trackprio(thread, p); +} + +static void xnsched_idle_protectprio(struct xnthread *thread, int prio) +{ + __xnsched_idle_protectprio(thread, prio); +} + +struct xnsched_class xnsched_class_idle = { + .sched_init = NULL, + .sched_enqueue = NULL, + .sched_dequeue = NULL, + .sched_requeue = NULL, + .sched_tick = NULL, + .sched_rotate = NULL, + .sched_forget = NULL, + .sched_kick = NULL, + .sched_declare = NULL, + .sched_pick = xnsched_idle_pick, + .sched_setparam = xnsched_idle_setparam, + .sched_getparam = xnsched_idle_getparam, + .sched_trackprio = xnsched_idle_trackprio, + .sched_protectprio = xnsched_idle_protectprio, + .weight = XNSCHED_CLASS_WEIGHT(0), + .policy = SCHED_IDLE, + .name = "idle" +}; diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c new file mode 100644 index 0000000..4c3383b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c @@ -0,0 +1,835 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/bitmap.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/arith.h> +#include <cobalt/uapi/sched.h> +#include <trace/events/cobalt-core.h> + +/* + * With this policy, each per-CPU scheduler slot maintains a list of + * active thread groups, picking from the sched_rt runqueue. + * + * Each time a thread is picked from the runqueue, we check whether we + * still have budget for running it, looking at the group it belongs + * to. If so, a timer is armed to elapse when that group has no more + * budget, would the incoming thread run unpreempted until then + * (i.e. xnsched_quota->limit_timer). + * + * Otherwise, if no budget remains in the group for running the + * candidate thread, we move the latter to a local expiry queue + * maintained by the group. This process is done on the fly as we pull + * from the runqueue. + * + * Updating the remaining budget is done each time the Cobalt core + * asks for replacing the current thread with the next runnable one, + * i.e. xnsched_quota_pick(). There we charge the elapsed run time of + * the outgoing thread to the relevant group, and conversely, we check + * whether the incoming thread has budget. 
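+ *
+ * As a rough usage sketch (illustrative only, error handling
+ * omitted; the group management helpers exported at the bottom of
+ * this file must be called from atomic context): capping a group at
+ * 30% of the quota interval with a 50% peak allowance would read:
+ *
+ *	struct xnsched_quota_group *tg = xnmalloc(sizeof(*tg));
+ *	int quota_sum;
+ *
+ *	xnsched_quota_create_group(tg, sched, &quota_sum);
+ *	xnsched_quota_set_limit(tg, 30, 50, &quota_sum);
+ *
+ * Threads then join the group by passing tg->tgid in their
+ * SCHED_QUOTA scheduling parameters.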
+ *
+ * Finally, a per-CPU timer (xnsched_quota->refill_timer) periodically
+ * ticks in the background, in accordance with the defined quota
+ * interval. Thread group budgets get replenished by its handler in
+ * accordance with their respective share, pushing all expired threads
+ * back to the run queue in the same move.
+ *
+ * NOTE: since the core logic enforcing the budget entirely happens in
+ * xnsched_quota_pick(), applying a budget change can be done as
+ * simply as forcing the rescheduling procedure to be invoked asap. As
+ * a result of this, the Cobalt core will ask for the next thread to
+ * run, which means calling xnsched_quota_pick() eventually.
+ *
+ * CAUTION: xnsched_quota_group->nr_active does count both the threads
+ * from that group linked to the sched_rt runqueue, _and_ the threads
+ * moved to the local expiry queue. As a matter of fact, the expired
+ * threads - those for which we consumed all the per-group budget -
+ * are still seen as runnable (i.e. not blocked/suspended) by the
+ * Cobalt core. This only means that the SCHED_QUOTA policy won't pick
+ * them until the corresponding budget is replenished.
+ */
+static DECLARE_BITMAP(group_map, CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS);
+
+static inline int group_is_active(struct xnsched_quota_group *tg)
+{
+	struct xnthread *curr = tg->sched->curr;
+
+	if (tg->nr_active)
+		return 1;
+
+	/*
+	 * Check whether the current thread belongs to the group, and
+	 * is still in running state (XNREADY denotes a thread linked
+	 * to the runqueue, in which case tg->nr_active already
+	 * accounts for it).
+	 */
+	if (curr->quota == tg &&
+	    xnthread_test_state(curr, XNREADY|XNTHREAD_BLOCK_BITS) == 0)
+		return 1;
+
+	return 0;
+}
+
+static inline void replenish_budget(struct xnsched_quota *qs,
+				    struct xnsched_quota_group *tg)
+{
+	xnticks_t budget_ns, credit_ns;
+
+	if (tg->quota_ns == tg->quota_peak_ns) {
+		/*
+		 * Fast path: we don't accumulate runtime credit.
+		 * This includes groups with no runtime limit
+		 * (i.e. quota off: quota >= period && quota == peak).
+		 */
+		tg->run_budget_ns = tg->quota_ns;
+		return;
+	}
+
+	/*
+	 * We have to deal with runtime credit accumulation, as the
+	 * group may consume more than its base quota during a single
+	 * interval, up to a peak duration though (not to monopolize
+	 * the CPU).
+	 *
+	 * - In the simplest case, a group is allotted a new full
+	 * budget plus the unconsumed portion of the previous budget,
+	 * provided the sum does not exceed the peak quota.
+	 *
+	 * - When there is too much budget for a single interval
+	 * (i.e. above peak quota), we spread the extra time over
+	 * multiple intervals through a credit accumulation mechanism.
+	 *
+	 * - The accumulated credit is dropped whenever a group has no
+	 * runnable threads.
+	 */
+	if (!group_is_active(tg)) {
+		/* Drop accumulated credit. */
+		tg->run_credit_ns = 0;
+		tg->run_budget_ns = tg->quota_ns;
+		return;
+	}
+
+	budget_ns = tg->run_budget_ns + tg->quota_ns;
+	if (budget_ns > tg->quota_peak_ns) {
+		/* Too much budget, spread it over intervals. */
+		tg->run_credit_ns += budget_ns - tg->quota_peak_ns;
+		tg->run_budget_ns = tg->quota_peak_ns;
+	} else if (tg->run_credit_ns) {
+		credit_ns = tg->quota_peak_ns - budget_ns;
+		/* Consume the accumulated credit. */
+		if (tg->run_credit_ns >= credit_ns)
+			tg->run_credit_ns -= credit_ns;
+		else {
+			credit_ns = tg->run_credit_ns;
+			tg->run_credit_ns = 0;
+		}
+		/* Allot extended budget, limited to peak quota.
*/ + tg->run_budget_ns = budget_ns + credit_ns; + } else + /* No credit, budget was below peak quota. */ + tg->run_budget_ns = budget_ns; +} + +static void quota_refill_handler(struct xntimer *timer) +{ + struct xnsched_quota_group *tg; + struct xnthread *thread, *tmp; + struct xnsched_quota *qs; + struct xnsched *sched; + + qs = container_of(timer, struct xnsched_quota, refill_timer); + XENO_BUG_ON(COBALT, list_empty(&qs->groups)); + sched = container_of(qs, struct xnsched, quota); + + trace_cobalt_schedquota_refill(0); + + list_for_each_entry(tg, &qs->groups, next) { + /* Allot a new runtime budget for the group. */ + replenish_budget(qs, tg); + + if (tg->run_budget_ns == 0 || list_empty(&tg->expired)) + continue; + /* + * For each group living on this CPU, move all expired + * threads back to the runqueue. Since those threads + * were moved out of the runqueue as we were + * considering them for execution, we push them back + * in LIFO order to their respective priority group. + * The expiry queue is FIFO to keep ordering right + * among expired threads. + */ + list_for_each_entry_safe_reverse(thread, tmp, &tg->expired, quota_expired) { + list_del_init(&thread->quota_expired); + xnsched_addq(&sched->rt.runnable, thread); + } + } + + xnsched_set_self_resched(timer->sched); +} + +static void quota_limit_handler(struct xntimer *timer) +{ + struct xnsched *sched; + + sched = container_of(timer, struct xnsched, quota.limit_timer); + /* + * Force a rescheduling on the return path of the current + * interrupt, so that the budget is re-evaluated for the + * current group in xnsched_quota_pick(). + */ + xnsched_set_self_resched(sched); +} + +static int quota_sum_all(struct xnsched_quota *qs) +{ + struct xnsched_quota_group *tg; + int sum; + + if (list_empty(&qs->groups)) + return 0; + + sum = 0; + list_for_each_entry(tg, &qs->groups, next) + sum += tg->quota_percent; + + return sum; +} + +static void xnsched_quota_init(struct xnsched *sched) +{ + char limiter_name[XNOBJECT_NAME_LEN], refiller_name[XNOBJECT_NAME_LEN]; + struct xnsched_quota *qs = &sched->quota; + + qs->period_ns = CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD * 1000ULL; + INIT_LIST_HEAD(&qs->groups); + +#ifdef CONFIG_SMP + ksformat(refiller_name, sizeof(refiller_name), + "[quota-refill/%u]", sched->cpu); + ksformat(limiter_name, sizeof(limiter_name), + "[quota-limit/%u]", sched->cpu); +#else + strcpy(refiller_name, "[quota-refill]"); + strcpy(limiter_name, "[quota-limit]"); +#endif + xntimer_init(&qs->refill_timer, + &nkclock, quota_refill_handler, sched, + XNTIMER_IGRAVITY); + xntimer_set_name(&qs->refill_timer, refiller_name); + + xntimer_init(&qs->limit_timer, + &nkclock, quota_limit_handler, sched, + XNTIMER_IGRAVITY); + xntimer_set_name(&qs->limit_timer, limiter_name); +} + +static bool xnsched_quota_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + struct xnsched_quota_group *tg; + struct xnsched_quota *qs; + bool effective; + + xnthread_clear_state(thread, XNWEAK); + effective = xnsched_set_effective_priority(thread, p->quota.prio); + + qs = &thread->sched->quota; + list_for_each_entry(tg, &qs->groups, next) { + if (tg->tgid != p->quota.tgid) + continue; + if (thread->quota) { + /* Dequeued earlier by our caller. 
*/ + list_del(&thread->quota_next); + thread->quota->nr_threads--; + } + + trace_cobalt_schedquota_add_thread(tg, thread); + + thread->quota = tg; + list_add(&thread->quota_next, &tg->members); + tg->nr_threads++; + return effective; + } + + XENO_BUG(COBALT); + + return false; +} + +static void xnsched_quota_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + p->quota.prio = thread->cprio; + p->quota.tgid = thread->quota->tgid; +} + +static void xnsched_quota_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (p) { + /* We should not cross groups during PI boost. */ + XENO_WARN_ON(COBALT, + thread->base_class == &xnsched_class_quota && + thread->quota->tgid != p->quota.tgid); + thread->cprio = p->quota.prio; + } else + thread->cprio = thread->bprio; +} + +static void xnsched_quota_protectprio(struct xnthread *thread, int prio) +{ + if (prio > XNSCHED_QUOTA_MAX_PRIO) + prio = XNSCHED_QUOTA_MAX_PRIO; + + thread->cprio = prio; +} + +static int xnsched_quota_chkparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + struct xnsched_quota_group *tg; + struct xnsched_quota *qs; + int tgid; + + if (p->quota.prio < XNSCHED_QUOTA_MIN_PRIO || + p->quota.prio > XNSCHED_QUOTA_MAX_PRIO) + return -EINVAL; + + tgid = p->quota.tgid; + if (tgid < 0 || tgid >= CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS) + return -EINVAL; + + /* + * The group must be managed on the same CPU the thread + * currently runs on. + */ + qs = &thread->sched->quota; + list_for_each_entry(tg, &qs->groups, next) { + if (tg->tgid == tgid) + return 0; + } + + /* + * If that group exists nevertheless, we give userland a + * specific error code. + */ + if (test_bit(tgid, group_map)) + return -EPERM; + + return -EINVAL; +} + +static void xnsched_quota_forget(struct xnthread *thread) +{ + trace_cobalt_schedquota_remove_thread(thread->quota, thread); + + thread->quota->nr_threads--; + XENO_BUG_ON(COBALT, thread->quota->nr_threads < 0); + list_del(&thread->quota_next); + thread->quota = NULL; +} + +static void xnsched_quota_kick(struct xnthread *thread) +{ + struct xnsched_quota_group *tg = thread->quota; + struct xnsched *sched = thread->sched; + + /* + * Allow a kicked thread to be elected for running until it + * relaxes, even if the group it belongs to lacks runtime + * budget. 
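+	 * Otherwise, a kicked thread sitting on the expiry queue of
+	 * a depleted group could never run again to carry out the
+	 * requested relax.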
+ */ + if (tg->run_budget_ns == 0 && !list_empty(&thread->quota_expired)) { + list_del_init(&thread->quota_expired); + xnsched_addq_tail(&sched->rt.runnable, thread); + } +} + +static inline int thread_is_runnable(struct xnthread *thread) +{ + return thread->quota->run_budget_ns > 0 || + xnthread_test_info(thread, XNKICKED); +} + +static void xnsched_quota_enqueue(struct xnthread *thread) +{ + struct xnsched_quota_group *tg = thread->quota; + struct xnsched *sched = thread->sched; + + if (!thread_is_runnable(thread)) + list_add_tail(&thread->quota_expired, &tg->expired); + else + xnsched_addq_tail(&sched->rt.runnable, thread); + + tg->nr_active++; +} + +static void xnsched_quota_dequeue(struct xnthread *thread) +{ + struct xnsched_quota_group *tg = thread->quota; + struct xnsched *sched = thread->sched; + + if (!list_empty(&thread->quota_expired)) + list_del_init(&thread->quota_expired); + else + xnsched_delq(&sched->rt.runnable, thread); + + tg->nr_active--; +} + +static void xnsched_quota_requeue(struct xnthread *thread) +{ + struct xnsched_quota_group *tg = thread->quota; + struct xnsched *sched = thread->sched; + + if (!thread_is_runnable(thread)) + list_add(&thread->quota_expired, &tg->expired); + else + xnsched_addq(&sched->rt.runnable, thread); + + tg->nr_active++; +} + +static struct xnthread *xnsched_quota_pick(struct xnsched *sched) +{ + struct xnthread *next, *curr = sched->curr; + struct xnsched_quota *qs = &sched->quota; + struct xnsched_quota_group *otg, *tg; + xnticks_t now, elapsed; + int ret; + + now = xnclock_read_monotonic(&nkclock); + otg = curr->quota; + if (otg == NULL) + goto pick; + /* + * Charge the time consumed by the outgoing thread to the + * group it belongs to. + */ + elapsed = now - otg->run_start_ns; + if (elapsed < otg->run_budget_ns) + otg->run_budget_ns -= elapsed; + else + otg->run_budget_ns = 0; +pick: + next = xnsched_getq(&sched->rt.runnable); + if (next == NULL) { + xntimer_stop(&qs->limit_timer); + return NULL; + } + + /* + * As we basically piggyback on the SCHED_FIFO runqueue, make + * sure to detect non-quota threads. + */ + tg = next->quota; + if (tg == NULL) + return next; + + tg->run_start_ns = now; + + /* + * Don't consider budget if kicked, we have to allow this + * thread to run until it eventually relaxes. + */ + if (xnthread_test_info(next, XNKICKED)) { + xntimer_stop(&qs->limit_timer); + goto out; + } + + if (tg->run_budget_ns == 0) { + /* Flush expired group members as we go. */ + list_add_tail(&next->quota_expired, &tg->expired); + goto pick; + } + + if (otg == tg && xntimer_running_p(&qs->limit_timer)) + /* Same group, leave the running timer untouched. */ + goto out; + + /* Arm limit timer for the new running group. */ + ret = xntimer_start(&qs->limit_timer, now + tg->run_budget_ns, + XN_INFINITE, XN_ABSOLUTE); + if (ret) { + /* Budget exhausted: deactivate this group. */ + tg->run_budget_ns = 0; + list_add_tail(&next->quota_expired, &tg->expired); + goto pick; + } +out: + tg->nr_active--; + + return next; +} + +static void xnsched_quota_migrate(struct xnthread *thread, struct xnsched *sched) +{ + union xnsched_policy_param param; + /* + * Runtime quota groups are defined per-CPU, so leaving the + * current CPU means exiting the group. We do this by moving + * the target thread to the plain RT class. 
+ */ + param.rt.prio = thread->cprio; + __xnthread_set_schedparam(thread, &xnsched_class_rt, ¶m); +} + +/** + * @ingroup cobalt_core_sched + * @defgroup sched_quota SCHED_QUOTA scheduling policy + * + * The SCHED_QUOTA policy enforces a limitation on the CPU consumption + * of threads over a globally defined period, known as the quota + * interval. This is done by pooling threads with common requirements + * in groups, and giving each group a share of the global period + * (CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD). + * + * When threads have entirely consumed the quota allotted to the group + * they belong to, the latter is suspended as a whole, until the next + * quota interval starts. At this point, a new runtime budget is + * given to each group, in accordance with its share. + * + *@{ + */ +int xnsched_quota_create_group(struct xnsched_quota_group *tg, + struct xnsched *sched, + int *quota_sum_r) +{ + int tgid, nr_groups = CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS; + struct xnsched_quota *qs = &sched->quota; + + atomic_only(); + + tgid = find_first_zero_bit(group_map, nr_groups); + if (tgid >= nr_groups) + return -ENOSPC; + + __set_bit(tgid, group_map); + tg->tgid = tgid; + tg->sched = sched; + tg->run_budget_ns = qs->period_ns; + tg->run_credit_ns = 0; + tg->quota_percent = 100; + tg->quota_peak_percent = 100; + tg->quota_ns = qs->period_ns; + tg->quota_peak_ns = qs->period_ns; + tg->nr_active = 0; + tg->nr_threads = 0; + INIT_LIST_HEAD(&tg->members); + INIT_LIST_HEAD(&tg->expired); + + trace_cobalt_schedquota_create_group(tg); + + if (list_empty(&qs->groups)) + xntimer_start(&qs->refill_timer, + qs->period_ns, qs->period_ns, XN_RELATIVE); + + list_add(&tg->next, &qs->groups); + *quota_sum_r = quota_sum_all(qs); + + return 0; +} +EXPORT_SYMBOL_GPL(xnsched_quota_create_group); + +int xnsched_quota_destroy_group(struct xnsched_quota_group *tg, + int force, int *quota_sum_r) +{ + struct xnsched_quota *qs = &tg->sched->quota; + union xnsched_policy_param param; + struct xnthread *thread, *tmp; + + atomic_only(); + + if (!list_empty(&tg->members)) { + if (!force) + return -EBUSY; + /* Move group members to the rt class. */ + list_for_each_entry_safe(thread, tmp, &tg->members, quota_next) { + param.rt.prio = thread->cprio; + __xnthread_set_schedparam(thread, &xnsched_class_rt, ¶m); + } + } + + trace_cobalt_schedquota_destroy_group(tg); + + list_del(&tg->next); + __clear_bit(tg->tgid, group_map); + + if (list_empty(&qs->groups)) + xntimer_stop(&qs->refill_timer); + + if (quota_sum_r) + *quota_sum_r = quota_sum_all(qs); + + return 0; +} +EXPORT_SYMBOL_GPL(xnsched_quota_destroy_group); + +void xnsched_quota_set_limit(struct xnsched_quota_group *tg, + int quota_percent, int quota_peak_percent, + int *quota_sum_r) +{ + struct xnsched *sched = tg->sched; + struct xnsched_quota *qs = &sched->quota; + xnticks_t old_quota_ns = tg->quota_ns; + struct xnthread *thread, *tmp, *curr; + xnticks_t now, elapsed, consumed; + + atomic_only(); + + trace_cobalt_schedquota_set_limit(tg, quota_percent, + quota_peak_percent); + + if (quota_percent < 0 || quota_percent > 100) { /* Quota off. 
*/ + quota_percent = 100; + tg->quota_ns = qs->period_ns; + } else + tg->quota_ns = xnarch_div64(qs->period_ns * quota_percent, 100); + + if (quota_peak_percent < quota_percent) + quota_peak_percent = quota_percent; + + if (quota_peak_percent < 0 || quota_peak_percent > 100) { + quota_peak_percent = 100; + tg->quota_peak_ns = qs->period_ns; + } else + tg->quota_peak_ns = xnarch_div64(qs->period_ns * quota_peak_percent, 100); + + tg->quota_percent = quota_percent; + tg->quota_peak_percent = quota_peak_percent; + + curr = sched->curr; + if (curr->quota == tg && + xnthread_test_state(curr, XNREADY|XNTHREAD_BLOCK_BITS) == 0) { + now = xnclock_read_monotonic(&nkclock); + + elapsed = now - tg->run_start_ns; + if (elapsed < tg->run_budget_ns) + tg->run_budget_ns -= elapsed; + else + tg->run_budget_ns = 0; + + tg->run_start_ns = now; + + xntimer_stop(&qs->limit_timer); + } + + if (tg->run_budget_ns <= old_quota_ns) + consumed = old_quota_ns - tg->run_budget_ns; + else + consumed = 0; + if (tg->quota_ns >= consumed) + tg->run_budget_ns = tg->quota_ns - consumed; + else + tg->run_budget_ns = 0; + + tg->run_credit_ns = 0; /* Drop accumulated credit. */ + + *quota_sum_r = quota_sum_all(qs); + + if (tg->run_budget_ns > 0) { + list_for_each_entry_safe_reverse(thread, tmp, &tg->expired, + quota_expired) { + list_del_init(&thread->quota_expired); + xnsched_addq(&sched->rt.runnable, thread); + } + } + + /* + * Apply the new budget immediately, in case a member of this + * group is currently running. + */ + xnsched_set_resched(sched); + xnsched_run(); +} +EXPORT_SYMBOL_GPL(xnsched_quota_set_limit); + +struct xnsched_quota_group * +xnsched_quota_find_group(struct xnsched *sched, int tgid) +{ + struct xnsched_quota_group *tg; + + atomic_only(); + + if (list_empty(&sched->quota.groups)) + return NULL; + + list_for_each_entry(tg, &sched->quota.groups, next) { + if (tg->tgid == tgid) + return tg; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(xnsched_quota_find_group); + +int xnsched_quota_sum_all(struct xnsched *sched) +{ + struct xnsched_quota *qs = &sched->quota; + + atomic_only(); + + return quota_sum_all(qs); +} +EXPORT_SYMBOL_GPL(xnsched_quota_sum_all); + +/** @} */ + +#ifdef CONFIG_XENO_OPT_VFILE + +struct xnvfile_directory sched_quota_vfroot; + +struct vfile_sched_quota_priv { + struct xnthread *curr; +}; + +struct vfile_sched_quota_data { + int cpu; + pid_t pid; + int prio; + int tgid; + xnticks_t budget; + char name[XNOBJECT_NAME_LEN]; +}; + +static struct xnvfile_snapshot_ops vfile_sched_quota_ops; + +static struct xnvfile_snapshot vfile_sched_quota = { + .privsz = sizeof(struct vfile_sched_quota_priv), + .datasz = sizeof(struct vfile_sched_quota_data), + .tag = &nkthreadlist_tag, + .ops = &vfile_sched_quota_ops, +}; + +static int vfile_sched_quota_rewind(struct xnvfile_snapshot_iterator *it) +{ + struct vfile_sched_quota_priv *priv = xnvfile_iterator_priv(it); + int nrthreads = xnsched_class_quota.nthreads; + + if (nrthreads == 0) + return -ESRCH; + + priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink); + + return nrthreads; +} + +static int vfile_sched_quota_next(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_sched_quota_priv *priv = xnvfile_iterator_priv(it); + struct vfile_sched_quota_data *p = data; + struct xnthread *thread; + + if (priv->curr == NULL) + return 0; /* All done. 
*/ + + thread = priv->curr; + if (list_is_last(&thread->glink, &nkthreadq)) + priv->curr = NULL; + else + priv->curr = list_next_entry(thread, glink); + + if (thread->base_class != &xnsched_class_quota) + return VFILE_SEQ_SKIP; + + p->cpu = xnsched_cpu(thread->sched); + p->pid = xnthread_host_pid(thread); + memcpy(p->name, thread->name, sizeof(p->name)); + p->tgid = thread->quota->tgid; + p->prio = thread->cprio; + p->budget = thread->quota->run_budget_ns; + + return 1; +} + +static int vfile_sched_quota_show(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_sched_quota_data *p = data; + char buf[16]; + + if (p == NULL) + xnvfile_printf(it, "%-3s %-6s %-4s %-4s %-10s %s\n", + "CPU", "PID", "TGID", "PRI", "BUDGET", "NAME"); + else { + xntimer_format_time(p->budget, buf, sizeof(buf)); + xnvfile_printf(it, "%3u %-6d %-4d %-4d %-10s %s\n", + p->cpu, + p->pid, + p->tgid, + p->prio, + buf, + p->name); + } + + return 0; +} + +static struct xnvfile_snapshot_ops vfile_sched_quota_ops = { + .rewind = vfile_sched_quota_rewind, + .next = vfile_sched_quota_next, + .show = vfile_sched_quota_show, +}; + +static int xnsched_quota_init_vfile(struct xnsched_class *schedclass, + struct xnvfile_directory *vfroot) +{ + int ret; + + ret = xnvfile_init_dir(schedclass->name, &sched_quota_vfroot, vfroot); + if (ret) + return ret; + + return xnvfile_init_snapshot("threads", &vfile_sched_quota, + &sched_quota_vfroot); +} + +static void xnsched_quota_cleanup_vfile(struct xnsched_class *schedclass) +{ + xnvfile_destroy_snapshot(&vfile_sched_quota); + xnvfile_destroy_dir(&sched_quota_vfroot); +} + +#endif /* CONFIG_XENO_OPT_VFILE */ + +struct xnsched_class xnsched_class_quota = { + .sched_init = xnsched_quota_init, + .sched_enqueue = xnsched_quota_enqueue, + .sched_dequeue = xnsched_quota_dequeue, + .sched_requeue = xnsched_quota_requeue, + .sched_pick = xnsched_quota_pick, + .sched_tick = NULL, + .sched_rotate = NULL, + .sched_migrate = xnsched_quota_migrate, + .sched_chkparam = xnsched_quota_chkparam, + .sched_setparam = xnsched_quota_setparam, + .sched_getparam = xnsched_quota_getparam, + .sched_trackprio = xnsched_quota_trackprio, + .sched_protectprio = xnsched_quota_protectprio, + .sched_forget = xnsched_quota_forget, + .sched_kick = xnsched_quota_kick, +#ifdef CONFIG_XENO_OPT_VFILE + .sched_init_vfile = xnsched_quota_init_vfile, + .sched_cleanup_vfile = xnsched_quota_cleanup_vfile, +#endif + .weight = XNSCHED_CLASS_WEIGHT(3), + .policy = SCHED_QUOTA, + .name = "quota" +}; +EXPORT_SYMBOL_GPL(xnsched_class_quota); diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c new file mode 100644 index 0000000..2457032 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c @@ -0,0 +1,257 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <cobalt/kernel/sched.h> + +static void xnsched_rt_init(struct xnsched *sched) +{ + xnsched_initq(&sched->rt.runnable); +} + +static void xnsched_rt_requeue(struct xnthread *thread) +{ + /* + * Put back at same place: i.e. requeue to head of current + * priority group (i.e. LIFO, used for preemption handling). + */ + __xnsched_rt_requeue(thread); +} + +static void xnsched_rt_enqueue(struct xnthread *thread) +{ + /* + * Enqueue for next pick: i.e. move to end of current priority + * group (i.e. FIFO). + */ + __xnsched_rt_enqueue(thread); +} + +static void xnsched_rt_dequeue(struct xnthread *thread) +{ + /* + * Pull from the runnable thread queue. + */ + __xnsched_rt_dequeue(thread); +} + +static void xnsched_rt_rotate(struct xnsched *sched, + const union xnsched_policy_param *p) +{ + struct xnthread *thread, *curr; + + if (xnsched_emptyq_p(&sched->rt.runnable)) + return; /* No runnable thread in this class. */ + + curr = sched->curr; + + if (p->rt.prio == XNSCHED_RUNPRIO) + thread = curr; + else { + thread = xnsched_findq(&sched->rt.runnable, p->rt.prio); + if (thread == NULL) + return; + } + + /* + * In case we picked the current thread, we have to make sure + * not to move it back to the run queue if it was blocked + * before we were called. The same goes if the current thread + * holds the scheduler lock. + */ + if (thread != curr || + (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS) && + curr->lock_count == 0)) + xnsched_putback(thread); +} + +void xnsched_rt_tick(struct xnsched *sched) +{ + /* + * The round-robin time credit is only consumed by a running + * thread that neither holds the scheduler lock nor was + * blocked before entering this callback. As the time slice is + * exhausted for the running thread, move it back to the + * run queue at the end of its priority group. 
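+	 * In effect, the CPU is then handed over to the next thread
+	 * (if any) ready at the same priority level, which yields the
+	 * classical round-robin behavior.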
+ */ + xnsched_putback(sched->curr); +} + +static bool xnsched_rt_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + return __xnsched_rt_setparam(thread, p); +} + +static void xnsched_rt_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + __xnsched_rt_getparam(thread, p); +} + +static void xnsched_rt_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + __xnsched_rt_trackprio(thread, p); +} + +static void xnsched_rt_protectprio(struct xnthread *thread, int prio) +{ + __xnsched_rt_protectprio(thread, prio); +} + +#ifdef CONFIG_XENO_OPT_VFILE + +struct xnvfile_directory sched_rt_vfroot; + +struct vfile_sched_rt_priv { + struct xnthread *curr; +}; + +struct vfile_sched_rt_data { + int cpu; + pid_t pid; + char name[XNOBJECT_NAME_LEN]; + xnticks_t period; + int cprio; +}; + +static struct xnvfile_snapshot_ops vfile_sched_rt_ops; + +static struct xnvfile_snapshot vfile_sched_rt = { + .privsz = sizeof(struct vfile_sched_rt_priv), + .datasz = sizeof(struct vfile_sched_rt_data), + .tag = &nkthreadlist_tag, + .ops = &vfile_sched_rt_ops, +}; + +static int vfile_sched_rt_rewind(struct xnvfile_snapshot_iterator *it) +{ + struct vfile_sched_rt_priv *priv = xnvfile_iterator_priv(it); + int nrthreads = xnsched_class_rt.nthreads; + + if (nrthreads == 0) + return -ESRCH; + + priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink); + + return nrthreads; +} + +static int vfile_sched_rt_next(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_sched_rt_priv *priv = xnvfile_iterator_priv(it); + struct vfile_sched_rt_data *p = data; + struct xnthread *thread; + + if (priv->curr == NULL) + return 0; /* All done. */ + + thread = priv->curr; + if (list_is_last(&thread->glink, &nkthreadq)) + priv->curr = NULL; + else + priv->curr = list_next_entry(thread, glink); + + if (thread->base_class != &xnsched_class_rt || + xnthread_test_state(thread, XNWEAK)) + return VFILE_SEQ_SKIP; + + p->cpu = xnsched_cpu(thread->sched); + p->pid = xnthread_host_pid(thread); + memcpy(p->name, thread->name, sizeof(p->name)); + p->cprio = thread->cprio; + p->period = xnthread_get_period(thread); + + return 1; +} + +static int vfile_sched_rt_show(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_sched_rt_data *p = data; + char pribuf[16], ptbuf[16]; + + if (p == NULL) + xnvfile_printf(it, "%-3s %-6s %-8s %-10s %s\n", + "CPU", "PID", "PRI", "PERIOD", "NAME"); + else { + ksformat(pribuf, sizeof(pribuf), "%3d", p->cprio); + xntimer_format_time(p->period, ptbuf, sizeof(ptbuf)); + xnvfile_printf(it, "%3u %-6d %-8s %-10s %s\n", + p->cpu, + p->pid, + pribuf, + ptbuf, + p->name); + } + + return 0; +} + +static struct xnvfile_snapshot_ops vfile_sched_rt_ops = { + .rewind = vfile_sched_rt_rewind, + .next = vfile_sched_rt_next, + .show = vfile_sched_rt_show, +}; + +static int xnsched_rt_init_vfile(struct xnsched_class *schedclass, + struct xnvfile_directory *vfroot) +{ + int ret; + + ret = xnvfile_init_dir(schedclass->name, &sched_rt_vfroot, vfroot); + if (ret) + return ret; + + return xnvfile_init_snapshot("threads", &vfile_sched_rt, + &sched_rt_vfroot); +} + +static void xnsched_rt_cleanup_vfile(struct xnsched_class *schedclass) +{ + xnvfile_destroy_snapshot(&vfile_sched_rt); + xnvfile_destroy_dir(&sched_rt_vfroot); +} + +#endif /* CONFIG_XENO_OPT_VFILE */ + +struct xnsched_class xnsched_class_rt = { + .sched_init = xnsched_rt_init, + .sched_enqueue = xnsched_rt_enqueue, + .sched_dequeue = xnsched_rt_dequeue, + .sched_requeue = 
xnsched_rt_requeue,
+	.sched_pick = xnsched_rt_pick,
+	.sched_tick = xnsched_rt_tick,
+	.sched_rotate = xnsched_rt_rotate,
+	.sched_forget = NULL,
+	.sched_kick = NULL,
+	.sched_declare = NULL,
+	.sched_setparam = xnsched_rt_setparam,
+	.sched_trackprio = xnsched_rt_trackprio,
+	.sched_protectprio = xnsched_rt_protectprio,
+	.sched_getparam = xnsched_rt_getparam,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile = xnsched_rt_init_vfile,
+	.sched_cleanup_vfile = xnsched_rt_cleanup_vfile,
+#endif
+	.weight = XNSCHED_CLASS_WEIGHT(4),
+	.policy = SCHED_FIFO,
+	.name = "rt"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_rt);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c
new file mode 100644
index 0000000..77f1d00
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/uapi/sched.h>
+
+#define MAX_REPLENISH CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL
+
+static void sporadic_post_recharge(struct xnthread *thread, xnticks_t budget);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+
+static inline void sporadic_note_late_drop(struct xnsched *sched)
+{
+	/*
+	 * This code should pull the brake when a misconfigured
+	 * sporadic thread is late on its drop date more than a
+	 * hundred times in a row. This normally reveals a time budget
+	 * which is too tight.
+	 */
+	XENO_BUG_ON(COBALT, ++sched->pss.drop_retries > 100);
+}
+
+static inline void sporadic_note_valid_drop(struct xnsched *sched)
+{
+	sched->pss.drop_retries = 0;
+}
+
+#else /* !CONFIG_XENO_OPT_DEBUG_COBALT */
+
+static inline void sporadic_note_late_drop(struct xnsched *sched)
+{
+}
+
+static inline void sporadic_note_valid_drop(struct xnsched *sched)
+{
+}
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_COBALT */
+
+static inline xnticks_t sporadic_diff_time(xnticks_t start, xnticks_t end)
+{
+	xnsticks_t d = (xnsticks_t)(end - start);
+	return unlikely(d < 0) ? -d : d;
+}
+
+static void sporadic_drop_handler(struct xntimer *timer)
+{
+	struct xnsched_sporadic_data *pss;
+	union xnsched_policy_param p;
+	struct xnthread *thread;
+
+	/*
+	 * XXX: this code will work properly regardless of
+	 * primary/secondary mode issues.
+	 */
+	pss = container_of(timer, struct xnsched_sporadic_data, drop_timer);
+	thread = pss->thread;
+
+	sporadic_post_recharge(thread, pss->budget);
+
+	if (pss->budget == 0 && thread->cprio > pss->param.low_prio) {
+		if (pss->param.low_prio < 0)
+			/*
+			 * Special case: low_prio == -1, we want the
+			 * thread to suspend until a replenishment
+			 * happens.
+ */ + xnthread_suspend(thread, XNHELD, + XN_INFINITE, XN_RELATIVE, NULL); + else { + p.pss.init_budget = 0; + p.pss.current_prio = pss->param.low_prio; + /* Move sporadic thread to the background. */ + __xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p); + } + } +} + +static void sporadic_schedule_drop(struct xnthread *thread) +{ + xnticks_t now = xnclock_read_monotonic(&nkclock); + struct xnsched_sporadic_data *pss = thread->pss; + int ret; + + pss->resume_date = now; + /* + * Assuming this timer should not fire that often unless the + * monitored thread behaves badly, we don't pin it on the CPU + * the thread is running, trading cycles at firing time + * against cycles when arming the timer. + */ + xntimer_set_affinity(&pss->drop_timer, thread->sched); + ret = xntimer_start(&pss->drop_timer, now + pss->budget, + XN_INFINITE, XN_ABSOLUTE); + if (ret == -ETIMEDOUT) { + sporadic_note_late_drop(thread->sched); + sporadic_drop_handler(&pss->drop_timer); + } else + sporadic_note_valid_drop(thread->sched); +} + +static void sporadic_replenish_handler(struct xntimer *timer) +{ + struct xnsched_sporadic_data *pss; + union xnsched_policy_param p; + struct xnthread *thread; + xnticks_t now; + int r, ret; + + pss = container_of(timer, struct xnsched_sporadic_data, repl_timer); + thread = pss->thread; + XENO_BUG_ON(COBALT, pss->repl_pending <= 0); + +retry: + now = xnclock_read_monotonic(&nkclock); + + do { + r = pss->repl_out; + if ((xnsticks_t)(now - pss->repl_data[r].date) <= 0) + break; + pss->budget += pss->repl_data[r].amount; + if (pss->budget > pss->param.init_budget) + pss->budget = pss->param.init_budget; + pss->repl_out = (r + 1) % MAX_REPLENISH; + } while(--pss->repl_pending > 0); + + if (pss->repl_pending > 0) { + xntimer_set_affinity(&pss->repl_timer, thread->sched); + ret = xntimer_start(&pss->repl_timer, pss->repl_data[r].date, + XN_INFINITE, XN_ABSOLUTE); + if (ret == -ETIMEDOUT) + goto retry; /* This plugs a tiny race. */ + } + + if (pss->budget == 0) + return; + + if (xnthread_test_state(thread, XNHELD)) + xnthread_resume(thread, XNHELD); + else if (thread->cprio < pss->param.normal_prio) { + p.pss.init_budget = 0; + p.pss.current_prio = pss->param.normal_prio; + /* Move sporadic thread to the foreground. */ + __xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p); + } + + /* + * XXX: we have to reset the drop timer in case we preempted + * the thread which just got a budget increase. + */ + if (thread->sched->curr == thread) + sporadic_schedule_drop(thread); +} + +static void sporadic_post_recharge(struct xnthread *thread, xnticks_t budget) +{ + struct xnsched_sporadic_data *pss = thread->pss; + int r, ret; + + if (pss->repl_pending >= pss->param.max_repl) + return; + + if (budget > pss->budget) { + budget = pss->budget; + pss->budget = 0; + } else + pss->budget -= budget; + + r = pss->repl_in; + pss->repl_data[r].date = pss->resume_date + pss->param.repl_period; + pss->repl_data[r].amount = budget; + pss->repl_in = (r + 1) % MAX_REPLENISH; + + if (pss->repl_pending++ == 0) { + xntimer_set_affinity(&pss->repl_timer, thread->sched); + ret = xntimer_start(&pss->repl_timer, pss->repl_data[r].date, + XN_INFINITE, XN_ABSOLUTE); + /* + * The following case should not happen unless the + * initial budget value is inappropriate, but let's + * handle it anyway. 
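+		 * Getting -ETIMEDOUT from xntimer_start() means the
+		 * computed replenishment date is already in the past,
+		 * in which case we simply run the handler inline
+		 * instead of arming the timer.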
+		 */
+		if (ret == -ETIMEDOUT)
+			sporadic_replenish_handler(&pss->repl_timer);
+	}
+}
+
+static void sporadic_suspend_activity(struct xnthread *thread)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	xnticks_t budget, now;
+
+	if (pss->budget > 0) {
+		xntimer_stop(&pss->drop_timer);
+		now = xnclock_read_monotonic(&nkclock);
+		budget = sporadic_diff_time(now, pss->resume_date);
+		sporadic_post_recharge(thread, budget);
+	}
+}
+
+static inline void sporadic_resume_activity(struct xnthread *thread)
+{
+	if (thread->pss->budget > 0)
+		sporadic_schedule_drop(thread);
+}
+
+static void xnsched_sporadic_init(struct xnsched *sched)
+{
+	/*
+	 * We literally stack the sporadic scheduler on top of the RT
+	 * one, reusing its run queue directly. This way, RT and
+	 * sporadic threads are merged into the same runqueue and thus
+	 * share the same priority scale, with the addition of budget
+	 * management for the sporadic ones.
+	 */
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	sched->pss.drop_retries = 0;
+#endif
+}
+
+static bool xnsched_sporadic_setparam(struct xnthread *thread,
+				      const union xnsched_policy_param *p)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	bool effective;
+
+	xnthread_clear_state(thread, XNWEAK);
+	effective = xnsched_set_effective_priority(thread, p->pss.current_prio);
+
+	/*
+	 * We use the budget information to determine whether we got
+	 * here from one of our internal calls to
+	 * xnthread_set_schedparam(), in which case we don't want to
+	 * update the scheduling parameters, but only set the
+	 * effective priority.
+	 */
+	if (p->pss.init_budget > 0) {
+		pss->param = p->pss;
+		pss->budget = p->pss.init_budget;
+		pss->repl_in = 0;
+		pss->repl_out = 0;
+		pss->repl_pending = 0;
+		if (effective && thread == thread->sched->curr) {
+			xntimer_stop(&pss->drop_timer);
+			sporadic_schedule_drop(thread);
+		}
+	}
+
+	return effective;
+}
+
+static void xnsched_sporadic_getparam(struct xnthread *thread,
+				      union xnsched_policy_param *p)
+{
+	p->pss = thread->pss->param;
+	p->pss.current_prio = thread->cprio;
+}
+
+static void xnsched_sporadic_trackprio(struct xnthread *thread,
+				       const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->pss.current_prio;
+	else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_sporadic_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_SPORADIC_MAX_PRIO)
+		prio = XNSCHED_SPORADIC_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_sporadic_chkparam(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	if (p->pss.low_prio != -1 &&
+	    (p->pss.low_prio < XNSCHED_SPORADIC_MIN_PRIO ||
+	     p->pss.low_prio > XNSCHED_SPORADIC_MAX_PRIO))
+		return -EINVAL;
+
+	if (p->pss.normal_prio < XNSCHED_SPORADIC_MIN_PRIO ||
+	    p->pss.normal_prio > XNSCHED_SPORADIC_MAX_PRIO)
+		return -EINVAL;
+
+	if (p->pss.init_budget == 0)
+		return -EINVAL;
+
+	if (p->pss.current_prio != p->pss.normal_prio)
+		return -EINVAL;
+
+	if (p->pss.repl_period < p->pss.init_budget)
+		return -EINVAL;
+
+	if (p->pss.normal_prio <= p->pss.low_prio)
+		return -EINVAL;
+
+	if (p->pss.max_repl < 1 || p->pss.max_repl > MAX_REPLENISH)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int xnsched_sporadic_declare(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	struct xnsched_sporadic_data *pss;
+
+	pss = xnmalloc(sizeof(*pss));
+	if (pss == NULL)
+		return -ENOMEM;
+
+	xntimer_init(&pss->repl_timer, &nkclock, sporadic_replenish_handler,
+		     thread->sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&pss->repl_timer, "pss-replenish");
+
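+	/*
+	 * The drop timer polices the budget while the thread runs:
+	 * sporadic_schedule_drop() arms it whenever a sporadic
+	 * thread resumes execution with some budget left.
+	 */
+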
xntimer_init(&pss->drop_timer, &nkclock, sporadic_drop_handler, + thread->sched, XNTIMER_IGRAVITY); + xntimer_set_name(&pss->drop_timer, "pss-drop"); + + thread->pss = pss; + pss->thread = thread; + + return 0; +} + +static void xnsched_sporadic_forget(struct xnthread *thread) +{ + struct xnsched_sporadic_data *pss = thread->pss; + + xntimer_destroy(&pss->repl_timer); + xntimer_destroy(&pss->drop_timer); + xnfree(pss); + thread->pss = NULL; +} + +static void xnsched_sporadic_enqueue(struct xnthread *thread) +{ + __xnsched_rt_enqueue(thread); +} + +static void xnsched_sporadic_dequeue(struct xnthread *thread) +{ + __xnsched_rt_dequeue(thread); +} + +static void xnsched_sporadic_requeue(struct xnthread *thread) +{ + __xnsched_rt_requeue(thread); +} + +static struct xnthread *xnsched_sporadic_pick(struct xnsched *sched) +{ + struct xnthread *curr = sched->curr, *next; + + next = xnsched_getq(&sched->rt.runnable); + if (next == NULL) + goto swap; + + if (curr == next) + return next; + + /* Arm the drop timer for an incoming sporadic thread. */ + if (next->pss) + sporadic_resume_activity(next); +swap: + /* + * A non-sporadic outgoing thread is having a priority + * inheritance boost, so apply an infinite time budget as we + * want it to release the claimed resource asap. Otherwise, + * clear the drop timer, then schedule a replenishment + * operation. + */ + if (curr->pss) + sporadic_suspend_activity(curr); + + return next; +} + +#ifdef CONFIG_XENO_OPT_VFILE + +struct xnvfile_directory sched_sporadic_vfroot; + +struct vfile_sched_sporadic_priv { + int nrthreads; + struct xnthread *curr; +}; + +struct vfile_sched_sporadic_data { + int cpu; + pid_t pid; + char name[XNOBJECT_NAME_LEN]; + int current_prio; + int low_prio; + int normal_prio; + xnticks_t period; + xnticks_t timeout; + xnticks_t budget; +}; + +static struct xnvfile_snapshot_ops vfile_sched_sporadic_ops; + +static struct xnvfile_snapshot vfile_sched_sporadic = { + .privsz = sizeof(struct vfile_sched_sporadic_priv), + .datasz = sizeof(struct vfile_sched_sporadic_data), + .tag = &nkthreadlist_tag, + .ops = &vfile_sched_sporadic_ops, +}; + +static int vfile_sched_sporadic_rewind(struct xnvfile_snapshot_iterator *it) +{ + struct vfile_sched_sporadic_priv *priv = xnvfile_iterator_priv(it); + int nrthreads = xnsched_class_sporadic.nthreads; + + if (nrthreads == 0) + return -ESRCH; + + priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink); + + return nrthreads; +} + +static int vfile_sched_sporadic_next(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_sched_sporadic_priv *priv = xnvfile_iterator_priv(it); + struct vfile_sched_sporadic_data *p = data; + struct xnthread *thread; + + if (priv->curr == NULL) + return 0; /* All done. 
*/ + + thread = priv->curr; + if (list_is_last(&thread->glink, &nkthreadq)) + priv->curr = NULL; + else + priv->curr = list_next_entry(thread, glink); + + if (thread->base_class != &xnsched_class_sporadic) + return VFILE_SEQ_SKIP; + + p->cpu = xnsched_cpu(thread->sched); + p->pid = xnthread_host_pid(thread); + memcpy(p->name, thread->name, sizeof(p->name)); + p->current_prio = thread->cprio; + p->low_prio = thread->pss->param.low_prio; + p->normal_prio = thread->pss->param.normal_prio; + p->period = xnthread_get_period(thread); + p->budget = thread->pss->param.init_budget; + + return 1; +} + +static int vfile_sched_sporadic_show(struct xnvfile_snapshot_iterator *it, + void *data) +{ + char lpbuf[16], npbuf[16], ptbuf[16], btbuf[16]; + struct vfile_sched_sporadic_data *p = data; + + if (p == NULL) + xnvfile_printf(it, + "%-3s %-6s %-4s %-4s %-10s %-10s %s\n", + "CPU", "PID", "LPRI", "NPRI", "BUDGET", + "PERIOD", "NAME"); + else { + ksformat(lpbuf, sizeof(lpbuf), "%3d%c", + p->low_prio, p->current_prio == p->low_prio ? '*' : ' '); + + ksformat(npbuf, sizeof(npbuf), "%3d%c", + p->normal_prio, p->current_prio == p->normal_prio ? '*' : ' '); + + xntimer_format_time(p->period, ptbuf, sizeof(ptbuf)); + xntimer_format_time(p->budget, btbuf, sizeof(btbuf)); + + xnvfile_printf(it, + "%3u %-6d %-4s %-4s %-10s %-10s %s\n", + p->cpu, + p->pid, + lpbuf, + npbuf, + btbuf, + ptbuf, + p->name); + } + + return 0; +} + +static struct xnvfile_snapshot_ops vfile_sched_sporadic_ops = { + .rewind = vfile_sched_sporadic_rewind, + .next = vfile_sched_sporadic_next, + .show = vfile_sched_sporadic_show, +}; + +static int xnsched_sporadic_init_vfile(struct xnsched_class *schedclass, + struct xnvfile_directory *vfroot) +{ + int ret; + + ret = xnvfile_init_dir(schedclass->name, + &sched_sporadic_vfroot, vfroot); + if (ret) + return ret; + + return xnvfile_init_snapshot("threads", &vfile_sched_sporadic, + &sched_sporadic_vfroot); +} + +static void xnsched_sporadic_cleanup_vfile(struct xnsched_class *schedclass) +{ + xnvfile_destroy_snapshot(&vfile_sched_sporadic); + xnvfile_destroy_dir(&sched_sporadic_vfroot); +} + +#endif /* CONFIG_XENO_OPT_VFILE */ + +struct xnsched_class xnsched_class_sporadic = { + .sched_init = xnsched_sporadic_init, + .sched_enqueue = xnsched_sporadic_enqueue, + .sched_dequeue = xnsched_sporadic_dequeue, + .sched_requeue = xnsched_sporadic_requeue, + .sched_pick = xnsched_sporadic_pick, + .sched_tick = NULL, + .sched_rotate = NULL, + .sched_migrate = NULL, + .sched_chkparam = xnsched_sporadic_chkparam, + .sched_setparam = xnsched_sporadic_setparam, + .sched_getparam = xnsched_sporadic_getparam, + .sched_trackprio = xnsched_sporadic_trackprio, + .sched_protectprio = xnsched_sporadic_protectprio, + .sched_declare = xnsched_sporadic_declare, + .sched_forget = xnsched_sporadic_forget, + .sched_kick = NULL, +#ifdef CONFIG_XENO_OPT_VFILE + .sched_init_vfile = xnsched_sporadic_init_vfile, + .sched_cleanup_vfile = xnsched_sporadic_cleanup_vfile, +#endif + .weight = XNSCHED_CLASS_WEIGHT(3), + .policy = SCHED_SPORADIC, + .name = "pss" +}; +EXPORT_SYMBOL_GPL(xnsched_class_sporadic); diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c new file mode 100644 index 0000000..ccff374 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c @@ -0,0 +1,464 @@ +/* + * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>. 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/uapi/sched.h> + +static void tp_schedule_next(struct xnsched_tp *tp) +{ + struct xnsched_tp_window *w; + struct xnsched *sched; + int p_next, ret; + xnticks_t t; + + for (;;) { + /* + * Switch to the next partition. Time holes in a + * global time frame are defined as partition windows + * assigned to part# -1, in which case the (always + * empty) idle queue will be polled for runnable + * threads. Therefore, we may assume that a window + * begins immediately after the previous one ends, + * which simplifies the implementation a lot. + */ + w = &tp->gps->pwins[tp->wnext]; + p_next = w->w_part; + tp->tps = p_next < 0 ? &tp->idle : &tp->partitions[p_next]; + + /* Schedule tick to advance to the next window. */ + tp->wnext = (tp->wnext + 1) % tp->gps->pwin_nr; + w = &tp->gps->pwins[tp->wnext]; + t = tp->tf_start + w->w_offset; + + ret = xntimer_start(&tp->tf_timer, t, XN_INFINITE, XN_ABSOLUTE); + if (ret != -ETIMEDOUT) + break; + /* + * We are late, make sure to remain within the bounds + * of a valid time frame before advancing to the next + * window. Otherwise, fix up by advancing to the next + * time frame immediately. + */ + for (;;) { + t = tp->tf_start + tp->gps->tf_duration; + if (xnclock_read_monotonic(&nkclock) > t) { + tp->tf_start = t; + tp->wnext = 0; + } else + break; + } + } + + sched = container_of(tp, struct xnsched, tp); + xnsched_set_resched(sched); +} + +static void tp_tick_handler(struct xntimer *timer) +{ + struct xnsched_tp *tp = container_of(timer, struct xnsched_tp, tf_timer); + /* + * Advance beginning date of time frame by a full period if we + * are processing the last window. 
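+	 * For instance, with a 1 ms time frame, tf_start moves
+	 * forward by 1 ms each time the schedule wraps around to its
+	 * first window, so that the w_offset values of the windows
+	 * keep referring to the start of the current frame.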
+	 */
+	if (tp->wnext + 1 == tp->gps->pwin_nr)
+		tp->tf_start += tp->gps->tf_duration;
+
+	tp_schedule_next(tp);
+}
+
+static void xnsched_tp_init(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+	char timer_name[XNOBJECT_NAME_LEN];
+	int n;
+
+	for (n = 0; n < CONFIG_XENO_OPT_SCHED_TP_NRPART; n++)
+		xnsched_initq(&tp->partitions[n].runnable);
+
+	xnsched_initq(&tp->idle.runnable);
+
+#ifdef CONFIG_SMP
+	ksformat(timer_name, sizeof(timer_name), "[tp-tick/%u]", sched->cpu);
+#else
+	strcpy(timer_name, "[tp-tick]");
+#endif
+	tp->tps = NULL;
+	tp->gps = NULL;
+	INIT_LIST_HEAD(&tp->threads);
+	xntimer_init(&tp->tf_timer, &nkclock, tp_tick_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&tp->tf_timer, timer_name);
+}
+
+static bool xnsched_tp_setparam(struct xnthread *thread,
+				const union xnsched_policy_param *p)
+{
+	struct xnsched *sched = thread->sched;
+
+	thread->tps = &sched->tp.partitions[p->tp.ptid];
+	xnthread_clear_state(thread, XNWEAK);
+
+	return xnsched_set_effective_priority(thread, p->tp.prio);
+}
+
+static void xnsched_tp_getparam(struct xnthread *thread,
+				union xnsched_policy_param *p)
+{
+	p->tp.prio = thread->cprio;
+	p->tp.ptid = thread->tps - thread->sched->tp.partitions;
+}
+
+static void xnsched_tp_trackprio(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	/*
+	 * The assigned partition never changes internally due to PI
+	 * (see xnsched_track_policy), since this would be pretty
+	 * wrong with respect to TP scheduling: i.e. we may not allow
+	 * a thread from another partition to consume CPU time from
+	 * the current one, even though this would help enforce PI
+	 * (see note). In any case, introducing resource contention
+	 * between threads that belong to different partitions is
+	 * utterly wrong in the first place. Only an explicit call to
+	 * xnsched_set_policy() may change the partition assigned to a
+	 * thread. For that reason, a policy reset action only boils
+	 * down to reinstating the base priority.
+	 *
+	 * NOTE: we do allow threads from lower scheduling classes to
+	 * consume CPU time from the current window as a result of a
+	 * PI boost, since this is aimed at speeding up the release of
+	 * a synchronization object a TP thread needs.
+	 */
+	if (p) {
+		/* We should never cross partition boundaries.
*/ + XENO_WARN_ON(COBALT, + thread->base_class == &xnsched_class_tp && + thread->tps - thread->sched->tp.partitions != p->tp.ptid); + thread->cprio = p->tp.prio; + } else + thread->cprio = thread->bprio; +} + +static void xnsched_tp_protectprio(struct xnthread *thread, int prio) +{ + if (prio > XNSCHED_TP_MAX_PRIO) + prio = XNSCHED_TP_MAX_PRIO; + + thread->cprio = prio; +} + +static int xnsched_tp_chkparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + struct xnsched_tp *tp = &thread->sched->tp; + + if (p->tp.ptid < 0 || + p->tp.ptid >= CONFIG_XENO_OPT_SCHED_TP_NRPART) + return -EINVAL; + + if (tp->gps == NULL || + p->tp.prio < XNSCHED_TP_MIN_PRIO || + p->tp.prio > XNSCHED_TP_MAX_PRIO) + return -EINVAL; + + return 0; +} + +static int xnsched_tp_declare(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + struct xnsched *sched = thread->sched; + + list_add_tail(&thread->tp_link, &sched->tp.threads); + + return 0; +} + +static void xnsched_tp_forget(struct xnthread *thread) +{ + list_del(&thread->tp_link); + thread->tps = NULL; +} + +static void xnsched_tp_enqueue(struct xnthread *thread) +{ + xnsched_addq_tail(&thread->tps->runnable, thread); +} + +static void xnsched_tp_dequeue(struct xnthread *thread) +{ + xnsched_delq(&thread->tps->runnable, thread); +} + +static void xnsched_tp_requeue(struct xnthread *thread) +{ + xnsched_addq(&thread->tps->runnable, thread); +} + +static struct xnthread *xnsched_tp_pick(struct xnsched *sched) +{ + /* Never pick a thread if we don't schedule partitions. */ + if (!xntimer_running_p(&sched->tp.tf_timer)) + return NULL; + + return xnsched_getq(&sched->tp.tps->runnable); +} + +static void xnsched_tp_migrate(struct xnthread *thread, struct xnsched *sched) +{ + union xnsched_policy_param param; + /* + * Since our partition schedule is a per-scheduler property, + * it cannot apply to a thread that moves to another CPU + * anymore. So we upgrade that thread to the RT class when a + * CPU migration occurs. A subsequent call to + * __xnthread_set_schedparam() may move it back to TP + * scheduling, with a partition assignment that fits the + * remote CPU's partition schedule. + */ + param.rt.prio = thread->cprio; + __xnthread_set_schedparam(thread, &xnsched_class_rt, ¶m); +} + +void xnsched_tp_start_schedule(struct xnsched *sched) +{ + struct xnsched_tp *tp = &sched->tp; + + if (tp->gps == NULL) + return; + + tp->wnext = 0; + tp->tf_start = xnclock_read_monotonic(&nkclock); + tp_schedule_next(tp); +} +EXPORT_SYMBOL_GPL(xnsched_tp_start_schedule); + +void xnsched_tp_stop_schedule(struct xnsched *sched) +{ + struct xnsched_tp *tp = &sched->tp; + + if (tp->gps) + xntimer_stop(&tp->tf_timer); +} +EXPORT_SYMBOL_GPL(xnsched_tp_stop_schedule); + +struct xnsched_tp_schedule * +xnsched_tp_set_schedule(struct xnsched *sched, + struct xnsched_tp_schedule *gps) +{ + struct xnsched_tp_schedule *old_gps; + struct xnsched_tp *tp = &sched->tp; + union xnsched_policy_param param; + struct xnthread *thread, *tmp; + + XENO_BUG_ON(COBALT, gps != NULL && + (gps->pwin_nr <= 0 || gps->pwins[0].w_offset != 0)); + + xnsched_tp_stop_schedule(sched); + + /* + * Move all TP threads on this scheduler to the RT class, + * until we call __xnthread_set_schedparam() for them again. 
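+	 *
+	 * The incoming schedule is typically built by the caller
+	 * along these lines (hypothetical sketch, assuming the
+	 * flexible pwins[] array layout of struct
+	 * xnsched_tp_schedule; one 500 us window for partition 0
+	 * followed by a 500 us hole, over a 1 ms time frame, all
+	 * durations in nanoseconds):
+	 *
+	 *	gps = xnmalloc(sizeof(*gps) + 2 * sizeof(gps->pwins[0]));
+	 *	gps->pwin_nr = 2;
+	 *	gps->tf_duration = 1000000;
+	 *	gps->pwins[0].w_offset = 0;
+	 *	gps->pwins[0].w_part = 0;
+	 *	gps->pwins[1].w_offset = 500000;
+	 *	gps->pwins[1].w_part = -1;
+	 *	atomic_set(&gps->refcount, 1);
+	 *
+	 * The first window must start at offset 0, as the BUG_ON
+	 * assertion above enforces.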
+ */ + if (list_empty(&tp->threads)) + goto done; + + list_for_each_entry_safe(thread, tmp, &tp->threads, tp_link) { + param.rt.prio = thread->cprio; + __xnthread_set_schedparam(thread, &xnsched_class_rt, ¶m); + } +done: + old_gps = tp->gps; + tp->gps = gps; + + return old_gps; +} +EXPORT_SYMBOL_GPL(xnsched_tp_set_schedule); + +struct xnsched_tp_schedule * +xnsched_tp_get_schedule(struct xnsched *sched) +{ + struct xnsched_tp_schedule *gps; + + gps = sched->tp.gps; + if (gps == NULL) + return NULL; + + atomic_inc(&gps->refcount); + + return gps; +} +EXPORT_SYMBOL_GPL(xnsched_tp_get_schedule); + +void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps) +{ + if (atomic_dec_and_test(&gps->refcount)) + xnfree(gps); +} +EXPORT_SYMBOL_GPL(xnsched_tp_put_schedule); + +int xnsched_tp_get_partition(struct xnsched *sched) +{ + struct xnsched_tp *tp = &sched->tp; + + if (tp->tps == NULL || tp->tps == &tp->idle) + return -1; + + return tp->tps - tp->partitions; +} +EXPORT_SYMBOL_GPL(xnsched_tp_get_partition); + +#ifdef CONFIG_XENO_OPT_VFILE + +struct xnvfile_directory sched_tp_vfroot; + +struct vfile_sched_tp_priv { + struct xnthread *curr; +}; + +struct vfile_sched_tp_data { + int cpu; + pid_t pid; + char name[XNOBJECT_NAME_LEN]; + int prio; + int ptid; +}; + +static struct xnvfile_snapshot_ops vfile_sched_tp_ops; + +static struct xnvfile_snapshot vfile_sched_tp = { + .privsz = sizeof(struct vfile_sched_tp_priv), + .datasz = sizeof(struct vfile_sched_tp_data), + .tag = &nkthreadlist_tag, + .ops = &vfile_sched_tp_ops, +}; + +static int vfile_sched_tp_rewind(struct xnvfile_snapshot_iterator *it) +{ + struct vfile_sched_tp_priv *priv = xnvfile_iterator_priv(it); + int nrthreads = xnsched_class_tp.nthreads; + + if (nrthreads == 0) + return -ESRCH; + + priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink); + + return nrthreads; +} + +static int vfile_sched_tp_next(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_sched_tp_priv *priv = xnvfile_iterator_priv(it); + struct vfile_sched_tp_data *p = data; + struct xnthread *thread; + + if (priv->curr == NULL) + return 0; /* All done. 
*/ + + thread = priv->curr; + if (list_is_last(&thread->glink, &nkthreadq)) + priv->curr = NULL; + else + priv->curr = list_next_entry(thread, glink); + + if (thread->base_class != &xnsched_class_tp) + return VFILE_SEQ_SKIP; + + p->cpu = xnsched_cpu(thread->sched); + p->pid = xnthread_host_pid(thread); + memcpy(p->name, thread->name, sizeof(p->name)); + p->ptid = thread->tps - thread->sched->tp.partitions; + p->prio = thread->cprio; + + return 1; +} + +static int vfile_sched_tp_show(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_sched_tp_data *p = data; + + if (p == NULL) + xnvfile_printf(it, "%-3s %-6s %-4s %-4s %s\n", + "CPU", "PID", "PTID", "PRI", "NAME"); + else + xnvfile_printf(it, "%3u %-6d %-4d %-4d %s\n", + p->cpu, + p->pid, + p->ptid, + p->prio, + p->name); + + return 0; +} + +static struct xnvfile_snapshot_ops vfile_sched_tp_ops = { + .rewind = vfile_sched_tp_rewind, + .next = vfile_sched_tp_next, + .show = vfile_sched_tp_show, +}; + +static int xnsched_tp_init_vfile(struct xnsched_class *schedclass, + struct xnvfile_directory *vfroot) +{ + int ret; + + ret = xnvfile_init_dir(schedclass->name, &sched_tp_vfroot, vfroot); + if (ret) + return ret; + + return xnvfile_init_snapshot("threads", &vfile_sched_tp, + &sched_tp_vfroot); +} + +static void xnsched_tp_cleanup_vfile(struct xnsched_class *schedclass) +{ + xnvfile_destroy_snapshot(&vfile_sched_tp); + xnvfile_destroy_dir(&sched_tp_vfroot); +} + +#endif /* CONFIG_XENO_OPT_VFILE */ + +struct xnsched_class xnsched_class_tp = { + .sched_init = xnsched_tp_init, + .sched_enqueue = xnsched_tp_enqueue, + .sched_dequeue = xnsched_tp_dequeue, + .sched_requeue = xnsched_tp_requeue, + .sched_pick = xnsched_tp_pick, + .sched_tick = NULL, + .sched_rotate = NULL, + .sched_migrate = xnsched_tp_migrate, + .sched_chkparam = xnsched_tp_chkparam, + .sched_setparam = xnsched_tp_setparam, + .sched_getparam = xnsched_tp_getparam, + .sched_trackprio = xnsched_tp_trackprio, + .sched_protectprio = xnsched_tp_protectprio, + .sched_declare = xnsched_tp_declare, + .sched_forget = xnsched_tp_forget, + .sched_kick = NULL, +#ifdef CONFIG_XENO_OPT_VFILE + .sched_init_vfile = xnsched_tp_init_vfile, + .sched_cleanup_vfile = xnsched_tp_cleanup_vfile, +#endif + .weight = XNSCHED_CLASS_WEIGHT(2), + .policy = SCHED_TP, + .name = "tp" +}; +EXPORT_SYMBOL_GPL(xnsched_class_tp); diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c new file mode 100644 index 0000000..dd6a78e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#include <cobalt/kernel/sched.h> +#include <cobalt/uapi/sched.h> + +static void xnsched_weak_init(struct xnsched *sched) +{ + xnsched_initq(&sched->weak.runnable); +} + +static void xnsched_weak_requeue(struct xnthread *thread) +{ + xnsched_addq(&thread->sched->weak.runnable, thread); +} + +static void xnsched_weak_enqueue(struct xnthread *thread) +{ + xnsched_addq_tail(&thread->sched->weak.runnable, thread); +} + +static void xnsched_weak_dequeue(struct xnthread *thread) +{ + xnsched_delq(&thread->sched->weak.runnable, thread); +} + +static struct xnthread *xnsched_weak_pick(struct xnsched *sched) +{ + return xnsched_getq(&sched->weak.runnable); +} + +static bool xnsched_weak_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (!xnthread_test_state(thread, XNBOOST)) + xnthread_set_state(thread, XNWEAK); + + return xnsched_set_effective_priority(thread, p->weak.prio); +} + +static void xnsched_weak_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + p->weak.prio = thread->cprio; +} + +static void xnsched_weak_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (p) + thread->cprio = p->weak.prio; + else + thread->cprio = thread->bprio; +} + +static void xnsched_weak_protectprio(struct xnthread *thread, int prio) +{ + if (prio > XNSCHED_WEAK_MAX_PRIO) + prio = XNSCHED_WEAK_MAX_PRIO; + + thread->cprio = prio; +} + +static int xnsched_weak_chkparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (p->weak.prio < XNSCHED_WEAK_MIN_PRIO || + p->weak.prio > XNSCHED_WEAK_MAX_PRIO) + return -EINVAL; + + return 0; +} + +#ifdef CONFIG_XENO_OPT_VFILE + +struct xnvfile_directory sched_weak_vfroot; + +struct vfile_sched_weak_priv { + struct xnthread *curr; +}; + +struct vfile_sched_weak_data { + int cpu; + pid_t pid; + char name[XNOBJECT_NAME_LEN]; + int cprio; +}; + +static struct xnvfile_snapshot_ops vfile_sched_weak_ops; + +static struct xnvfile_snapshot vfile_sched_weak = { + .privsz = sizeof(struct vfile_sched_weak_priv), + .datasz = sizeof(struct vfile_sched_weak_data), + .tag = &nkthreadlist_tag, + .ops = &vfile_sched_weak_ops, +}; + +static int vfile_sched_weak_rewind(struct xnvfile_snapshot_iterator *it) +{ + struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it); + int nrthreads = xnsched_class_weak.nthreads; + + if (nrthreads == 0) + return -ESRCH; + + priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink); + + return nrthreads; +} + +static int vfile_sched_weak_next(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it); + struct vfile_sched_weak_data *p = data; + struct xnthread *thread; + + if (priv->curr == NULL) + return 0; /* All done. 
*/ + + thread = priv->curr; + if (list_is_last(&thread->glink, &nkthreadq)) + priv->curr = NULL; + else + priv->curr = list_next_entry(thread, glink); + + if (thread->base_class != &xnsched_class_weak) + return VFILE_SEQ_SKIP; + + p->cpu = xnsched_cpu(thread->sched); + p->pid = xnthread_host_pid(thread); + memcpy(p->name, thread->name, sizeof(p->name)); + p->cprio = thread->cprio; + + return 1; +} + +static int vfile_sched_weak_show(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_sched_weak_data *p = data; + char pribuf[16]; + + if (p == NULL) + xnvfile_printf(it, "%-3s %-6s %-4s %s\n", + "CPU", "PID", "PRI", "NAME"); + else { + ksformat(pribuf, sizeof(pribuf), "%3d", p->cprio); + xnvfile_printf(it, "%3u %-6d %-4s %s\n", + p->cpu, + p->pid, + pribuf, + p->name); + } + + return 0; +} + +static struct xnvfile_snapshot_ops vfile_sched_weak_ops = { + .rewind = vfile_sched_weak_rewind, + .next = vfile_sched_weak_next, + .show = vfile_sched_weak_show, +}; + +static int xnsched_weak_init_vfile(struct xnsched_class *schedclass, + struct xnvfile_directory *vfroot) +{ + int ret; + + ret = xnvfile_init_dir(schedclass->name, &sched_weak_vfroot, vfroot); + if (ret) + return ret; + + return xnvfile_init_snapshot("threads", &vfile_sched_weak, + &sched_weak_vfroot); +} + +static void xnsched_weak_cleanup_vfile(struct xnsched_class *schedclass) +{ + xnvfile_destroy_snapshot(&vfile_sched_weak); + xnvfile_destroy_dir(&sched_weak_vfroot); +} + +#endif /* CONFIG_XENO_OPT_VFILE */ + +struct xnsched_class xnsched_class_weak = { + .sched_init = xnsched_weak_init, + .sched_enqueue = xnsched_weak_enqueue, + .sched_dequeue = xnsched_weak_dequeue, + .sched_requeue = xnsched_weak_requeue, + .sched_pick = xnsched_weak_pick, + .sched_tick = NULL, + .sched_rotate = NULL, + .sched_forget = NULL, + .sched_kick = NULL, + .sched_chkparam = xnsched_weak_chkparam, + .sched_setparam = xnsched_weak_setparam, + .sched_trackprio = xnsched_weak_trackprio, + .sched_protectprio = xnsched_weak_protectprio, + .sched_getparam = xnsched_weak_getparam, +#ifdef CONFIG_XENO_OPT_VFILE + .sched_init_vfile = xnsched_weak_init_vfile, + .sched_cleanup_vfile = xnsched_weak_cleanup_vfile, +#endif + .weight = XNSCHED_CLASS_WEIGHT(1), + .policy = SCHED_WEAK, + .name = "weak" +}; +EXPORT_SYMBOL_GPL(xnsched_class_weak); diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched.c new file mode 100644 index 0000000..aa65fd7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched.c @@ -0,0 +1,1493 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#include <linux/module.h> +#include <linux/signal.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/kernel/timer.h> +#include <cobalt/kernel/intr.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/arith.h> +#include <cobalt/uapi/signal.h> +#include <pipeline/sched.h> +#define CREATE_TRACE_POINTS +#include <trace/events/cobalt-core.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_sched Thread scheduling control + * @{ + */ + +DEFINE_PER_CPU(struct xnsched, nksched); +EXPORT_PER_CPU_SYMBOL_GPL(nksched); + +cpumask_t cobalt_cpu_affinity = CPU_MASK_ALL; +EXPORT_SYMBOL_GPL(cobalt_cpu_affinity); + +LIST_HEAD(nkthreadq); + +int cobalt_nrthreads; + +#ifdef CONFIG_XENO_OPT_VFILE +struct xnvfile_rev_tag nkthreadlist_tag; +#endif + +static struct xnsched_class *xnsched_class_highest; + +#define for_each_xnsched_class(p) \ + for (p = xnsched_class_highest; p; p = p->next) + +static void xnsched_register_class(struct xnsched_class *sched_class) +{ + sched_class->next = xnsched_class_highest; + xnsched_class_highest = sched_class; + + /* + * Classes shall be registered by increasing priority order, + * idle first and up. + */ + XENO_BUG_ON(COBALT, sched_class->next && + sched_class->next->weight > sched_class->weight); + + printk(XENO_INFO "scheduling class %s registered.\n", sched_class->name); +} + +void xnsched_register_classes(void) +{ + xnsched_register_class(&xnsched_class_idle); +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + xnsched_register_class(&xnsched_class_weak); +#endif +#ifdef CONFIG_XENO_OPT_SCHED_TP + xnsched_register_class(&xnsched_class_tp); +#endif +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + xnsched_register_class(&xnsched_class_sporadic); +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + xnsched_register_class(&xnsched_class_quota); +#endif + xnsched_register_class(&xnsched_class_rt); +} + +#ifdef CONFIG_XENO_OPT_WATCHDOG + +static unsigned long wd_timeout_arg = CONFIG_XENO_OPT_WATCHDOG_TIMEOUT; +module_param_named(watchdog_timeout, wd_timeout_arg, ulong, 0644); + +static inline xnticks_t get_watchdog_timeout(void) +{ + return wd_timeout_arg * 1000000000ULL; +} + +/** + * @internal + * @fn void watchdog_handler(struct xntimer *timer) + * @brief Process watchdog ticks. + * + * This internal routine handles incoming watchdog triggers to detect + * software lockups. It forces the offending thread to stop + * monopolizing the CPU, either by kicking it out of primary mode if + * running in user space, or cancelling it if kernel-based. + * + * @coretags{coreirq-only, atomic-entry} + */ +static void watchdog_handler(struct xntimer *timer) +{ + struct xnsched *sched = xnsched_current(); + struct xnthread *curr = sched->curr; + + /* + * CAUTION: The watchdog tick might have been delayed while we + * were busy switching the CPU to secondary mode, so the root + * thread may already be resuming by the time this handler + * runs. Make sure that we are not about to kick the incoming + * root thread.
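+ * + * (Side note, hedged: get_watchdog_timeout() above converts the + * watchdog_timeout module parameter from seconds to nanoseconds, + * so a runaway thread is caught after that many seconds of + * uninterrupted primary-mode execution.)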
*/ + if (xnthread_test_state(curr, XNROOT)) + return; + + trace_cobalt_watchdog_signal(curr); + + if (xnthread_test_state(curr, XNUSER)) { + printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread " + "'%s' signaled\n", xnsched_cpu(sched), curr->name); + xnthread_call_mayday(curr, SIGDEBUG_WATCHDOG); + } else { + printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread " + "'%s' canceled\n", xnsched_cpu(sched), curr->name); + /* + * On behalf of an IRQ handler, xnthread_cancel() + * would only go halfway through cancelling the + * preempted thread. Therefore we manually raise + * XNKICKED to cause the next call to + * xnthread_suspend() to return early in XNBREAK + * condition, and XNCANCELD so that the thread exits + * next time it invokes xnthread_test_cancel(). + */ + xnthread_set_info(curr, XNKICKED|XNCANCELD); + } +} + +#endif /* CONFIG_XENO_OPT_WATCHDOG */ + +static void roundrobin_handler(struct xntimer *timer) +{ + struct xnsched *sched = container_of(timer, struct xnsched, rrbtimer); + xnsched_tick(sched); +} + +static void xnsched_init(struct xnsched *sched, int cpu) +{ + char rrbtimer_name[XNOBJECT_NAME_LEN]; + char htimer_name[XNOBJECT_NAME_LEN]; + char root_name[XNOBJECT_NAME_LEN]; + union xnsched_policy_param param; + struct xnthread_init_attr attr; + struct xnsched_class *p; + +#ifdef CONFIG_SMP + sched->cpu = cpu; + ksformat(htimer_name, sizeof(htimer_name), "[host-timer/%u]", cpu); + ksformat(rrbtimer_name, sizeof(rrbtimer_name), "[rrb-timer/%u]", cpu); + ksformat(root_name, sizeof(root_name), "ROOT/%u", cpu); + cpumask_clear(&sched->resched); +#else + strcpy(htimer_name, "[host-timer]"); + strcpy(rrbtimer_name, "[rrb-timer]"); + strcpy(root_name, "ROOT"); +#endif + for_each_xnsched_class(p) { + if (p->sched_init) + p->sched_init(sched); + } + + sched->status = 0; + sched->lflags = XNIDLE; + sched->inesting = 0; + sched->curr = &sched->rootcb; + + attr.flags = XNROOT | XNFPU; + attr.name = root_name; + attr.personality = &xenomai_personality; + attr.affinity = *cpumask_of(cpu); + param.idle.prio = XNSCHED_IDLE_PRIO; + + __xnthread_init(&sched->rootcb, &attr, + sched, &xnsched_class_idle, &param); + + /* + * No direct handler here since the host timer processing is + * postponed to xnintr_irq_handler(), as part of the interrupt + * exit code.
+ */ + xntimer_init(&sched->htimer, &nkclock, NULL, + sched, XNTIMER_IGRAVITY); + xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO); + xntimer_set_name(&sched->htimer, htimer_name); + xntimer_init(&sched->rrbtimer, &nkclock, roundrobin_handler, + sched, XNTIMER_IGRAVITY); + xntimer_set_name(&sched->rrbtimer, rrbtimer_name); + xntimer_set_priority(&sched->rrbtimer, XNTIMER_LOPRIO); + + xnstat_exectime_set_current(sched, &sched->rootcb.stat.account); +#ifdef CONFIG_XENO_ARCH_FPU + sched->fpuholder = &sched->rootcb; +#endif /* CONFIG_XENO_ARCH_FPU */ + + pipeline_init_root_tcb(&sched->rootcb); + list_add_tail(&sched->rootcb.glink, &nkthreadq); + cobalt_nrthreads++; + +#ifdef CONFIG_XENO_OPT_WATCHDOG + xntimer_init(&sched->wdtimer, &nkclock, watchdog_handler, + sched, XNTIMER_IGRAVITY); + xntimer_set_name(&sched->wdtimer, "[watchdog]"); + xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO); +#endif /* CONFIG_XENO_OPT_WATCHDOG */ +} + +void xnsched_init_all(void) +{ + struct xnsched *sched; + int cpu; + + for_each_online_cpu(cpu) { + sched = &per_cpu(nksched, cpu); + xnsched_init(sched, cpu); + } + + pipeline_request_resched_ipi(__xnsched_run_handler); +} + +static void xnsched_destroy(struct xnsched *sched) +{ + xntimer_destroy(&sched->htimer); + xntimer_destroy(&sched->rrbtimer); + xntimer_destroy(&sched->rootcb.ptimer); + xntimer_destroy(&sched->rootcb.rtimer); +#ifdef CONFIG_XENO_OPT_WATCHDOG + xntimer_destroy(&sched->wdtimer); +#endif /* CONFIG_XENO_OPT_WATCHDOG */ +} + +void xnsched_destroy_all(void) +{ + struct xnthread *thread, *tmp; + struct xnsched *sched; + int cpu; + spl_t s; + + pipeline_free_resched_ipi(); + + xnlock_get_irqsave(&nklock, s); + + /* NOTE: &nkthreadq can't be empty (root thread(s)). */ + list_for_each_entry_safe(thread, tmp, &nkthreadq, glink) { + if (!xnthread_test_state(thread, XNROOT)) + xnthread_cancel(thread); + } + + xnsched_run(); + + for_each_online_cpu(cpu) { + sched = xnsched_struct(cpu); + xnsched_destroy(sched); + } + + xnlock_put_irqrestore(&nklock, s); +} + +static inline void set_thread_running(struct xnsched *sched, + struct xnthread *thread) +{ + xnthread_clear_state(thread, XNREADY); + if (xnthread_test_state(thread, XNRRB)) + xntimer_start(&sched->rrbtimer, + thread->rrperiod, XN_INFINITE, XN_RELATIVE); + else + xntimer_stop(&sched->rrbtimer); +} + +/* Must be called with nklock locked, interrupts off. */ +struct xnthread *xnsched_pick_next(struct xnsched *sched) +{ + struct xnsched_class *p __maybe_unused; + struct xnthread *curr = sched->curr; + struct xnthread *thread; + + if (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS | XNZOMBIE)) { + /* + * Do not preempt the current thread if it holds the + * scheduler lock. + */ + if (curr->lock_count > 0) { + xnsched_set_self_resched(sched); + return curr; + } + /* + * Push the current thread back to the run queue of + * the scheduling class it belongs to, if not yet + * linked to it (XNREADY tells us if it is). + */ + if (!xnthread_test_state(curr, XNREADY)) { + xnsched_requeue(curr); + xnthread_set_state(curr, XNREADY); + } + } + + /* + * Find the runnable thread having the highest priority among + * all scheduling classes, scanned by decreasing priority. + */ +#ifdef CONFIG_XENO_OPT_SCHED_CLASSES + for_each_xnsched_class(p) { + thread = p->sched_pick(sched); + if (thread) { + set_thread_running(sched, thread); + return thread; + } + } + + return NULL; /* Never executed because of the idle class. 
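The idle class comes last in the scan and always supplies the root thread as a fallback pick, so this path cannot be reached.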
*/ +#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */ + thread = xnsched_rt_pick(sched); + if (unlikely(thread == NULL)) + thread = &sched->rootcb; + + set_thread_running(sched, thread); + + return thread; +#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */ +} + +void xnsched_lock(void) +{ + struct xnsched *sched = xnsched_current(); + /* See comments in xnsched_run(), ___xnsched_run(). */ + struct xnthread *curr = READ_ONCE(sched->curr); + + if (sched->lflags & XNINIRQ) + return; + + /* + * CAUTION: The fast xnthread_current() accessor carries the + * relevant lock nesting count only if current runs in primary + * mode. Otherwise, if the caller is unknown or relaxed + * Xenomai-wise, then we fall back to the root thread on the + * current scheduler, which must be done with IRQs off. + * Either way, we don't need to grab the super lock. + */ + XENO_WARN_ON_ONCE(COBALT, (curr->state & XNROOT) && + !hard_irqs_disabled()); + + curr->lock_count++; +} +EXPORT_SYMBOL_GPL(xnsched_lock); + +void xnsched_unlock(void) +{ + struct xnsched *sched = xnsched_current(); + struct xnthread *curr = READ_ONCE(sched->curr); + + XENO_WARN_ON_ONCE(COBALT, (curr->state & XNROOT) && + !hard_irqs_disabled()); + + if (sched->lflags & XNINIRQ) + return; + + if (!XENO_ASSERT(COBALT, curr->lock_count > 0)) + return; + + if (--curr->lock_count == 0) { + xnthread_clear_localinfo(curr, XNLBALERT); + xnsched_run(); + } +} +EXPORT_SYMBOL_GPL(xnsched_unlock); + +/* nklock locked, interrupts off. */ +void xnsched_putback(struct xnthread *thread) +{ + if (xnthread_test_state(thread, XNREADY)) + xnsched_dequeue(thread); + else + xnthread_set_state(thread, XNREADY); + + xnsched_enqueue(thread); + xnsched_set_resched(thread->sched); +} + +/* nklock locked, interrupts off. */ +int xnsched_set_policy(struct xnthread *thread, + struct xnsched_class *sched_class, + const union xnsched_policy_param *p) +{ + struct xnsched_class *orig_effective_class __maybe_unused; + bool effective; + int ret; + + ret = xnsched_chkparam(sched_class, thread, p); + if (ret) + return ret; + + /* + * Declaring a thread to a new scheduling class may fail, so + * we do that early, while the thread is still a member of the + * previous class. However, this also means that the + * declaration callback shall not do anything that might + * affect the previous class (such as touching thread->rlink + * for instance). + */ + if (sched_class != thread->base_class) { + ret = xnsched_declare(sched_class, thread, p); + if (ret) + return ret; + } + + /* + * As a special case, we may be called from __xnthread_init() + * with no previous scheduling class at all. + */ + if (likely(thread->base_class != NULL)) { + if (xnthread_test_state(thread, XNREADY)) + xnsched_dequeue(thread); + + if (sched_class != thread->base_class) + xnsched_forget(thread); + } + + /* + * Set the base and effective scheduling parameters. However, + * xnsched_setparam() will deny lowering the effective + * priority if a boost is undergoing, only recording the + * change into the base priority field in such situation. + */ + thread->base_class = sched_class; + /* + * Referring to the effective class from a setparam() handler + * is wrong: make sure to break if so. + */ + if (XENO_DEBUG(COBALT)) { + orig_effective_class = thread->sched_class; + thread->sched_class = NULL; + } + + /* + * This is the ONLY place where calling xnsched_setparam() is + * legit, sane and safe. 
+ */ + effective = xnsched_setparam(thread, p); + if (effective) { + thread->sched_class = sched_class; + thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio); + } else if (XENO_DEBUG(COBALT)) + thread->sched_class = orig_effective_class; + + if (xnthread_test_state(thread, XNREADY)) + xnsched_enqueue(thread); + + /* + * Make sure not to raise XNSCHED when setting up the root + * thread, so that we can't start rescheduling on interrupt + * exit before all CPUs have their runqueue fully + * built. Filtering on XNROOT here is correct because the root + * thread enters the idle class once as part of the runqueue + * setup process and never leaves it afterwards. + */ + if (!xnthread_test_state(thread, XNDORMANT|XNROOT)) + xnsched_set_resched(thread->sched); + + return 0; +} +EXPORT_SYMBOL_GPL(xnsched_set_policy); + +/* nklock locked, interrupts off. */ +bool xnsched_set_effective_priority(struct xnthread *thread, int prio) +{ + int wprio = xnsched_calc_wprio(thread->base_class, prio); + + thread->bprio = prio; + if (wprio == thread->wprio) + return true; + + /* + * We may not lower the effective/current priority of a + * boosted thread when changing the base scheduling + * parameters. Only xnsched_track_policy() and + * xnsched_protect_priority() may do so when dealing with PI + * and PP synchs, respectively. + */ + if (wprio < thread->wprio && xnthread_test_state(thread, XNBOOST)) + return false; + + thread->cprio = prio; + + trace_cobalt_thread_set_current_prio(thread); + + return true; +} + +/* nklock locked, interrupts off. */ +void xnsched_track_policy(struct xnthread *thread, + struct xnthread *target) +{ + union xnsched_policy_param param; + + /* + * Inherit (or reset) the effective scheduling class and + * priority of a thread. Unlike xnsched_set_policy(), this + * routine is allowed to lower the weighted priority with no + * restriction, even if a boost is undergoing. + */ + if (xnthread_test_state(thread, XNREADY)) + xnsched_dequeue(thread); + /* + * Self-targeting means to reset the scheduling policy and + * parameters to the base settings. Otherwise, make thread + * inherit the scheduling parameters from target. + */ + if (target == thread) { + thread->sched_class = thread->base_class; + xnsched_trackprio(thread, NULL); + /* + * Per SUSv2, resetting the base scheduling parameters + * should not move the thread to the tail of its + * priority group. + */ + if (xnthread_test_state(thread, XNREADY)) + xnsched_requeue(thread); + + } else { + xnsched_getparam(target, &param); + thread->sched_class = target->sched_class; + xnsched_trackprio(thread, &param); + if (xnthread_test_state(thread, XNREADY)) + xnsched_enqueue(thread); + } + + trace_cobalt_thread_set_current_prio(thread); + + xnsched_set_resched(thread->sched); +} + +/* nklock locked, interrupts off. */ +void xnsched_protect_priority(struct xnthread *thread, int prio) +{ + /* + * Apply a PP boost by changing the effective priority of a + * thread, forcing it to the RT class. Like + * xnsched_track_policy(), this routine is allowed to lower + * the weighted priority with no restriction, even if a boost + * is undergoing. + * + * This routine only deals with active boosts; resetting the + * base priority when leaving a PP boost is achieved by a call + * to xnsched_track_policy().
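+ * + * (Hedged illustration with hypothetical names: on acquiring a PP + * lock, the core would boost the owner with + * xnsched_protect_priority(owner, ceiling); on release, + * xnsched_track_policy(owner, owner) restores the base scheduling + * parameters, dropping the boost.)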
+ */ + if (xnthread_test_state(thread, XNREADY)) + xnsched_dequeue(thread); + + thread->sched_class = &xnsched_class_rt; + xnsched_protectprio(thread, prio); + + if (xnthread_test_state(thread, XNREADY)) + xnsched_enqueue(thread); + + trace_cobalt_thread_set_current_prio(thread); + + xnsched_set_resched(thread->sched); +} + +static void migrate_thread(struct xnthread *thread, struct xnsched *sched) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (xnthread_test_state(thread, XNREADY)) { + xnsched_dequeue(thread); + xnthread_clear_state(thread, XNREADY); + } + + if (sched_class->sched_migrate) + sched_class->sched_migrate(thread, sched); + /* + * WARNING: the scheduling class may have just changed as a + * result of calling the per-class migration hook. + */ + thread->sched = sched; +} + +/* + * nklock locked, interrupts off. thread must be runnable. + */ +void xnsched_migrate(struct xnthread *thread, struct xnsched *sched) +{ + xnsched_set_resched(thread->sched); + migrate_thread(thread, sched); + /* Move thread to the remote run queue. */ + xnsched_putback(thread); +} + +/* + * nklock locked, interrupts off. Thread may be blocked. + */ +void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched) +{ + struct xnsched *last_sched = thread->sched; + + migrate_thread(thread, sched); + + if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) { + xnsched_requeue(thread); + xnthread_set_state(thread, XNREADY); + xnsched_set_resched(last_sched); + } +} + +#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED + +void xnsched_initq(struct xnsched_mlq *q) +{ + int prio; + + q->elems = 0; + bitmap_zero(q->prio_map, XNSCHED_MLQ_LEVELS); + + for (prio = 0; prio < XNSCHED_MLQ_LEVELS; prio++) + INIT_LIST_HEAD(q->heads + prio); +} + +static inline int get_qindex(struct xnsched_mlq *q, int prio) +{ + XENO_BUG_ON(COBALT, prio < 0 || prio >= XNSCHED_MLQ_LEVELS); + /* + * BIG FAT WARNING: We need to rescale the priority level to a + * 0-based range. We use find_first_bit() to scan the bitmap + * which is a bit scan forward operation. Therefore, the lower + * the index value, the higher the priority (since least + * significant bits will be found first when scanning the + * bitmap). + */ + return XNSCHED_MLQ_LEVELS - prio - 1; +} + +static struct list_head *add_q(struct xnsched_mlq *q, int prio) +{ + struct list_head *head; + int idx; + + idx = get_qindex(q, prio); + head = q->heads + idx; + q->elems++; + + /* New item is not linked yet. 
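If its list head was empty until now, the matching bit must also be raised in prio_map, so the forward bit scan notices this priority level.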
*/ + if (list_empty(head)) + __set_bit(idx, q->prio_map); + + return head; +} + +void xnsched_addq(struct xnsched_mlq *q, struct xnthread *thread) +{ + struct list_head *head = add_q(q, thread->cprio); + list_add(&thread->rlink, head); +} + +void xnsched_addq_tail(struct xnsched_mlq *q, struct xnthread *thread) +{ + struct list_head *head = add_q(q, thread->cprio); + list_add_tail(&thread->rlink, head); +} + +static void del_q(struct xnsched_mlq *q, + struct list_head *entry, int idx) +{ + struct list_head *head = q->heads + idx; + + list_del(entry); + q->elems--; + + if (list_empty(head)) + __clear_bit(idx, q->prio_map); +} + +void xnsched_delq(struct xnsched_mlq *q, struct xnthread *thread) +{ + del_q(q, &thread->rlink, get_qindex(q, thread->cprio)); +} + +struct xnthread *xnsched_getq(struct xnsched_mlq *q) +{ + struct xnthread *thread; + struct list_head *head; + int idx; + + if (q->elems == 0) + return NULL; + + idx = xnsched_weightq(q); + head = q->heads + idx; + XENO_BUG_ON(COBALT, list_empty(head)); + thread = list_first_entry(head, struct xnthread, rlink); + del_q(q, &thread->rlink, idx); + + return thread; +} + +struct xnthread *xnsched_findq(struct xnsched_mlq *q, int prio) +{ + struct list_head *head; + int idx; + + idx = get_qindex(q, prio); + head = q->heads + idx; + if (list_empty(head)) + return NULL; + + return list_first_entry(head, struct xnthread, rlink); +} + +#ifdef CONFIG_XENO_OPT_SCHED_CLASSES + +struct xnthread *xnsched_rt_pick(struct xnsched *sched) +{ + struct xnsched_mlq *q = &sched->rt.runnable; + struct xnthread *thread; + struct list_head *head; + int idx; + + if (q->elems == 0) + return NULL; + + /* + * Some scheduling policies may be implemented as variants of + * the core SCHED_FIFO class, sharing its runqueue + * (e.g. SCHED_SPORADIC, SCHED_QUOTA). This means that we have + * to do some cascading to call the right pick handler + * eventually. + */ + idx = xnsched_weightq(q); + head = q->heads + idx; + XENO_BUG_ON(COBALT, list_empty(head)); + + /* + * The active class (i.e. ->sched_class) is the one currently + * queuing the thread, reflecting any priority boost due to + * PI. + */ + thread = list_first_entry(head, struct xnthread, rlink); + if (unlikely(thread->sched_class != &xnsched_class_rt)) + return thread->sched_class->sched_pick(sched); + + del_q(q, &thread->rlink, idx); + + return thread; +} + +#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */ + +#else /* !CONFIG_XENO_OPT_SCALABLE_SCHED */ + +struct xnthread *xnsched_findq(struct list_head *q, int prio) +{ + struct xnthread *thread; + + if (list_empty(q)) + return NULL; + + /* Find thread leading a priority group. */ + list_for_each_entry(thread, q, rlink) { + if (prio == thread->cprio) + return thread; + } + + return NULL; +} + +#ifdef CONFIG_XENO_OPT_SCHED_CLASSES + +struct xnthread *xnsched_rt_pick(struct xnsched *sched) +{ + struct list_head *q = &sched->rt.runnable; + struct xnthread *thread; + + if (list_empty(q)) + return NULL; + + thread = list_first_entry(q, struct xnthread, rlink); + if (unlikely(thread->sched_class != &xnsched_class_rt)) + return thread->sched_class->sched_pick(sched); + + list_del(&thread->rlink); + + return thread; +} + +#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */ + +#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */ + +/** + * @fn int xnsched_run(void) + * @brief The rescheduling procedure. 
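+ * + * (Hedged usage sketch, not upstream text: callers typically mutate + * the scheduler state first, then commit every pending change with + * a single rescheduling call, e.g. + * + * xnlock_get_irqsave(&nklock, s); + * xnthread_resume(sleeper, XNDELAY); + * xnsched_run(); + * xnlock_put_irqrestore(&nklock, s); + * + * where sleeper stands for some hypothetical delayed thread.)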
+ * + * This is the central rescheduling routine which should be called to + * validate and apply changes which have previously been made to the + * nucleus scheduling state, such as suspending, resuming or changing + * the priority of threads. This call performs context switches as + * needed. xnsched_run() schedules out the current thread if: + * + * - the current thread is about to block. + * - a runnable thread from a higher priority scheduling class is + * waiting for the CPU. + * - the current thread does not lead the runnable threads from its + * own scheduling class (i.e. round-robin). + * + * The Cobalt core implements a lazy rescheduling scheme so that most + * of the services affecting the threads state MUST be followed by a + * call to the rescheduling procedure for the new scheduling state to + * be applied. + * + * In other words, multiple changes on the scheduler state can be done + * in a row, waking threads up, blocking others, without being + * immediately translated into the corresponding context switches. + * When all changes have been applied, xnsched_run() should be called + * for considering those changes, and possibly switching context. + * + * As a notable exception to the previous principle however, every + * action which ends up suspending the current thread begets an + * implicit call to the rescheduling procedure on behalf of the + * blocking service. + * + * Typically, self-suspension or sleeping on a synchronization object + * automatically leads to a call to the rescheduling procedure, + * therefore the caller does not need to explicitly issue + * xnsched_run() after such operations. + * + * The rescheduling procedure always leads to a null-effect if it is + * called on behalf of an interrupt service routine. Any outstanding + * scheduler lock held by the outgoing thread will be restored when + * the thread is scheduled back in. + * + * Calling this procedure with no applicable context switch pending is + * harmless and simply leads to a null-effect. + * + * @return Non-zero is returned if a context switch actually happened, + * otherwise zero if the current thread was left running. + * + * @coretags{unrestricted} + */ +static inline int test_resched(struct xnsched *sched) +{ + int resched = xnsched_resched_p(sched); +#ifdef CONFIG_SMP + /* Send resched IPI to remote CPU(s). */ + if (unlikely(!cpumask_empty(&sched->resched))) { + smp_mb(); + pipeline_send_resched_ipi(&sched->resched); + cpumask_clear(&sched->resched); + } +#endif + sched->status &= ~XNRESCHED; + + return resched; +} + +static inline void enter_root(struct xnthread *root) +{ +#ifdef CONFIG_XENO_OPT_WATCHDOG + xntimer_stop(&root->sched->wdtimer); +#endif +} + +static inline void leave_root(struct xnthread *root) +{ + pipeline_prep_switch_oob(root); + +#ifdef CONFIG_XENO_OPT_WATCHDOG + xntimer_start(&root->sched->wdtimer, get_watchdog_timeout(), + XN_INFINITE, XN_RELATIVE); +#endif +} + +void __xnsched_run_handler(void) /* hw interrupts off. 
*/ +{ + trace_cobalt_schedule_remote(xnsched_current()); + xnsched_run(); +} + +static inline void do_lazy_user_work(struct xnthread *curr) +{ + xnthread_commit_ceiling(curr); +} + +int ___xnsched_run(struct xnsched *sched) +{ + bool switched = false, leaving_inband; + struct xnthread *prev, *next, *curr; + spl_t s; + + XENO_WARN_ON_ONCE(COBALT, is_secondary_domain()); + + trace_cobalt_schedule(sched); + + xnlock_get_irqsave(&nklock, s); + + curr = sched->curr; + /* + * CAUTION: xnthread_host_task(curr) may be unsynced and even + * stale if curr = &rootcb, since the task logged by + * leave_root() may not still be the current one. Use + * "current" for disambiguating. + */ + xntrace_pid(task_pid_nr(current), xnthread_current_priority(curr)); + + if (xnthread_test_state(curr, XNUSER)) + do_lazy_user_work(curr); + + if (!test_resched(sched)) + goto out; + + next = xnsched_pick_next(sched); + if (next == curr) { + if (unlikely(xnthread_test_state(next, XNROOT))) { + if (sched->lflags & XNHTICK) + xnintr_host_tick(sched); + if (sched->lflags & XNHDEFER) + xnclock_program_shot(&nkclock, sched); + } + goto out; + } + + prev = curr; + + trace_cobalt_switch_context(prev, next); + + /* + * sched->curr is shared locklessly with xnsched_run() and + * xnsched_lock(). WRITE_ONCE() makes sure sched->curr is + * written atomically so that these routines always observe + * consistent values by preventing the compiler from using + * store tearing. + */ + WRITE_ONCE(sched->curr, next); + leaving_inband = false; + + if (xnthread_test_state(prev, XNROOT)) { + leave_root(prev); + leaving_inband = true; + } else if (xnthread_test_state(next, XNROOT)) { + if (sched->lflags & XNHTICK) + xnintr_host_tick(sched); + if (sched->lflags & XNHDEFER) + xnclock_program_shot(&nkclock, sched); + enter_root(next); + } + + xnstat_exectime_switch(sched, &next->stat.account); + xnstat_counter_inc(&next->stat.csw); + + if (pipeline_switch_to(prev, next, leaving_inband)) + /* oob -> in-band transition detected. */ + return true; + + /* + * Re-read sched->curr for tracing: the current thread may + * have switched from in-band to oob context. + */ + xntrace_pid(task_pid_nr(current), + xnthread_current_priority(xnsched_current()->curr)); + + switched = true; +out: + xnlock_put_irqrestore(&nklock, s); + + return !!switched; +} +EXPORT_SYMBOL_GPL(___xnsched_run); + +#ifdef CONFIG_XENO_OPT_VFILE + +static struct xnvfile_directory sched_vfroot; + +struct vfile_schedlist_priv { + struct xnthread *curr; + xnticks_t start_time; +}; + +struct vfile_schedlist_data { + int cpu; + pid_t pid; + char name[XNOBJECT_NAME_LEN]; + char sched_class[XNOBJECT_NAME_LEN]; + char personality[XNOBJECT_NAME_LEN]; + int cprio; + xnticks_t timeout; + int state; +}; + +static struct xnvfile_snapshot_ops vfile_schedlist_ops; + +static struct xnvfile_snapshot schedlist_vfile = { + .privsz = sizeof(struct vfile_schedlist_priv), + .datasz = sizeof(struct vfile_schedlist_data), + .tag = &nkthreadlist_tag, + .ops = &vfile_schedlist_ops, +}; + +static int vfile_schedlist_rewind(struct xnvfile_snapshot_iterator *it) +{ + struct vfile_schedlist_priv *priv = xnvfile_iterator_priv(it); + + /* &nkthreadq cannot be empty (root thread(s)). 
*/ + priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink); + priv->start_time = xnclock_read_monotonic(&nkclock); + + return cobalt_nrthreads; +} + +static int vfile_schedlist_next(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_schedlist_priv *priv = xnvfile_iterator_priv(it); + struct vfile_schedlist_data *p = data; + xnticks_t timeout, period; + struct xnthread *thread; + xnticks_t base_time; + + if (priv->curr == NULL) + return 0; /* All done. */ + + thread = priv->curr; + if (list_is_last(&thread->glink, &nkthreadq)) + priv->curr = NULL; + else + priv->curr = list_next_entry(thread, glink); + + p->cpu = xnsched_cpu(thread->sched); + p->pid = xnthread_host_pid(thread); + memcpy(p->name, thread->name, sizeof(p->name)); + p->cprio = thread->cprio; + p->state = xnthread_get_state(thread); + if (thread->lock_count > 0) + p->state |= XNLOCK; + knamecpy(p->sched_class, thread->sched_class->name); + knamecpy(p->personality, thread->personality->name); + period = xnthread_get_period(thread); + base_time = priv->start_time; + if (xntimer_clock(&thread->ptimer) != &nkclock) + base_time = xnclock_read_monotonic(xntimer_clock(&thread->ptimer)); + timeout = xnthread_get_timeout(thread, base_time); + /* + * Here we cheat: thread is periodic and the sampling rate may + * be high, so it is indeed possible that the next tick date + * from the ptimer progresses fast enough while we are busy + * collecting output data in this loop, so that next_date - + * start_time > period. In such a case, we simply ceil the + * value to period to keep the result meaningful, even if not + * necessarily accurate. But what does accuracy mean when the + * sampling frequency is high, and the way to read it has to + * go through the vfile interface anyway? + */ + if (period > 0 && period < timeout && + !xntimer_running_p(&thread->rtimer)) + timeout = period; + + p->timeout = timeout; + + return 1; +} + +static int vfile_schedlist_show(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_schedlist_data *p = data; + char sbuf[64], pbuf[16], tbuf[16]; + + if (p == NULL) + xnvfile_printf(it, + "%-3s %-6s %-5s %-8s %-5s %-12s %-10s %s\n", + "CPU", "PID", "CLASS", "TYPE", "PRI", "TIMEOUT", + "STAT", "NAME"); + else { + ksformat(pbuf, sizeof(pbuf), "%3d", p->cprio); + xntimer_format_time(p->timeout, tbuf, sizeof(tbuf)); + xnthread_format_status(p->state, sbuf, sizeof(sbuf)); + + xnvfile_printf(it, + "%3u %-6d %-5s %-8s %-5s %-12s %-10s %s%s%s\n", + p->cpu, + p->pid, + p->sched_class, + p->personality, + pbuf, + tbuf, + sbuf, + (p->state & XNUSER) ? "" : "[", + p->name, + (p->state & XNUSER) ? 
"" : "]"); + } + + return 0; +} + +static struct xnvfile_snapshot_ops vfile_schedlist_ops = { + .rewind = vfile_schedlist_rewind, + .next = vfile_schedlist_next, + .show = vfile_schedlist_show, +}; + +#ifdef CONFIG_XENO_OPT_STATS + +static spl_t vfile_schedstat_lock_s; + +static int vfile_schedstat_get_lock(struct xnvfile *vfile) +{ + int ret; + + ret = xnintr_get_query_lock(); + if (ret < 0) + return ret; + xnlock_get_irqsave(&nklock, vfile_schedstat_lock_s); + return 0; +} + +static void vfile_schedstat_put_lock(struct xnvfile *vfile) +{ + xnlock_put_irqrestore(&nklock, vfile_schedstat_lock_s); + xnintr_put_query_lock(); +} + +static struct xnvfile_lock_ops vfile_schedstat_lockops = { + .get = vfile_schedstat_get_lock, + .put = vfile_schedstat_put_lock, +}; + +struct vfile_schedstat_priv { + int irq; + struct xnthread *curr; + struct xnintr_iterator intr_it; +}; + +struct vfile_schedstat_data { + int cpu; + pid_t pid; + int state; + char name[XNOBJECT_NAME_LEN]; + unsigned long ssw; + unsigned long csw; + unsigned long xsc; + unsigned long pf; + xnticks_t exectime_period; + xnticks_t account_period; + xnticks_t exectime_total; + struct xnsched_class *sched_class; + xnticks_t period; + int cprio; +}; + +static struct xnvfile_snapshot_ops vfile_schedstat_ops; + +static struct xnvfile_snapshot schedstat_vfile = { + .privsz = sizeof(struct vfile_schedstat_priv), + .datasz = sizeof(struct vfile_schedstat_data), + .tag = &nkthreadlist_tag, + .ops = &vfile_schedstat_ops, + .entry = { .lockops = &vfile_schedstat_lockops }, +}; + +static int vfile_schedstat_rewind(struct xnvfile_snapshot_iterator *it) +{ + struct vfile_schedstat_priv *priv = xnvfile_iterator_priv(it); + int irqnr; + + /* + * The activity numbers on each valid interrupt descriptor are + * grouped under a pseudo-thread. + */ + priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink); + priv->irq = 0; + irqnr = xnintr_query_init(&priv->intr_it) * num_online_cpus(); + + return irqnr + cobalt_nrthreads; +} + +static int vfile_schedstat_next(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_schedstat_priv *priv = xnvfile_iterator_priv(it); + struct vfile_schedstat_data *p = data; + struct xnthread *thread; + struct xnsched *sched; + xnticks_t period; + int __maybe_unused ret; + + if (priv->curr == NULL) + /* + * We are done with actual threads, scan interrupt + * descriptors. 
+ */ + goto scan_irqs; + + thread = priv->curr; + if (list_is_last(&thread->glink, &nkthreadq)) + priv->curr = NULL; + else + priv->curr = list_next_entry(thread, glink); + + sched = thread->sched; + p->cpu = xnsched_cpu(sched); + p->pid = xnthread_host_pid(thread); + memcpy(p->name, thread->name, sizeof(p->name)); + p->state = xnthread_get_state(thread); + if (thread->lock_count > 0) + p->state |= XNLOCK; + p->ssw = xnstat_counter_get(&thread->stat.ssw); + p->csw = xnstat_counter_get(&thread->stat.csw); + p->xsc = xnstat_counter_get(&thread->stat.xsc); + p->pf = xnstat_counter_get(&thread->stat.pf); + p->sched_class = thread->sched_class; + p->cprio = thread->cprio; + p->period = xnthread_get_period(thread); + + period = sched->last_account_switch - thread->stat.lastperiod.start; + if (period == 0 && thread == sched->curr) { + p->exectime_period = 1; + p->account_period = 1; + } else { + p->exectime_period = thread->stat.account.total - + thread->stat.lastperiod.total; + p->account_period = period; + } + p->exectime_total = thread->stat.account.total; + thread->stat.lastperiod.total = thread->stat.account.total; + thread->stat.lastperiod.start = sched->last_account_switch; + + return 1; + +scan_irqs: +#ifdef CONFIG_XENO_OPT_STATS_IRQS + if (priv->irq >= PIPELINE_NR_IRQS) + return 0; /* All done. */ + + ret = xnintr_query_next(priv->irq, &priv->intr_it, p->name); + if (ret) { + if (ret == -EAGAIN) + xnvfile_touch(it->vfile); /* force rewind. */ + priv->irq++; + return VFILE_SEQ_SKIP; + } + + if (!xnsched_supported_cpu(priv->intr_it.cpu)) + return VFILE_SEQ_SKIP; + + p->cpu = priv->intr_it.cpu; + p->csw = priv->intr_it.hits; + p->exectime_period = priv->intr_it.exectime_period; + p->account_period = priv->intr_it.account_period; + p->exectime_total = priv->intr_it.exectime_total; + p->pid = 0; + p->state = 0; + p->ssw = 0; + p->xsc = 0; + p->pf = 0; + p->sched_class = &xnsched_class_idle; + p->cprio = 0; + p->period = 0; + + return 1; +#else /* !CONFIG_XENO_OPT_STATS_IRQS */ + return 0; +#endif /* !CONFIG_XENO_OPT_STATS_IRQS */ +} + +static int vfile_schedstat_show(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_schedstat_data *p = data; + int usage = 0; + + if (p == NULL) + xnvfile_printf(it, + "%-3s %-6s %-10s %-10s %-10s %-4s %-8s %5s" + " %s\n", + "CPU", "PID", "MSW", "CSW", "XSC", "PF", "STAT", "%CPU", + "NAME"); + else { + if (p->account_period) { + while (p->account_period > 0xffffffffUL) { + p->exectime_period >>= 16; + p->account_period >>= 16; + } + usage = xnarch_ulldiv(p->exectime_period * 1000LL + + (p->account_period >> 1), + p->account_period, NULL); + } + xnvfile_printf(it, + "%3u %-6d %-10lu %-10lu %-10lu %-4lu %.8x %3u.%u" + " %s%s%s\n", + p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state, + usage / 10, usage % 10, + (p->state & XNUSER) ? "" : "[", + p->name, + (p->state & XNUSER) ? 
"" : "]"); + } + + return 0; +} + +static int vfile_schedacct_show(struct xnvfile_snapshot_iterator *it, + void *data) +{ + struct vfile_schedstat_data *p = data; + + if (p == NULL) + return 0; + + xnvfile_printf(it, "%u %d %lu %lu %lu %lu %.8x %Lu %Lu %Lu %s %s %d %Lu\n", + p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state, + xnclock_ticks_to_ns(&nkclock, p->account_period), + xnclock_ticks_to_ns(&nkclock, p->exectime_period), + xnclock_ticks_to_ns(&nkclock, p->exectime_total), + p->name, + p->sched_class->name, + p->cprio, + p->period); + + return 0; +} + +static struct xnvfile_snapshot_ops vfile_schedstat_ops = { + .rewind = vfile_schedstat_rewind, + .next = vfile_schedstat_next, + .show = vfile_schedstat_show, +}; + +/* + * An accounting vfile is a thread statistics vfile in disguise with a + * different output format, which is parser-friendly. + */ +static struct xnvfile_snapshot_ops vfile_schedacct_ops; + +static struct xnvfile_snapshot schedacct_vfile = { + .privsz = sizeof(struct vfile_schedstat_priv), + .datasz = sizeof(struct vfile_schedstat_data), + .tag = &nkthreadlist_tag, + .ops = &vfile_schedacct_ops, +}; + +static struct xnvfile_snapshot_ops vfile_schedacct_ops = { + .rewind = vfile_schedstat_rewind, + .next = vfile_schedstat_next, + .show = vfile_schedacct_show, +}; + +#endif /* CONFIG_XENO_OPT_STATS */ + +#ifdef CONFIG_SMP + +static int affinity_vfile_show(struct xnvfile_regular_iterator *it, + void *data) +{ + unsigned long val = 0; + int cpu; + + for (cpu = 0; cpu < nr_cpumask_bits; cpu++) + if (cpumask_test_cpu(cpu, &cobalt_cpu_affinity)) + val |= (1UL << cpu); + + xnvfile_printf(it, "%08lx\n", val); + + return 0; +} + +static ssize_t affinity_vfile_store(struct xnvfile_input *input) +{ + cpumask_t affinity; + ssize_t ret; + long val; + int cpu; + spl_t s; + + ret = xnvfile_get_integer(input, &val); + if (ret < 0) + return ret; + + if (val == 0) + affinity = xnsched_realtime_cpus; /* Reset to default. */ + else { + cpumask_clear(&affinity); + for (cpu = 0; cpu < nr_cpumask_bits; cpu++, val >>= 1) { + if (val & 1) { + /* + * The new dynamic affinity must be a strict + * subset of the static set of supported CPUs. 
+ */ + if (!cpumask_test_cpu(cpu, + &xnsched_realtime_cpus)) + return -EINVAL; + cpumask_set_cpu(cpu, &affinity); + } + } + } + + cpumask_and(&affinity, &affinity, cpu_online_mask); + if (cpumask_empty(&affinity)) + return -EINVAL; + + xnlock_get_irqsave(&nklock, s); + cobalt_cpu_affinity = affinity; + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +static struct xnvfile_regular_ops affinity_vfile_ops = { + .show = affinity_vfile_show, + .store = affinity_vfile_store, +}; + +static struct xnvfile_regular affinity_vfile = { + .ops = &affinity_vfile_ops, +}; + +#endif /* CONFIG_SMP */ + +int xnsched_init_proc(void) +{ + struct xnsched_class *p; + int ret; + + ret = xnvfile_init_dir("sched", &sched_vfroot, &cobalt_vfroot); + if (ret) + return ret; + + ret = xnvfile_init_snapshot("threads", &schedlist_vfile, &sched_vfroot); + if (ret) + return ret; + + for_each_xnsched_class(p) { + if (p->sched_init_vfile) { + ret = p->sched_init_vfile(p, &sched_vfroot); + if (ret) + return ret; + } + } + +#ifdef CONFIG_XENO_OPT_STATS + ret = xnvfile_init_snapshot("stat", &schedstat_vfile, &sched_vfroot); + if (ret) + return ret; + ret = xnvfile_init_snapshot("acct", &schedacct_vfile, &sched_vfroot); + if (ret) + return ret; +#endif /* CONFIG_XENO_OPT_STATS */ + +#ifdef CONFIG_SMP + xnvfile_init_regular("affinity", &affinity_vfile, &cobalt_vfroot); +#endif /* CONFIG_SMP */ + + return 0; +} + +void xnsched_cleanup_proc(void) +{ + struct xnsched_class *p; + + for_each_xnsched_class(p) { + if (p->sched_cleanup_vfile) + p->sched_cleanup_vfile(p); + } + +#ifdef CONFIG_SMP + xnvfile_destroy_regular(&affinity_vfile); +#endif /* CONFIG_SMP */ +#ifdef CONFIG_XENO_OPT_STATS + xnvfile_destroy_snapshot(&schedacct_vfile); + xnvfile_destroy_snapshot(&schedstat_vfile); +#endif /* CONFIG_XENO_OPT_STATS */ + xnvfile_destroy_snapshot(&schedlist_vfile); + xnvfile_destroy_dir(&sched_vfroot); +} + +#endif /* CONFIG_XENO_OPT_VFILE */ + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/select.c b/kernel/xenomai-v3.2.4/kernel/cobalt/select.c new file mode 100644 index 0000000..bd790af --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/select.c @@ -0,0 +1,461 @@ +/* + * Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * Copyright (C) 2008 Efixo + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/types.h> +#include <linux/bitops.h> /* For hweight_long */ +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/synch.h> +#include <cobalt/kernel/select.h> +#include <pipeline/sirq.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_select Synchronous I/O multiplexing + * + * This module implements the services needed for implementing the + * POSIX select() service, or any other event multiplexing services. 
+ * + * Following the implementation of the POSIX select service, this module defines + * three types of events: + * - \a XNSELECT_READ meaning that a file descriptor is ready for reading; + * - \a XNSELECT_WRITE meaning that a file descriptor is ready for writing; + * - \a XNSELECT_EXCEPT meaning that a file descriptor received an exceptional + * event. + * + * It works by defining two structures: + * - a @a struct @a xnselect structure, which should be added to every file + * descriptor for every event type (read, write, or except); + * - a @a struct @a xnselector structure, the selection structure, passed by + * the thread calling the xnselect service, where this service does all its + * housekeeping. + * @{ + */ + +static LIST_HEAD(selector_list); +static int deletion_virq; + +/** + * Initialize a @a struct @a xnselect structure. + * + * This service must be called to initialize a @a struct @a xnselect structure + * before it is bound to a selector by means of xnselect_bind(). + * + * @param select_block pointer to the xnselect structure to be initialized + * + * @coretags{task-unrestricted} + */ +void xnselect_init(struct xnselect *select_block) +{ + INIT_LIST_HEAD(&select_block->bindings); +} +EXPORT_SYMBOL_GPL(xnselect_init); + +static inline int xnselect_wakeup(struct xnselector *selector) +{ + return xnsynch_flush(&selector->synchbase, 0) == XNSYNCH_RESCHED; +} + +/** + * Bind a file descriptor (represented by its @a xnselect structure) to a + * selector block. + * + * @param select_block pointer to the @a struct @a xnselect to be bound; + * + * @param binding pointer to a newly allocated (using xnmalloc) @a struct + * @a xnselect_binding; + * + * @param selector pointer to the selector structure; + * + * @param type type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a + * XNSELECT_EXCEPT); + * + * @param index index of the file descriptor (represented by @a + * select_block) in the bit fields used by the @a selector structure; + * + * @param state current state of the file descriptor. + * + * @a select_block must have been initialized with xnselect_init(), + * the @a xnselector structure must have been initialized with + * xnselector_init(), @a binding may be uninitialized. + * + * This service must be called with nklock locked, irqs off. For this reason, + * the @a binding parameter must have been allocated by the caller outside the + * locking section. + * + * @retval -EINVAL if @a type or @a index is invalid; + * @retval 0 otherwise.
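+ * + * (Hedged sketch with hypothetical names: since the binding memory + * must come from outside the locked section, a driver would do + * + * binding = xnmalloc(sizeof(*binding)); + * xnlock_get_irqsave(&nklock, s); + * ret = xnselect_bind(&fd->read_block, binding, selector, + * XNSELECT_READ, index, readable); + * xnlock_put_irqrestore(&nklock, s); + * + * where read_block and readable are illustrative only.)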
+ * + * @coretags{task-unrestricted, might-switch, atomic-entry} + */ +int xnselect_bind(struct xnselect *select_block, + struct xnselect_binding *binding, + struct xnselector *selector, + unsigned type, + unsigned index, + unsigned state) +{ + atomic_only(); + + if (type >= XNSELECT_MAX_TYPES || index > __FD_SETSIZE) + return -EINVAL; + + binding->selector = selector; + binding->fd = select_block; + binding->type = type; + binding->bit_index = index; + + list_add_tail(&binding->slink, &selector->bindings); + list_add_tail(&binding->link, &select_block->bindings); + __FD_SET__(index, &selector->fds[type].expected); + if (state) { + __FD_SET__(index, &selector->fds[type].pending); + if (xnselect_wakeup(selector)) + xnsched_run(); + } else + __FD_CLR__(index, &selector->fds[type].pending); + + return 0; +} +EXPORT_SYMBOL_GPL(xnselect_bind); + +/* Must be called with nklock locked irqs off */ +int __xnselect_signal(struct xnselect *select_block, unsigned state) +{ + struct xnselect_binding *binding; + struct xnselector *selector; + int resched = 0; + + list_for_each_entry(binding, &select_block->bindings, link) { + selector = binding->selector; + if (state) { + if (!__FD_ISSET__(binding->bit_index, + &selector->fds[binding->type].pending)) { + __FD_SET__(binding->bit_index, + &selector->fds[binding->type].pending); + if (xnselect_wakeup(selector)) + resched = 1; + } + } else + __FD_CLR__(binding->bit_index, + &selector->fds[binding->type].pending); + } + + return resched; +} +EXPORT_SYMBOL_GPL(__xnselect_signal); + +/** + * Destroy the @a xnselect structure associated with a file descriptor. + * + * Any binding with a @a xnselector block is destroyed. + * + * @param select_block pointer to the @a xnselect structure associated + * with a file descriptor + * + * @coretags{task-unrestricted, might-switch} + */ +void xnselect_destroy(struct xnselect *select_block) +{ + struct xnselect_binding *binding, *tmp; + struct xnselector *selector; + int resched = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (list_empty(&select_block->bindings)) + goto out; + + list_for_each_entry_safe(binding, tmp, &select_block->bindings, link) { + list_del(&binding->link); + selector = binding->selector; + __FD_CLR__(binding->bit_index, + &selector->fds[binding->type].expected); + if (!__FD_ISSET__(binding->bit_index, + &selector->fds[binding->type].pending)) { + __FD_SET__(binding->bit_index, + &selector->fds[binding->type].pending); + if (xnselect_wakeup(selector)) + resched = 1; + } + list_del(&binding->slink); + xnlock_put_irqrestore(&nklock, s); + xnfree(binding); + xnlock_get_irqsave(&nklock, s); + } + if (resched) + xnsched_run(); +out: + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xnselect_destroy); + +static unsigned +fd_set_andnot(fd_set *result, fd_set *first, fd_set *second, unsigned n) +{ + unsigned i, not_empty = 0; + + for (i = 0; i < __FDELT__(n); i++) + if((result->fds_bits[i] = + first->fds_bits[i] & ~(second->fds_bits[i]))) + not_empty = 1; + + if (i < __FDSET_LONGS__ + && (result->fds_bits[i] = + first->fds_bits[i] & ~(second->fds_bits[i]) & (__FDMASK__(n) - 1))) + not_empty = 1; + + return not_empty; +} + +static unsigned +fd_set_and(fd_set *result, fd_set *first, fd_set *second, unsigned n) +{ + unsigned i, not_empty = 0; + + for (i = 0; i < __FDELT__(n); i++) + if((result->fds_bits[i] = + first->fds_bits[i] & second->fds_bits[i])) + not_empty = 1; + + if (i < __FDSET_LONGS__ + && (result->fds_bits[i] = + first->fds_bits[i] & second->fds_bits[i] & (__FDMASK__(n) - 1))) + 
not_empty = 1; + + return not_empty; +} + +static void fd_set_zeropad(fd_set *set, unsigned n) +{ + unsigned i; + + i = __FDELT__(n); + + if (i < __FDSET_LONGS__) + set->fds_bits[i] &= (__FDMASK__(n) - 1); + + for (i++; i < __FDSET_LONGS__; i++) + set->fds_bits[i] = 0; +} + +static unsigned fd_set_popcount(fd_set *set, unsigned n) +{ + unsigned count = 0, i; + + for (i = 0; i < __FDELT__(n); i++) + if (set->fds_bits[i]) + count += hweight_long(set->fds_bits[i]); + + if (i < __FDSET_LONGS__ && (set->fds_bits[i] & (__FDMASK__(n) - 1))) + count += hweight_long(set->fds_bits[i] & (__FDMASK__(n) - 1)); + + return count; +} + +/** + * Initialize a selector structure. + * + * @param selector The selector structure to be initialized. + * + * @retval 0 + * + * @coretags{task-unrestricted} + */ +int xnselector_init(struct xnselector *selector) +{ + unsigned int i; + + xnsynch_init(&selector->synchbase, XNSYNCH_FIFO, NULL); + for (i = 0; i < XNSELECT_MAX_TYPES; i++) { + __FD_ZERO__(&selector->fds[i].expected); + __FD_ZERO__(&selector->fds[i].pending); + } + INIT_LIST_HEAD(&selector->bindings); + + return 0; +} +EXPORT_SYMBOL_GPL(xnselector_init); + +/** + * Check the state of a number of file descriptors, wait for a state change if + * no descriptor is ready. + * + * @param selector structure to check for pending events + * @param out_fds The set of descriptors with pending events if a strictly positive number is returned, or the set of descriptors not yet bound if -ECHRNG is returned; + * @param in_fds the set of descriptors whose events should be checked + * @param nfds the highest-numbered descriptor in any of the @a in_fds sets, plus 1; + * @param timeout the timeout, whose meaning depends on @a timeout_mode. Note + * that xnselect() passes @a timeout and @a timeout_mode unchanged to + * xnsynch_sleep_on(), so passing a relative value different from XN_INFINITE + * with @a timeout_mode set to XN_RELATIVE will cause a longer sleep than + * expected if the sleep is interrupted. + * @param timeout_mode the mode of @a timeout. + * + * @retval -EINVAL if @a nfds is negative; + * @retval -ECHRNG if some of the descriptors passed in @a in_fds have not yet + * been registered with xnselect_bind(), @a out_fds contains the set of such + * descriptors; + * @retval -EINTR if @a xnselect was interrupted while waiting; + * @retval 0 in case of timeout. + * @retval the number of file descriptors having received an event.
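+ * + * (Hedged calling sketch: mirroring the read/write/except triple of + * the POSIX call, with only the read set monitored here, + * + * fd_set rin, rout; + * fd_set *in_fds[XNSELECT_MAX_TYPES] = { &rin, NULL, NULL }; + * fd_set *out_fds[XNSELECT_MAX_TYPES] = { &rout, NULL, NULL }; + * int ret = xnselect(selector, out_fds, in_fds, maxfd + 1, + * XN_INFINITE, XN_RELATIVE); + * + * where maxfd is the highest bound descriptor index.)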
+ * + * @coretags{primary-only, might-switch} + */ +int xnselect(struct xnselector *selector, + fd_set *out_fds[XNSELECT_MAX_TYPES], + fd_set *in_fds[XNSELECT_MAX_TYPES], + int nfds, + xnticks_t timeout, xntmode_t timeout_mode) +{ + unsigned int i, not_empty = 0, count; + int info = 0; + spl_t s; + + if ((unsigned) nfds > __FD_SETSIZE) + return -EINVAL; + + for (i = 0; i < XNSELECT_MAX_TYPES; i++) + if (out_fds[i]) + fd_set_zeropad(out_fds[i], nfds); + + xnlock_get_irqsave(&nklock, s); + for (i = 0; i < XNSELECT_MAX_TYPES; i++) + if (out_fds[i] + && fd_set_andnot(out_fds[i], in_fds[i], + &selector->fds[i].expected, nfds)) + not_empty = 1; + xnlock_put_irqrestore(&nklock, s); + + if (not_empty) + return -ECHRNG; + + xnlock_get_irqsave(&nklock, s); + for (i = 0; i < XNSELECT_MAX_TYPES; i++) + if (out_fds[i] + && fd_set_and(out_fds[i], in_fds[i], + &selector->fds[i].pending, nfds)) + not_empty = 1; + + while (!not_empty) { + info = xnsynch_sleep_on(&selector->synchbase, + timeout, timeout_mode); + + for (i = 0; i < XNSELECT_MAX_TYPES; i++) + if (out_fds[i] + && fd_set_and(out_fds[i], in_fds[i], + &selector->fds[i].pending, nfds)) + not_empty = 1; + + if (info & (XNBREAK | XNTIMEO)) + break; + } + xnlock_put_irqrestore(&nklock, s); + + if (not_empty) { + for (count = 0, i = 0; i < XNSELECT_MAX_TYPES; i++) + if (out_fds[i]) + count += fd_set_popcount(out_fds[i], nfds); + + return count; + } + + if (info & XNBREAK) + return -EINTR; + + return 0; /* Timeout */ +} +EXPORT_SYMBOL_GPL(xnselect); + +/** + * Destroy a selector block. + * + * All bindings with file descriptor are destroyed. + * + * @param selector the selector block to be destroyed + * + * @coretags{task-unrestricted} + */ +void xnselector_destroy(struct xnselector *selector) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + list_add_tail(&selector->destroy_link, &selector_list); + pipeline_post_sirq(deletion_virq); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xnselector_destroy); + +static irqreturn_t xnselector_destroy_loop(int virq, void *dev_id) +{ + struct xnselect_binding *binding, *tmpb; + struct xnselector *selector, *tmps; + struct xnselect *fd; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (list_empty(&selector_list)) + goto out; + + list_for_each_entry_safe(selector, tmps, &selector_list, destroy_link) { + list_del(&selector->destroy_link); + if (list_empty(&selector->bindings)) + goto release; + list_for_each_entry_safe(binding, tmpb, &selector->bindings, slink) { + list_del(&binding->slink); + fd = binding->fd; + list_del(&binding->link); + xnlock_put_irqrestore(&nklock, s); + xnfree(binding); + xnlock_get_irqsave(&nklock, s); + } + release: + xnsynch_destroy(&selector->synchbase); + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); + + xnfree(selector); + + xnlock_get_irqsave(&nklock, s); + } +out: + xnlock_put_irqrestore(&nklock, s); + + return IRQ_HANDLED; +} + +int xnselect_mount(void) +{ + deletion_virq = pipeline_create_inband_sirq(xnselector_destroy_loop); + if (deletion_virq < 0) + return deletion_virq; + + return 0; +} + +int xnselect_umount(void) +{ + pipeline_delete_inband_sirq(deletion_virq); + return 0; +} + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/synch.c b/kernel/xenomai-v3.2.4/kernel/cobalt/synch.c new file mode 100644 index 0000000..6e50e53 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/synch.c @@ -0,0 +1,1185 @@ +/* + * Copyright (C) 2001-2008 Philippe Gerum <rpm@xenomai.org>. 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/stdarg.h> +#include <linux/signal.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/synch.h> +#include <cobalt/kernel/thread.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/uapi/signal.h> +#include <trace/events/cobalt-core.h> + +#define PP_CEILING_MASK 0xff + +static inline int get_ceiling_value(struct xnsynch *synch) +{ + /* + * The ceiling priority value is stored in user-writable + * memory, make sure to constrain it within valid bounds for + * xnsched_class_rt before using it. + */ + return *synch->ceiling_ref & PP_CEILING_MASK ?: 1; +} + +struct xnsynch *lookup_lazy_pp(xnhandle_t handle); + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_synch Thread synchronization services + * @{ + */ + +/** + * @brief Initialize a synchronization object. + * + * Initializes a synchronization object. Xenomai threads can wait on + * and signal such objects for serializing access to resources. + * This object has built-in support for priority inheritance. + * + * @param synch The address of a synchronization object descriptor + * Cobalt will use to store the object-specific data. This descriptor + * must always be valid while the object is active therefore it must + * be allocated in permanent memory. + * + * @param flags A set of creation flags affecting the operation. The + * valid flags are: + * + * - XNSYNCH_PRIO causes the threads waiting for the resource to pend + * in priority order. Otherwise, FIFO ordering is used (XNSYNCH_FIFO). + * + * - XNSYNCH_OWNER indicates that the synchronization object shall + * track the resource ownership, allowing a single owner at most at + * any point in time. Note that setting this flag implies the use of + * xnsynch_acquire() and xnsynch_release() instead of + * xnsynch_sleep_on() and xnsynch_wakeup_*(). + * + * - XNSYNCH_PI enables priority inheritance when a priority inversion + * is detected among threads using this object. XNSYNCH_PI implies + * XNSYNCH_OWNER and XNSYNCH_PRIO. + * + * - XNSYNCH_PP enables priority protect to prevent priority inversion. + * XNSYNCH_PP implies XNSYNCH_OWNER and XNSYNCH_PRIO. + * + * - XNSYNCH_DREORD (Disable REORDering) tells Cobalt not to reorder + * the wait list upon priority change of a waiter. Reordering is the + * default. Only applies when XNSYNCH_PRIO is present. + * + * @param fastlock Address of the fast lock word to be associated with + * a synchronization object with ownership tracking. Therefore, a + * valid fast-lock address is required if XNSYNCH_OWNER is set in @a + * flags. + * + * @coretags{task-unrestricted} + */ +void xnsynch_init(struct xnsynch *synch, int flags, atomic_t *fastlock) +{ + if (flags & (XNSYNCH_PI|XNSYNCH_PP)) + flags |= XNSYNCH_PRIO | XNSYNCH_OWNER; /* Obviously... 
*/ + + synch->status = flags & ~XNSYNCH_CLAIMED; + synch->owner = NULL; + synch->cleanup = NULL; /* for PI/PP only. */ + synch->wprio = -1; + synch->ceiling_ref = NULL; + INIT_LIST_HEAD(&synch->pendq); + + if (flags & XNSYNCH_OWNER) { + BUG_ON(fastlock == NULL); + synch->fastlock = fastlock; + atomic_set(fastlock, XN_NO_HANDLE); + } else + synch->fastlock = NULL; +} +EXPORT_SYMBOL_GPL(xnsynch_init); + +/** + * @brief Initialize a synchronization object enforcing PP. + * + * This call is a variant of xnsynch_init() for initializing + * synchronization objects enabling the priority protect protocol. + * + * @param synch The address of a synchronization object descriptor + * Cobalt will use to store the object-specific data. See + * xnsynch_init(). + * + * @param flags A set of creation flags affecting the operation. See + * xnsynch_init(). XNSYNCH_PI is mutually exclusive with XNSYNCH_PP, + * and won't be considered. + * + * @param fastlock Address of the fast lock word to be associated with + * a synchronization object with ownership tracking. See xnsynch_init(). + * + * @param ceiling_ref The address of the variable holding the current + * priority ceiling value for this object. + * + * @coretags{task-unrestricted} + */ +void xnsynch_init_protect(struct xnsynch *synch, int flags, + atomic_t *fastlock, u32 *ceiling_ref) +{ + xnsynch_init(synch, (flags & ~XNSYNCH_PI) | XNSYNCH_PP, fastlock); + synch->ceiling_ref = ceiling_ref; +} + +/** + * @fn void xnsynch_destroy(struct xnsynch *synch) + * @brief Destroy a synchronization object. + * + * Destroys the synchronization object @a synch, unblocking all + * waiters with the XNRMID status. + * + * @return XNSYNCH_RESCHED is returned if at least one thread is + * unblocked, which means the caller should invoke xnsched_run() for + * applying the new scheduling state. Otherwise, XNSYNCH_DONE is + * returned. + + * @sideeffect Same as xnsynch_flush(). + * + * @coretags{task-unrestricted} + */ +int xnsynch_destroy(struct xnsynch *synch) +{ + int ret; + + ret = xnsynch_flush(synch, XNRMID); + XENO_BUG_ON(COBALT, synch->status & XNSYNCH_CLAIMED); + + return ret; +} +EXPORT_SYMBOL_GPL(xnsynch_destroy); + +/** + * @fn int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout, xntmode_t timeout_mode); + * @brief Sleep on an ownerless synchronization object. + * + * Makes the calling thread sleep on the specified synchronization + * object, waiting for it to be signaled. + * + * This service should be called by upper interfaces wanting the + * current thread to pend on the given resource. It must not be used + * with synchronization objects that are supposed to track ownership + * (XNSYNCH_OWNER). + * + * @param synch The descriptor address of the synchronization object + * to sleep on. + * + * @param timeout The timeout which may be used to limit the time the + * thread pends on the resource. This value is a wait time given as a + * count of nanoseconds. It can either be relative, absolute + * monotonic, or absolute adjustable depending on @a + * timeout_mode. Passing XN_INFINITE @b and setting @a mode to + * XN_RELATIVE specifies an unbounded wait. All other values are used + * to initialize a watchdog timer. + * + * @param timeout_mode The mode of the @a timeout parameter. It can + * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also + * xntimer_start()). 
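+ *
+ * As a sketch of typical usage (the "event" object and the error
+ * mapping are illustrative, not mandated by this service), callers
+ * usually translate the returned bits into error codes:
+ *
+ *	int info = xnsynch_sleep_on(&event, timeout, XN_RELATIVE);
+ *
+ *	if (info & XNRMID)
+ *		return -EIDRM;
+ *	if (info & XNBREAK)
+ *		return -EINTR;
+ *	if (info & XNTIMEO)
+ *		return -ETIMEDOUT;
+ *
+ * where "event" is an ownerless object previously set up with
+ * xnsynch_init(&event, XNSYNCH_PRIO, NULL), and the signaling side
+ * calls xnsynch_wakeup_one_sleeper() followed by xnsched_run().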
+ *
+ * @return A bitmask which may include zero or one information bit
+ * among XNRMID, XNTIMEO and XNBREAK, which should be tested by the
+ * caller, for detecting respectively: object deletion, timeout or
+ * signal/unblock conditions which might have happened while waiting.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
+		     xntmode_t timeout_mode)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	thread = xnthread_current();
+
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP) &&
+	    thread->res_count > 0 &&
+	    xnthread_test_state(thread, XNWARN))
+		xnthread_signal(thread, SIGDEBUG, SIGDEBUG_MUTEX_SLEEP);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_sleepon(synch);
+
+	if ((synch->status & XNSYNCH_PRIO) == 0) /* i.e. FIFO */
+		list_add_tail(&thread->plink, &synch->pendq);
+	else /* i.e. priority-sorted */
+		list_add_priff(thread, &synch->pendq, wprio, plink);
+
+	xnthread_suspend(thread, XNPEND, timeout, timeout_mode, synch);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
+}
+EXPORT_SYMBOL_GPL(xnsynch_sleep_on);
+
+/**
+ * @fn struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch);
+ * @brief Unblock the heading thread from wait.
+ *
+ * This service wakes up the thread which is currently leading the
+ * synchronization object's pending list. The sleeping thread is
+ * unblocked from its pending state, but no reschedule is performed.
+ *
+ * This service should be called by upper interfaces wanting to signal
+ * the given resource so that a single waiter is resumed. It must not
+ * be used with synchronization objects that are supposed to track
+ * ownership (i.e. XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the signaled synchronization
+ * object.
+ *
+ * @return The descriptor address of the unblocked thread, or NULL if
+ * no thread was pending on @a synch.
+ *
+ * @coretags{unrestricted}
+ */
+struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&synch->pendq)) {
+		thread = NULL;
+		goto out;
+	}
+
+	trace_cobalt_synch_wakeup(synch);
+	thread = list_first_entry(&synch->pendq, struct xnthread, plink);
+	list_del(&thread->plink);
+	thread->wchan = NULL;
+	xnthread_resume(thread, XNPEND);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_one_sleeper);
+
+int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr)
+{
+	struct xnthread *thread, *tmp;
+	int nwakeups = 0;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&synch->pendq))
+		goto out;
+
+	trace_cobalt_synch_wakeup_many(synch);
+
+	list_for_each_entry_safe(thread, tmp, &synch->pendq, plink) {
+		if (nwakeups >= nr)
+			break;
+		list_del(&thread->plink);
+		thread->wchan = NULL;
+		xnthread_resume(thread, XNPEND);
+		nwakeups++;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return nwakeups;
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_many_sleepers);
+
+/**
+ * @fn void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper);
+ * @brief Unblock a particular thread from wait.
+ *
+ * This service wakes up a specific thread which is currently pending on
+ * the given synchronization object. The sleeping thread is unblocked
+ * from its pending state, but no reschedule is performed.
+ *
+ * This service should be called by upper interfaces wanting to signal
+ * the given resource so that a specific waiter is resumed. It must not
+ * be used with synchronization objects that are supposed to track
+ * ownership (i.e. XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the signaled synchronization
+ * object.
+ *
+ * @param sleeper The thread to unblock which MUST be currently linked
+ * to the synchronization object's pending queue (i.e. synch->pendq).
+ *
+ * @coretags{unrestricted}
+ */
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper)
+{
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_wakeup(synch);
+	list_del(&sleeper->plink);
+	sleeper->wchan = NULL;
+	xnthread_resume(sleeper, XNPEND);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_this_sleeper);
+
+static inline void raise_boost_flag(struct xnthread *owner)
+{
+	/* Backup the base priority at first boost only. */
+	if (!xnthread_test_state(owner, XNBOOST)) {
+		owner->bprio = owner->cprio;
+		xnthread_set_state(owner, XNBOOST);
+	}
+}
+
+static void inherit_thread_priority(struct xnthread *owner,
+				    struct xnthread *target)
+{
+	if (xnthread_test_state(owner, XNZOMBIE))
+		return;
+
+	/* Apply the scheduling policy of "target" to "owner". */
+	xnsched_track_policy(owner, target);
+
+	/*
+	 * Owner may be sleeping; propagate the priority update
+	 * through the PI chain if needed.
+	 */
+	if (owner->wchan)
+		xnsynch_requeue_sleeper(owner);
+}
+
+static void __ceil_owner_priority(struct xnthread *owner, int prio)
+{
+	if (xnthread_test_state(owner, XNZOMBIE))
+		return;
+	/*
+	 * Raise owner priority to the ceiling value; this implicitly
+	 * selects SCHED_FIFO for the owner.
+	 */
+	xnsched_protect_priority(owner, prio);
+
+	if (owner->wchan)
+		xnsynch_requeue_sleeper(owner);
+}
+
+static void adjust_boost(struct xnthread *owner, struct xnthread *target)
+{
+	struct xnsynch *synch;
+
+	/*
+	 * CAUTION: we may have PI and PP-enabled objects among the
+	 * boosters, so considering the leader of synch->pendq is
+	 * NOT enough for determining the next boost priority, since
+	 * PP is tracked on acquisition, not on contention. Check the
+	 * head of the booster list instead.
+	 */
+	synch = list_first_entry(&owner->boosters, struct xnsynch, next);
+	if (synch->wprio == owner->wprio)
+		return;
+
+	if (synch->status & XNSYNCH_PP)
+		__ceil_owner_priority(owner, get_ceiling_value(synch));
+	else {
+		XENO_BUG_ON(COBALT, list_empty(&synch->pendq));
+		if (target == NULL)
+			target = list_first_entry(&synch->pendq,
+						  struct xnthread, plink);
+		inherit_thread_priority(owner, target);
+	}
+}
+
+static void ceil_owner_priority(struct xnsynch *synch)
+{
+	struct xnthread *owner = synch->owner;
+	int wprio;
+
+	/* PP ceiling values are implicitly based on the RT class. */
+	wprio = xnsched_calc_wprio(&xnsched_class_rt,
+				   get_ceiling_value(synch));
+	synch->wprio = wprio;
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	raise_boost_flag(owner);
+	synch->status |= XNSYNCH_CEILING;
+
+	/*
+	 * If the ceiling value is lower than the current effective
+	 * priority, we must not adjust the latter.
BEWARE: not only + * this restriction is required to keep the PP logic right, + * but this is also a basic assumption made by all + * xnthread_commit_ceiling() callers which won't check for any + * rescheduling opportunity upon return. + * + * However we do want the object to be linked to the booster + * list, and XNBOOST must appear in the current thread status. + * + * This way, setparam() won't be allowed to decrease the + * current weighted priority below the ceiling value, until we + * eventually release this object. + */ + if (wprio > owner->wprio) + adjust_boost(owner, NULL); +} + +static inline +void track_owner(struct xnsynch *synch, struct xnthread *owner) +{ + synch->owner = owner; +} + +static inline /* nklock held, irqs off */ +void set_current_owner_locked(struct xnsynch *synch, struct xnthread *owner) +{ + /* + * Update the owner information, and apply priority protection + * for PP objects. We may only get there if owner is current, + * or blocked. + */ + track_owner(synch, owner); + if (synch->status & XNSYNCH_PP) + ceil_owner_priority(synch); +} + +static inline +void set_current_owner(struct xnsynch *synch, struct xnthread *owner) +{ + spl_t s; + + track_owner(synch, owner); + if (synch->status & XNSYNCH_PP) { + xnlock_get_irqsave(&nklock, s); + ceil_owner_priority(synch); + xnlock_put_irqrestore(&nklock, s); + } +} + +static inline +xnhandle_t get_owner_handle(xnhandle_t ownerh, struct xnsynch *synch) +{ + /* + * On acquisition from kernel space, the fast lock handle + * should bear the FLCEIL bit for PP objects, so that userland + * takes the slow path on release, jumping to the kernel for + * dropping the ceiling priority boost. + */ + if (synch->status & XNSYNCH_PP) + ownerh = xnsynch_fast_ceiling(ownerh); + + return ownerh; +} + +static void commit_ceiling(struct xnsynch *synch, struct xnthread *curr) +{ + xnhandle_t oldh, h; + atomic_t *lockp; + + track_owner(synch, curr); + ceil_owner_priority(synch); + /* + * Raise FLCEIL, which indicates a kernel entry will be + * required for releasing this resource. + */ + lockp = xnsynch_fastlock(synch); + do { + h = atomic_read(lockp); + oldh = atomic_cmpxchg(lockp, h, xnsynch_fast_ceiling(h)); + } while (oldh != h); +} + +void xnsynch_commit_ceiling(struct xnthread *curr) /* nklock held, irqs off */ +{ + struct xnsynch *synch; + atomic_t *lockp; + + /* curr->u_window has to be valid, curr bears XNUSER. */ + synch = lookup_lazy_pp(curr->u_window->pp_pending); + if (synch == NULL) { + /* + * If pp_pending is a bad handle, don't panic but + * rather ignore: we don't want a misbehaving userland + * to crash the kernel. + */ + XENO_WARN_ON_ONCE(USER, 1); + goto out; + } + + /* + * For PP locks, userland does, in that order: + * + * -- LOCK + * 1. curr->u_window->pp_pending = lock_handle + * barrier(); + * 2. atomic_cmpxchg(lockp, XN_NO_HANDLE, curr->handle); + * + * -- UNLOCK + * 1. atomic_cmpxchg(lockp, curr->handle, XN_NO_HANDLE); [unclaimed] + * barrier(); + * 2. curr->u_window->pp_pending = XN_NO_HANDLE + * + * Make sure we have not been caught in a rescheduling in + * between those steps. If we did, then we won't be holding + * the lock as we schedule away, therefore no priority update + * must take place. + */ + lockp = xnsynch_fastlock(synch); + if (xnsynch_fast_owner_check(lockp, curr->handle)) + return; + + /* + * In rare cases, we could be called multiple times for + * committing a lazy ceiling for the same object, e.g. if + * userland is preempted in the middle of a recursive locking + * sequence. 
+	 *
+	 * This stems from the fact that userland has to update
+	 * ->pp_pending prior to trying to grab the lock atomically,
+	 * at which point it can figure out whether a recursive
+	 * locking happened. We get out of this trap by testing the
+	 * XNSYNCH_CEILING flag.
+	 */
+	if ((synch->status & XNSYNCH_CEILING) == 0)
+		commit_ceiling(synch, curr);
+out:
+	curr->u_window->pp_pending = XN_NO_HANDLE;
+}
+
+/**
+ * @fn int xnsynch_try_acquire(struct xnsynch *synch);
+ * @brief Try acquiring the ownership of a synchronization object.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to acquire the ownership of the given resource. If
+ * the resource is already assigned to another thread, the call
+ * returns with an error code.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to acquire.
+ *
+ * @return Zero is returned if @a synch has been successfully
+ * acquired. Otherwise:
+ *
+ * - -EDEADLK is returned if @a synch is currently held by the calling
+ * thread.
+ *
+ * - -EBUSY is returned if @a synch is currently held by another
+ * thread.
+ *
+ * @coretags{primary-only}
+ */
+int xnsynch_try_acquire(struct xnsynch *synch)
+{
+	struct xnthread *curr;
+	atomic_t *lockp;
+	xnhandle_t h;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	curr = xnthread_current();
+	lockp = xnsynch_fastlock(synch);
+	trace_cobalt_synch_try_acquire(synch);
+
+	h = atomic_cmpxchg(lockp, XN_NO_HANDLE,
+			   get_owner_handle(curr->handle, synch));
+	if (h != XN_NO_HANDLE)
+		return xnhandle_get_id(h) == curr->handle ?
+			-EDEADLK : -EBUSY;
+
+	set_current_owner(synch, curr);
+	xnthread_get_resource(curr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsynch_try_acquire);
+
+/**
+ * @fn int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout, xntmode_t timeout_mode);
+ * @brief Acquire the ownership of a synchronization object.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to acquire the ownership of the given resource. If
+ * the resource is already assigned to another thread, the caller is
+ * suspended.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to acquire.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on the resource. This value is a wait time given as a
+ * count of nanoseconds. It can either be relative, absolute
+ * monotonic, or absolute adjustable depending on @a
+ * timeout_mode. Passing XN_INFINITE @b and setting @a timeout_mode to
+ * XN_RELATIVE specifies an unbounded wait. All other values are used
+ * to initialize a watchdog timer.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @return A bitmask which may include zero or one information bit
+ * among XNRMID, XNTIMEO and XNBREAK, which should be tested by the
+ * caller, for detecting respectively: object deletion, timeout or
+ * signal/unblock conditions which might have happened while waiting.
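+ *
+ * As an illustrative sketch only (the "mutex" and "fastlock" names
+ * are placeholders), a mutex-like critical section built on this
+ * service typically reads:
+ *
+ *	int info = xnsynch_acquire(&mutex, XN_INFINITE, XN_RELATIVE);
+ *
+ *	if (info == 0) {
+ *		do_protected_work();
+ *		if (xnsynch_release(&mutex, xnthread_current()))
+ *			xnsched_run();
+ *	}
+ *
+ * assuming xnsynch_init(&mutex, XNSYNCH_PI, &fastlock) was issued
+ * beforehand, with @a fastlock an atomic_t lock word.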
+ * + * @coretags{primary-only, might-switch} + * + * @note Unlike xnsynch_try_acquire(), this call does NOT check for + * invalid recursive locking request, which means that such request + * will always cause a deadlock for the caller. + */ +int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout, + xntmode_t timeout_mode) +{ + struct xnthread *curr, *owner; + xnhandle_t currh, h, oldh; + atomic_t *lockp; + spl_t s; + + primary_mode_only(); + + XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0); + + curr = xnthread_current(); + currh = curr->handle; + lockp = xnsynch_fastlock(synch); + trace_cobalt_synch_acquire(synch); +redo: + /* Basic form of xnsynch_try_acquire(). */ + h = atomic_cmpxchg(lockp, XN_NO_HANDLE, + get_owner_handle(currh, synch)); + if (likely(h == XN_NO_HANDLE)) { + set_current_owner(synch, curr); + xnthread_get_resource(curr); + return 0; + } + + xnlock_get_irqsave(&nklock, s); + + /* + * Set claimed bit. In case it appears to be set already, + * re-read its state under nklock so that we don't miss any + * change between the lock-less read and here. But also try to + * avoid cmpxchg where possible. Only if it appears not to be + * set, start with cmpxchg directly. + */ + if (xnsynch_fast_is_claimed(h)) { + oldh = atomic_read(lockp); + goto test_no_owner; + } + + do { + oldh = atomic_cmpxchg(lockp, h, xnsynch_fast_claimed(h)); + if (likely(oldh == h)) + break; + test_no_owner: + if (oldh == XN_NO_HANDLE) { + /* Mutex released from another cpu. */ + xnlock_put_irqrestore(&nklock, s); + goto redo; + } + h = oldh; + } while (!xnsynch_fast_is_claimed(h)); + + owner = xnthread_lookup(h); + if (owner == NULL) { + /* + * The handle is broken, therefore pretend that the + * synch object was deleted to signal an error. + */ + xnthread_set_info(curr, XNRMID); + goto out; + } + + /* + * This is the contended path. We just detected an earlier + * syscall-less fast locking from userland, fix up the + * in-kernel state information accordingly. + * + * The consistency of the state information is guaranteed, + * because we just raised the claim bit atomically for this + * contended lock, therefore userland will have to jump to the + * kernel when releasing it, instead of doing a fast + * unlock. Since we currently own the superlock, consistency + * wrt transfer_ownership() is guaranteed through + * serialization. + * + * CAUTION: in this particular case, the only assumptions we + * can safely make is that *owner is valid but not current on + * this CPU. + */ + track_owner(synch, owner); + xnsynch_detect_relaxed_owner(synch, curr); + + if ((synch->status & XNSYNCH_PRIO) == 0) { /* i.e. FIFO */ + list_add_tail(&curr->plink, &synch->pendq); + goto block; + } + + if (curr->wprio > owner->wprio) { + if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) { + /* Ownership is still pending, steal the resource. 
*/ + set_current_owner_locked(synch, curr); + xnthread_clear_info(curr, XNRMID | XNTIMEO | XNBREAK); + xnthread_set_info(owner, XNROBBED); + goto grab; + } + + list_add_priff(curr, &synch->pendq, wprio, plink); + + if (synch->status & XNSYNCH_PI) { + raise_boost_flag(owner); + + if (synch->status & XNSYNCH_CLAIMED) + list_del(&synch->next); /* owner->boosters */ + else + synch->status |= XNSYNCH_CLAIMED; + + synch->wprio = curr->wprio; + list_add_priff(synch, &owner->boosters, wprio, next); + /* + * curr->wprio > owner->wprio implies that + * synch must be leading the booster list + * after insertion, so we may call + * inherit_thread_priority() for tracking + * current's priority directly without going + * through adjust_boost(). + */ + inherit_thread_priority(owner, curr); + } + } else + list_add_priff(curr, &synch->pendq, wprio, plink); +block: + xnthread_suspend(curr, XNPEND, timeout, timeout_mode, synch); + curr->wwake = NULL; + xnthread_clear_info(curr, XNWAKEN); + + if (xnthread_test_info(curr, XNRMID | XNTIMEO | XNBREAK)) + goto out; + + if (xnthread_test_info(curr, XNROBBED)) { + /* + * Somebody stole us the ownership while we were ready + * to run, waiting for the CPU: we need to wait again + * for the resource. + */ + if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) { + xnlock_put_irqrestore(&nklock, s); + goto redo; + } + timeout = xntimer_get_timeout_stopped(&curr->rtimer); + if (timeout > 1) { /* Otherwise, it's too late. */ + xnlock_put_irqrestore(&nklock, s); + goto redo; + } + xnthread_set_info(curr, XNTIMEO); + goto out; + } +grab: + xnthread_get_resource(curr); + + if (xnsynch_pended_p(synch)) + currh = xnsynch_fast_claimed(currh); + + /* Set new ownership for this object. */ + atomic_set(lockp, get_owner_handle(currh, synch)); +out: + xnlock_put_irqrestore(&nklock, s); + + return xnthread_test_info(curr, XNRMID|XNTIMEO|XNBREAK); +} +EXPORT_SYMBOL_GPL(xnsynch_acquire); + +static void drop_booster(struct xnsynch *synch, struct xnthread *owner) +{ + list_del(&synch->next); /* owner->boosters */ + + if (list_empty(&owner->boosters)) { + xnthread_clear_state(owner, XNBOOST); + inherit_thread_priority(owner, owner); + } else + adjust_boost(owner, NULL); +} + +static inline void clear_pi_boost(struct xnsynch *synch, + struct xnthread *owner) +{ /* nklock held, irqs off */ + synch->status &= ~XNSYNCH_CLAIMED; + drop_booster(synch, owner); +} + +static inline void clear_pp_boost(struct xnsynch *synch, + struct xnthread *owner) +{ /* nklock held, irqs off */ + synch->status &= ~XNSYNCH_CEILING; + drop_booster(synch, owner); +} + +static bool transfer_ownership(struct xnsynch *synch, + struct xnthread *lastowner) +{ /* nklock held, irqs off */ + struct xnthread *nextowner; + xnhandle_t nextownerh; + atomic_t *lockp; + + lockp = xnsynch_fastlock(synch); + + /* + * Our caller checked for contention locklessly, so we do have + * to check again under lock in a different way. 
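+	 * A waiter may have given up in the meantime (e.g. upon
+	 * timeout or forcible unblock), so FLCLAIM may still be seen
+	 * set with no thread pending anymore.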
+ */ + if (list_empty(&synch->pendq)) { + synch->owner = NULL; + atomic_set(lockp, XN_NO_HANDLE); + return false; + } + + nextowner = list_first_entry(&synch->pendq, struct xnthread, plink); + list_del(&nextowner->plink); + nextowner->wchan = NULL; + nextowner->wwake = synch; + set_current_owner_locked(synch, nextowner); + xnthread_set_info(nextowner, XNWAKEN); + xnthread_resume(nextowner, XNPEND); + + if (synch->status & XNSYNCH_CLAIMED) + clear_pi_boost(synch, lastowner); + + nextownerh = get_owner_handle(nextowner->handle, synch); + if (xnsynch_pended_p(synch)) + nextownerh = xnsynch_fast_claimed(nextownerh); + + atomic_set(lockp, nextownerh); + + return true; +} + +/** + * @fn bool xnsynch_release(struct xnsynch *synch, struct xnthread *curr) + * @brief Release a resource and pass it to the next waiting thread. + * + * This service releases the ownership of the given synchronization + * object. The thread which is currently leading the object's pending + * list, if any, is unblocked from its pending state. However, no + * reschedule is performed. + * + * This service must be used only with synchronization objects that + * track ownership (XNSYNCH_OWNER set). + * + * @param synch The descriptor address of the synchronization object + * whose ownership is changed. + * + * @param curr The descriptor address of the current thread, which + * must own the object at the time of calling. + * + * @return True if a reschedule is required. + * + * @sideeffect + * + * - The effective priority of the previous resource owner might be + * lowered to its base priority value as a consequence of the priority + * boost being cleared. + * + * - The synchronization object ownership is transfered to the + * unblocked thread. + * + * @coretags{primary-only, might-switch} + */ +bool xnsynch_release(struct xnsynch *synch, struct xnthread *curr) +{ + bool need_resched = false; + xnhandle_t currh, h; + atomic_t *lockp; + spl_t s; + + XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0); + + trace_cobalt_synch_release(synch); + + if (xnthread_put_resource(curr)) + return false; + + lockp = xnsynch_fastlock(synch); + currh = curr->handle; + /* + * FLCEIL may only be raised by the owner, or when the owner + * is blocked waiting for the synch (ownership transfer). In + * addition, only the current owner of a synch may release it, + * therefore we can't race while testing FLCEIL locklessly. + * All updates to FLCLAIM are covered by the superlock. + * + * Therefore, clearing the fastlock racelessly in this routine + * without leaking FLCEIL/FLCLAIM updates can be achieved by + * holding the superlock. + */ + xnlock_get_irqsave(&nklock, s); + + if (synch->status & XNSYNCH_CEILING) { + clear_pp_boost(synch, curr); + need_resched = true; + } + + h = atomic_cmpxchg(lockp, currh, XN_NO_HANDLE); + if ((h & ~XNSYNCH_FLCEIL) != currh) + /* FLCLAIM set, synch is contended. */ + need_resched = transfer_ownership(synch, curr); + else if (h != currh) /* FLCEIL set, FLCLAIM clear. */ + atomic_set(lockp, XN_NO_HANDLE); + + xnlock_put_irqrestore(&nklock, s); + + return need_resched; +} +EXPORT_SYMBOL_GPL(xnsynch_release); + +void xnsynch_requeue_sleeper(struct xnthread *thread) +{ /* nklock held, irqs off */ + struct xnsynch *synch = thread->wchan; + struct xnthread *owner; + + XENO_BUG_ON(COBALT, !(synch->status & XNSYNCH_PRIO)); + + /* + * Update the position in the pend queue of a thread waiting + * for a lock. This routine propagates the change throughout + * the PI chain if required. 
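+	 * The boost inherited by the owner is then recomputed from
+	 * the head of its booster list, which this requeuing may
+	 * have changed.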
+	 */
+	list_del(&thread->plink);
+	list_add_priff(thread, &synch->pendq, wprio, plink);
+	owner = synch->owner;
+
+	/* Only PI-enabled objects are of interest here. */
+	if ((synch->status & XNSYNCH_PI) == 0)
+		return;
+
+	synch->wprio = thread->wprio;
+	if (synch->status & XNSYNCH_CLAIMED)
+		list_del(&synch->next);
+	else {
+		synch->status |= XNSYNCH_CLAIMED;
+		raise_boost_flag(owner);
+	}
+
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	adjust_boost(owner, thread);
+}
+EXPORT_SYMBOL_GPL(xnsynch_requeue_sleeper);
+
+/**
+ * @fn struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
+ * @brief Access the thread leading a synch object wait queue.
+ *
+ * This service returns the descriptor address of the thread currently
+ * leading a synchronization object wait queue.
+ *
+ * @param synch The descriptor address of the target synchronization object.
+ *
+ * @return The descriptor address of the leading thread, or NULL if no
+ * thread is pending on @a synch.
+ *
+ * @coretags{unrestricted}
+ */
+struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch)
+{
+	struct xnthread *thread = NULL;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!list_empty(&synch->pendq))
+		thread = list_first_entry(&synch->pendq,
+					  struct xnthread, plink);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(xnsynch_peek_pendq);
+
+/**
+ * @fn int xnsynch_flush(struct xnsynch *synch, int reason);
+ * @brief Unblock all waiters pending on a resource.
+ *
+ * This service atomically releases all threads which currently sleep
+ * on a given resource. This service should be called by upper
+ * interfaces under circumstances requiring that the pending queue of
+ * a given resource is cleared, such as before the resource is
+ * deleted.
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to be flushed.
+ *
+ * @param reason Some flags to set in the information mask of every
+ * unblocked thread. Zero is an acceptable value. The following bits
+ * are pre-defined by Cobalt:
+ *
+ * - XNRMID should be set to indicate that the synchronization object
+ * is about to be destroyed (see xnthread_resume()).
+ *
+ * - XNBREAK should be set to indicate that the wait has been forcibly
+ * interrupted (see xnthread_unblock()).
+ *
+ * @return XNSYNCH_RESCHED is returned if at least one thread is
+ * unblocked, which means the caller should invoke xnsched_run() for
+ * applying the new scheduling state. Otherwise, XNSYNCH_DONE is
+ * returned.
+ *
+ * @sideeffect
+ *
+ * - The effective priority of the current resource owner might be
+ * lowered to its base priority value as a consequence of the priority
+ * inheritance boost being cleared.
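+ *
+ * As an illustration, a typical deletion sequence reads:
+ *
+ *	if (xnsynch_flush(&synch, XNRMID) == XNSYNCH_RESCHED)
+ *		xnsched_run();
+ *
+ * which is what xnsynch_destroy() and its callers implement between
+ * them.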
+ *
+ * @coretags{unrestricted}
+ */
+int xnsynch_flush(struct xnsynch *synch, int reason)
+{
+	struct xnthread *sleeper, *tmp;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_flush(synch);
+
+	if (list_empty(&synch->pendq)) {
+		XENO_BUG_ON(COBALT, synch->status & XNSYNCH_CLAIMED);
+		ret = XNSYNCH_DONE;
+	} else {
+		ret = XNSYNCH_RESCHED;
+		list_for_each_entry_safe(sleeper, tmp, &synch->pendq, plink) {
+			list_del(&sleeper->plink);
+			xnthread_set_info(sleeper, reason);
+			sleeper->wchan = NULL;
+			xnthread_resume(sleeper, XNPEND);
+		}
+		if (synch->status & XNSYNCH_CLAIMED)
+			clear_pi_boost(synch, synch->owner);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnsynch_flush);
+
+void xnsynch_forget_sleeper(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	struct xnsynch *synch = thread->wchan;
+	struct xnthread *owner, *target;
+
+	/*
+	 * Do all the necessary housekeeping chores to stop a thread
+	 * from waiting on a given synchronization object. Doing so
+	 * may require updating a PI chain.
+	 */
+	trace_cobalt_synch_forget(synch);
+
+	xnthread_clear_state(thread, XNPEND);
+	thread->wchan = NULL;
+	list_del(&thread->plink); /* synch->pendq */
+
+	/*
+	 * Only a sleeper leaving a PI chain triggers an update.
+	 * NOTE: PP objects never bear the CLAIMED bit.
+	 */
+	if ((synch->status & XNSYNCH_CLAIMED) == 0)
+		return;
+
+	owner = synch->owner;
+
+	if (list_empty(&synch->pendq)) {
+		/* No more sleepers: clear the PI boost. */
+		clear_pi_boost(synch, owner);
+		return;
+	}
+
+	/*
+	 * Reorder the booster queue of the current owner after we
+	 * left the wait list, then set its priority to the new
+	 * minimum required to prevent priority inversion.
+	 */
+	target = list_first_entry(&synch->pendq, struct xnthread, plink);
+	synch->wprio = target->wprio;
+	list_del(&synch->next); /* owner->boosters */
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	adjust_boost(owner, target);
+}
+EXPORT_SYMBOL_GPL(xnsynch_forget_sleeper);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED
+
+/*
+ * Detect when a thread is about to sleep on a synchronization
+ * object currently owned by someone running in secondary mode.
+ */
+void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper)
+{
+	if (xnthread_test_state(sleeper, XNWARN) &&
+	    !xnthread_test_info(sleeper, XNPIALERT) &&
+	    xnthread_test_state(synch->owner, XNRELAX)) {
+		xnthread_set_info(sleeper, XNPIALERT);
+		__xnthread_signal(sleeper, SIGDEBUG,
+				  SIGDEBUG_MIGRATE_PRIOINV);
+	} else
+		xnthread_clear_info(sleeper, XNPIALERT);
+}
+
+/*
+ * Detect when a thread is about to relax while holding booster(s)
+ * (claimed PI or active PP object), which denotes a potential for
+ * priority inversion. In such an event, any sleeper bearing the
+ * XNWARN bit will receive a SIGDEBUG notification.
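+ * This is a debug-only helper: the relax itself is not prevented,
+ * the sleepers are merely notified of the potential inversion.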
+ */ +void xnsynch_detect_boosted_relax(struct xnthread *owner) +{ + struct xnthread *sleeper; + struct xnsynch *synch; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + xnthread_for_each_booster(synch, owner) { + xnsynch_for_each_sleeper(sleeper, synch) { + if (xnthread_test_state(sleeper, XNWARN)) { + xnthread_set_info(sleeper, XNPIALERT); + __xnthread_signal(sleeper, SIGDEBUG, + SIGDEBUG_MIGRATE_PRIOINV); + } + } + } + + xnlock_put_irqrestore(&nklock, s); +} + +#endif /* CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */ + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/thread.c new file mode 100644 index 0000000..ff12f28 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/thread.c @@ -0,0 +1,2531 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>. + * Copyright (C) 2006-2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org> + * Copyright (C) 2001-2013 The Xenomai project <http://www.xenomai.org> + * + * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org> + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/signal.h> +#include <linux/pid.h> +#include <linux/sched.h> +#include <cobalt/kernel/sched.h> +#include <cobalt/kernel/timer.h> +#include <cobalt/kernel/synch.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/intr.h> +#include <cobalt/kernel/registry.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/stat.h> +#include <cobalt/kernel/trace.h> +#include <cobalt/kernel/assert.h> +#include <cobalt/kernel/select.h> +#include <cobalt/kernel/lock.h> +#include <cobalt/kernel/thread.h> +#include <pipeline/kevents.h> +#include <pipeline/inband_work.h> +#include <pipeline/sched.h> +#include <trace/events/cobalt-core.h> +#include "debug.h" + +static DECLARE_WAIT_QUEUE_HEAD(join_all); + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_thread Thread services + * @{ + */ + +static void timeout_handler(struct xntimer *timer) +{ + struct xnthread *thread = container_of(timer, struct xnthread, rtimer); + + xnthread_set_info(thread, XNTIMEO); /* Interrupts are off. */ + xnthread_resume(thread, XNDELAY); +} + +static void periodic_handler(struct xntimer *timer) +{ + struct xnthread *thread = container_of(timer, struct xnthread, ptimer); + /* + * Prevent unwanted round-robin, and do not wake up threads + * blocked on a resource. + */ + if (xnthread_test_state(thread, XNDELAY|XNPEND) == XNDELAY) + xnthread_resume(thread, XNDELAY); + + /* + * The periodic thread might have migrated to another CPU + * while passive, fix the timer affinity if need be. 
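+	 * Keeping the timer on the CPU the thread now runs on spares
+	 * a cross-CPU notification when the next period elapses.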
+ */ + xntimer_set_affinity(&thread->ptimer, thread->sched); +} + +static inline void enlist_new_thread(struct xnthread *thread) +{ /* nklock held, irqs off */ + list_add_tail(&thread->glink, &nkthreadq); + cobalt_nrthreads++; + xnvfile_touch_tag(&nkthreadlist_tag); +} + +struct kthread_arg { + struct pipeline_inband_work inband_work; /* Must be first. */ + struct xnthread *thread; + struct completion *done; +}; + +static void do_parent_wakeup(struct pipeline_inband_work *inband_work) +{ + struct kthread_arg *ka; + + ka = container_of(inband_work, struct kthread_arg, inband_work); + complete(ka->done); +} + +static inline void init_kthread_info(struct xnthread *thread) +{ + struct cobalt_threadinfo *p; + + p = pipeline_current(); + p->thread = thread; + p->process = NULL; +} + +static int map_kthread(struct xnthread *thread, struct kthread_arg *ka) +{ + int ret; + spl_t s; + + if (xnthread_test_state(thread, XNUSER)) + return -EINVAL; + + if (xnthread_current() || xnthread_test_state(thread, XNMAPPED)) + return -EBUSY; + + thread->u_window = NULL; + xnthread_pin_initial(thread); + + pipeline_init_shadow_tcb(thread); + xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL); + init_kthread_info(thread); + xnthread_set_state(thread, XNMAPPED); + xndebug_shadow_init(thread); + xnthread_run_handler(thread, map_thread); + pipeline_enable_kevents(); + + /* + * CAUTION: Soon after xnthread_init() has returned, + * xnthread_start() is commonly invoked from the root domain, + * therefore the call site may expect the started kernel + * shadow to preempt immediately. As a result of such + * assumption, start attributes (struct xnthread_start_attr) + * are often laid on the caller's stack. + * + * For this reason, we raise the completion signal to wake up + * the xnthread_init() caller only once the emerging thread is + * hardened, and __never__ before that point. Since we run + * over the Xenomai domain upon return from xnthread_harden(), + * we schedule a virtual interrupt handler in the root domain + * to signal the completion object. + */ + xnthread_resume(thread, XNDORMANT); + ret = xnthread_harden(); + + trace_cobalt_lostage_request("wakeup", current); + + ka->inband_work = (struct pipeline_inband_work) + PIPELINE_INBAND_WORK_INITIALIZER(*ka, do_parent_wakeup); + pipeline_post_inband_work(ka); + + xnlock_get_irqsave(&nklock, s); + + enlist_new_thread(thread); + /* + * Make sure xnthread_start() did not slip in from another CPU + * while we were back from wakeup_parent(). + */ + if (thread->entry == NULL) + xnthread_suspend(thread, XNDORMANT, + XN_INFINITE, XN_RELATIVE, NULL); + + xnlock_put_irqrestore(&nklock, s); + + xnthread_test_cancel(); + + xntrace_pid(xnthread_host_pid(thread), + xnthread_current_priority(thread)); + + return ret; +} + +static int kthread_trampoline(void *arg) +{ + struct kthread_arg *ka = arg; + struct xnthread *thread = ka->thread; + struct sched_param param; + int ret, policy, prio; + + /* + * It only makes sense to create Xenomai kthreads with the + * SCHED_FIFO, SCHED_NORMAL or SCHED_WEAK policies. So + * anything that is not from Xenomai's RT class is assumed to + * belong to SCHED_NORMAL linux-wise. 
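+	 * Only the regular (inband) scheduling parameters are set
+	 * here; Cobalt tracks the real-time ones on its own side.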
+ */ + if (thread->sched_class != &xnsched_class_rt) { + policy = SCHED_NORMAL; + prio = 0; + } else { + policy = SCHED_FIFO; + prio = normalize_priority(thread->cprio); + } + + param.sched_priority = prio; + sched_setscheduler(current, policy, ¶m); + + ret = map_kthread(thread, ka); + if (ret) { + printk(XENO_WARNING "failed to create kernel shadow %s\n", + thread->name); + return ret; + } + + trace_cobalt_shadow_entry(thread); + + thread->entry(thread->cookie); + + xnthread_cancel(thread); + + return 0; +} + +static inline int spawn_kthread(struct xnthread *thread) +{ + DECLARE_COMPLETION_ONSTACK(done); + struct kthread_arg ka = { + .thread = thread, + .done = &done + }; + struct task_struct *p; + + p = kthread_run(kthread_trampoline, &ka, "%s", thread->name); + if (IS_ERR(p)) + return PTR_ERR(p); + + wait_for_completion(&done); + + return 0; +} + +int __xnthread_init(struct xnthread *thread, + const struct xnthread_init_attr *attr, + struct xnsched *sched, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param) +{ + int flags = attr->flags, ret, gravity; + + flags &= ~(XNSUSP|XNBOOST); +#ifndef CONFIG_XENO_ARCH_FPU + flags &= ~XNFPU; +#endif + if ((flags & XNROOT) == 0) + flags |= XNDORMANT; + + if (attr->name) + ksformat(thread->name, + sizeof(thread->name), "%s", attr->name); + else + ksformat(thread->name, + sizeof(thread->name), "@%p", thread); + + /* + * We mirror the global user debug state into the per-thread + * state, to speed up branch taking in lib/cobalt wherever + * this needs to be tested. + */ + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) + flags |= XNDEBUG; + + thread->personality = attr->personality; + cpumask_and(&thread->affinity, &attr->affinity, &cobalt_cpu_affinity); + thread->sched = sched; + thread->state = flags; + thread->info = 0; + thread->local_info = 0; + thread->wprio = XNSCHED_IDLE_PRIO; + thread->cprio = XNSCHED_IDLE_PRIO; + thread->bprio = XNSCHED_IDLE_PRIO; + thread->lock_count = 0; + thread->rrperiod = XN_INFINITE; + thread->wchan = NULL; + thread->wwake = NULL; + thread->wcontext = NULL; + thread->res_count = 0; + thread->handle = XN_NO_HANDLE; + memset(&thread->stat, 0, sizeof(thread->stat)); + thread->selector = NULL; + INIT_LIST_HEAD(&thread->glink); + INIT_LIST_HEAD(&thread->boosters); + /* These will be filled by xnthread_start() */ + thread->entry = NULL; + thread->cookie = NULL; + init_completion(&thread->exited); + memset(xnthread_archtcb(thread), 0, sizeof(struct xnarchtcb)); + memset(thread->sigarray, 0, sizeof(thread->sigarray)); + + gravity = flags & XNUSER ? XNTIMER_UGRAVITY : XNTIMER_KGRAVITY; + xntimer_init(&thread->rtimer, &nkclock, timeout_handler, + sched, gravity); + xntimer_set_name(&thread->rtimer, thread->name); + xntimer_set_priority(&thread->rtimer, XNTIMER_HIPRIO); + xntimer_init(&thread->ptimer, &nkclock, periodic_handler, + sched, gravity); + xntimer_set_name(&thread->ptimer, thread->name); + xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO); + + thread->base_class = NULL; /* xnsched_set_policy() will set it. 
*/ + ret = xnsched_init_thread(thread); + if (ret) + goto err_out; + + ret = xnsched_set_policy(thread, sched_class, sched_param); + if (ret) + goto err_out; + + if ((flags & (XNUSER|XNROOT)) == 0) { + ret = spawn_kthread(thread); + if (ret) + goto err_out; + } + + return 0; + +err_out: + xntimer_destroy(&thread->rtimer); + xntimer_destroy(&thread->ptimer); + + return ret; +} + +void xnthread_deregister(struct xnthread *thread) +{ + if (thread->handle != XN_NO_HANDLE) + xnregistry_remove(thread->handle); + + thread->handle = XN_NO_HANDLE; +} + +char *xnthread_format_status(unsigned long status, char *buf, int size) +{ + static const char labels[] = XNTHREAD_STATE_LABELS; + int pos, c, mask; + char *wp; + + for (mask = (int)status, pos = 0, wp = buf; + mask != 0 && wp - buf < size - 2; /* 1-letter label + \0 */ + mask >>= 1, pos++) { + if ((mask & 1) == 0) + continue; + + c = labels[pos]; + + switch (1 << pos) { + case XNROOT: + c = 'R'; /* Always mark root as runnable. */ + break; + case XNREADY: + if (status & XNROOT) + continue; /* Already reported on XNROOT. */ + break; + case XNDELAY: + /* + * Only report genuine delays here, not timed + * waits for resources. + */ + if (status & XNPEND) + continue; + break; + case XNPEND: + /* Report timed waits with lowercase symbol. */ + if (status & XNDELAY) + c |= 0x20; + break; + default: + if (c == '.') + continue; + } + *wp++ = c; + } + + *wp = '\0'; + + return buf; +} + +pid_t xnthread_host_pid(struct xnthread *thread) +{ + if (xnthread_test_state(thread, XNROOT)) + return 0; + if (!xnthread_host_task(thread)) + return -1; + + return task_pid_nr(xnthread_host_task(thread)); +} + +int xnthread_set_clock(struct xnthread *thread, struct xnclock *newclock) +{ + spl_t s; + + if (thread == NULL) { + thread = xnthread_current(); + if (thread == NULL) + return -EPERM; + } + + /* Change the clock the thread's periodic timer is paced by. */ + xnlock_get_irqsave(&nklock, s); + xntimer_set_clock(&thread->ptimer, newclock); + xnlock_put_irqrestore(&nklock, s); + + return 0; +} +EXPORT_SYMBOL_GPL(xnthread_set_clock); + +xnticks_t xnthread_get_timeout(struct xnthread *thread, xnticks_t ns) +{ + struct xntimer *timer; + xnticks_t timeout; + + if (!xnthread_test_state(thread,XNDELAY)) + return 0LL; + + if (xntimer_running_p(&thread->rtimer)) + timer = &thread->rtimer; + else if (xntimer_running_p(&thread->ptimer)) + timer = &thread->ptimer; + else + return 0LL; + + timeout = xntimer_get_date(timer); + if (timeout <= ns) + return 1; + + return timeout - ns; +} +EXPORT_SYMBOL_GPL(xnthread_get_timeout); + +xnticks_t xnthread_get_period(struct xnthread *thread) +{ + xnticks_t period = 0; + /* + * The current thread period might be: + * - the value of the timer interval for periodic threads (ns/ticks) + * - or, the value of the alloted round-robin quantum (ticks) + * - or zero, meaning "no periodic activity". + */ + if (xntimer_running_p(&thread->ptimer)) + period = xntimer_interval(&thread->ptimer); + else if (xnthread_test_state(thread,XNRRB)) + period = thread->rrperiod; + + return period; +} +EXPORT_SYMBOL_GPL(xnthread_get_period); + +void xnthread_prepare_wait(struct xnthread_wait_context *wc) +{ + struct xnthread *curr = xnthread_current(); + + wc->posted = 0; + curr->wcontext = wc; +} +EXPORT_SYMBOL_GPL(xnthread_prepare_wait); + +static inline void release_all_ownerships(struct xnthread *curr) +{ + struct xnsynch *synch, *tmp; + + /* + * Release all the ownerships obtained by a thread on + * synchronization objects. 
This routine must be entered + * interrupts off. + */ + xnthread_for_each_booster_safe(synch, tmp, curr) { + xnsynch_release(synch, curr); + if (synch->cleanup) + synch->cleanup(synch); + } +} + +static inline void cleanup_tcb(struct xnthread *curr) /* nklock held, irqs off */ +{ + list_del(&curr->glink); + cobalt_nrthreads--; + xnvfile_touch_tag(&nkthreadlist_tag); + + if (xnthread_test_state(curr, XNREADY)) { + XENO_BUG_ON(COBALT, xnthread_test_state(curr, XNTHREAD_BLOCK_BITS)); + xnsched_dequeue(curr); + xnthread_clear_state(curr, XNREADY); + } + + if (xnthread_test_state(curr, XNPEND)) + xnsynch_forget_sleeper(curr); + + xnthread_set_state(curr, XNZOMBIE); + /* + * NOTE: we must be running over the root thread, or @curr + * is dormant, which means that we don't risk sched->curr to + * disappear due to voluntary rescheduling while holding the + * nklock, despite @curr bears the zombie bit. + */ + release_all_ownerships(curr); + + pipeline_finalize_thread(curr); + xnsched_forget(curr); + xnthread_deregister(curr); +} + +void __xnthread_cleanup(struct xnthread *curr) +{ + spl_t s; + + secondary_mode_only(); + + xntimer_destroy(&curr->rtimer); + xntimer_destroy(&curr->ptimer); + + if (curr->selector) { + xnselector_destroy(curr->selector); + curr->selector = NULL; + } + + xnlock_get_irqsave(&nklock, s); + cleanup_tcb(curr); + xnlock_put_irqrestore(&nklock, s); + + /* Wake up the joiner if any (we can't have more than one). */ + complete(&curr->exited); + + /* Notify our exit to xnthread_killall() if need be. */ + if (waitqueue_active(&join_all)) + wake_up(&join_all); + + /* Finalize last since this incurs releasing the TCB. */ + xnthread_run_handler_stack(curr, finalize_thread); +} + +/* + * Unwinds xnthread_init() ops for an unmapped thread. Since the + * latter must be dormant, it can't be part of any runqueue. + */ +void __xnthread_discard(struct xnthread *thread) +{ + spl_t s; + + secondary_mode_only(); + + xntimer_destroy(&thread->rtimer); + xntimer_destroy(&thread->ptimer); + + xnlock_get_irqsave(&nklock, s); + if (!list_empty(&thread->glink)) { + list_del(&thread->glink); + cobalt_nrthreads--; + xnvfile_touch_tag(&nkthreadlist_tag); + } + xnthread_deregister(thread); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(__xnthread_discard); + +/** + * @fn void xnthread_init(struct xnthread *thread,const struct xnthread_init_attr *attr,struct xnsched_class *sched_class,const union xnsched_policy_param *sched_param) + * @brief Initialize a new thread. + * + * Initializes a new thread. The thread is left dormant until it is + * actually started by xnthread_start(). + * + * @param thread The address of a thread descriptor Cobalt will use to + * store the thread-specific data. This descriptor must always be + * valid while the thread is active therefore it must be allocated in + * permanent memory. @warning Some architectures may require the + * descriptor to be properly aligned in memory; this is an additional + * reason for descriptors not to be laid in the program stack where + * alignement constraints might not always be satisfied. + * + * @param attr A pointer to an attribute block describing the initial + * properties of the new thread. Members of this structure are defined + * as follows: + * + * - name: An ASCII string standing for the symbolic name of the + * thread. This name is copied to a safe place into the thread + * descriptor. 
This name might be used in various situations by Cobalt + * for issuing human-readable diagnostic messages, so it is usually a + * good idea to provide a sensible value here. NULL is fine though + * and means "anonymous". + * + * - flags: A set of creation flags affecting the operation. The + * following flags can be part of this bitmask: + * + * - XNSUSP creates the thread in a suspended state. In such a case, + * the thread shall be explicitly resumed using the xnthread_resume() + * service for its execution to actually begin, additionally to + * issuing xnthread_start() for it. This flag can also be specified + * when invoking xnthread_start() as a starting mode. + * + * - XNUSER shall be set if @a thread will be mapped over an existing + * user-space task. Otherwise, a new kernel host task is created, then + * paired with the new Xenomai thread. + * + * - XNFPU (enable FPU) tells Cobalt that the new thread may use the + * floating-point unit. XNFPU is implicitly assumed for user-space + * threads even if not set in @a flags. + * + * - affinity: The processor affinity of this thread. Passing + * CPU_MASK_ALL means "any cpu" from the allowed core affinity mask + * (cobalt_cpu_affinity). Passing an empty set is invalid. + * + * @param sched_class The initial scheduling class the new thread + * should be assigned to. + * + * @param sched_param The initial scheduling parameters to set for the + * new thread; @a sched_param must be valid within the context of @a + * sched_class. + * + * @return 0 is returned on success. Otherwise, the following error + * code indicates the cause of the failure: + * + * - -EINVAL is returned if @a attr->flags has invalid bits set, or @a + * attr->affinity is invalid (e.g. empty). + * + * @coretags{secondary-only} + */ +int xnthread_init(struct xnthread *thread, + const struct xnthread_init_attr *attr, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param) +{ + struct xnsched *sched; + cpumask_t affinity; + int ret; + + if (attr->flags & ~(XNFPU | XNUSER | XNSUSP)) + return -EINVAL; + + /* + * Pick an initial CPU for the new thread which is part of its + * affinity mask, and therefore also part of the supported + * CPUs. This CPU may change in pin_to_initial_cpu(). + */ + cpumask_and(&affinity, &attr->affinity, &cobalt_cpu_affinity); + if (cpumask_empty(&affinity)) + return -EINVAL; + + sched = xnsched_struct(cpumask_first(&affinity)); + + ret = __xnthread_init(thread, attr, sched, sched_class, sched_param); + if (ret) + return ret; + + trace_cobalt_thread_init(thread, attr, sched_class); + + return 0; +} +EXPORT_SYMBOL_GPL(xnthread_init); + +/** + * @fn int xnthread_start(struct xnthread *thread,const struct xnthread_start_attr *attr) + * @brief Start a newly created thread. + * + * Starts a (newly) created thread, scheduling it for the first + * time. This call releases the target thread from the XNDORMANT + * state. This service also sets the initial mode for the new thread. + * + * @param thread The descriptor address of the started thread which + * must have been previously initialized by a call to xnthread_init(). + * + * @param attr A pointer to an attribute block describing the + * execution properties of the new thread. Members of this structure + * are defined as follows: + * + * - mode: The initial thread mode. The following flags can be part of + * this bitmask: + * + * - XNLOCK causes the thread to lock the scheduler when it starts. 
+ * The target thread will have to call the xnsched_unlock() + * service to unlock the scheduler. A non-preemptible thread may still + * block, in which case the lock is reasserted when the thread is + * scheduled back in. + * + * - XNSUSP makes the thread start in a suspended state. In such a + * case, the thread will have to be explicitly resumed using the + * xnthread_resume() service for its execution to actually begin. + * + * - entry: The address of the thread's body routine. In other words, + * it is the thread entry point. + * + * - cookie: A user-defined opaque cookie Cobalt will pass to the + * emerging thread as the sole argument of its entry point. + * + * @retval 0 if @a thread could be started; + * + * @retval -EBUSY if @a thread was not dormant or stopped; + * + * @coretags{task-unrestricted, might-switch} + */ +int xnthread_start(struct xnthread *thread, + const struct xnthread_start_attr *attr) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (!xnthread_test_state(thread, XNDORMANT)) { + xnlock_put_irqrestore(&nklock, s); + return -EBUSY; + } + + xnthread_set_state(thread, attr->mode & (XNTHREAD_MODE_BITS | XNSUSP)); + thread->entry = attr->entry; + thread->cookie = attr->cookie; + if (attr->mode & XNLOCK) + thread->lock_count = 1; + + /* + * A user-space thread starts immediately Cobalt-wise since we + * already have an underlying Linux context for it, so we can + * enlist it now to make it visible from the /proc interface. + */ + if (xnthread_test_state(thread, XNUSER)) + enlist_new_thread(thread); + + trace_cobalt_thread_start(thread); + + xnthread_resume(thread, XNDORMANT); + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); + + return 0; +} +EXPORT_SYMBOL_GPL(xnthread_start); + +/** + * @fn void xnthread_set_mode(int clrmask,int setmask) + * @brief Change control mode of the current thread. + * + * Change the control mode of the current thread. The control mode + * affects several behaviours of the Cobalt core regarding this + * thread. + * + * @param clrmask Clears the corresponding bits from the control mode + * before setmask is applied. The scheduler lock held by the current + * thread can be forcibly released by passing the XNLOCK bit in this + * mask. In this case, the lock nesting count is also reset to zero. + * + * @param setmask The new thread mode. The following flags may be set + * in this bitmask: + * + * - XNLOCK makes the current thread non-preemptible by other threads. + * Unless XNTRAPLB is also set for the thread, the latter may still + * block, dropping the lock temporarily, in which case, the lock will + * be reacquired automatically when the thread resumes execution. + * + * - XNWARN enables debugging notifications for the current thread. A + * SIGDEBUG (Linux-originated) signal is sent when the following + * atypical or abnormal behavior is detected: + * + * - the current thread switches to secondary mode. Such notification + * comes in handy for detecting spurious relaxes. + * + * - CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED is enabled in the kernel + * configuration, and the current thread is sleeping on a Cobalt + * mutex currently owned by a thread running in secondary mode, + * which reveals a priority inversion. + * + * - the current thread is about to sleep while holding a Cobalt + * mutex, and CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP is enabled in the + * kernel configuration. Blocking for acquiring a mutex does not + * trigger such a signal though.
+ * + * - the current thread has both XNTRAPLB and XNLOCK set, and + * attempts to block on a Cobalt service, which would cause a + * lock break. + * + * - XNTRAPLB disallows breaking the scheduler lock. In the default + * case, a thread which holds the scheduler lock is allowed to drop it + * temporarily for sleeping. If this mode bit is set, such a thread + * would return immediately with XNBREAK set from + * xnthread_suspend(). If XNWARN is set for the current thread, + * SIGDEBUG is sent in addition to raising the break condition. + * + * @coretags{primary-only, might-switch} + * + * @note Setting @a clrmask and @a setmask to zero leads to a nop, + * in which case xnthread_set_mode() returns the current mode. + */ +int xnthread_set_mode(int clrmask, int setmask) +{ + int oldmode, lock_count; + struct xnthread *curr; + spl_t s; + + primary_mode_only(); + + xnlock_get_irqsave(&nklock, s); + curr = xnsched_current_thread(); + oldmode = xnthread_get_state(curr) & XNTHREAD_MODE_BITS; + lock_count = curr->lock_count; + xnthread_clear_state(curr, clrmask & XNTHREAD_MODE_BITS); + xnthread_set_state(curr, setmask & XNTHREAD_MODE_BITS); + trace_cobalt_thread_set_mode(curr); + + if (setmask & XNLOCK) { + if (lock_count == 0) + xnsched_lock(); + } else if (clrmask & XNLOCK) { + if (lock_count > 0) { + curr->lock_count = 0; + xnthread_clear_localinfo(curr, XNLBALERT); + xnsched_run(); + } + } + + xnlock_put_irqrestore(&nklock, s); + + if (lock_count > 0) + oldmode |= XNLOCK; + + return oldmode; +} +EXPORT_SYMBOL_GPL(xnthread_set_mode); + +/** + * @fn void xnthread_suspend(struct xnthread *thread, int mask,xnticks_t timeout, xntmode_t timeout_mode,struct xnsynch *wchan) + * @brief Suspend a thread. + * + * Suspends the execution of a thread according to a given suspensive + * condition. This thread will not be eligible for scheduling until + * all the pending suspensive conditions set by this service are + * removed by one or more calls to xnthread_resume(). + * + * @param thread The descriptor address of the suspended thread. + * + * @param mask The suspension mask specifying the suspensive condition + * to add to the thread's wait mask. Possible values usable by the + * caller are: + * + * - XNSUSP. This flag forcibly suspends a thread, regardless of any + * resource to wait for. A reverse call to xnthread_resume() + * specifying the XNSUSP bit must be issued to remove this condition, + * which is cumulative with other suspension bits. @a wchan should be + * NULL when using this suspending mode. + * + * - XNDELAY. This flag denotes a counted delay wait (in ticks) whose + * duration is defined by the value of the timeout parameter. + * + * - XNPEND. This flag denotes a wait for a synchronization object to + * be signaled. The wchan argument must point to this object. A + * timeout value can be passed to bound the wait. This suspending mode + * should not be used directly by the client interface, but rather + * through the xnsynch_sleep_on() call. + * + * @param timeout The timeout which may be used to limit the time the + * thread pends on a resource. This value is a wait time given in + * nanoseconds. It can either be relative, absolute monotonic, or + * absolute adjustable depending on @a timeout_mode. + * + * Passing XN_INFINITE @b and setting @a timeout_mode to XN_RELATIVE + * specifies an unbounded wait. All other values are used to + * initialize a watchdog timer.
If the current operation mode of the + * system timer is oneshot and @a timeout elapses before + * xnthread_suspend() has completed, then the target thread will not + * be suspended, and this routine has no effect. + * + * @param timeout_mode The mode of the @a timeout parameter. It can + * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also + * xntimer_start()). + * + * @param wchan The address of a pended resource. This parameter is + * used internally by the synchronization object implementation code + * to specify on which object the suspended thread pends. NULL is a + * legitimate value when this parameter does not apply to the current + * suspending mode (e.g. XNSUSP). + * + * @note If the target thread has received a Linux-originated signal, + * then this service immediately exits without suspending the thread, + * but raises the XNBREAK condition in its information mask. + * + * @coretags{unrestricted, might-switch} + */ +void xnthread_suspend(struct xnthread *thread, int mask, + xnticks_t timeout, xntmode_t timeout_mode, + struct xnsynch *wchan) +{ + unsigned long oldstate; + struct xnsched *sched; + spl_t s; + + /* No, you certainly do not want to suspend the root thread. */ + XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT)); + /* No built-in support for conjunctive wait. */ + XENO_BUG_ON(COBALT, wchan && thread->wchan); + + xnlock_get_irqsave(&nklock, s); + + trace_cobalt_thread_suspend(thread, mask, timeout, timeout_mode, wchan); + + sched = thread->sched; + oldstate = thread->state; + + /* + * If attempting to suspend a runnable thread which is pending + * a forced switch to secondary mode (XNKICKED), just raise + * the XNBREAK status and return immediately, except if we + * are precisely performing such a switch by applying XNRELAX. + * + * In the latter case, we also make sure to clear XNKICKED, + * since we won't go through prepare_for_signal() once + * relaxed. + */ + if (likely((oldstate & XNTHREAD_BLOCK_BITS) == 0)) { + if (likely((mask & XNRELAX) == 0)) { + if (xnthread_test_info(thread, XNKICKED)) + goto abort; + if (thread == sched->curr && + thread->lock_count > 0 && + (oldstate & XNTRAPLB) != 0) + goto lock_break; + } + /* + * Do not destroy the info left behind by yet unprocessed + * wakeups when suspending a remote thread. + */ + if (thread == sched->curr) + xnthread_clear_info(thread, XNRMID|XNTIMEO|XNBREAK| + XNWAKEN|XNROBBED|XNKICKED); + } + + /* + * Don't start the timer for a thread delayed indefinitely. + */ + if (timeout != XN_INFINITE || timeout_mode != XN_RELATIVE) { + xntimer_set_affinity(&thread->rtimer, thread->sched); + if (xntimer_start(&thread->rtimer, timeout, XN_INFINITE, + timeout_mode)) { + /* (absolute) timeout value in the past, bail out. */ + if (wchan) { + thread->wchan = wchan; + xnsynch_forget_sleeper(thread); + } + xnthread_set_info(thread, XNTIMEO); + goto out; + } + xnthread_set_state(thread, XNDELAY); + } + + if (oldstate & XNREADY) { + xnsched_dequeue(thread); + xnthread_clear_state(thread, XNREADY); + } + + xnthread_set_state(thread, mask); + + /* + * We must make sure that we don't clear the wait channel if a + * thread is first blocked (wchan != NULL) then forcibly + * suspended (wchan == NULL), since these are conjunctive + * conditions. + */ + if (wchan) + thread->wchan = wchan; + + if (likely(thread == sched->curr)) { + xnsched_set_resched(sched); + /* + * Transition to secondary mode (XNRELAX) is a + * separate path which is only available to + * xnthread_relax().
Using __xnsched_run() there for + * rescheduling allows us to break the scheduler lock + * temporarily. + */ + if (unlikely(mask & XNRELAX)) { + pipeline_leave_oob_unlock(); + __xnsched_run(sched); + return; + } + /* + * If the thread is running on a remote CPU, + * xnsched_run() will trigger the IPI as required. In + * this case, sched refers to a remote runqueue, so + * make sure to always kick the rescheduling procedure + * for the local one. + */ + __xnsched_run(xnsched_current()); + goto out; + } + + /* + * Ok, this one is an interesting corner case, which requires + * a bit of background first. Here, we handle the case of + * suspending a _relaxed_ user shadow which is _not_ the + * current thread. + * + * The net effect is that we are attempting to stop the + * shadow thread for Cobalt, whilst this thread is actually + * running some code under the control of the Linux scheduler + * (i.e. it's relaxed). + * + * To make this possible, we force the target Linux task to + * migrate back to the Xenomai domain by sending it a + * SIGSHADOW signal that the interface libraries trap for this + * specific internal purpose, whose handler is expected to + * call back Cobalt's migration service. + * + * By forcing this migration, we make sure that Cobalt + * controls, hence properly stops, the target thread according + * to the requested suspension condition. Otherwise, the + * shadow thread in secondary mode would just keep running + * in the Linux domain, thus breaking the most common + * assumptions regarding suspended threads. + * + * We only care about threads that are not current, and for + * XNSUSP, XNDELAY, XNDORMANT and XNHELD conditions, because: + * + * - There is no point in dealing with a relaxed thread which + * is current, since personalities have to ask for primary + * mode switch when processing any syscall which may block the + * caller (i.e. __xn_exec_primary). + * + * - among all blocking bits (XNTHREAD_BLOCK_BITS), only + * XNSUSP, XNDELAY, XNHELD and XNDBGSTOP may be applied by the + * current thread to a non-current thread. XNPEND is always + * added by the caller to its own state, XNMIGRATE, XNRELAX + * and XNDBGSTOP have special semantics escaping this issue. + * + * We don't signal threads which are already in a dormant + * state, since they are suspended by definition. + */ + if (((oldstate & (XNTHREAD_BLOCK_BITS|XNUSER)) == (XNRELAX|XNUSER)) && + (mask & (XNDELAY | XNSUSP | XNHELD)) != 0) + __xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HARDEN); +out: + xnlock_put_irqrestore(&nklock, s); + return; + +lock_break: + /* NOTE: thread is current */ + if (xnthread_test_state(thread, XNWARN) && + !xnthread_test_localinfo(thread, XNLBALERT)) { + xnthread_set_info(thread, XNKICKED); + xnthread_set_localinfo(thread, XNLBALERT); + __xnthread_signal(thread, SIGDEBUG, SIGDEBUG_LOCK_BREAK); + } +abort: + if (wchan) { + thread->wchan = wchan; + xnsynch_forget_sleeper(thread); + } + xnthread_clear_info(thread, XNRMID | XNTIMEO); + xnthread_set_info(thread, XNBREAK); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xnthread_suspend); + +/** + * @fn void xnthread_resume(struct xnthread *thread,int mask) + * @brief Resume a thread. + * + * Resumes the execution of a thread previously suspended by one or + * more calls to xnthread_suspend(). This call removes a suspensive + * condition affecting the target thread. When all suspensive + * conditions are gone, the thread is left in a READY state at which + * point it becomes eligible anew for scheduling.
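+ * + * For illustration only (this snippet is not part of the original + * documentation; @a t stands for a valid, started thread): a forced + * suspension/resumption cycle built on the XNSUSP condition could + * read as follows: + * + * @code + * xnthread_suspend(t, XNSUSP, XN_INFINITE, XN_RELATIVE, NULL); + * // ... later on, lift the XNSUSP condition ... + * xnthread_resume(t, XNSUSP); + * xnsched_run(); // commit the rescheduling + * @endcode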
+ * + * @param thread The descriptor address of the resumed thread. + * + * @param mask The suspension mask specifying the suspensive condition + * to remove from the thread's wait mask. Possible values usable by + * the caller are: + * + * - XNSUSP. This flag removes the explicit suspension condition. This + * condition might be additive to the XNPEND condition. + * + * - XNDELAY. This flag removes the counted delay wait condition. + * + * - XNPEND. This flag removes the resource wait condition. If a + * watchdog is armed, it is automatically disarmed by this + * call. Unlike the two previous conditions, only the current thread + * can set this condition for itself, i.e. no thread can force another + * one to pend on a resource. + * + * When the thread is eventually resumed by one or more calls to + * xnthread_resume(), the caller of xnthread_suspend() in the awakened + * thread that suspended itself should check for the following bits in + * its own information mask to determine what caused its wake up: + * + * - XNRMID means that the caller must assume that the pended + * synchronization object has been destroyed (see xnsynch_flush()). + * + * - XNTIMEO means that the delay elapsed, or the watchdog went off + * before the corresponding synchronization object was signaled. + * + * - XNBREAK means that the wait has been forcibly broken by a call to + * xnthread_unblock(). + * + * @coretags{unrestricted, might-switch} + */ +void xnthread_resume(struct xnthread *thread, int mask) +{ + unsigned long oldstate; + struct xnsched *sched; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + trace_cobalt_thread_resume(thread, mask); + + xntrace_pid(xnthread_host_pid(thread), xnthread_current_priority(thread)); + + sched = thread->sched; + oldstate = thread->state; + + if ((oldstate & XNTHREAD_BLOCK_BITS) == 0) { + if (oldstate & XNREADY) + xnsched_dequeue(thread); + goto enqueue; + } + + /* Clear the specified block bit(s) */ + xnthread_clear_state(thread, mask); + + /* + * If XNDELAY was set in the clear mask, xnthread_unblock() + * was called for the thread, or a timeout has elapsed. In the + * latter case, stopping the timer is a no-op. + */ + if (mask & XNDELAY) + xntimer_stop(&thread->rtimer); + + if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) + goto clear_wchan; + + if (mask & XNDELAY) { + mask = xnthread_test_state(thread, XNPEND); + if (mask == 0) + goto unlock_and_exit; + if (thread->wchan) + xnsynch_forget_sleeper(thread); + goto recheck_state; + } + + if (xnthread_test_state(thread, XNDELAY)) { + if (mask & XNPEND) { + /* + * A resource became available to the thread. + * Cancel the watchdog timer. + */ + xntimer_stop(&thread->rtimer); + xnthread_clear_state(thread, XNDELAY); + } + goto recheck_state; + } + + /* + * The thread is still suspended, but is no more pending on a + * resource. + */ + if ((mask & XNPEND) != 0 && thread->wchan) + xnsynch_forget_sleeper(thread); + + goto unlock_and_exit; + +recheck_state: + if (xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) + goto unlock_and_exit; + +clear_wchan: + if ((mask & ~XNDELAY) != 0 && thread->wchan != NULL) + /* + * If the thread was actually suspended, clear the + * wait channel. -- this allows requests like + * xnthread_suspend(thread,XNDELAY,...) not to run + * the following code when the suspended thread is + * woken up while undergoing a simple delay. 
+ */ + xnsynch_forget_sleeper(thread); + + if (unlikely((oldstate & mask) & XNHELD)) { + xnsched_requeue(thread); + goto ready; + } +enqueue: + xnsched_enqueue(thread); +ready: + xnthread_set_state(thread, XNREADY); + xnsched_set_resched(sched); +unlock_and_exit: + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xnthread_resume); + +/** + * @fn int xnthread_unblock(struct xnthread *thread) + * @brief Unblock a thread. + * + * Breaks the thread out of any wait it is currently in. This call + * removes the XNDELAY and XNPEND suspensive conditions previously put + * by xnthread_suspend() on the target thread. If all suspensive + * conditions are gone, the thread is left in a READY state at which + * point it becomes eligible anew for scheduling. + * + * @param thread The descriptor address of the unblocked thread. + * + * This call does not release the thread from the XNSUSP, XNRELAX, + * XNDORMANT or XNHELD suspensive conditions. + * + * When the thread resumes execution, the XNBREAK bit is set in the + * unblocked thread's information mask. Unblocking a non-blocked + * thread is perfectly harmless. + * + * @return non-zero is returned if the thread was actually unblocked + * from a pending wait state, 0 otherwise. + * + * @coretags{unrestricted, might-switch} + */ +int xnthread_unblock(struct xnthread *thread) +{ + int ret = 1; + spl_t s; + + /* + * Attempt to abort an ongoing wait for the given thread. + * If this state is due to an alarm that has been armed to + * limit the sleeping thread's waiting time while it pends for + * a resource, the corresponding XNPEND state will be cleared + * by xnthread_resume() in the same move. Otherwise, this call + * may abort an ongoing infinite wait for a resource (if + * any). + */ + xnlock_get_irqsave(&nklock, s); + + trace_cobalt_thread_unblock(thread); + + if (xnthread_test_state(thread, XNDELAY)) + xnthread_resume(thread, XNDELAY); + else if (xnthread_test_state(thread, XNPEND)) + xnthread_resume(thread, XNPEND); + else + ret = 0; + + /* + * We should not clear a previous break state if this service + * is called more than once before the target thread actually + * resumes, so we only set the bit here and never clear + * it. However, we must not raise the XNBREAK bit if the + * target thread was already awake at the time of this call, + * so that downstream code does not get confused by some + * "successful but interrupted syscall" condition. IOW, a + * break state raised here must always trigger an error code + * downstream, and an already successful syscall cannot be + * marked as interrupted. + */ + if (ret) + xnthread_set_info(thread, XNBREAK); + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnthread_unblock); + +/** + * @fn int xnthread_set_periodic(struct xnthread *thread,xnticks_t idate, xntmode_t timeout_mode, xnticks_t period) + * @brief Make a thread periodic. + * + * Make a thread periodic by programming its first release point and + * its period in the processor timeline. Subsequent calls to + * xnthread_wait_period() will delay the thread until the next + * periodic release point in the processor timeline is reached. + * + * @param thread The core thread to make periodic. If NULL, the + * current thread is assumed. + * + * @param idate The initial (absolute) date of the first release + * point, expressed in nanoseconds. The affected thread will be + * delayed by the first call to xnthread_wait_period() until this + * point is reached.
If @a idate is equal to XN_INFINITE, the first + * release point is set to @a period nanoseconds after the current + * date. In the latter case, @a timeout_mode is not considered and can + * have any valid value. + * + * @param timeout_mode The mode of the @a idate parameter. It can + * either be set to XN_ABSOLUTE or XN_REALTIME with @a idate different + * from XN_INFINITE (see also xntimer_start()). + * + * @param period The period of the thread, expressed in nanoseconds. + * As a side-effect, passing XN_INFINITE attempts to stop the thread's + * periodic timer; in the latter case, the routine always exits + * successfully, regardless of the previous state of this timer. + * + * @return 0 is returned upon success. Otherwise: + * + * - -ETIMEDOUT is returned if @a idate is different from XN_INFINITE and + * represents a date in the past. + * + * - -EINVAL is returned if @a period is different from XN_INFINITE + * but shorter than the scheduling latency value for the target + * system, as available from /proc/xenomai/latency. -EINVAL is also + * returned if @a timeout_mode is not compatible with @a idate, such + * as XN_RELATIVE with @a idate different from XN_INFINITE. + * + * - -EPERM is returned if @a thread is NULL, but the caller is not a + * Xenomai thread. + * + * @coretags{task-unrestricted} + */ +int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate, + xntmode_t timeout_mode, xnticks_t period) +{ + int ret = 0; + spl_t s; + + if (thread == NULL) { + thread = xnthread_current(); + if (thread == NULL) + return -EPERM; + } + + xnlock_get_irqsave(&nklock, s); + + if (period == XN_INFINITE) { + if (xntimer_running_p(&thread->ptimer)) + xntimer_stop(&thread->ptimer); + + goto unlock_and_exit; + } + + /* + * LART: detect periods which are shorter than the core clock + * gravity for kernel thread timers. This can't work, caller + * must have messed up arguments. + */ + if (period < xnclock_ticks_to_ns(&nkclock, + xnclock_get_gravity(&nkclock, kernel))) { + ret = -EINVAL; + goto unlock_and_exit; + } + + xntimer_set_affinity(&thread->ptimer, thread->sched); + + if (idate == XN_INFINITE) + xntimer_start(&thread->ptimer, period, period, XN_RELATIVE); + else { + if (timeout_mode == XN_REALTIME) + idate -= xnclock_get_offset(xntimer_clock(&thread->ptimer)); + else if (timeout_mode != XN_ABSOLUTE) { + ret = -EINVAL; + goto unlock_and_exit; + } + ret = xntimer_start(&thread->ptimer, idate, period, + XN_ABSOLUTE); + } + +unlock_and_exit: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnthread_set_periodic); + +/** + * @fn int xnthread_wait_period(unsigned long *overruns_r) + * @brief Wait for the next periodic release point. + * + * Make the current thread wait for the next periodic release point in + * the processor timeline. + * + * @param overruns_r If non-NULL, @a overruns_r must be a pointer to a + * memory location which will be written with the count of pending + * overruns. This value is copied only when xnthread_wait_period() + * returns -ETIMEDOUT or success; the memory location remains + * unmodified otherwise. If NULL, this count will never be copied + * back. + * + * @return 0 is returned upon success; if @a overruns_r is valid, zero + * is copied to the pointed memory location. Otherwise: + * + * - -EWOULDBLOCK is returned if xnthread_set_periodic() has not + * previously been called for the calling thread.
+ * + * - -EINTR is returned if xnthread_unblock() has been called for the + * waiting thread before the next periodic release point has been + * reached. In this case, the overrun counter is reset too. + * + * - -ETIMEDOUT is returned if the timer has overrun, which indicates + * that one or more previous release points have been missed by the + * calling thread. If @a overruns_r is valid, the count of pending + * overruns is copied to the pointed memory location. + * + * @coretags{primary-only, might-switch} + */ +int xnthread_wait_period(unsigned long *overruns_r) +{ + unsigned long overruns = 0; + struct xnthread *thread; + struct xnclock *clock; + xnticks_t now; + int ret = 0; + spl_t s; + + thread = xnthread_current(); + + xnlock_get_irqsave(&nklock, s); + + if (unlikely(!xntimer_running_p(&thread->ptimer))) { + ret = -EWOULDBLOCK; + goto out; + } + + trace_cobalt_thread_wait_period(thread); + + clock = xntimer_clock(&thread->ptimer); + now = xnclock_read_raw(clock); + if (likely((xnsticks_t)(now - xntimer_pexpect(&thread->ptimer)) < 0)) { + xnthread_suspend(thread, XNDELAY, XN_INFINITE, XN_RELATIVE, NULL); + if (unlikely(xnthread_test_info(thread, XNBREAK))) { + ret = -EINTR; + goto out; + } + + now = xnclock_read_raw(clock); + } + + overruns = xntimer_get_overruns(&thread->ptimer, thread, now); + if (overruns) { + ret = -ETIMEDOUT; + trace_cobalt_thread_missed_period(thread); + } + + if (likely(overruns_r != NULL)) + *overruns_r = overruns; + out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnthread_wait_period); + +/** + * @fn int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum) + * @brief Set thread time-slicing information. + * + * Update the time-slicing information for a given thread. This + * service enables or disables round-robin scheduling for the thread, + * depending on the value of @a quantum. By default, time-slicing is + * disabled for a new thread initialized by a call to xnthread_init(). + * + * @param thread The descriptor address of the affected thread. + * + * @param quantum The time quantum assigned to the thread expressed in + * nanoseconds. If @a quantum is different from XN_INFINITE, the + * time-slice for the thread is set to that value and its current time + * credit is refilled (i.e. the thread is given a full time-slice to + * run next). Otherwise, if @a quantum equals XN_INFINITE, + * time-slicing is stopped for that thread. + * + * @return 0 is returned upon success. Otherwise, -EINVAL is returned + * if @a quantum is not XN_INFINITE and: + * + * - the base scheduling class of the target thread does not support + * time-slicing, + * + * - @a quantum is smaller than the master clock gravity for a user + * thread, which denotes a spurious value.
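+ * + * As an illustration only (this snippet is not part of the original + * documentation; @a t stands for a valid thread): + * + * @code + * int ret = xnthread_set_slice(t, 1000000); // 1 ms quantum + * // ... round-robin is in effect within t's priority group ... + * ret = xnthread_set_slice(t, XN_INFINITE); // stop time-slicing + * @endcode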
+ * + * @coretags{task-unrestricted} + */ +int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum) +{ + struct xnsched *sched; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + sched = thread->sched; + thread->rrperiod = quantum; + + if (quantum != XN_INFINITE) { + if (quantum <= xnclock_get_gravity(&nkclock, user) || + thread->base_class->sched_tick == NULL) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + xnthread_set_state(thread, XNRRB); + if (sched->curr == thread) + xntimer_start(&sched->rrbtimer, + quantum, XN_INFINITE, XN_RELATIVE); + } else { + xnthread_clear_state(thread, XNRRB); + if (sched->curr == thread) + xntimer_stop(&sched->rrbtimer); + } + + xnlock_put_irqrestore(&nklock, s); + + return 0; +} +EXPORT_SYMBOL_GPL(xnthread_set_slice); + +/** + * @fn void xnthread_cancel(struct xnthread *thread) + * @brief Cancel a thread. + * + * Request cancellation of a thread. This service forces @a thread to + * exit from any blocking call, then to switch to secondary mode. + * @a thread will terminate as soon as it reaches a cancellation + * point. Cancellation points are defined for the following + * situations: + * + * - @a thread self-cancels by a call to xnthread_cancel(). + * - @a thread invokes a Linux syscall (user-space shadow only). + * - @a thread receives a Linux signal (user-space shadow only). + * - @a thread unblocks from a Xenomai syscall (user-space shadow only). + * - @a thread attempts to block on a Xenomai syscall (user-space shadow only). + * - @a thread explicitly calls xnthread_test_cancel(). + * + * @param thread The descriptor address of the thread to terminate. + * + * @coretags{task-unrestricted, might-switch} + * + * @note In addition to the common actions taken upon cancellation, a + * thread which belongs to the SCHED_WEAK class is sent a regular + * SIGTERM signal. + */ +void xnthread_cancel(struct xnthread *thread) +{ + spl_t s; + + /* Right, so you want to kill the kernel?! */ + XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT)); + + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_info(thread, XNCANCELD)) + goto check_self_cancel; + + trace_cobalt_thread_cancel(thread); + + xnthread_set_info(thread, XNCANCELD); + + /* + * If @thread is not started yet, fake a start request, + * raising the kicked condition bit to make sure it will reach + * xnthread_test_cancel() on its wakeup path. + */ + if (xnthread_test_state(thread, XNDORMANT)) { + xnthread_set_info(thread, XNKICKED); + xnthread_resume(thread, XNDORMANT); + goto out; + } + +check_self_cancel: + if (xnthread_current() == thread) { + xnlock_put_irqrestore(&nklock, s); + xnthread_test_cancel(); + /* + * May return if on behalf of an IRQ handler which has + * preempted @thread. + */ + return; + } + + /* + * Force the non-current thread to exit: + * + * - unblock a user thread, switch it to weak scheduling, + * then send it SIGTERM. + * + * - just unblock a kernel thread, it is expected to reach a + * cancellation point soon after + * (i.e. xnthread_test_cancel()). 
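+ * + * Side note (an illustration, not part of the original comment): + * the canonical caller-side teardown sequence funneling into this + * path is: + * + * xnthread_cancel(t); + * xnthread_join(t, true); + * + * which is what rtdm_task_destroy() does for RTDM kernel tasks.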
+ */ + if (xnthread_test_state(thread, XNUSER)) { + __xnthread_demote(thread); + __xnthread_signal(thread, SIGTERM, 0); + } else + __xnthread_kick(thread); +out: + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xnthread_cancel); + +struct wait_grace_struct { + struct completion done; + struct rcu_head rcu; +}; + +static void grace_elapsed(struct rcu_head *head) +{ + struct wait_grace_struct *wgs; + + wgs = container_of(head, struct wait_grace_struct, rcu); + complete(&wgs->done); +} + +static void wait_for_rcu_grace_period(struct pid *pid) +{ + struct wait_grace_struct wait = { + .done = COMPLETION_INITIALIZER_ONSTACK(wait.done), + }; + struct task_struct *p; + + init_rcu_head_on_stack(&wait.rcu); + + for (;;) { + call_rcu(&wait.rcu, grace_elapsed); + wait_for_completion(&wait.done); + if (pid == NULL) + break; + rcu_read_lock(); + p = pid_task(pid, PIDTYPE_PID); + rcu_read_unlock(); + if (p == NULL) + break; + reinit_completion(&wait.done); + } +} + +/** + * @fn void xnthread_join(struct xnthread *thread, bool uninterruptible) + * @brief Join with a terminated thread. + * + * This service waits for @a thread to terminate after a call to + * xnthread_cancel(). If that thread has already terminated or is + * dormant at the time of the call, then xnthread_join() returns + * immediately. + * + * xnthread_join() adapts to the calling context (primary or + * secondary), switching to secondary mode if needed for the duration + * of the wait. Upon return, the original runtime mode is restored, + * unless a Linux signal is pending. + * + * @param thread The descriptor address of the thread to join with. + * + * @param uninterruptible Boolean telling whether the service should + * wait for completion uninterruptibly. + * + * @return 0 is returned on success. Otherwise, the following error + * codes indicate the cause of the failure: + * + * - -EDEADLK is returned if the current thread attempts to join + * itself. + * + * - -EINTR is returned if the current thread was unblocked while + * waiting for @a thread to terminate. + * + * - -EBUSY indicates that another thread is already waiting for @a + * thread to terminate. + * + * @coretags{task-unrestricted, might-switch} + */ +int xnthread_join(struct xnthread *thread, bool uninterruptible) +{ + struct xnthread *curr = xnthread_current(); + int ret = 0, switched = 0; + struct pid *pid; + pid_t tpid; + spl_t s; + + XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT)); + + if (thread == curr) + return -EDEADLK; + + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_state(thread, XNJOINED)) { + ret = -EBUSY; + goto out; + } + + if (xnthread_test_info(thread, XNDORMANT)) + goto out; + + trace_cobalt_thread_join(thread); + + xnthread_set_state(thread, XNJOINED); + tpid = xnthread_host_pid(thread); + + if (curr && !xnthread_test_state(curr, XNRELAX)) { + xnlock_put_irqrestore(&nklock, s); + xnthread_relax(0, 0); + switched = 1; + } else + xnlock_put_irqrestore(&nklock, s); + + /* + * Since, in theory, we might be sleeping there for a long + * time, we get a reference on the pid struct holding our + * target, then we check for its existence upon wake up. + */ + pid = find_get_pid(tpid); + if (pid == NULL) + goto done; + + /* + * We have a tricky issue to deal with, which involves code + * relying on the assumption that a destroyed thread will have + * scheduled away from do_exit() before xnthread_join() + * returns.
A typical example is illustrated by the following + * sequence, with an RTDM kernel task implemented in a + * dynamically loaded module: + * + * CPU0: rtdm_task_destroy(ktask) + * xnthread_cancel(ktask) + * xnthread_join(ktask) + * ...<back to user>.. + * rmmod(module) + * + * CPU1: in ktask() + * ... + * ... + * __xnthread_test_cancel() + * do_exit() + * schedule() + * + * In such a sequence, the code on CPU0 would expect the RTDM + * task to have scheduled away upon return from + * rtdm_task_destroy(), so that unmapping the destroyed task + * code and data memory when unloading the module is always + * safe. + * + * To address this, the joiner first waits for the joinee to + * signal completion from the Cobalt thread cleanup handler + * (__xnthread_cleanup), then waits for a full RCU grace + * period to have elapsed. Since the completion signal is sent + * on behalf of do_exit(), we may assume that the joinee has + * scheduled away before the RCU grace period ends. + */ + if (uninterruptible) + wait_for_completion(&thread->exited); + else { + ret = wait_for_completion_interruptible(&thread->exited); + if (ret < 0) { + put_pid(pid); + return -EINTR; + } + } + + /* Make sure the joinee has scheduled away ultimately. */ + wait_for_rcu_grace_period(pid); + + put_pid(pid); +done: + ret = 0; + if (switched) + ret = xnthread_harden(); + + return ret; +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnthread_join); + +#ifdef CONFIG_SMP + +void xnthread_migrate_passive(struct xnthread *thread, struct xnsched *sched) +{ /* nklocked, IRQs off */ + if (thread->sched == sched) + return; + + trace_cobalt_thread_migrate_passive(thread, xnsched_cpu(sched)); + /* + * Timer migration is postponed until the next timeout happens + * for the periodic and rrb timers. The resource timer will be + * moved to the right CPU next time it is armed in + * xnthread_suspend(). + */ + xnsched_migrate_passive(thread, sched); + + xnstat_exectime_reset_stats(&thread->stat.lastperiod); +} + +#endif /* CONFIG_SMP */ + +/** + * @fn int xnthread_set_schedparam(struct xnthread *thread,struct xnsched_class *sched_class,const union xnsched_policy_param *sched_param) + * @brief Change the base scheduling parameters of a thread. + * + * Changes the base scheduling policy and parameters of a thread. If + * the thread is currently blocked, waiting in priority-pending mode + * (XNSYNCH_PRIO) for a synchronization object to be signaled, Cobalt + * will attempt to reorder the object's wait queue so that it reflects + * the new sleeper's priority, unless the XNSYNCH_DREORD flag has been + * set for the pended object. + * + * @param thread The descriptor address of the affected thread. See + * note. + * + * @param sched_class The new scheduling class the thread should be + * assigned to. + * + * @param sched_param The scheduling parameters to set for the thread; + * @a sched_param must be valid within the context of @a sched_class. + * + * It is absolutely required to use this service to change a thread + * priority, in order to have all the needed housekeeping chores + * correctly performed, i.e. do *not* call xnsched_set_policy() + * directly or, worse, change the thread.cprio field by hand in any + * case. + * + * @return 0 is returned on success. Otherwise, a negative error code + * indicates the cause of a failure that happened in the scheduling + * class implementation for @a sched_class. Invalid parameters passed + * into @a sched_param are common causes of error.
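+ * + * For illustration only (this snippet is not part of the original + * documentation; @a t stands for a valid Cobalt thread): + * + * @code + * union xnsched_policy_param param; + * int ret; + * + * param.rt.prio = 50; // requested priority in the RT class + * ret = xnthread_set_schedparam(t, &xnsched_class_rt, &param); + * if (ret) + * return ret; // e.g. invalid priority for this class + * @endcode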
+ * + * @sideeffect + * + * - This service does not call the rescheduling procedure but may + * affect the state of the run queue for the previous and new + * scheduling classes. + * + * - Assigning the same scheduling class and parameters to a running + * or ready thread moves it to the end of the run queue, thus causing + * a manual round-robin, unless a priority boost is in progress. + * + * @coretags{task-unrestricted} + * + * @note The changes only apply to the Xenomai scheduling parameters + * for @a thread. There is no propagation/translation of such changes + * to the Linux scheduler for the task mated to the Xenomai target + * thread. + */ +int xnthread_set_schedparam(struct xnthread *thread, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param) +{ + spl_t s; + int ret; + + xnlock_get_irqsave(&nklock, s); + ret = __xnthread_set_schedparam(thread, sched_class, sched_param); + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(xnthread_set_schedparam); + +int __xnthread_set_schedparam(struct xnthread *thread, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param) +{ + int old_wprio, new_wprio, ret; + + old_wprio = thread->wprio; + + ret = xnsched_set_policy(thread, sched_class, sched_param); + if (ret) + return ret; + + new_wprio = thread->wprio; + + /* + * If the thread is waiting on a synchronization object, + * update its position in the corresponding wait queue, unless + * 1) reordering is explicitly disabled, or 2) the (weighted) + * priority has not changed (to prevent spurious round-robin + * effects). + */ + if (old_wprio != new_wprio && thread->wchan && + (thread->wchan->status & (XNSYNCH_DREORD|XNSYNCH_PRIO)) + == XNSYNCH_PRIO) + xnsynch_requeue_sleeper(thread); + /* + * We should not move the thread to the end of its priority + * group, if any of these conditions is true: + * + * - thread is not runnable; + * - thread bears the ready bit which means that xnsched_set_policy() + * already reordered the run queue; + * - thread currently holds the scheduler lock, so we don't want + * any round-robin effect to take place; + * - a priority boost is in progress for this thread. + */ + if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS|XNREADY|XNBOOST) && + thread->lock_count == 0) + xnsched_putback(thread); + + xnthread_set_info(thread, XNSCHEDP); + /* Ask the target thread to call back if relaxed. */ + if (xnthread_test_state(thread, XNRELAX)) + __xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HOME); + + return ret; +} + +void __xnthread_test_cancel(struct xnthread *curr) +{ + /* + * Just in case xnthread_test_cancel() is called from an IRQ + * handler, in which case we may not take the exit path. + * + * NOTE: curr->sched is stable from our POV and can't change + * under our feet. + */ + if (curr->sched->lflags & XNINIRQ) + return; + + if (!xnthread_test_state(curr, XNRELAX)) + xnthread_relax(0, 0); + + do_exit(0); + /* ... won't return ... */ + XENO_BUG(COBALT); +} +EXPORT_SYMBOL_GPL(__xnthread_test_cancel); + +/** + * @internal + * @fn int xnthread_harden(void); + * @brief Migrate a Linux task to the Xenomai domain. + * + * This service causes the transition of "current" from the Linux + * domain to Xenomai. The shadow will resume in the Xenomai domain upon + * returning from schedule().
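+ * + * Illustrative sketch only (not part of the original documentation): + * a syscall prologue requiring primary mode would typically do: + * + * @code + * ret = xnthread_harden(); + * if (ret) // -EPERM or -ERESTARTSYS + * return ret; + * // from this point, the caller runs on the out-of-band stage + * @endcode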
+ * + * @coretags{secondary-only, might-switch} + */ +int xnthread_harden(void) +{ + struct task_struct *p = current; + struct xnthread *thread; + int ret; + + secondary_mode_only(); + + thread = xnthread_current(); + if (thread == NULL) + return -EPERM; + + if (signal_pending(p)) + return -ERESTARTSYS; + + trace_cobalt_shadow_gohard(thread); + + xnthread_clear_sync_window(thread, XNRELAX); + + ret = pipeline_leave_inband(); + if (ret) { + xnthread_test_cancel(); + xnthread_set_sync_window(thread, XNRELAX); + return ret; + } + + /* "current" is now running on the out-of-band stage. */ + + xnlock_clear_irqon(&nklock); + xnthread_test_cancel(); + + trace_cobalt_shadow_hardened(thread); + + /* + * Recheck pending signals once again. As we block task + * wakeups during the migration and handle_sigwake_event() + * ignores signals until XNRELAX is cleared, any signal + * between entering TASK_HARDENING and starting the migration + * is just silently queued up to here. + */ + if (signal_pending(p)) { + xnthread_relax(!xnthread_test_state(thread, XNSSTEP), + SIGDEBUG_MIGRATE_SIGNAL); + return -ERESTARTSYS; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xnthread_harden); + +struct lostage_wakeup { + struct pipeline_inband_work inband_work; /* Must be first. */ + struct task_struct *task; +}; + +static void lostage_task_wakeup(struct pipeline_inband_work *inband_work) +{ + struct lostage_wakeup *rq; + struct task_struct *p; + + rq = container_of(inband_work, struct lostage_wakeup, inband_work); + p = rq->task; + + trace_cobalt_lostage_wakeup(p); + + wake_up_process(p); +} + +void __xnthread_propagate_schedparam(struct xnthread *curr) +{ + int kpolicy = SCHED_FIFO, kprio = curr->bprio, ret; + struct task_struct *p = current; + struct sched_param param; + spl_t s; + + /* + * Test-set race for XNSCHEDP is ok; the propagation is meant + * to be done asap but not guaranteed to be carried out + * immediately, and the request will remain pending until it + * is eventually handled. We just have to protect against a + * set-clear race. + */ + xnlock_get_irqsave(&nklock, s); + xnthread_clear_info(curr, XNSCHEDP); + xnlock_put_irqrestore(&nklock, s); + + /* + * Map our policies/priorities to the regular kernel's + * (approximated). + */ + if (xnthread_test_state(curr, XNWEAK) && kprio == 0) + kpolicy = SCHED_NORMAL; + else if (kprio >= MAX_RT_PRIO) + kprio = MAX_RT_PRIO - 1; + + if (p->policy != kpolicy || (kprio > 0 && p->rt_priority != kprio)) { + param.sched_priority = kprio; + ret = sched_setscheduler_nocheck(p, kpolicy, &param); + XENO_WARN_ON(COBALT, ret != 0); + } +} + +/** + * @internal + * @fn void xnthread_relax(int notify, int reason); + * @brief Switch a shadow thread back to the Linux domain. + * + * This service yields the control of the running shadow back to + * Linux. This is obtained by suspending the shadow and scheduling a + * wake up call for the mated user task inside the Linux domain. The + * Linux task will resume on return from xnthread_suspend() on behalf + * of the root thread. + * + * @param notify A boolean flag indicating whether threads monitored + * for secondary mode switches should be sent a SIGDEBUG signal. For + * instance, some internal operations like task exit should not + * trigger such a signal. + * + * @param reason The reason to report along with the SIGDEBUG signal. + * + * @coretags{primary-only, might-switch} + * + * @note "current" is valid here since the shadow runs with the + * properties of the Linux task.
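+ * + * As an illustration only (not part of the original documentation, + * and assuming the SIGDEBUG_MIGRATE_SYSCALL reason code): a service + * willing to block over the regular kernel would leave primary mode + * this way, notifying SIGDEBUG-monitored threads: + * + * @code + * xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL); + * // "current" now runs in secondary mode, under Linux control + * @endcode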
+ */ +void xnthread_relax(int notify, int reason) +{ + struct task_struct *p = current; + struct lostage_wakeup wakework = { + .inband_work = PIPELINE_INBAND_WORK_INITIALIZER(wakework, + lostage_task_wakeup), + .task = p, + }; + struct xnthread *thread = xnthread_current(); + int cpu __maybe_unused, suspension; + kernel_siginfo_t si; + + primary_mode_only(); + + /* + * Enqueue the request to move the running shadow from the Xenomai + * domain to the Linux domain. This will cause the Linux task + * to resume using the register state of the shadow thread. + */ + trace_cobalt_shadow_gorelax(reason); + + /* + * If you intend to change the following interrupt-free + * sequence, /first/ make sure to check the special handling + * of XNRELAX in xnthread_suspend() when switching out the + * current thread, not to break basic assumptions we make + * there. + * + * We disable interrupts during the migration sequence, but + * xnthread_suspend() has an interrupts-on section built in. + */ + splmax(); + trace_cobalt_lostage_request("wakeup", p); + pipeline_post_inband_work(&wakework); + /* + * Grab the nklock to synchronize the Linux task state + * manipulation with handle_sigwake_event. This lock will be + * dropped by xnthread_suspend(). + */ + xnlock_get(&nklock); + xnthread_run_handler_stack(thread, relax_thread); + suspension = pipeline_leave_oob_prepare(); + xnthread_suspend(thread, suspension, XN_INFINITE, XN_RELATIVE, NULL); + splnone(); + + /* + * Basic sanity check after an expected transition to secondary + * mode. + */ + XENO_WARN(COBALT, is_primary_domain(), + "xnthread_relax() failed for thread %s[%d]", + thread->name, xnthread_host_pid(thread)); + + pipeline_leave_oob_finish(); + + /* Account for secondary mode switch. */ + xnstat_counter_inc(&thread->stat.ssw); + + /* + * When relaxing, we check for propagating to the regular + * kernel new Cobalt schedparams that might have been set for + * us while we were running in primary mode. + * + * CAUTION: This obviously won't update the schedparams cached + * by the glibc for the caller in user-space, but this is the + * deal: we don't relax threads which issue + * pthread_setschedparam[_ex]() from primary mode, but then + * only the kernel side (Cobalt and the host kernel) will be + * aware of the change, and glibc might cache obsolete + * information. + */ + xnthread_propagate_schedparam(thread); + + if (xnthread_test_state(thread, XNUSER) && notify) { + if (xnthread_test_state(thread, XNWARN)) { + /* Help debugging spurious relaxes. */ + xndebug_notify_relax(thread, reason); + memset(&si, 0, sizeof(si)); + si.si_signo = SIGDEBUG; + si.si_code = SI_QUEUE; + si.si_int = reason | sigdebug_marker; + send_sig_info(SIGDEBUG, &si, p); + } + xnsynch_detect_boosted_relax(thread); + } + + /* + * "current" is now running into the Linux domain on behalf of + * the root thread. + */ + xnthread_sync_window(thread); + +#ifdef CONFIG_SMP + if (xnthread_test_localinfo(thread, XNMOVED)) { + xnthread_clear_localinfo(thread, XNMOVED); + cpu = xnsched_cpu(thread->sched); + set_cpus_allowed_ptr(p, cpumask_of(cpu)); + } +#endif + /* + * After migration there will be no syscall restart (rather a signal + * delivery). 
+ */ + xnthread_clear_localinfo(thread, XNSYSRST); + + pipeline_clear_mayday(); + + trace_cobalt_shadow_relaxed(thread); +} +EXPORT_SYMBOL_GPL(xnthread_relax); + +static void lostage_task_signal(struct pipeline_inband_work *inband_work) +{ + struct lostage_signal *rq; + struct task_struct *p; + kernel_siginfo_t si; + int signo, sigval; + spl_t s; + + rq = container_of(inband_work, struct lostage_signal, inband_work); + /* + * Revisit: I-pipe requirement. It passes a copy of the original work + * struct, so retrieve the original one first in order to update it. + */ + rq = rq->self; + + xnlock_get_irqsave(&nklock, s); + + p = rq->task; + signo = rq->signo; + sigval = rq->sigval; + rq->task = NULL; + + xnlock_put_irqrestore(&nklock, s); + + trace_cobalt_lostage_signal(p, signo); + + if (signo == SIGSHADOW || signo == SIGDEBUG) { + memset(&si, '\0', sizeof(si)); + si.si_signo = signo; + si.si_code = SI_QUEUE; + si.si_int = sigval; + send_sig_info(signo, &si, p); + } else { + send_sig(signo, p, 1); + } +} + +static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */ +{ + int ret = 0; + + if (xnthread_test_info(thread, XNKICKED)) + return 1; + + if (xnthread_unblock(thread)) { + xnthread_set_info(thread, XNKICKED); + ret = 1; + } + + /* + * CAUTION: we must NOT raise XNBREAK when clearing a forcible + * block state, such as XNSUSP, XNHELD. The caller of + * xnthread_suspend() we unblock shall proceed as for a normal + * return, until it traverses a cancellation point if + * XNCANCELD was raised earlier, or calls xnthread_suspend() + * which will detect XNKICKED and act accordingly. + * + * Rationale: callers of xnthread_suspend() may assume that + * receiving XNBREAK means that the process that motivated the + * blocking did not go to completion. E.g. the wait context + * (see xnthread_prepare_wait()) was NOT posted before + * xnsynch_sleep_on() returned, leaving no useful data there. + * Therefore, in case only XNSUSP remains set for the thread + * on entry to force_wakeup(), after XNPEND was lifted earlier + * when the wait went to successful completion (i.e. no + * timeout), then we want the kicked thread to know that it + * did receive the requested resource, not finding XNBREAK in + * its state word. + * + * Callers of xnthread_suspend() may inquire for XNKICKED to + * detect forcible unblocks from XNSUSP, XNHELD, if they + * should act upon this case specifically. + */ + if (xnthread_test_state(thread, XNSUSP|XNHELD)) { + xnthread_resume(thread, XNSUSP|XNHELD); + xnthread_set_info(thread, XNKICKED); + } + + /* + * Tricky cases: + * + * - a thread which was ready on entry wasn't actually + * running, but nevertheless waits for the CPU in primary + * mode, so we have to make sure that it will be notified of + * the pending break condition as soon as it enters + * xnthread_suspend() from a blocking Xenomai syscall. + * + * - a ready/readied thread on exit may be prevented from + * running by the scheduling policy module it belongs + * to. Typically, policies enforcing a runtime budget do not + * block threads with no budget, but rather keep them out of + * their run queue, so that ->sched_pick() won't elect + * them. We tell the policy handler about the fact that we do + * want such a thread to run until it relaxes, whatever this + * means internally for the implementation.
+ */ + if (xnthread_test_state(thread, XNREADY)) + xnsched_kick(thread); + + return ret; +} + +void __xnthread_kick(struct xnthread *thread) /* nklock locked, irqs off */ +{ + struct task_struct *p = xnthread_host_task(thread); + + /* Thread is already relaxed -- nop. */ + if (xnthread_test_state(thread, XNRELAX)) + return; + + /* + * First, try to kick the thread out of any blocking syscall + * Xenomai-wise. If that succeeds, then the thread will relax + * on its return path to user-space. + */ + if (force_wakeup(thread)) + return; + + /* + * If that did not work out because the thread was not blocked + * (i.e. XNPEND/XNDELAY) in a syscall, then force a mayday + * trap. Note that we don't want to send that thread any linux + * signal, we only want to force it to switch to secondary + * mode asap. + * + * It could happen that a thread is relaxed on a syscall + * return path after it was resumed from self-suspension + * (e.g. XNSUSP) then also forced to run a mayday trap right + * after: this is still correct, at worst we would get a + * useless mayday syscall leading to a no-op, no big deal. + */ + xnthread_set_info(thread, XNKICKED); + + /* + * We may send mayday signals to userland threads only. + * However, no need to run a mayday trap if the current thread + * kicks itself out of primary mode: it will relax on its way + * back to userland via the current syscall + * epilogue. Otherwise, we want that thread to enter the + * mayday trap asap, to call us back for relaxing. + */ + if (thread != xnsched_current_thread() && + xnthread_test_state(thread, XNUSER)) + pipeline_raise_mayday(p); +} + +void xnthread_kick(struct xnthread *thread) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + __xnthread_kick(thread); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xnthread_kick); + +void __xnthread_demote(struct xnthread *thread) /* nklock locked, irqs off */ +{ + struct xnsched_class *sched_class; + union xnsched_policy_param param; + + /* + * First we kick the thread out of primary mode, and have it + * resume execution immediately over the regular linux + * context. + */ + __xnthread_kick(thread); + + /* + * Then we demote it, turning that thread into a non real-time + * Xenomai shadow, which still has access to Xenomai + * resources, but won't compete for real-time scheduling + * anymore. In effect, moving the thread to a weak scheduling + * class/priority will prevent it from sticking back to + * primary mode. 
+ */ +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + param.weak.prio = 0; + sched_class = &xnsched_class_weak; +#else + param.rt.prio = 0; + sched_class = &xnsched_class_rt; +#endif + __xnthread_set_schedparam(thread, sched_class, &param); +} + +void xnthread_demote(struct xnthread *thread) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + __xnthread_demote(thread); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xnthread_demote); + +static int get_slot_index_from_sig(int sig, int arg) +{ + int action; + + switch (sig) { + case SIGDEBUG: + return XNTHREAD_SIGDEBUG; + case SIGSHADOW: + action = sigshadow_action(arg); + switch (action) { + case SIGSHADOW_ACTION_HARDEN: + return XNTHREAD_SIGSHADOW_HARDEN; + case SIGSHADOW_ACTION_BACKTRACE: + return XNTHREAD_SIGSHADOW_BACKTRACE; + case SIGSHADOW_ACTION_HOME: + return XNTHREAD_SIGSHADOW_HOME; + } + break; + case SIGTERM: + return XNTHREAD_SIGTERM; + } + + return -1; +} + +/* nklock locked, irqs off */ +void __xnthread_signal(struct xnthread *thread, int sig, int arg) +{ + struct lostage_signal *sigwork; + int slot; + + if (XENO_WARN_ON(COBALT, !xnthread_test_state(thread, XNUSER))) + return; + + slot = get_slot_index_from_sig(sig, arg); + if (WARN_ON_ONCE(slot < 0)) + return; + + sigwork = &thread->sigarray[slot]; + if (sigwork->task) + return; + + sigwork->inband_work = (struct pipeline_inband_work) + PIPELINE_INBAND_WORK_INITIALIZER(*sigwork, + lostage_task_signal); + sigwork->task = xnthread_host_task(thread); + sigwork->signo = sig; + sigwork->sigval = sig == SIGDEBUG ? arg | sigdebug_marker : arg; + sigwork->self = sigwork; /* Revisit: I-pipe requirement */ + + trace_cobalt_lostage_request("signal", sigwork->task); + + pipeline_post_inband_work(sigwork); +} + +void xnthread_signal(struct xnthread *thread, int sig, int arg) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + __xnthread_signal(thread, sig, arg); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xnthread_signal); + +void xnthread_pin_initial(struct xnthread *thread) +{ + struct task_struct *p = current; + struct xnsched *sched; + int cpu; + spl_t s; + + /* + * @thread is the Xenomai extension of the current kernel + * task. If the current CPU is part of the affinity mask of + * this thread, pin the latter on this CPU. Otherwise pin it + * to the first CPU of that mask. + */ + cpu = task_cpu(p); + if (!cpumask_test_cpu(cpu, &thread->affinity)) + cpu = cpumask_first(&thread->affinity); + + set_cpus_allowed_ptr(p, cpumask_of(cpu)); + /* + * @thread is still unstarted Xenomai-wise, we are precisely + * in the process of mapping the current kernel task to + * it. Therefore xnthread_migrate_passive() is the right way + * to pin it on a real-time CPU. + */ + xnlock_get_irqsave(&nklock, s); + sched = xnsched_struct(cpu); + xnthread_migrate_passive(thread, sched); + xnlock_put_irqrestore(&nklock, s); +} + +/* nklock locked, irqs off */ +void xnthread_call_mayday(struct xnthread *thread, int reason) +{ + struct task_struct *p = xnthread_host_task(thread); + + /* Mayday traps are available to userland threads only.
*/ + XENO_BUG_ON(COBALT, !xnthread_test_state(thread, XNUSER)); + xnthread_set_info(thread, XNKICKED); + __xnthread_signal(thread, SIGDEBUG, reason); + pipeline_raise_mayday(p); +} +EXPORT_SYMBOL_GPL(xnthread_call_mayday); + +int xnthread_killall(int grace, int mask) +{ + struct xnthread *t, *curr = xnthread_current(); + int nrkilled = 0, nrthreads, count; + long ret; + spl_t s; + + secondary_mode_only(); + + /* + * We may hold the core lock across calls to xnthread_cancel() + * provided that we won't self-cancel. + */ + xnlock_get_irqsave(&nklock, s); + + nrthreads = cobalt_nrthreads; + + xnsched_for_each_thread(t) { + if (xnthread_test_state(t, XNROOT) || + xnthread_test_state(t, mask) != mask || + t == curr) + continue; + + if (XENO_DEBUG(COBALT)) + printk(XENO_INFO "terminating %s[%d]\n", + t->name, xnthread_host_pid(t)); + nrkilled++; + xnthread_cancel(t); + } + + xnlock_put_irqrestore(&nklock, s); + + /* + * Cancel then join all existing threads during the grace + * period. It is the caller's responsibility to prevent more + * threads to bind to the system if required, we won't make + * any provision for this here. + */ + count = nrthreads - nrkilled; + if (XENO_DEBUG(COBALT)) + printk(XENO_INFO "waiting for %d threads to exit\n", + nrkilled); + + if (grace > 0) { + ret = wait_event_interruptible_timeout(join_all, + cobalt_nrthreads == count, + grace * HZ); + if (ret == 0) + return -EAGAIN; + } else + ret = wait_event_interruptible(join_all, + cobalt_nrthreads == count); + + /* Wait for a full RCU grace period to expire. */ + wait_for_rcu_grace_period(NULL); + + if (XENO_DEBUG(COBALT)) + printk(XENO_INFO "joined %d threads\n", + count + nrkilled - cobalt_nrthreads); + + return ret < 0 ? -EINTR : 0; +} +EXPORT_SYMBOL_GPL(xnthread_killall); + +/* Xenomai's generic personality. */ +struct xnthread_personality xenomai_personality = { + .name = "core", + .magic = -1 +}; +EXPORT_SYMBOL_GPL(xenomai_personality); + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/time.c b/kernel/xenomai-v3.2.4/kernel/cobalt/time.c new file mode 100644 index 0000000..cb152fc --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/time.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <asm-generic/xenomai/syscall.h> +#include <cobalt/kernel/time.h> +#include <linux/compat.h> + +int cobalt_get_timespec64(struct timespec64 *ts, + const struct __kernel_timespec __user *uts) +{ + struct __kernel_timespec kts; + int ret; + + ret = cobalt_copy_from_user(&kts, uts, sizeof(kts)); + if (ret) + return -EFAULT; + + ts->tv_sec = kts.tv_sec; + + /* Zero out the padding in compat mode */ + if (in_compat_syscall()) + kts.tv_nsec &= 0xFFFFFFFFUL; + + /* In 32-bit mode, this drops the padding */ + ts->tv_nsec = kts.tv_nsec; + + return 0; +} + +int cobalt_put_timespec64(const struct timespec64 *ts, + struct __kernel_timespec __user *uts) +{ + struct __kernel_timespec kts = { + .tv_sec = ts->tv_sec, + .tv_nsec = ts->tv_nsec + }; + + return cobalt_copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c b/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c new file mode 100644 index 0000000..4b9cea4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c @@ -0,0 +1,719 @@ +/* + * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>. 
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c b/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c
new file mode 100644
index 0000000..4b9cea4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/arith.h>
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_timer Timer services
+ *
+ * The Xenomai timer facility depends on a clock source (xnclock) for
+ * scheduling the next activation times.
+ *
+ * The core provides and depends on a monotonic clock source (nkclock)
+ * with nanosecond resolution, driving the platform timer hardware
+ * exposed by the interrupt pipeline.
+ *
+ * @{
+ */
+
+int xntimer_heading_p(struct xntimer *timer)
+{
+	struct xnsched *sched = timer->sched;
+	xntimerq_t *q;
+	xntimerh_t *h;
+
+	q = xntimer_percpu_queue(timer);
+	h = xntimerq_head(q);
+	if (h == &timer->aplink)
+		return 1;
+
+	if (sched->lflags & XNHDEFER) {
+		h = xntimerq_second(q, h);
+		if (h == &timer->aplink)
+			return 1;
+	}
+
+	return 0;
+}
+
+void xntimer_enqueue_and_program(struct xntimer *timer, xntimerq_t *q)
+{
+	struct xnsched *sched = xntimer_sched(timer);
+
+	xntimer_enqueue(timer, q);
+	if (pipeline_must_force_program_tick(sched) || xntimer_heading_p(timer)) {
+		struct xnsched *sched = xntimer_sched(timer);
+		struct xnclock *clock = xntimer_clock(timer);
+
+		if (sched != xnsched_current())
+			xnclock_remote_shot(clock, sched);
+		else
+			xnclock_program_shot(clock, sched);
+	}
+}
+
+/**
+ * Arm a timer.
+ *
+ * Activates a timer so that the associated timeout handler will be
+ * fired after each expiration time. A timer can be either periodic or
+ * one-shot, depending on the reload value passed to this routine. The
+ * given timer must have been previously initialized.
+ *
+ * A timer is attached to the clock specified in xntimer_init().
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param value The date of the initial timer shot, expressed in
+ * nanoseconds.
+ *
+ * @param interval The reload value of the timer. It is a periodic
+ * interval value to be used for reprogramming the next timer shot,
+ * expressed in nanoseconds. If @a interval is equal to XN_INFINITE,
+ * the timer will not be reloaded after it has expired.
+ *
+ * @param mode The timer mode. It can be XN_RELATIVE if @a value shall
+ * be interpreted as a relative date, XN_ABSOLUTE for an absolute date
+ * based on the monotonic clock of the related time base (as returned
+ * by xnclock_read_monotonic()), or XN_REALTIME if the absolute date
+ * is based on the adjustable real-time date for the relevant clock
+ * (obtained from xnclock_read_realtime()).
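+ *
+ * A minimal arming sketch (hypothetical timer and handler names; the
+ * caller must meet the atomic-entry requirement stated below), firing
+ * my_handler 1 ms from now, then every 100 us:
+ *
+ * @code
+ * xntimer_init(&my_timer, &nkclock, my_handler, NULL, XNTIMER_IGRAVITY);
+ * xntimer_start(&my_timer, 1000000, 100000, XN_RELATIVE);
+ * @endcode
+ *
+ * Note how a late absolute date is realigned on the periodic time
+ * line below: with a date 250 us in the past and a 100 us period, the
+ * next shot is pushed to date + 3 * period, i.e. 50 us ahead of now.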
+ * + * @return 0 is returned upon success, or -ETIMEDOUT if an absolute + * date in the past has been given. In such an event, the timer is + * nevertheless armed for the next shot in the timeline if @a interval + * is different from XN_INFINITE. + * + * @coretags{unrestricted, atomic-entry} + */ +int xntimer_start(struct xntimer *timer, + xnticks_t value, xnticks_t interval, + xntmode_t mode) +{ + struct xnclock *clock = xntimer_clock(timer); + xntimerq_t *q = xntimer_percpu_queue(timer); + xnticks_t date, now, delay, period; + unsigned long gravity; + int ret = 0; + + atomic_only(); + + trace_cobalt_timer_start(timer, value, interval, mode); + + if ((timer->status & XNTIMER_DEQUEUED) == 0) + xntimer_dequeue(timer, q); + + now = xnclock_read_raw(clock); + + timer->status &= ~(XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC); + switch (mode) { + case XN_RELATIVE: + if ((xnsticks_t)value < 0) + return -ETIMEDOUT; + date = xnclock_ns_to_ticks(clock, value) + now; + break; + case XN_REALTIME: + timer->status |= XNTIMER_REALTIME; + value -= xnclock_get_offset(clock); + fallthrough; + default: /* XN_ABSOLUTE || XN_REALTIME */ + date = xnclock_ns_to_ticks(clock, value); + if ((xnsticks_t)(date - now) <= 0) { + if (interval == XN_INFINITE) + return -ETIMEDOUT; + /* + * We are late on arrival for the first + * delivery, wait for the next shot on the + * periodic time line. + */ + delay = now - date; + period = xnclock_ns_to_ticks(clock, interval); + date += period * (xnarch_div64(delay, period) + 1); + } + break; + } + + /* + * To cope with the basic system latency, we apply a clock + * gravity value, which is the amount of time expressed in + * clock ticks by which we should anticipate the shot for any + * outstanding timer. The gravity value varies with the type + * of context the timer wakes up, i.e. irq handler, kernel or + * user thread. + */ + gravity = xntimer_gravity(timer); + xntimerh_date(&timer->aplink) = date - gravity; + if (now >= xntimerh_date(&timer->aplink)) + xntimerh_date(&timer->aplink) += gravity / 2; + + timer->interval_ns = XN_INFINITE; + timer->interval = XN_INFINITE; + if (interval != XN_INFINITE) { + timer->interval_ns = interval; + timer->interval = xnclock_ns_to_ticks(clock, interval); + timer->periodic_ticks = 0; + timer->start_date = date; + timer->pexpect_ticks = 0; + timer->status |= XNTIMER_PERIODIC; + } + + timer->status |= XNTIMER_RUNNING; + xntimer_enqueue_and_program(timer, q); + + return ret; +} +EXPORT_SYMBOL_GPL(xntimer_start); + +/** + * @fn int xntimer_stop(struct xntimer *timer) + * + * @brief Disarm a timer. + * + * This service deactivates a timer previously armed using + * xntimer_start(). Once disarmed, the timer can be subsequently + * re-armed using the latter service. + * + * @param timer The address of a valid timer descriptor. + * + * @coretags{unrestricted, atomic-entry} + */ +void __xntimer_stop(struct xntimer *timer) +{ + struct xnclock *clock = xntimer_clock(timer); + xntimerq_t *q = xntimer_percpu_queue(timer); + struct xnsched *sched; + int heading = 1; + + atomic_only(); + + trace_cobalt_timer_stop(timer); + + if ((timer->status & XNTIMER_DEQUEUED) == 0) { + heading = xntimer_heading_p(timer); + xntimer_dequeue(timer, q); + } + timer->status &= ~(XNTIMER_FIRED|XNTIMER_RUNNING); + sched = xntimer_sched(timer); + + /* + * If we removed the heading timer, reprogram the next shot if + * any. If the timer was running on another CPU, let it tick. 
+	 */
+	if (heading && sched == xnsched_current())
+		xnclock_program_shot(clock, sched);
+}
+EXPORT_SYMBOL_GPL(__xntimer_stop);
+
+/**
+ * @fn xnticks_t xntimer_get_date(struct xntimer *timer)
+ *
+ * @brief Return the absolute expiration date.
+ *
+ * Return the next expiration date of a timer as an absolute count of
+ * nanoseconds.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The expiration date in nanoseconds. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+xnticks_t xntimer_get_date(struct xntimer *timer)
+{
+	atomic_only();
+
+	if (!xntimer_running_p(timer))
+		return XN_INFINITE;
+
+	return xnclock_ticks_to_ns(xntimer_clock(timer), xntimer_expiry(timer));
+}
+EXPORT_SYMBOL_GPL(xntimer_get_date);
+
+/**
+ * @fn xnticks_t xntimer_get_timeout(struct xntimer *timer)
+ *
+ * @brief Return the relative expiration date.
+ *
+ * This call returns the count of nanoseconds remaining until the
+ * timer expires.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The count of nanoseconds until expiry. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled. It
+ * might happen that the timer expires when this service runs (even if
+ * the associated handler has not been fired yet); in such a case, 1
+ * is returned.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+xnticks_t __xntimer_get_timeout(struct xntimer *timer)
+{
+	struct xnclock *clock;
+	xnticks_t expiry, now;
+
+	atomic_only();
+
+	clock = xntimer_clock(timer);
+	now = xnclock_read_raw(clock);
+	expiry = xntimer_expiry(timer);
+	if (expiry < now)
+		return 1; /* Will elapse shortly. */
+
+	return xnclock_ticks_to_ns(clock, expiry - now);
+}
+EXPORT_SYMBOL_GPL(__xntimer_get_timeout);
+
+/**
+ * @fn void xntimer_init(struct xntimer *timer,struct xnclock *clock,void (*handler)(struct xntimer *timer), struct xnsched *sched, int flags)
+ * @brief Initialize a timer object.
+ *
+ * Creates a timer. When created, a timer is left disarmed; it must be
+ * started using xntimer_start() in order to be activated.
+ *
+ * @param timer The address of a timer descriptor the nucleus will use
+ * to store the object-specific data. This descriptor must always be
+ * valid while the object is active; therefore, it must be allocated
+ * in permanent memory.
+ *
+ * @param clock The clock the timer relates to. Xenomai defines a
+ * monotonic system clock, with nanosecond resolution, named
+ * nkclock. In addition, external clocks driven by other tick sources
+ * may be created dynamically if CONFIG_XENO_OPT_EXTCLOCK is defined.
+ *
+ * @param handler The routine to call upon expiration of the timer.
+ *
+ * @param sched An optional pointer to the per-CPU scheduler slot the
+ * new timer is affine to. If non-NULL, the timer will fire on the CPU
+ * @a sched is bound to, otherwise it will fire either on the current
+ * CPU if real-time, or on the first real-time CPU.
+ *
+ * @param flags A set of flags describing the timer. A set of clock
+ * gravity hints can be passed via the @a flags argument, used for
+ * optimizing the built-in heuristics aimed at latency reduction:
+ *
+ * - XNTIMER_IGRAVITY, the timer activates a leaf timer handler.
+ * - XNTIMER_KGRAVITY, the timer activates a kernel thread.
+ * - XNTIMER_UGRAVITY, the timer activates a user-space thread.
+ *
+ * There is no limitation on the number of timers which can be
+ * created/active concurrently.
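+ *
+ * For instance, a timer whose handler wakes up a user-space thread
+ * would typically pass the matching gravity hint (sketch, assumed
+ * names):
+ *
+ * @code
+ * xntimer_init(&u_timer, &nkclock, wakeup_handler, NULL, XNTIMER_UGRAVITY);
+ * @endcode
+ *
+ * so that shots are anticipated by the (larger) user gravity value,
+ * compensating for the longer wake-up path to user space.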
+ * + * @coretags{unrestricted} + */ +#ifdef DOXYGEN_CPP +void xntimer_init(struct xntimer *timer, struct xnclock *clock, + void (*handler)(struct xntimer *timer), + struct xnsched *sched, + int flags); +#endif + +void __xntimer_init(struct xntimer *timer, + struct xnclock *clock, + void (*handler)(struct xntimer *timer), + struct xnsched *sched, + int flags) +{ + spl_t s __maybe_unused; + +#ifdef CONFIG_XENO_OPT_EXTCLOCK + timer->clock = clock; +#endif + xntimerh_init(&timer->aplink); + xntimerh_date(&timer->aplink) = XN_INFINITE; + xntimer_set_priority(timer, XNTIMER_STDPRIO); + timer->status = (XNTIMER_DEQUEUED|(flags & XNTIMER_INIT_MASK)); + timer->handler = handler; + timer->interval_ns = 0; + timer->sched = NULL; + + /* + * Set the timer affinity, preferably to xnsched_cpu(sched) if + * sched was given, CPU0 otherwise. + */ + if (sched == NULL) + sched = xnsched_struct(0); + + xntimer_set_affinity(timer, sched); + +#ifdef CONFIG_XENO_OPT_STATS +#ifdef CONFIG_XENO_OPT_EXTCLOCK + timer->tracker = clock; +#endif + ksformat(timer->name, XNOBJECT_NAME_LEN, "%d/%s", + task_pid_nr(current), current->comm); + xntimer_reset_stats(timer); + xnlock_get_irqsave(&nklock, s); + list_add_tail(&timer->next_stat, &clock->timerq); + clock->nrtimers++; + xnvfile_touch(&clock->timer_vfile); + xnlock_put_irqrestore(&nklock, s); +#endif /* CONFIG_XENO_OPT_STATS */ +} +EXPORT_SYMBOL_GPL(__xntimer_init); + +void xntimer_set_gravity(struct xntimer *timer, int gravity) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + timer->status &= ~XNTIMER_GRAVITY_MASK; + timer->status |= gravity; + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xntimer_set_gravity); + +#ifdef CONFIG_XENO_OPT_EXTCLOCK + +#ifdef CONFIG_XENO_OPT_STATS + +static void __xntimer_switch_tracking(struct xntimer *timer, + struct xnclock *newclock) +{ + struct xnclock *oldclock = timer->tracker; + + list_del(&timer->next_stat); + oldclock->nrtimers--; + xnvfile_touch(&oldclock->timer_vfile); + list_add_tail(&timer->next_stat, &newclock->timerq); + newclock->nrtimers++; + xnvfile_touch(&newclock->timer_vfile); + timer->tracker = newclock; +} + +void xntimer_switch_tracking(struct xntimer *timer, + struct xnclock *newclock) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + __xntimer_switch_tracking(timer, newclock); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xntimer_switch_tracking); + +#else + +static inline +void __xntimer_switch_tracking(struct xntimer *timer, + struct xnclock *newclock) +{ } + +#endif /* CONFIG_XENO_OPT_STATS */ + +/** + * @brief Set the reference clock of a timer. + * + * This service changes the reference clock pacing a timer. If the + * clock timers are tracked, the tracking information is updated too. + * + * @param timer The address of a valid timer descriptor. + * + * @param newclock The address of a valid clock descriptor. + * + * @coretags{unrestricted, atomic-entry} + */ +void xntimer_set_clock(struct xntimer *timer, + struct xnclock *newclock) +{ + atomic_only(); + + if (timer->clock != newclock) { + xntimer_stop(timer); + timer->clock = newclock; + /* + * Since the timer was stopped, we can wait until it + * is restarted for fixing its CPU affinity. + */ + __xntimer_switch_tracking(timer, newclock); + } +} + +#endif /* CONFIG_XENO_OPT_EXTCLOCK */ + +/** + * @fn void xntimer_destroy(struct xntimer *timer) + * + * @brief Release a timer object. + * + * Destroys a timer. After it has been destroyed, all resources + * associated with the timer have been released. 
The timer is + * automatically deactivated before deletion if active on entry. + * + * @param timer The address of a valid timer descriptor. + * + * @coretags{unrestricted} + */ +void xntimer_destroy(struct xntimer *timer) +{ + struct xnclock *clock __maybe_unused = xntimer_clock(timer); + spl_t s; + + xnlock_get_irqsave(&nklock, s); + xntimer_stop(timer); + timer->status |= XNTIMER_KILLED; + timer->sched = NULL; +#ifdef CONFIG_XENO_OPT_STATS + list_del(&timer->next_stat); + clock->nrtimers--; + xnvfile_touch(&clock->timer_vfile); +#endif /* CONFIG_XENO_OPT_STATS */ + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(xntimer_destroy); + +#ifdef CONFIG_SMP + +/** + * Migrate a timer. + * + * This call migrates a timer to another cpu. In order to avoid + * pathological cases, it must be called from the CPU to which @a + * timer is currently attached. + * + * @param timer The address of the timer object to be migrated. + * + * @param sched The address of the destination per-CPU scheduler + * slot. + * + * @coretags{unrestricted, atomic-entry} + */ +void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched) +{ /* nklocked, IRQs off, sched != timer->sched */ + struct xnclock *clock; + xntimerq_t *q; + + trace_cobalt_timer_migrate(timer, xnsched_cpu(sched)); + + /* + * This assertion triggers when the timer is migrated to a CPU + * for which we do not expect any clock events/IRQs from the + * associated clock device. If so, the timer would never fire + * since clock ticks would never happen on that CPU. + */ + XENO_WARN_ON_SMP(COBALT, + !cpumask_empty(&xntimer_clock(timer)->affinity) && + !cpumask_test_cpu(xnsched_cpu(sched), + &xntimer_clock(timer)->affinity)); + + if (timer->status & XNTIMER_RUNNING) { + xntimer_stop(timer); + timer->sched = sched; + clock = xntimer_clock(timer); + q = xntimer_percpu_queue(timer); + xntimer_enqueue(timer, q); + if (xntimer_heading_p(timer)) + xnclock_remote_shot(clock, sched); + } else + timer->sched = sched; +} +EXPORT_SYMBOL_GPL(__xntimer_migrate); + +static inline int get_clock_cpu(struct xnclock *clock, int cpu) +{ + /* + * Check a CPU number against the possible set of CPUs + * receiving events from the underlying clock device. If the + * suggested CPU does not receive events from this device, + * return the first one which does instead. + * + * A global clock device with no particular IRQ affinity may + * tick on any CPU, but timers should always be queued on + * CPU0. + * + * NOTE: we have scheduler slots initialized for all online + * CPUs, we can program and receive clock ticks on any of + * them. So there is no point in restricting the valid CPU set + * to cobalt_cpu_affinity, which specifically refers to the + * set of CPUs which may run real-time threads. Although + * receiving a clock tick for waking up a thread living on a + * remote CPU is not optimal since this involves IPI-signaled + * rescheds, this is still a valid case. + */ + if (cpumask_empty(&clock->affinity)) + return 0; + + if (cpumask_test_cpu(cpu, &clock->affinity)) + return cpu; + + return cpumask_first(&clock->affinity); +} + +void __xntimer_set_affinity(struct xntimer *timer, struct xnsched *sched) +{ /* nklocked, IRQs off */ + struct xnclock *clock = xntimer_clock(timer); + int cpu; + + /* + * Figure out which CPU is best suited for managing this + * timer, preferably picking xnsched_cpu(sched) if the ticking + * device moving the timer clock beats on that CPU. Otherwise, + * pick the first CPU from the clock affinity mask if set. 
If + * not, the timer is backed by a global device with no + * particular IRQ affinity, so it should always be queued to + * CPU0. + */ + cpu = 0; + if (!cpumask_empty(&clock->affinity)) + cpu = get_clock_cpu(clock, xnsched_cpu(sched)); + + xntimer_migrate(timer, xnsched_struct(cpu)); +} +EXPORT_SYMBOL_GPL(__xntimer_set_affinity); + +#endif /* CONFIG_SMP */ + +/** + * Get the count of overruns for the last tick. + * + * This service returns the count of pending overruns for the last + * tick of a given timer, as measured by the difference between the + * expected expiry date of the timer and the date @a now passed as + * argument. + * + * @param timer The address of a valid timer descriptor. + * + * @param waiter The thread for which the overrun count is being + * collected. + * + * @param now current date (as + * xnclock_read_raw(xntimer_clock(timer))) + * + * @return the number of overruns of @a timer at date @a now + * + * @coretags{unrestricted, atomic-entry} + */ +unsigned long long xntimer_get_overruns(struct xntimer *timer, + struct xnthread *waiter, + xnticks_t now) +{ + xnticks_t period = timer->interval; + unsigned long long overruns = 0; + xnsticks_t delta; + xntimerq_t *q; + + atomic_only(); + + delta = now - xntimer_pexpect(timer); + if (unlikely(delta >= (xnsticks_t) period)) { + period = timer->interval_ns; + delta = xnclock_ticks_to_ns(xntimer_clock(timer), delta); + overruns = xnarch_div64(delta, period); + timer->pexpect_ticks += overruns; + if (xntimer_running_p(timer)) { + XENO_BUG_ON(COBALT, (timer->status & + (XNTIMER_DEQUEUED|XNTIMER_PERIODIC)) + != XNTIMER_PERIODIC); + q = xntimer_percpu_queue(timer); + xntimer_dequeue(timer, q); + while (xntimerh_date(&timer->aplink) < now) { + timer->periodic_ticks++; + xntimer_update_date(timer); + } + xntimer_enqueue_and_program(timer, q); + } + } + + timer->pexpect_ticks++; + + /* Hide overruns due to the most recent ptracing session. 
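+	 * XNHICCUP is set on a waiter which just resumed from a
+	 * ptrace stop; the lateness accumulated while the thread was
+	 * stopped is debug-induced, so it is deliberately not
+	 * reported as overruns.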
*/ + if (xnthread_test_localinfo(waiter, XNHICCUP)) + return 0; + + return overruns; +} +EXPORT_SYMBOL_GPL(xntimer_get_overruns); + +char *xntimer_format_time(xnticks_t ns, char *buf, size_t bufsz) +{ + unsigned long ms, us, rem; + int len = (int)bufsz; + char *p = buf; + xnticks_t sec; + + if (ns == 0 && bufsz > 1) { + strcpy(buf, "-"); + return buf; + } + + sec = xnclock_divrem_billion(ns, &rem); + us = rem / 1000; + ms = us / 1000; + us %= 1000; + + if (sec) { + p += ksformat(p, bufsz, "%Lus", sec); + len = bufsz - (p - buf); + } + + if (len > 0 && (ms || (sec && us))) { + p += ksformat(p, bufsz - (p - buf), "%lums", ms); + len = bufsz - (p - buf); + } + + if (len > 0 && us) + p += ksformat(p, bufsz - (p - buf), "%luus", us); + + return buf; +} +EXPORT_SYMBOL_GPL(xntimer_format_time); + +#if defined(CONFIG_XENO_OPT_TIMER_RBTREE) +static inline bool xntimerh_is_lt(xntimerh_t *left, xntimerh_t *right) +{ + return left->date < right->date + || (left->date == right->date && left->prio > right->prio); +} + +void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder) +{ + struct rb_node **new = &q->root.rb_node, *parent = NULL; + + if (!q->head) + q->head = holder; + else if (xntimerh_is_lt(holder, q->head)) { + parent = &q->head->link; + new = &parent->rb_left; + q->head = holder; + } else while (*new) { + xntimerh_t *i = container_of(*new, xntimerh_t, link); + + parent = *new; + if (xntimerh_is_lt(holder, i)) + new = &((*new)->rb_left); + else + new = &((*new)->rb_right); + } + + rb_link_node(&holder->link, parent, new); + rb_insert_color(&holder->link, &q->root); +} +#endif + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h new file mode 100644 index 0000000..d98787c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h @@ -0,0 +1,908 @@ +/* + * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>. + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cobalt_core + +#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_COBALT_CORE_H + +#include <linux/tracepoint.h> +#include <linux/math64.h> +#include <cobalt/kernel/timer.h> +#include <cobalt/kernel/registry.h> +#include <cobalt/uapi/kernel/types.h> + +struct xnsched; +struct xnthread; +struct xnsynch; +struct xnsched_class; +struct xnsched_quota_group; +struct xnthread_init_attr; + +DECLARE_EVENT_CLASS(thread_event, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread), + + TP_STRUCT__entry( + __field(pid_t, pid) + __field(unsigned long, state) + __field(unsigned long, info) + ), + + TP_fast_assign( + __entry->state = thread->state; + __entry->info = thread->info; + __entry->pid = xnthread_host_pid(thread); + ), + + TP_printk("pid=%d state=0x%lx info=0x%lx", + __entry->pid, __entry->state, __entry->info) +); + +DECLARE_EVENT_CLASS(curr_thread_event, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __field(unsigned long, state) + __field(unsigned long, info) + ), + + TP_fast_assign( + __entry->state = thread->state; + __entry->info = thread->info; + ), + + TP_printk("state=0x%lx info=0x%lx", + __entry->state, __entry->info) +); + +DECLARE_EVENT_CLASS(synch_wait_event, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch), + + TP_STRUCT__entry( + __field(struct xnsynch *, synch) + ), + + TP_fast_assign( + __entry->synch = synch; + ), + + TP_printk("synch=%p", __entry->synch) +); + +DECLARE_EVENT_CLASS(synch_post_event, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch), + + TP_STRUCT__entry( + __field(struct xnsynch *, synch) + ), + + TP_fast_assign( + __entry->synch = synch; + ), + + TP_printk("synch=%p", __entry->synch) +); + +DECLARE_EVENT_CLASS(irq_event, + TP_PROTO(unsigned int irq), + TP_ARGS(irq), + + TP_STRUCT__entry( + __field(unsigned int, irq) + ), + + TP_fast_assign( + __entry->irq = irq; + ), + + TP_printk("irq=%u", __entry->irq) +); + +DECLARE_EVENT_CLASS(clock_event, + TP_PROTO(unsigned int irq), + TP_ARGS(irq), + + TP_STRUCT__entry( + __field(unsigned int, irq) + ), + + TP_fast_assign( + __entry->irq = irq; + ), + + TP_printk("clock_irq=%u", __entry->irq) +); + +DECLARE_EVENT_CLASS(timer_event, + TP_PROTO(struct xntimer *timer), + TP_ARGS(timer), + + TP_STRUCT__entry( + __field(struct xntimer *, timer) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer=%p", __entry->timer) +); + +DECLARE_EVENT_CLASS(registry_event, + TP_PROTO(const char *key, void *addr), + TP_ARGS(key, addr), + + TP_STRUCT__entry( + __string(key, key ?: "(anon)") + __field(void *, addr) + ), + + TP_fast_assign( + __assign_str(key, key ?: "(anon)"); + __entry->addr = addr; + ), + + TP_printk("key=%s, addr=%p", __get_str(key), __entry->addr) +); + +TRACE_EVENT(cobalt_schedule, + TP_PROTO(struct xnsched *sched), + TP_ARGS(sched), + + TP_STRUCT__entry( + __field(unsigned long, status) + ), + + TP_fast_assign( + __entry->status = sched->status; + ), + + TP_printk("status=0x%lx", __entry->status) +); + +TRACE_EVENT(cobalt_schedule_remote, + TP_PROTO(struct xnsched *sched), + TP_ARGS(sched), + + TP_STRUCT__entry( + __field(unsigned long, status) + ), + + TP_fast_assign( + __entry->status = sched->status; + ), + + TP_printk("status=0x%lx", __entry->status) +); + +TRACE_EVENT(cobalt_switch_context, + TP_PROTO(struct xnthread *prev, struct xnthread *next), + TP_ARGS(prev, next), + + TP_STRUCT__entry( + __field(struct xnthread *, prev) + 
__string(prev_name, prev->name) + __field(pid_t, prev_pid) + __field(int, prev_prio) + __field(unsigned long, prev_state) + __field(struct xnthread *, next) + __string(next_name, next->name) + __field(pid_t, next_pid) + __field(int, next_prio) + ), + + TP_fast_assign( + __entry->prev = prev; + __assign_str(prev_name, prev->name); + __entry->prev_pid = xnthread_host_pid(prev); + __entry->prev_prio = xnthread_current_priority(prev); + __entry->prev_state = prev->state; + __entry->next = next; + __assign_str(next_name, next->name); + __entry->next_pid = xnthread_host_pid(next); + __entry->next_prio = xnthread_current_priority(next); + ), + + TP_printk("prev_name=%s prev_pid=%d prev_prio=%d prev_state=0x%lx ==> next_name=%s next_pid=%d next_prio=%d", + __get_str(prev_name), __entry->prev_pid, + __entry->prev_prio, __entry->prev_state, + __get_str(next_name), __entry->next_pid, __entry->next_prio) +); + +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + +TRACE_EVENT(cobalt_schedquota_refill, + TP_PROTO(int dummy), + TP_ARGS(dummy), + + TP_STRUCT__entry( + __field(int, dummy) + ), + + TP_fast_assign( + (void)dummy; + ), + + TP_printk("%s", "") +); + +DECLARE_EVENT_CLASS(schedquota_group_event, + TP_PROTO(struct xnsched_quota_group *tg), + TP_ARGS(tg), + + TP_STRUCT__entry( + __field(int, tgid) + ), + + TP_fast_assign( + __entry->tgid = tg->tgid; + ), + + TP_printk("tgid=%d", + __entry->tgid) +); + +DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_create_group, + TP_PROTO(struct xnsched_quota_group *tg), + TP_ARGS(tg) +); + +DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_destroy_group, + TP_PROTO(struct xnsched_quota_group *tg), + TP_ARGS(tg) +); + +TRACE_EVENT(cobalt_schedquota_set_limit, + TP_PROTO(struct xnsched_quota_group *tg, + int percent, + int peak_percent), + TP_ARGS(tg, percent, peak_percent), + + TP_STRUCT__entry( + __field(int, tgid) + __field(int, percent) + __field(int, peak_percent) + ), + + TP_fast_assign( + __entry->tgid = tg->tgid; + __entry->percent = percent; + __entry->peak_percent = peak_percent; + ), + + TP_printk("tgid=%d percent=%d peak_percent=%d", + __entry->tgid, __entry->percent, __entry->peak_percent) +); + +DECLARE_EVENT_CLASS(schedquota_thread_event, + TP_PROTO(struct xnsched_quota_group *tg, + struct xnthread *thread), + TP_ARGS(tg, thread), + + TP_STRUCT__entry( + __field(int, tgid) + __field(struct xnthread *, thread) + __field(pid_t, pid) + ), + + TP_fast_assign( + __entry->tgid = tg->tgid; + __entry->thread = thread; + __entry->pid = xnthread_host_pid(thread); + ), + + TP_printk("tgid=%d thread=%p pid=%d", + __entry->tgid, __entry->thread, __entry->pid) +); + +DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_add_thread, + TP_PROTO(struct xnsched_quota_group *tg, + struct xnthread *thread), + TP_ARGS(tg, thread) +); + +DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_remove_thread, + TP_PROTO(struct xnsched_quota_group *tg, + struct xnthread *thread), + TP_ARGS(tg, thread) +); + +#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */ + +TRACE_EVENT(cobalt_thread_init, + TP_PROTO(struct xnthread *thread, + const struct xnthread_init_attr *attr, + struct xnsched_class *sched_class), + TP_ARGS(thread, attr, sched_class), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __string(thread_name, thread->name) + __string(class_name, sched_class->name) + __field(unsigned long, flags) + __field(int, cprio) + ), + + TP_fast_assign( + __entry->thread = thread; + __assign_str(thread_name, thread->name); + __entry->flags = attr->flags; + __assign_str(class_name, 
sched_class->name); + __entry->cprio = thread->cprio; + ), + + TP_printk("thread=%p name=%s flags=0x%lx class=%s prio=%d", + __entry->thread, __get_str(thread_name), __entry->flags, + __get_str(class_name), __entry->cprio) +); + +TRACE_EVENT(cobalt_thread_suspend, + TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout, + xntmode_t timeout_mode, struct xnsynch *wchan), + TP_ARGS(thread, mask, timeout, timeout_mode, wchan), + + TP_STRUCT__entry( + __field(pid_t, pid) + __field(unsigned long, mask) + __field(xnticks_t, timeout) + __field(xntmode_t, timeout_mode) + __field(struct xnsynch *, wchan) + ), + + TP_fast_assign( + __entry->pid = xnthread_host_pid(thread); + __entry->mask = mask; + __entry->timeout = timeout; + __entry->timeout_mode = timeout_mode; + __entry->wchan = wchan; + ), + + TP_printk("pid=%d mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p", + __entry->pid, __entry->mask, + __entry->timeout, __entry->timeout_mode, __entry->wchan) +); + +TRACE_EVENT(cobalt_thread_resume, + TP_PROTO(struct xnthread *thread, unsigned long mask), + TP_ARGS(thread, mask), + + TP_STRUCT__entry( + __string(name, thread->name) + __field(pid_t, pid) + __field(unsigned long, mask) + ), + + TP_fast_assign( + __assign_str(name, thread->name); + __entry->pid = xnthread_host_pid(thread); + __entry->mask = mask; + ), + + TP_printk("name=%s pid=%d mask=0x%lx", + __get_str(name), __entry->pid, __entry->mask) +); + +TRACE_EVENT(cobalt_thread_fault, + TP_PROTO(unsigned long ip, unsigned int type), + TP_ARGS(ip, type), + + TP_STRUCT__entry( + __field(unsigned long, ip) + __field(unsigned int, type) + ), + + TP_fast_assign( + __entry->ip = ip; + __entry->type = type; + ), + + TP_printk("ip=%#lx type=%#x", + __entry->ip, __entry->type) +); + +TRACE_EVENT(cobalt_thread_set_current_prio, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __field(pid_t, pid) + __field(int, cprio) + ), + + TP_fast_assign( + __entry->thread = thread; + __entry->pid = xnthread_host_pid(thread); + __entry->cprio = xnthread_current_priority(thread); + ), + + TP_printk("thread=%p pid=%d prio=%d", + __entry->thread, __entry->pid, __entry->cprio) +); + +DEFINE_EVENT(thread_event, cobalt_thread_start, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(thread_event, cobalt_thread_cancel, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(thread_event, cobalt_thread_join, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(thread_event, cobalt_thread_unblock, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_thread_wait_period, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_thread_missed_period, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_thread_set_mode, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +TRACE_EVENT(cobalt_thread_migrate, + TP_PROTO(unsigned int cpu), + TP_ARGS(cpu), + + TP_STRUCT__entry( + __field(unsigned int, cpu) + ), + + TP_fast_assign( + __entry->cpu = cpu; + ), + + TP_printk("cpu=%u", __entry->cpu) +); + +TRACE_EVENT(cobalt_thread_migrate_passive, + TP_PROTO(struct xnthread *thread, unsigned int cpu), + TP_ARGS(thread, cpu), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __field(pid_t, pid) + __field(unsigned int, cpu) + ), + + TP_fast_assign( + __entry->thread = thread; + 
__entry->pid = xnthread_host_pid(thread); + __entry->cpu = cpu; + ), + + TP_printk("thread=%p pid=%d cpu=%u", + __entry->thread, __entry->pid, __entry->cpu) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_gohard, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_watchdog_signal, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_hardened, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +#define cobalt_print_relax_reason(reason) \ + __print_symbolic(reason, \ + { SIGDEBUG_UNDEFINED, "undefined" }, \ + { SIGDEBUG_MIGRATE_SIGNAL, "signal" }, \ + { SIGDEBUG_MIGRATE_SYSCALL, "syscall" }, \ + { SIGDEBUG_MIGRATE_FAULT, "fault" }) + +TRACE_EVENT(cobalt_shadow_gorelax, + TP_PROTO(int reason), + TP_ARGS(reason), + + TP_STRUCT__entry( + __field(int, reason) + ), + + TP_fast_assign( + __entry->reason = reason; + ), + + TP_printk("reason=%s", cobalt_print_relax_reason(__entry->reason)) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_relaxed, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_entry, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +TRACE_EVENT(cobalt_shadow_map, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __field(pid_t, pid) + __field(int, prio) + ), + + TP_fast_assign( + __entry->thread = thread; + __entry->pid = xnthread_host_pid(thread); + __entry->prio = xnthread_base_priority(thread); + ), + + TP_printk("thread=%p pid=%d prio=%d", + __entry->thread, __entry->pid, __entry->prio) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_unmap, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +TRACE_EVENT(cobalt_lostage_request, + TP_PROTO(const char *type, struct task_struct *task), + TP_ARGS(type, task), + + TP_STRUCT__entry( + __field(pid_t, pid) + __array(char, comm, TASK_COMM_LEN) + __field(const char *, type) + ), + + TP_fast_assign( + __entry->type = type; + __entry->pid = task_pid_nr(task); + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + ), + + TP_printk("request=%s pid=%d comm=%s", + __entry->type, __entry->pid, __entry->comm) +); + +TRACE_EVENT(cobalt_lostage_wakeup, + TP_PROTO(struct task_struct *task), + TP_ARGS(task), + + TP_STRUCT__entry( + __field(pid_t, pid) + __array(char, comm, TASK_COMM_LEN) + ), + + TP_fast_assign( + __entry->pid = task_pid_nr(task); + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + ), + + TP_printk("pid=%d comm=%s", + __entry->pid, __entry->comm) +); + +TRACE_EVENT(cobalt_lostage_signal, + TP_PROTO(struct task_struct *task, int sig), + TP_ARGS(task, sig), + + TP_STRUCT__entry( + __field(pid_t, pid) + __array(char, comm, TASK_COMM_LEN) + __field(int, sig) + ), + + TP_fast_assign( + __entry->pid = task_pid_nr(task); + __entry->sig = sig; + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + ), + + TP_printk("pid=%d comm=%s sig=%d", + __entry->pid, __entry->comm, __entry->sig) +); + +DEFINE_EVENT(irq_event, cobalt_irq_entry, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(irq_event, cobalt_irq_exit, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(irq_event, cobalt_irq_attach, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(irq_event, cobalt_irq_detach, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(irq_event, cobalt_irq_enable, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + 
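+
+/*
+ * A sketch of how this pattern extends (hypothetical event name, not
+ * part of this file): any further IRQ-scoped tracepoint reuses the
+ * irq_event class declared above, inheriting its TP_STRUCT__entry,
+ * TP_fast_assign and TP_printk without duplication:
+ *
+ *	DEFINE_EVENT(irq_event, cobalt_irq_example,
+ *		TP_PROTO(unsigned int irq),
+ *		TP_ARGS(irq)
+ *	);
+ */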
+DEFINE_EVENT(irq_event, cobalt_irq_disable, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(clock_event, cobalt_clock_entry, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(clock_event, cobalt_clock_exit, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(timer_event, cobalt_timer_stop, + TP_PROTO(struct xntimer *timer), + TP_ARGS(timer) +); + +DEFINE_EVENT(timer_event, cobalt_timer_expire, + TP_PROTO(struct xntimer *timer), + TP_ARGS(timer) +); + +#define cobalt_print_timer_mode(mode) \ + __print_symbolic(mode, \ + { XN_RELATIVE, "rel" }, \ + { XN_ABSOLUTE, "abs" }, \ + { XN_REALTIME, "rt" }) + +TRACE_EVENT(cobalt_timer_start, + TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval, + xntmode_t mode), + TP_ARGS(timer, value, interval, mode), + + TP_STRUCT__entry( + __field(struct xntimer *, timer) +#ifdef CONFIG_XENO_OPT_STATS + __string(name, timer->name) +#endif + __field(xnticks_t, value) + __field(xnticks_t, interval) + __field(xntmode_t, mode) + ), + + TP_fast_assign( + __entry->timer = timer; +#ifdef CONFIG_XENO_OPT_STATS + __assign_str(name, timer->name); +#endif + __entry->value = value; + __entry->interval = interval; + __entry->mode = mode; + ), + + TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s", + __entry->timer, +#ifdef CONFIG_XENO_OPT_STATS + __get_str(name), +#else + "(anon)", +#endif + __entry->value, __entry->interval, + cobalt_print_timer_mode(__entry->mode)) +); + +#ifdef CONFIG_SMP + +TRACE_EVENT(cobalt_timer_migrate, + TP_PROTO(struct xntimer *timer, unsigned int cpu), + TP_ARGS(timer, cpu), + + TP_STRUCT__entry( + __field(struct xntimer *, timer) + __field(unsigned int, cpu) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->cpu = cpu; + ), + + TP_printk("timer=%p cpu=%u", + __entry->timer, __entry->cpu) +); + +#endif /* CONFIG_SMP */ + +DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_release, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_flush, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_forget, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(registry_event, cobalt_registry_enter, + TP_PROTO(const char *key, void *addr), + TP_ARGS(key, addr) +); + +DEFINE_EVENT(registry_event, cobalt_registry_remove, + TP_PROTO(const char *key, void *addr), + TP_ARGS(key, addr) +); + +DEFINE_EVENT(registry_event, cobalt_registry_unlink, + TP_PROTO(const char *key, void *addr), + TP_ARGS(key, addr) +); + +TRACE_EVENT(cobalt_tick_shot, + TP_PROTO(s64 delta), + TP_ARGS(delta), + + TP_STRUCT__entry( + __field(u64, secs) + __field(u32, nsecs) + __field(s64, delta) + ), + + TP_fast_assign( + __entry->delta = delta; + __entry->secs = div_u64_rem(trace_clock_local() + delta, + NSEC_PER_SEC, &__entry->nsecs); + ), + + TP_printk("next tick at %Lu.%06u (delay: %Ld us)", + (unsigned long long)__entry->secs, + __entry->nsecs 
/ 1000, div_s64(__entry->delta, 1000)) +); + +TRACE_EVENT(cobalt_trace, + TP_PROTO(const char *msg), + TP_ARGS(msg), + TP_STRUCT__entry( + __string(msg, msg) + ), + TP_fast_assign( + __assign_str(msg, msg); + ), + TP_printk("%s", __get_str(msg)) +); + +TRACE_EVENT(cobalt_trace_longval, + TP_PROTO(int id, u64 val), + TP_ARGS(id, val), + TP_STRUCT__entry( + __field(int, id) + __field(u64, val) + ), + TP_fast_assign( + __entry->id = id; + __entry->val = val; + ), + TP_printk("id=%#x, v=%llu", __entry->id, __entry->val) +); + +TRACE_EVENT(cobalt_trace_pid, + TP_PROTO(pid_t pid, int prio), + TP_ARGS(pid, prio), + TP_STRUCT__entry( + __field(pid_t, pid) + __field(int, prio) + ), + TP_fast_assign( + __entry->pid = pid; + __entry->prio = prio; + ), + TP_printk("pid=%d, prio=%d", __entry->pid, __entry->prio) +); + +TRACE_EVENT(cobalt_latpeak, + TP_PROTO(int latmax_ns), + TP_ARGS(latmax_ns), + TP_STRUCT__entry( + __field(int, latmax_ns) + ), + TP_fast_assign( + __entry->latmax_ns = latmax_ns; + ), + TP_printk("** latency peak: %d.%.3d us **", + __entry->latmax_ns / 1000, + __entry->latmax_ns % 1000) +); + +/* Basically cobalt_trace() + trigger point */ +TRACE_EVENT(cobalt_trigger, + TP_PROTO(const char *issuer), + TP_ARGS(issuer), + TP_STRUCT__entry( + __string(issuer, issuer) + ), + TP_fast_assign( + __assign_str(issuer, issuer); + ), + TP_printk("%s", __get_str(issuer)) +); + +#endif /* _TRACE_COBALT_CORE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cobalt-core +#include <trace/define_trace.h> diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h new file mode 100644 index 0000000..2bc004d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h @@ -0,0 +1,1186 @@ +/* + * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>. + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cobalt_posix + +#if !defined(_TRACE_COBALT_POSIX_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_COBALT_POSIX_H + +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> +#include <xenomai/posix/cond.h> +#include <xenomai/posix/mqueue.h> +#include <xenomai/posix/event.h> + +#define __timespec_fields(__name) \ + __field(time64_t, tv_sec_##__name) \ + __field(long, tv_nsec_##__name) + +#define __assign_timespec(__to, __from) \ + do { \ + __entry->tv_sec_##__to = (__from)->tv_sec; \ + __entry->tv_nsec_##__to = (__from)->tv_nsec; \ + } while (0) + +#define __timespec_args(__name) \ + (long long)__entry->tv_sec_##__name, __entry->tv_nsec_##__name + +#ifdef CONFIG_IA32_EMULATION +#define __sc_compat(__name) , { sc_cobalt_##__name + __COBALT_IA32_BASE, "compat-" #__name } +#else +#define __sc_compat(__name) +#endif + +#define __cobalt_symbolic_syscall(__name) \ + { sc_cobalt_##__name, #__name } \ + __sc_compat(__name) \ + +#define __cobalt_syscall_name(__nr) \ + __print_symbolic((__nr), \ + __cobalt_symbolic_syscall(bind), \ + __cobalt_symbolic_syscall(thread_create), \ + __cobalt_symbolic_syscall(thread_getpid), \ + __cobalt_symbolic_syscall(thread_setmode), \ + __cobalt_symbolic_syscall(thread_setname), \ + __cobalt_symbolic_syscall(thread_join), \ + __cobalt_symbolic_syscall(thread_kill), \ + __cobalt_symbolic_syscall(thread_setschedparam_ex), \ + __cobalt_symbolic_syscall(thread_getschedparam_ex), \ + __cobalt_symbolic_syscall(thread_setschedprio), \ + __cobalt_symbolic_syscall(thread_getstat), \ + __cobalt_symbolic_syscall(sem_init), \ + __cobalt_symbolic_syscall(sem_destroy), \ + __cobalt_symbolic_syscall(sem_post), \ + __cobalt_symbolic_syscall(sem_wait), \ + __cobalt_symbolic_syscall(sem_trywait), \ + __cobalt_symbolic_syscall(sem_getvalue), \ + __cobalt_symbolic_syscall(sem_open), \ + __cobalt_symbolic_syscall(sem_close), \ + __cobalt_symbolic_syscall(sem_unlink), \ + __cobalt_symbolic_syscall(sem_timedwait), \ + __cobalt_symbolic_syscall(sem_inquire), \ + __cobalt_symbolic_syscall(sem_broadcast_np), \ + __cobalt_symbolic_syscall(clock_getres), \ + __cobalt_symbolic_syscall(clock_gettime), \ + __cobalt_symbolic_syscall(clock_settime), \ + __cobalt_symbolic_syscall(clock_nanosleep), \ + __cobalt_symbolic_syscall(mutex_init), \ + __cobalt_symbolic_syscall(mutex_check_init), \ + __cobalt_symbolic_syscall(mutex_destroy), \ + __cobalt_symbolic_syscall(mutex_lock), \ + __cobalt_symbolic_syscall(mutex_timedlock), \ + __cobalt_symbolic_syscall(mutex_trylock), \ + __cobalt_symbolic_syscall(mutex_unlock), \ + __cobalt_symbolic_syscall(cond_init), \ + __cobalt_symbolic_syscall(cond_destroy), \ + __cobalt_symbolic_syscall(cond_wait_prologue), \ + __cobalt_symbolic_syscall(cond_wait_epilogue), \ + __cobalt_symbolic_syscall(mq_open), \ + __cobalt_symbolic_syscall(mq_close), \ + __cobalt_symbolic_syscall(mq_unlink), \ + __cobalt_symbolic_syscall(mq_getattr), \ + __cobalt_symbolic_syscall(mq_timedsend), \ + __cobalt_symbolic_syscall(mq_timedreceive), \ + __cobalt_symbolic_syscall(mq_notify), \ + __cobalt_symbolic_syscall(sched_minprio), \ + __cobalt_symbolic_syscall(sched_maxprio), \ + __cobalt_symbolic_syscall(sched_weightprio), \ + __cobalt_symbolic_syscall(sched_yield), \ + __cobalt_symbolic_syscall(sched_setscheduler_ex), \ + __cobalt_symbolic_syscall(sched_getscheduler_ex), \ + __cobalt_symbolic_syscall(sched_setconfig_np), \ + __cobalt_symbolic_syscall(sched_getconfig_np), \ + __cobalt_symbolic_syscall(timer_create), \ + 
__cobalt_symbolic_syscall(timer_delete), \ + __cobalt_symbolic_syscall(timer_settime), \ + __cobalt_symbolic_syscall(timer_gettime), \ + __cobalt_symbolic_syscall(timer_getoverrun), \ + __cobalt_symbolic_syscall(timerfd_create), \ + __cobalt_symbolic_syscall(timerfd_settime), \ + __cobalt_symbolic_syscall(timerfd_gettime), \ + __cobalt_symbolic_syscall(sigwait), \ + __cobalt_symbolic_syscall(sigwaitinfo), \ + __cobalt_symbolic_syscall(sigtimedwait), \ + __cobalt_symbolic_syscall(sigpending), \ + __cobalt_symbolic_syscall(kill), \ + __cobalt_symbolic_syscall(sigqueue), \ + __cobalt_symbolic_syscall(monitor_init), \ + __cobalt_symbolic_syscall(monitor_destroy), \ + __cobalt_symbolic_syscall(monitor_enter), \ + __cobalt_symbolic_syscall(monitor_wait), \ + __cobalt_symbolic_syscall(monitor_sync), \ + __cobalt_symbolic_syscall(monitor_exit), \ + __cobalt_symbolic_syscall(event_init), \ + __cobalt_symbolic_syscall(event_wait), \ + __cobalt_symbolic_syscall(event_sync), \ + __cobalt_symbolic_syscall(event_destroy), \ + __cobalt_symbolic_syscall(event_inquire), \ + __cobalt_symbolic_syscall(open), \ + __cobalt_symbolic_syscall(socket), \ + __cobalt_symbolic_syscall(close), \ + __cobalt_symbolic_syscall(ioctl), \ + __cobalt_symbolic_syscall(read), \ + __cobalt_symbolic_syscall(write), \ + __cobalt_symbolic_syscall(recvmsg), \ + __cobalt_symbolic_syscall(sendmsg), \ + __cobalt_symbolic_syscall(mmap), \ + __cobalt_symbolic_syscall(select), \ + __cobalt_symbolic_syscall(fcntl), \ + __cobalt_symbolic_syscall(migrate), \ + __cobalt_symbolic_syscall(archcall), \ + __cobalt_symbolic_syscall(trace), \ + __cobalt_symbolic_syscall(corectl), \ + __cobalt_symbolic_syscall(get_current), \ + __cobalt_symbolic_syscall(backtrace), \ + __cobalt_symbolic_syscall(serialdbg), \ + __cobalt_symbolic_syscall(extend), \ + __cobalt_symbolic_syscall(ftrace_puts), \ + __cobalt_symbolic_syscall(recvmmsg), \ + __cobalt_symbolic_syscall(sendmmsg), \ + __cobalt_symbolic_syscall(clock_adjtime), \ + __cobalt_symbolic_syscall(sem_timedwait64), \ + __cobalt_symbolic_syscall(clock_gettime64), \ + __cobalt_symbolic_syscall(clock_settime64), \ + __cobalt_symbolic_syscall(clock_nanosleep64), \ + __cobalt_symbolic_syscall(clock_getres64), \ + __cobalt_symbolic_syscall(clock_adjtime64), \ + __cobalt_symbolic_syscall(mutex_timedlock64), \ + __cobalt_symbolic_syscall(mq_timedsend64), \ + __cobalt_symbolic_syscall(mq_timedreceive64), \ + __cobalt_symbolic_syscall(sigtimedwait64), \ + __cobalt_symbolic_syscall(monitor_wait64), \ + __cobalt_symbolic_syscall(event_wait64), \ + __cobalt_symbolic_syscall(recvmmsg64)) + +DECLARE_EVENT_CLASS(cobalt_syscall_entry, + TP_PROTO(unsigned int nr), + TP_ARGS(nr), + + TP_STRUCT__entry( + __field(unsigned int, nr) + ), + + TP_fast_assign( + __entry->nr = nr; + ), + + TP_printk("syscall=%s", __cobalt_syscall_name(__entry->nr)) +); + +DECLARE_EVENT_CLASS(cobalt_syscall_exit, + TP_PROTO(long result), + TP_ARGS(result), + + TP_STRUCT__entry( + __field(long, result) + ), + + TP_fast_assign( + __entry->result = result; + ), + + TP_printk("result=%ld", __entry->result) +); + +#define cobalt_print_sched_policy(__policy) \ + __print_symbolic(__policy, \ + {SCHED_NORMAL, "normal"}, \ + {SCHED_FIFO, "fifo"}, \ + {SCHED_RR, "rr"}, \ + {SCHED_TP, "tp"}, \ + {SCHED_QUOTA, "quota"}, \ + {SCHED_SPORADIC, "sporadic"}, \ + {SCHED_COBALT, "cobalt"}, \ + {SCHED_WEAK, "weak"}) + +const char *cobalt_trace_parse_sched_params(struct trace_seq *, int, + struct sched_param_ex *); + +#define __parse_sched_params(policy, params) \ + 
cobalt_trace_parse_sched_params(p, policy, \ + (struct sched_param_ex *)(params)) + +DECLARE_EVENT_CLASS(cobalt_posix_schedparam, + TP_PROTO(unsigned long pth, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pth, policy, param_ex), + + TP_STRUCT__entry( + __field(unsigned long, pth) + __field(int, policy) + __dynamic_array(char, param_ex, sizeof(struct sched_param_ex)) + ), + + TP_fast_assign( + __entry->pth = pth; + __entry->policy = policy; + memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex)); + ), + + TP_printk("pth=%p policy=%s param={ %s }", + (void *)__entry->pth, + cobalt_print_sched_policy(__entry->policy), + __parse_sched_params(__entry->policy, + __get_dynamic_array(param_ex)) + ) +); + +DECLARE_EVENT_CLASS(cobalt_posix_scheduler, + TP_PROTO(pid_t pid, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pid, policy, param_ex), + + TP_STRUCT__entry( + __field(pid_t, pid) + __field(int, policy) + __dynamic_array(char, param_ex, sizeof(struct sched_param_ex)) + ), + + TP_fast_assign( + __entry->pid = pid; + __entry->policy = policy; + memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex)); + ), + + TP_printk("pid=%d policy=%s param={ %s }", + __entry->pid, + cobalt_print_sched_policy(__entry->policy), + __parse_sched_params(__entry->policy, + __get_dynamic_array(param_ex)) + ) +); + +DECLARE_EVENT_CLASS(cobalt_void, + TP_PROTO(int dummy), + TP_ARGS(dummy), + TP_STRUCT__entry( + __field(int, dummy) + ), + TP_fast_assign( + (void)dummy; + ), + TP_printk("%s", "") +); + +DEFINE_EVENT(cobalt_syscall_entry, cobalt_head_sysentry, + TP_PROTO(unsigned int nr), + TP_ARGS(nr) +); + +DEFINE_EVENT(cobalt_syscall_exit, cobalt_head_sysexit, + TP_PROTO(long result), + TP_ARGS(result) +); + +DEFINE_EVENT(cobalt_syscall_entry, cobalt_root_sysentry, + TP_PROTO(unsigned int nr), + TP_ARGS(nr) +); + +DEFINE_EVENT(cobalt_syscall_exit, cobalt_root_sysexit, + TP_PROTO(long result), + TP_ARGS(result) +); + +DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_create, + TP_PROTO(unsigned long pth, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pth, policy, param_ex) +); + +DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_setschedparam, + TP_PROTO(unsigned long pth, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pth, policy, param_ex) +); + +DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_getschedparam, + TP_PROTO(unsigned long pth, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pth, policy, param_ex) +); + +TRACE_EVENT(cobalt_pthread_setschedprio, + TP_PROTO(unsigned long pth, int prio), + TP_ARGS(pth, prio), + TP_STRUCT__entry( + __field(unsigned long, pth) + __field(int, prio) + ), + TP_fast_assign( + __entry->pth = pth; + __entry->prio = prio; + ), + TP_printk("pth=%p prio=%d", (void *)__entry->pth, __entry->prio) +); + +#define cobalt_print_thread_mode(__mode) \ + __print_flags(__mode, "|", \ + {PTHREAD_WARNSW, "warnsw"}, \ + {PTHREAD_LOCK_SCHED, "lock"}, \ + {PTHREAD_DISABLE_LOCKBREAK, "nolockbreak"}) + +TRACE_EVENT(cobalt_pthread_setmode, + TP_PROTO(int clrmask, int setmask), + TP_ARGS(clrmask, setmask), + TP_STRUCT__entry( + __field(int, clrmask) + __field(int, setmask) + ), + TP_fast_assign( + __entry->clrmask = clrmask; + __entry->setmask = setmask; + ), + TP_printk("clrmask=%#x(%s) setmask=%#x(%s)", + __entry->clrmask, cobalt_print_thread_mode(__entry->clrmask), + __entry->setmask, cobalt_print_thread_mode(__entry->setmask)) +); + +TRACE_EVENT(cobalt_pthread_setname, + 
TP_PROTO(unsigned long pth, const char *name), + TP_ARGS(pth, name), + TP_STRUCT__entry( + __field(unsigned long, pth) + __string(name, name) + ), + TP_fast_assign( + __entry->pth = pth; + __assign_str(name, name); + ), + TP_printk("pth=%p name=%s", (void *)__entry->pth, __get_str(name)) +); + +DECLARE_EVENT_CLASS(cobalt_posix_pid, + TP_PROTO(pid_t pid), + TP_ARGS(pid), + TP_STRUCT__entry( + __field(pid_t, pid) + ), + TP_fast_assign( + __entry->pid = pid; + ), + TP_printk("pid=%d", __entry->pid) +); + +DEFINE_EVENT(cobalt_posix_pid, cobalt_pthread_stat, + TP_PROTO(pid_t pid), + TP_ARGS(pid) +); + +TRACE_EVENT(cobalt_pthread_kill, + TP_PROTO(unsigned long pth, int sig), + TP_ARGS(pth, sig), + TP_STRUCT__entry( + __field(unsigned long, pth) + __field(int, sig) + ), + TP_fast_assign( + __entry->pth = pth; + __entry->sig = sig; + ), + TP_printk("pth=%p sig=%d", (void *)__entry->pth, __entry->sig) +); + +TRACE_EVENT(cobalt_pthread_join, + TP_PROTO(unsigned long pth), + TP_ARGS(pth), + TP_STRUCT__entry( + __field(unsigned long, pth) + ), + TP_fast_assign( + __entry->pth = pth; + ), + TP_printk("pth=%p", (void *)__entry->pth) +); + +TRACE_EVENT(cobalt_pthread_pid, + TP_PROTO(unsigned long pth), + TP_ARGS(pth), + TP_STRUCT__entry( + __field(unsigned long, pth) + ), + TP_fast_assign( + __entry->pth = pth; + ), + TP_printk("pth=%p", (void *)__entry->pth) +); + +TRACE_EVENT(cobalt_pthread_extend, + TP_PROTO(unsigned long pth, const char *name), + TP_ARGS(pth, name), + TP_STRUCT__entry( + __field(unsigned long, pth) + __string(name, name) + ), + TP_fast_assign( + __entry->pth = pth; + __assign_str(name, name); + ), + TP_printk("pth=%p +personality=%s", (void *)__entry->pth, __get_str(name)) +); + +TRACE_EVENT(cobalt_pthread_restrict, + TP_PROTO(unsigned long pth, const char *name), + TP_ARGS(pth, name), + TP_STRUCT__entry( + __field(unsigned long, pth) + __string(name, name) + ), + TP_fast_assign( + __entry->pth = pth; + __assign_str(name, name); + ), + TP_printk("pth=%p -personality=%s", (void *)__entry->pth, __get_str(name)) +); + +DEFINE_EVENT(cobalt_void, cobalt_pthread_yield, + TP_PROTO(int dummy), + TP_ARGS(dummy) +); + +TRACE_EVENT(cobalt_sched_setconfig, + TP_PROTO(int cpu, int policy, size_t len), + TP_ARGS(cpu, policy, len), + TP_STRUCT__entry( + __field(int, cpu) + __field(int, policy) + __field(size_t, len) + ), + TP_fast_assign( + __entry->cpu = cpu; + __entry->policy = policy; + __entry->len = len; + ), + TP_printk("cpu=%d policy=%d(%s) len=%zu", + __entry->cpu, __entry->policy, + cobalt_print_sched_policy(__entry->policy), + __entry->len) +); + +TRACE_EVENT(cobalt_sched_get_config, + TP_PROTO(int cpu, int policy, size_t rlen), + TP_ARGS(cpu, policy, rlen), + TP_STRUCT__entry( + __field(int, cpu) + __field(int, policy) + __field(ssize_t, rlen) + ), + TP_fast_assign( + __entry->cpu = cpu; + __entry->policy = policy; + __entry->rlen = rlen; + ), + TP_printk("cpu=%d policy=%d(%s) rlen=%Zd", + __entry->cpu, __entry->policy, + cobalt_print_sched_policy(__entry->policy), + __entry->rlen) +); + +DEFINE_EVENT(cobalt_posix_scheduler, cobalt_sched_setscheduler, + TP_PROTO(pid_t pid, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pid, policy, param_ex) +); + +DEFINE_EVENT(cobalt_posix_pid, cobalt_sched_getscheduler, + TP_PROTO(pid_t pid), + TP_ARGS(pid) +); + +DECLARE_EVENT_CLASS(cobalt_posix_prio_bound, + TP_PROTO(int policy, int prio), + TP_ARGS(policy, prio), + TP_STRUCT__entry( + __field(int, policy) + __field(int, prio) + ), + TP_fast_assign( + __entry->policy = policy; + 
__entry->prio = prio; + ), + TP_printk("policy=%d(%s) prio=%d", + __entry->policy, + cobalt_print_sched_policy(__entry->policy), + __entry->prio) +); + +DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_min_prio, + TP_PROTO(int policy, int prio), + TP_ARGS(policy, prio) +); + +DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_max_prio, + TP_PROTO(int policy, int prio), + TP_ARGS(policy, prio) +); + +DECLARE_EVENT_CLASS(cobalt_posix_sem, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle), + TP_STRUCT__entry( + __field(xnhandle_t, handle) + ), + TP_fast_assign( + __entry->handle = handle; + ), + TP_printk("sem=%#x", __entry->handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_wait, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_trywait, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_timedwait, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_post, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_destroy, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_broadcast, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_inquire, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +TRACE_EVENT(cobalt_psem_getvalue, + TP_PROTO(xnhandle_t handle, int value), + TP_ARGS(handle, value), + TP_STRUCT__entry( + __field(xnhandle_t, handle) + __field(int, value) + ), + TP_fast_assign( + __entry->handle = handle; + __entry->value = value; + ), + TP_printk("sem=%#x value=%d", __entry->handle, __entry->value) +); + +#define cobalt_print_sem_flags(__flags) \ + __print_flags(__flags, "|", \ + {SEM_FIFO, "fifo"}, \ + {SEM_PULSE, "pulse"}, \ + {SEM_PSHARED, "pshared"}, \ + {SEM_REPORT, "report"}, \ + {SEM_WARNDEL, "warndel"}, \ + {SEM_RAWCLOCK, "rawclock"}, \ + {SEM_NOBUSYDEL, "nobusydel"}) + +TRACE_EVENT(cobalt_psem_init, + TP_PROTO(const char *name, xnhandle_t handle, + int flags, unsigned int value), + TP_ARGS(name, handle, flags, value), + TP_STRUCT__entry( + __string(name, name) + __field(xnhandle_t, handle) + __field(int, flags) + __field(unsigned int, value) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->handle = handle; + __entry->flags = flags; + __entry->value = value; + ), + TP_printk("sem=%#x(%s) flags=%#x(%s) value=%u", + __entry->handle, + __get_str(name), + __entry->flags, + cobalt_print_sem_flags(__entry->flags), + __entry->value) +); + +TRACE_EVENT(cobalt_psem_init_failed, + TP_PROTO(const char *name, int flags, unsigned int value, int status), + TP_ARGS(name, flags, value, status), + TP_STRUCT__entry( + __string(name, name) + __field(int, flags) + __field(unsigned int, value) + __field(int, status) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->flags = flags; + __entry->value = value; + __entry->status = status; + ), + TP_printk("name=%s flags=%#x(%s) value=%u error=%d", + __get_str(name), + __entry->flags, + cobalt_print_sem_flags(__entry->flags), + __entry->value, __entry->status) +); + +#define cobalt_print_oflags(__flags) \ + __print_flags(__flags, "|", \ + {O_RDONLY, "rdonly"}, \ + {O_WRONLY, "wronly"}, \ + {O_RDWR, "rdwr"}, \ + {O_CREAT, "creat"}, \ + {O_EXCL, "excl"}, \ + {O_DIRECT, "direct"}, \ + {O_NONBLOCK, "nonblock"}, \ + {O_TRUNC, "trunc"}) + +TRACE_EVENT(cobalt_psem_open, + TP_PROTO(const char *name, xnhandle_t handle, + int oflags, mode_t mode, 
unsigned int value), + TP_ARGS(name, handle, oflags, mode, value), + TP_STRUCT__entry( + __string(name, name) + __field(xnhandle_t, handle) + __field(int, oflags) + __field(mode_t, mode) + __field(unsigned int, value) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->handle = handle; + __entry->oflags = oflags; + if (oflags & O_CREAT) { + __entry->mode = mode; + __entry->value = value; + } else { + __entry->mode = 0; + __entry->value = 0; + } + ), + TP_printk("named_sem=%#x=(%s) oflags=%#x(%s) mode=%o value=%u", + __entry->handle, __get_str(name), + __entry->oflags, cobalt_print_oflags(__entry->oflags), + __entry->mode, __entry->value) +); + +TRACE_EVENT(cobalt_psem_open_failed, + TP_PROTO(const char *name, int oflags, mode_t mode, + unsigned int value, int status), + TP_ARGS(name, oflags, mode, value, status), + TP_STRUCT__entry( + __string(name, name) + __field(int, oflags) + __field(mode_t, mode) + __field(unsigned int, value) + __field(int, status) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->oflags = oflags; + __entry->status = status; + if (oflags & O_CREAT) { + __entry->mode = mode; + __entry->value = value; + } else { + __entry->mode = 0; + __entry->value = 0; + } + ), + TP_printk("named_sem=%s oflags=%#x(%s) mode=%o value=%u error=%d", + __get_str(name), + __entry->oflags, cobalt_print_oflags(__entry->oflags), + __entry->mode, __entry->value, __entry->status) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_close, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +TRACE_EVENT(cobalt_psem_unlink, + TP_PROTO(const char *name), + TP_ARGS(name), + TP_STRUCT__entry( + __string(name, name) + ), + TP_fast_assign( + __assign_str(name, name); + ), + TP_printk("name=%s", __get_str(name)) +); + +DECLARE_EVENT_CLASS(cobalt_clock_timespec, + TP_PROTO(clockid_t clk_id, const struct timespec64 *val), + TP_ARGS(clk_id, val), + + TP_STRUCT__entry( + __field(clockid_t, clk_id) + __timespec_fields(val) + ), + + TP_fast_assign( + __entry->clk_id = clk_id; + __assign_timespec(val, val); + ), + + TP_printk("clock_id=%d timeval=(%lld.%09ld)", + __entry->clk_id, + __timespec_args(val) + ) +); + +DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_getres, + TP_PROTO(clockid_t clk_id, const struct timespec64 *res), + TP_ARGS(clk_id, res) +); + +DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_gettime, + TP_PROTO(clockid_t clk_id, const struct timespec64 *time), + TP_ARGS(clk_id, time) +); + +DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_settime, + TP_PROTO(clockid_t clk_id, const struct timespec64 *time), + TP_ARGS(clk_id, time) +); + +TRACE_EVENT(cobalt_clock_adjtime, + TP_PROTO(clockid_t clk_id, struct __kernel_timex *tx), + TP_ARGS(clk_id, tx), + + TP_STRUCT__entry( + __field(clockid_t, clk_id) + __field(struct __kernel_timex *, tx) + ), + + TP_fast_assign( + __entry->clk_id = clk_id; + __entry->tx = tx; + ), + + TP_printk("clock_id=%d timex=%p", + __entry->clk_id, + __entry->tx + ) +); + +#define cobalt_print_timer_flags(__flags) \ + __print_flags(__flags, "|", \ + {TIMER_ABSTIME, "TIMER_ABSTIME"}) + +TRACE_EVENT(cobalt_clock_nanosleep, + TP_PROTO(clockid_t clk_id, int flags, const struct timespec64 *time), + TP_ARGS(clk_id, flags, time), + + TP_STRUCT__entry( + __field(clockid_t, clk_id) + __field(int, flags) + __timespec_fields(time) + ), + + TP_fast_assign( + __entry->clk_id = clk_id; + __entry->flags = flags; + __assign_timespec(time, time); + ), + + TP_printk("clock_id=%d flags=%#x(%s) rqt=(%lld.%09ld)", + __entry->clk_id, + __entry->flags, 
cobalt_print_timer_flags(__entry->flags), + __timespec_args(time) + ) +); + +DECLARE_EVENT_CLASS(cobalt_clock_ident, + TP_PROTO(const char *name, clockid_t clk_id), + TP_ARGS(name, clk_id), + TP_STRUCT__entry( + __string(name, name) + __field(clockid_t, clk_id) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->clk_id = clk_id; + ), + TP_printk("name=%s, id=%#x", __get_str(name), __entry->clk_id) +); + +DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_register, + TP_PROTO(const char *name, clockid_t clk_id), + TP_ARGS(name, clk_id) +); + +DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_deregister, + TP_PROTO(const char *name, clockid_t clk_id), + TP_ARGS(name, clk_id) +); + +#define cobalt_print_clock(__clk_id) \ + __print_symbolic(__clk_id, \ + {CLOCK_MONOTONIC, "CLOCK_MONOTONIC"}, \ + {CLOCK_MONOTONIC_RAW, "CLOCK_MONOTONIC_RAW"}, \ + {CLOCK_REALTIME, "CLOCK_REALTIME"}) + +TRACE_EVENT(cobalt_cond_init, + TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd, + const struct cobalt_condattr *attr), + TP_ARGS(u_cnd, attr), + TP_STRUCT__entry( + __field(const struct cobalt_cond_shadow __user *, u_cnd) + __field(clockid_t, clk_id) + __field(int, pshared) + ), + TP_fast_assign( + __entry->u_cnd = u_cnd; + __entry->clk_id = attr->clock; + __entry->pshared = attr->pshared; + ), + TP_printk("cond=%p attr={ .clock=%s, .pshared=%d }", + __entry->u_cnd, + cobalt_print_clock(__entry->clk_id), + __entry->pshared) +); + +TRACE_EVENT(cobalt_cond_destroy, + TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd), + TP_ARGS(u_cnd), + TP_STRUCT__entry( + __field(const struct cobalt_cond_shadow __user *, u_cnd) + ), + TP_fast_assign( + __entry->u_cnd = u_cnd; + ), + TP_printk("cond=%p", __entry->u_cnd) +); + +TRACE_EVENT(cobalt_cond_timedwait, + TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd, + const struct cobalt_mutex_shadow __user *u_mx, + const struct timespec64 *timeout), + TP_ARGS(u_cnd, u_mx, timeout), + TP_STRUCT__entry( + __field(const struct cobalt_cond_shadow __user *, u_cnd) + __field(const struct cobalt_mutex_shadow __user *, u_mx) + __timespec_fields(timeout) + ), + TP_fast_assign( + __entry->u_cnd = u_cnd; + __entry->u_mx = u_mx; + __assign_timespec(timeout, timeout); + ), + TP_printk("cond=%p, mutex=%p, timeout=(%lld.%09ld)", + __entry->u_cnd, __entry->u_mx, __timespec_args(timeout)) +); + +TRACE_EVENT(cobalt_cond_wait, + TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd, + const struct cobalt_mutex_shadow __user *u_mx), + TP_ARGS(u_cnd, u_mx), + TP_STRUCT__entry( + __field(const struct cobalt_cond_shadow __user *, u_cnd) + __field(const struct cobalt_mutex_shadow __user *, u_mx) + ), + TP_fast_assign( + __entry->u_cnd = u_cnd; + __entry->u_mx = u_mx; + ), + TP_printk("cond=%p, mutex=%p", + __entry->u_cnd, __entry->u_mx) +); + +TRACE_EVENT(cobalt_mq_open, + TP_PROTO(const char *name, int oflags, mode_t mode), + TP_ARGS(name, oflags, mode), + + TP_STRUCT__entry( + __string(name, name) + __field(int, oflags) + __field(mode_t, mode) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->oflags = oflags; + __entry->mode = (oflags & O_CREAT) ? 
mode : 0; + ), + + TP_printk("name=%s oflags=%#x(%s) mode=%o", + __get_str(name), + __entry->oflags, cobalt_print_oflags(__entry->oflags), + __entry->mode) +); + +TRACE_EVENT(cobalt_mq_notify, + TP_PROTO(mqd_t mqd, const struct sigevent *sev), + TP_ARGS(mqd, sev), + + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(int, signo) + ), + + TP_fast_assign( + __entry->mqd = mqd; + __entry->signo = sev && sev->sigev_notify != SIGEV_NONE ? + sev->sigev_signo : 0; + ), + + TP_printk("mqd=%d signo=%d", + __entry->mqd, __entry->signo) +); + +TRACE_EVENT(cobalt_mq_close, + TP_PROTO(mqd_t mqd), + TP_ARGS(mqd), + + TP_STRUCT__entry( + __field(mqd_t, mqd) + ), + + TP_fast_assign( + __entry->mqd = mqd; + ), + + TP_printk("mqd=%d", __entry->mqd) +); + +TRACE_EVENT(cobalt_mq_unlink, + TP_PROTO(const char *name), + TP_ARGS(name), + + TP_STRUCT__entry( + __string(name, name) + ), + + TP_fast_assign( + __assign_str(name, name); + ), + + TP_printk("name=%s", __get_str(name)) +); + +TRACE_EVENT(cobalt_mq_send, + TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len, + unsigned int prio), + TP_ARGS(mqd, u_buf, len, prio), + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(const void __user *, u_buf) + __field(size_t, len) + __field(unsigned int, prio) + ), + TP_fast_assign( + __entry->mqd = mqd; + __entry->u_buf = u_buf; + __entry->len = len; + __entry->prio = prio; + ), + TP_printk("mqd=%d buf=%p len=%zu prio=%u", + __entry->mqd, __entry->u_buf, __entry->len, + __entry->prio) +); + +TRACE_EVENT(cobalt_mq_timedreceive, + TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len, + const struct timespec64 *timeout), + TP_ARGS(mqd, u_buf, len, timeout), + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(const void __user *, u_buf) + __field(size_t, len) + __timespec_fields(timeout) + ), + TP_fast_assign( + __entry->mqd = mqd; + __entry->u_buf = u_buf; + __entry->len = len; + __assign_timespec(timeout, timeout); + ), + TP_printk("mqd=%d buf=%p len=%zu timeout=(%lld.%09ld)", + __entry->mqd, __entry->u_buf, __entry->len, + __timespec_args(timeout)) +); + +TRACE_EVENT(cobalt_mq_receive, + TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len), + TP_ARGS(mqd, u_buf, len), + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(const void __user *, u_buf) + __field(size_t, len) + ), + TP_fast_assign( + __entry->mqd = mqd; + __entry->u_buf = u_buf; + __entry->len = len; + ), + TP_printk("mqd=%d buf=%p len=%zu", + __entry->mqd, __entry->u_buf, __entry->len) +); + +DECLARE_EVENT_CLASS(cobalt_posix_mqattr, + TP_PROTO(mqd_t mqd, const struct mq_attr *attr), + TP_ARGS(mqd, attr), + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(long, flags) + __field(long, curmsgs) + __field(long, msgsize) + __field(long, maxmsg) + ), + TP_fast_assign( + __entry->mqd = mqd; + __entry->flags = attr->mq_flags; + __entry->curmsgs = attr->mq_curmsgs; + __entry->msgsize = attr->mq_msgsize; + __entry->maxmsg = attr->mq_maxmsg; + ), + TP_printk("mqd=%d flags=%#lx(%s) curmsgs=%ld msgsize=%ld maxmsg=%ld", + __entry->mqd, + __entry->flags, cobalt_print_oflags(__entry->flags), + __entry->curmsgs, + __entry->msgsize, + __entry->maxmsg + ) +); + +DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_getattr, + TP_PROTO(mqd_t mqd, const struct mq_attr *attr), + TP_ARGS(mqd, attr) +); + +DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_setattr, + TP_PROTO(mqd_t mqd, const struct mq_attr *attr), + TP_ARGS(mqd, attr) +); + +#define cobalt_print_evflags(__flags) \ + __print_flags(__flags, "|", \ + {COBALT_EVENT_SHARED, "shared"}, \ + {COBALT_EVENT_PRIO, "prio"}) + 
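The DECLARE_EVENT_CLASS/DEFINE_EVENT split used throughout this header keeps the generated tracing code compact: the class emits the record layout, assignment and print logic once, and each DEFINE_EVENT merely instantiates another trace_<name>() entry point on top of it. For reference, here is a minimal sketch of how one of the message-queue events above would be fired and observed; the call site is illustrative only, not the actual Cobalt syscall path:

	/* Illustrative call site: trace_cobalt_mq_send() is the static
	 * inline generated from TRACE_EVENT(cobalt_mq_send, ...) above,
	 * and compiles to a near no-op until the event is enabled. */
	static ssize_t sketch_mq_send(mqd_t mqd, const void __user *u_buf,
				      size_t len, unsigned int prio)
	{
		trace_cobalt_mq_send(mqd, u_buf, len, prio);
		/* ... the real send path would follow here ... */
		return 0;
	}

Once the event is enabled through tracefs (e.g. events/cobalt_posix/cobalt_mq_send/enable, assuming the cobalt_posix trace system name), each hit is rendered through its TP_printk() format, so helper macros such as cobalt_print_oflags() and cobalt_print_evflags() above turn raw bitmasks into the symbolic "shared|prio"-style strings seen in the trace buffer.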
+TRACE_EVENT(cobalt_event_init, + TP_PROTO(const struct cobalt_event_shadow __user *u_event, + unsigned long value, int flags), + TP_ARGS(u_event, value, flags), + TP_STRUCT__entry( + __field(const struct cobalt_event_shadow __user *, u_event) + __field(unsigned long, value) + __field(int, flags) + ), + TP_fast_assign( + __entry->u_event = u_event; + __entry->value = value; + __entry->flags = flags; + ), + TP_printk("event=%p value=%lu flags=%#x(%s)", + __entry->u_event, __entry->value, + __entry->flags, cobalt_print_evflags(__entry->flags)) +); + +#define cobalt_print_evmode(__mode) \ + __print_symbolic(__mode, \ + {COBALT_EVENT_ANY, "any"}, \ + {COBALT_EVENT_ALL, "all"}) + +TRACE_EVENT(cobalt_event_timedwait, + TP_PROTO(const struct cobalt_event_shadow __user *u_event, + unsigned long bits, int mode, + const struct timespec64 *timeout), + TP_ARGS(u_event, bits, mode, timeout), + TP_STRUCT__entry( + __field(const struct cobalt_event_shadow __user *, u_event) + __field(unsigned long, bits) + __field(int, mode) + __timespec_fields(timeout) + ), + TP_fast_assign( + __entry->u_event = u_event; + __entry->bits = bits; + __entry->mode = mode; + __assign_timespec(timeout, timeout); + ), + TP_printk("event=%p bits=%#lx mode=%#x(%s) timeout=(%lld.%09ld)", + __entry->u_event, __entry->bits, __entry->mode, + cobalt_print_evmode(__entry->mode), + __timespec_args(timeout)) +); + +TRACE_EVENT(cobalt_event_wait, + TP_PROTO(const struct cobalt_event_shadow __user *u_event, + unsigned long bits, int mode), + TP_ARGS(u_event, bits, mode), + TP_STRUCT__entry( + __field(const struct cobalt_event_shadow __user *, u_event) + __field(unsigned long, bits) + __field(int, mode) + ), + TP_fast_assign( + __entry->u_event = u_event; + __entry->bits = bits; + __entry->mode = mode; + ), + TP_printk("event=%p bits=%#lx mode=%#x(%s)", + __entry->u_event, __entry->bits, __entry->mode, + cobalt_print_evmode(__entry->mode)) +); + +DECLARE_EVENT_CLASS(cobalt_event_ident, + TP_PROTO(const struct cobalt_event_shadow __user *u_event), + TP_ARGS(u_event), + TP_STRUCT__entry( + __field(const struct cobalt_event_shadow __user *, u_event) + ), + TP_fast_assign( + __entry->u_event = u_event; + ), + TP_printk("event=%p", __entry->u_event) +); + +DEFINE_EVENT(cobalt_event_ident, cobalt_event_destroy, + TP_PROTO(const struct cobalt_event_shadow __user *u_event), + TP_ARGS(u_event) +); + +DEFINE_EVENT(cobalt_event_ident, cobalt_event_sync, + TP_PROTO(const struct cobalt_event_shadow __user *u_event), + TP_ARGS(u_event) +); + +DEFINE_EVENT(cobalt_event_ident, cobalt_event_inquire, + TP_PROTO(const struct cobalt_event_shadow __user *u_event), + TP_ARGS(u_event) +); + +#endif /* _TRACE_COBALT_POSIX_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cobalt-posix +#include <trace/define_trace.h> diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h new file mode 100644 index 0000000..91b6390 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h @@ -0,0 +1,554 @@ +/* + * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>. + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cobalt_rtdm + +#if !defined(_TRACE_COBALT_RTDM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_COBALT_RTDM_H + +#include <linux/tracepoint.h> +#include <linux/mman.h> +#include <linux/sched.h> + +struct rtdm_fd; +struct rtdm_event; +struct rtdm_sem; +struct rtdm_mutex; +struct xnthread; +struct rtdm_device; +struct rtdm_dev_context; +struct _rtdm_mmap_request; + +DECLARE_EVENT_CLASS(fd_event, + TP_PROTO(struct rtdm_fd *fd, int ufd), + TP_ARGS(fd, ufd), + + TP_STRUCT__entry( + __field(struct rtdm_device *, dev) + __field(int, ufd) + ), + + TP_fast_assign( + __entry->dev = rtdm_fd_to_context(fd)->device; + __entry->ufd = ufd; + ), + + TP_printk("device=%p fd=%d", + __entry->dev, __entry->ufd) +); + +DECLARE_EVENT_CLASS(fd_request, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, unsigned long arg), + TP_ARGS(task, fd, ufd, arg), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(struct rtdm_device *, dev) + __field(int, ufd) + __field(unsigned long, arg) + ), + + TP_fast_assign( + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->pid = task_pid_nr(task); + __entry->dev = rtdm_fd_to_context(fd)->device; + __entry->ufd = ufd; + __entry->arg = arg; + ), + + TP_printk("device=%p fd=%d arg=%#lx pid=%d comm=%s", + __entry->dev, __entry->ufd, __entry->arg, + __entry->pid, __entry->comm) +); + +DECLARE_EVENT_CLASS(fd_request_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, int status), + TP_ARGS(task, fd, ufd, status), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(struct rtdm_device *, dev) + __field(int, ufd) + ), + + TP_fast_assign( + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->pid = task_pid_nr(task); + __entry->dev = + !IS_ERR(fd) ? 
rtdm_fd_to_context(fd)->device : NULL; + __entry->ufd = ufd; + ), + + TP_printk("device=%p fd=%d pid=%d comm=%s", + __entry->dev, __entry->ufd, __entry->pid, __entry->comm) +); + +DECLARE_EVENT_CLASS(task_op, + TP_PROTO(struct xnthread *task), + TP_ARGS(task), + + TP_STRUCT__entry( + __field(struct xnthread *, task) + __string(task_name, task->name) + ), + + TP_fast_assign( + __entry->task = task; + __assign_str(task_name, task->name); + ), + + TP_printk("task %p(%s)", __entry->task, __get_str(task_name)) +); + +DECLARE_EVENT_CLASS(event_op, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev), + + TP_STRUCT__entry( + __field(struct rtdm_event *, ev) + ), + + TP_fast_assign( + __entry->ev = ev; + ), + + TP_printk("event=%p", __entry->ev) +); + +DECLARE_EVENT_CLASS(sem_op, + TP_PROTO(struct rtdm_sem *sem), + TP_ARGS(sem), + + TP_STRUCT__entry( + __field(struct rtdm_sem *, sem) + ), + + TP_fast_assign( + __entry->sem = sem; + ), + + TP_printk("sem=%p", __entry->sem) +); + +DECLARE_EVENT_CLASS(mutex_op, + TP_PROTO(struct rtdm_mutex *mutex), + TP_ARGS(mutex), + + TP_STRUCT__entry( + __field(struct rtdm_mutex *, mutex) + ), + + TP_fast_assign( + __entry->mutex = mutex; + ), + + TP_printk("mutex=%p", __entry->mutex) +); + +TRACE_EVENT(cobalt_device_register, + TP_PROTO(struct rtdm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(struct rtdm_device *, dev) + __string(device_name, dev->name) + __field(int, flags) + __field(int, class_id) + __field(int, subclass_id) + __field(int, profile_version) + ), + + TP_fast_assign( + __entry->dev = dev; + __assign_str(device_name, dev->name); + __entry->flags = dev->driver->device_flags; + __entry->class_id = dev->driver->profile_info.class_id; + __entry->subclass_id = dev->driver->profile_info.subclass_id; + __entry->profile_version = dev->driver->profile_info.version; + ), + + TP_printk("%s device %s=%p flags=0x%x, class=%d.%d profile=%d", + (__entry->flags & RTDM_DEVICE_TYPE_MASK) + == RTDM_NAMED_DEVICE ? 
"named" : "protocol", + __get_str(device_name), __entry->dev, + __entry->flags, __entry->class_id, __entry->subclass_id, + __entry->profile_version) +); + +TRACE_EVENT(cobalt_device_unregister, + TP_PROTO(struct rtdm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(struct rtdm_device *, dev) + __string(device_name, dev->name) + ), + + TP_fast_assign( + __entry->dev = dev; + __assign_str(device_name, dev->name); + ), + + TP_printk("device %s=%p", + __get_str(device_name), __entry->dev) +); + +DEFINE_EVENT(fd_event, cobalt_fd_created, + TP_PROTO(struct rtdm_fd *fd, int ufd), + TP_ARGS(fd, ufd) +); + +DEFINE_EVENT(fd_request, cobalt_fd_open, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long oflags), + TP_ARGS(task, fd, ufd, oflags) +); + +DEFINE_EVENT(fd_request, cobalt_fd_close, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long lock_count), + TP_ARGS(task, fd, ufd, lock_count) +); + +DEFINE_EVENT(fd_request, cobalt_fd_socket, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long protocol_family), + TP_ARGS(task, fd, ufd, protocol_family) +); + +DEFINE_EVENT(fd_request, cobalt_fd_read, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long len), + TP_ARGS(task, fd, ufd, len) +); + +DEFINE_EVENT(fd_request, cobalt_fd_write, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long len), + TP_ARGS(task, fd, ufd, len) +); + +DEFINE_EVENT(fd_request, cobalt_fd_ioctl, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long request), + TP_ARGS(task, fd, ufd, request) +); + +DEFINE_EVENT(fd_request, cobalt_fd_sendmsg, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long flags), + TP_ARGS(task, fd, ufd, flags) +); + +DEFINE_EVENT(fd_request, cobalt_fd_sendmmsg, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long flags), + TP_ARGS(task, fd, ufd, flags) +); + +DEFINE_EVENT(fd_request, cobalt_fd_recvmsg, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long flags), + TP_ARGS(task, fd, ufd, flags) +); + +DEFINE_EVENT(fd_request, cobalt_fd_recvmmsg, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long flags), + TP_ARGS(task, fd, ufd, flags) +); + +#define cobalt_print_protbits(__prot) \ + __print_flags(__prot, "|", \ + {PROT_EXEC, "exec"}, \ + {PROT_READ, "read"}, \ + {PROT_WRITE, "write"}) + +#define cobalt_print_mapbits(__flags) \ + __print_flags(__flags, "|", \ + {MAP_SHARED, "shared"}, \ + {MAP_PRIVATE, "private"}, \ + {MAP_ANONYMOUS, "anon"}, \ + {MAP_FIXED, "fixed"}, \ + {MAP_HUGETLB, "huge"}, \ + {MAP_NONBLOCK, "nonblock"}, \ + {MAP_NORESERVE, "noreserve"}, \ + {MAP_POPULATE, "populate"}, \ + {MAP_UNINITIALIZED, "uninit"}) + +TRACE_EVENT(cobalt_fd_mmap, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, struct _rtdm_mmap_request *rma), + TP_ARGS(task, fd, ufd, rma), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(struct rtdm_device *, dev) + __field(int, ufd) + __field(size_t, length) + __field(off_t, offset) + __field(int, prot) + __field(int, flags) + ), + + TP_fast_assign( + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->pid = task_pid_nr(task); + __entry->dev = rtdm_fd_to_context(fd)->device; + __entry->ufd = ufd; + __entry->length = rma->length; + __entry->offset = rma->offset; + __entry->prot = rma->prot; + 
__entry->flags = rma->flags; + ), + + TP_printk("device=%p fd=%d area={ len:%zu, off:%Lu }" + " prot=%#x(%s) flags=%#x(%s) pid=%d comm=%s", + __entry->dev, __entry->ufd, __entry->length, + (unsigned long long)__entry->offset, + __entry->prot, cobalt_print_protbits(__entry->prot), + __entry->flags, cobalt_print_mapbits(__entry->flags), + __entry->pid, __entry->comm) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_ioctl_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_read_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_write_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_recvmsg_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_recvmmsg_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_sendmsg_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_sendmmsg_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_mmap_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(task_op, cobalt_driver_task_join, + TP_PROTO(struct xnthread *task), + TP_ARGS(task) +); + +TRACE_EVENT(cobalt_driver_event_init, + TP_PROTO(struct rtdm_event *ev, unsigned long pending), + TP_ARGS(ev, pending), + + TP_STRUCT__entry( + __field(struct rtdm_event *, ev) + __field(unsigned long, pending) + ), + + TP_fast_assign( + __entry->ev = ev; + __entry->pending = pending; + ), + + TP_printk("event=%p pending=%#lx", + __entry->ev, __entry->pending) +); + +TRACE_EVENT(cobalt_driver_event_wait, + TP_PROTO(struct rtdm_event *ev, struct xnthread *task), + TP_ARGS(ev, task), + + TP_STRUCT__entry( + __field(struct xnthread *, task) + __string(task_name, task->name) + __field(struct rtdm_event *, ev) + ), + + TP_fast_assign( + __entry->task = task; + __assign_str(task_name, task->name); + __entry->ev = ev; + ), + + TP_printk("event=%p task=%p(%s)", + __entry->ev, __entry->task, __get_str(task_name)) +); + +DEFINE_EVENT(event_op, cobalt_driver_event_signal, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev) +); + +DEFINE_EVENT(event_op, cobalt_driver_event_clear, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev) +); + +DEFINE_EVENT(event_op, cobalt_driver_event_pulse, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev) +); + +DEFINE_EVENT(event_op, cobalt_driver_event_destroy, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev) +); + +TRACE_EVENT(cobalt_driver_sem_init, + TP_PROTO(struct rtdm_sem *sem, unsigned long value), + TP_ARGS(sem, value), + + TP_STRUCT__entry( + __field(struct rtdm_sem *, sem) + __field(unsigned long, value) + ), + + TP_fast_assign( + __entry->sem = sem; + __entry->value = value; + ), + + TP_printk("sem=%p value=%lu", + __entry->sem, __entry->value) +); + 
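These driver-API hooks are fired from within the RTDM driver-library services themselves rather than from individual device drivers. A simplified sketch of the pattern follows; it is not the verbatim Xenomai implementation of rtdm_sem_init(), only an illustration of where such a hook sits:

	/* Sketch only: the service sets up its object, then fires the
	 * matching tracepoint so tracers can correlate the sem pointer
	 * with the wait/up/destroy events recorded later. */
	void sketch_sem_init(struct rtdm_sem *sem, unsigned long value)
	{
		/* ... initialize the underlying synchronization state ... */
		trace_cobalt_driver_sem_init(sem, value);
	}

The same pattern applies to the event, mutex and fd hooks declared in this header: one tracepoint at the entry of each service, plus a *_status companion where the return value is worth recording.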
+TRACE_EVENT(cobalt_driver_sem_wait, + TP_PROTO(struct rtdm_sem *sem, struct xnthread *task), + TP_ARGS(sem, task), + + TP_STRUCT__entry( + __field(struct xnthread *, task) + __string(task_name, task->name) + __field(struct rtdm_sem *, sem) + ), + + TP_fast_assign( + __entry->task = task; + __assign_str(task_name, task->name); + __entry->sem = sem; + ), + + TP_printk("sem=%p task=%p(%s)", + __entry->sem, __entry->task, __get_str(task_name)) +); + +DEFINE_EVENT(sem_op, cobalt_driver_sem_up, + TP_PROTO(struct rtdm_sem *sem), + TP_ARGS(sem) +); + +DEFINE_EVENT(sem_op, cobalt_driver_sem_destroy, + TP_PROTO(struct rtdm_sem *sem), + TP_ARGS(sem) +); + +DEFINE_EVENT(mutex_op, cobalt_driver_mutex_init, + TP_PROTO(struct rtdm_mutex *mutex), + TP_ARGS(mutex) +); + +DEFINE_EVENT(mutex_op, cobalt_driver_mutex_release, + TP_PROTO(struct rtdm_mutex *mutex), + TP_ARGS(mutex) +); + +DEFINE_EVENT(mutex_op, cobalt_driver_mutex_destroy, + TP_PROTO(struct rtdm_mutex *mutex), + TP_ARGS(mutex) +); + +TRACE_EVENT(cobalt_driver_mutex_wait, + TP_PROTO(struct rtdm_mutex *mutex, struct xnthread *task), + TP_ARGS(mutex, task), + + TP_STRUCT__entry( + __field(struct xnthread *, task) + __string(task_name, task->name) + __field(struct rtdm_mutex *, mutex) + ), + + TP_fast_assign( + __entry->task = task; + __assign_str(task_name, task->name); + __entry->mutex = mutex; + ), + + TP_printk("mutex=%p task=%p(%s)", + __entry->mutex, __entry->task, __get_str(task_name)) +); + +#endif /* _TRACE_COBALT_RTDM_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cobalt-rtdm +#include <trace/define_trace.h> diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/tree.c b/kernel/xenomai-v3.2.4/kernel/cobalt/tree.c new file mode 100644 index 0000000..8e2c9bb --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/tree.c @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <cobalt/kernel/tree.h> + +void xntree_cleanup(struct rb_root *t, void *cookie, + void (*destroy)(void *cookie, struct xnid *id)) +{ + struct rb_node *node, *next; + + node = rb_first(t); + while (node) { + next = rb_next(node); + + /* destroy is expected to remove the node from the rbtree */ + destroy(cookie, container_of(node, struct xnid, link)); + + node = next; + } +} + +int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key) +{ + struct rb_node **new = &t->rb_node, *parent = NULL; + + while (*new) { + struct xnid *i = container_of(*new, struct xnid, link); + + parent = *new; + if (key < i->key) + new = &((*new)->rb_left); + else if (key > i->key) + new = &((*new)->rb_right); + else + return -EEXIST; + } + + xnid->key = key; + rb_link_node(&xnid->link, parent, new); + rb_insert_color(&xnid->link, t); + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/udev/00-rtnet.rules b/kernel/xenomai-v3.2.4/kernel/cobalt/udev/00-rtnet.rules new file mode 100644 index 0000000..39df24e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/udev/00-rtnet.rules @@ -0,0 +1,2 @@ +# Don't let udev mess with our special network names +KERNEL=="vnic*|rteth*|rtlo", NAME="$env{INTERFACE_NAME}" diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/udev/rtdm.rules b/kernel/xenomai-v3.2.4/kernel/cobalt/udev/rtdm.rules new file mode 100644 index 0000000..d549eda --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/udev/rtdm.rules @@ -0,0 +1,2 @@ +# Xenomai real-time devices +SUBSYSTEM=="rtdm", MODE="0660", GROUP="xenomai" diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c b/kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c new file mode 100644 index 0000000..05fa48a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c @@ -0,0 +1,976 @@ +/* + * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/stdarg.h> +#include <linux/ctype.h> +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <cobalt/kernel/lock.h> +#include <cobalt/kernel/assert.h> +#include <cobalt/kernel/vfile.h> +#include <asm/xenomai/wrappers.h> + +/** + * @ingroup cobalt_core + * @defgroup cobalt_core_vfile Virtual file services + * + * Virtual files provide a mean to export Xenomai object states to + * user-space, based on common kernel interfaces. This encapsulation + * is aimed at: + * + * - supporting consistent collection of very large record-based + * output, without encurring latency peaks for undergoing real-time + * activities. + * + * - in the future, hiding discrepancies between linux kernel + * releases, regarding the proper way to export kernel object states + * to userland, either via the /proc interface or by any other mean. 
+ * + * This virtual file implementation offers record-based read support + * based on seq_files, single-buffer write support, directory and link + * handling, all visible from the /proc namespace. + * + * The vfile support exposes four filesystem object types: + * + * - snapshot-driven file (struct xnvfile_snapshot). This is commonly + * used to export real-time object states via the /proc filesystem. To + * minimize the latency involved in protecting the vfile routines from + * changes applied by real-time code on such objects, a snapshot of + * the data to output is first taken under proper locking, before the + * collected data is formatted and sent out in a lockless manner. + * + * Because a large number of records may have to be output, the data + * collection phase is not strictly atomic as a whole, but only + * protected at record level. The vfile implementation can be notified + * of updates to the underlying data set, and restart the collection + * from scratch until the snapshot is fully consistent. + * + * - regular sequential file (struct xnvfile_regular). This is + * basically an encapsulated sequential file object as available from + * the host kernel (i.e. seq_file), with a few additional features to + * make it more handy in a Xenomai environment, like implicit locking + * support and shortened declaration for simplest, single-record + * output. + * + * - virtual link (struct xnvfile_link). This is a symbolic link + * feature integrated with the vfile semantics. The link target is + * computed dynamically at creation time from a user-given helper + * routine. + * + * - virtual directory (struct xnvfile_directory). A directory object, + * which can be used to create a hierarchy for ordering a set of vfile + * objects. + * + *@{*/ + +/** + * @var struct xnvfile_directory cobalt_vfroot + * @brief Xenomai vfile root directory + * + * This vdir maps the /proc/xenomai directory. It can be used to + * create a hierarchy of Xenomai-related vfiles under this root. + */ +struct xnvfile_directory cobalt_vfroot; +EXPORT_SYMBOL_GPL(cobalt_vfroot); + +static struct xnvfile_directory sysroot; + +static void *vfile_snapshot_start(struct seq_file *seq, loff_t *offp) +{ + struct xnvfile_snapshot_iterator *it = seq->private; + loff_t pos = *offp; + + if (pos > it->nrdata) + return NULL; + + if (pos == 0) + return SEQ_START_TOKEN; + + return it->databuf + (pos - 1) * it->vfile->datasz; +} + +static void *vfile_snapshot_next(struct seq_file *seq, void *v, loff_t *offp) +{ + struct xnvfile_snapshot_iterator *it = seq->private; + loff_t pos = *offp; + + ++*offp; + + if (pos >= it->nrdata) + return NULL; + + return it->databuf + pos * it->vfile->datasz; +} + +static void vfile_snapshot_stop(struct seq_file *seq, void *v) +{ +} + +static int vfile_snapshot_show(struct seq_file *seq, void *v) +{ + struct xnvfile_snapshot_iterator *it = seq->private; + void *data = v == SEQ_START_TOKEN ? NULL : v; + int ret; + + ret = it->vfile->ops->show(it, data); + + return ret == VFILE_SEQ_SKIP ? 
SEQ_SKIP : ret; +} + +static struct seq_operations vfile_snapshot_ops = { + .start = vfile_snapshot_start, + .next = vfile_snapshot_next, + .stop = vfile_snapshot_stop, + .show = vfile_snapshot_show +}; + +static void vfile_snapshot_free(struct xnvfile_snapshot_iterator *it, void *buf) +{ + kfree(buf); +} + +static int vfile_snapshot_open(struct inode *inode, struct file *file) +{ + struct xnvfile_snapshot *vfile = pde_data(inode); + struct xnvfile_snapshot_ops *ops = vfile->ops; + struct xnvfile_snapshot_iterator *it; + int revtag, ret, nrdata; + struct seq_file *seq; + caddr_t data; + + WARN_ON_ONCE(file->private_data != NULL); + + if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL) + return -EACCES; + + /* + * Make sure to create the seq_file backend only when reading + * from the v-file is possible. + */ + if ((file->f_mode & FMODE_READ) == 0) { + file->private_data = NULL; + return 0; + } + + if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0) + return -EBUSY; + + it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL); + if (it == NULL) + return -ENOMEM; + + it->vfile = vfile; + xnvfile_file(vfile) = file; + + ret = vfile->entry.lockops->get(&vfile->entry); + if (ret) + goto fail; +redo: + /* + * The ->rewind() method is optional; there may be cases where + * we don't have to take an atomic snapshot of the v-file + * contents before proceeding. In case ->rewind() detects a + * stale backend object, it can force us to bail out. + * + * If present, ->rewind() may return a strictly positive + * value, indicating how many records at most may be returned + * by ->next(). We use this hint to allocate the snapshot + * buffer, in case ->begin() is not provided. The size of this + * buffer would then be vfile->datasz * hint value. + * + * If ->begin() is given, we always expect the latter do the + * allocation for us regardless of the hint value. Otherwise, + * a NULL return from ->rewind() tells us that the vfile won't + * output any snapshot data via ->show(). + */ + nrdata = 0; + if (ops->rewind) { + nrdata = ops->rewind(it); + if (nrdata < 0) { + ret = nrdata; + vfile->entry.lockops->put(&vfile->entry); + goto fail; + } + } + revtag = vfile->tag->rev; + + vfile->entry.lockops->put(&vfile->entry); + + /* Release the data buffer, in case we had to restart. */ + if (it->databuf) { + it->endfn(it, it->databuf); + it->databuf = NULL; + } + + /* + * Having no record to output is fine, in which case ->begin() + * shall return VFILE_SEQ_EMPTY if present. ->begin() may be + * absent, meaning that no allocation is even required to + * collect the records to output. NULL is kept for allocation + * errors in all other cases. + */ + if (ops->begin) { + XENO_BUG_ON(COBALT, ops->end == NULL); + data = ops->begin(it); + if (data == NULL) { + kfree(it); + return -ENOMEM; + } + if (data != VFILE_SEQ_EMPTY) { + it->databuf = data; + it->endfn = ops->end; + } + } else if (nrdata > 0 && vfile->datasz > 0) { + /* We have a hint for auto-allocation. */ + data = kmalloc(vfile->datasz * nrdata, GFP_KERNEL); + if (data == NULL) { + kfree(it); + return -ENOMEM; + } + it->databuf = data; + it->endfn = vfile_snapshot_free; + } + + it->nrdata = 0; + data = it->databuf; + if (data == NULL) + goto done; + + /* + * Take a snapshot of the vfile contents, redo if the revision + * tag of the scanned data set changed concurrently. 
+ */ + for (;;) { + ret = vfile->entry.lockops->get(&vfile->entry); + if (ret) + goto fail; + if (vfile->tag->rev != revtag) + goto redo; + ret = ops->next(it, data); + vfile->entry.lockops->put(&vfile->entry); + if (ret < 0) + goto fail; + if (ret == 0) + break; + if (ret != VFILE_SEQ_SKIP) { + data += vfile->datasz; + it->nrdata++; + } + } + +done: + ret = seq_open(file, &vfile_snapshot_ops); + if (ret) + goto fail; + + seq = file->private_data; + it->seq = seq; + seq->private = it; + xnvfile_nref(vfile)++; + + return 0; + +fail: + if (it->databuf) + it->endfn(it, it->databuf); + kfree(it); + + return ret; +} + +static int vfile_snapshot_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct xnvfile_snapshot_iterator *it; + + if (seq) { + it = seq->private; + if (it) { + --xnvfile_nref(it->vfile); + XENO_BUG_ON(COBALT, it->vfile->entry.refcnt < 0); + if (it->databuf) + it->endfn(it, it->databuf); + kfree(it); + } + + return seq_release(inode, file); + } + + return 0; +} + +ssize_t vfile_snapshot_write(struct file *file, const char __user *buf, + size_t size, loff_t *ppos) +{ + struct xnvfile_snapshot *vfile = + pde_data(file->f_path.dentry->d_inode); + struct xnvfile_input input; + ssize_t ret; + + if (vfile->entry.lockops) { + ret = vfile->entry.lockops->get(&vfile->entry); + if (ret) + return ret; + } + + input.u_buf = buf; + input.size = size; + input.vfile = &vfile->entry; + + ret = vfile->ops->store(&input); + + if (vfile->entry.lockops) + vfile->entry.lockops->put(&vfile->entry); + + return ret; +} + +static const DEFINE_PROC_OPS(vfile_snapshot_fops, + vfile_snapshot_open, + vfile_snapshot_release, + seq_read, + vfile_snapshot_write); + +/** + * @fn int xnvfile_init_snapshot(const char *name, struct xnvfile_snapshot *vfile, struct xnvfile_directory *parent) + * @brief Initialize a snapshot-driven vfile. + * + * @param name The name which should appear in the pseudo-filesystem, + * identifying the vfile entry. + * + * @param vfile A pointer to a vfile descriptor to initialize + * from. The following fields in this structure should be filled in + * prior to call this routine: + * + * - .privsz is the size (in bytes) of the private data area to be + * reserved in the @ref snapshot_iterator "vfile iterator". A NULL + * value indicates that no private area should be reserved. + * + * - .datasz is the size (in bytes) of a single record to be collected + * by the @ref snapshot_next "next() handler" from the @ref + * snapshot_ops "operation descriptor". + * + * - .tag is a pointer to a mandatory vfile revision tag structure + * (struct xnvfile_rev_tag). This tag will be monitored for changes by + * the vfile core while collecting data to output, so that any update + * detected will cause the current snapshot data to be dropped, and + * the collection to restart from the beginning. To this end, any + * change to the data which may be part of the collected records, + * should also invoke xnvfile_touch() on the associated tag. + * + * - entry.lockops is a pointer to a @ref vfile_lockops "lock descriptor", + * defining the lock and unlock operations for the vfile. This pointer + * may be left to NULL, in which case the operations on the nucleus + * lock (i.e. nklock) will be used internally around calls to data + * collection handlers (see @ref snapshot_ops "operation descriptor"). + * + * - .ops is a pointer to an @ref snapshot_ops "operation descriptor". 
+ * + * @param parent A pointer to a virtual directory descriptor; the + * vfile entry will be created into this directory. If NULL, the /proc + * root directory will be used. /proc/xenomai is mapped on the + * globally available @a cobalt_vfroot vdir. + * + * @return 0 is returned on success. Otherwise: + * + * - -ENOMEM is returned if the virtual file entry cannot be created + * in the /proc hierarchy. + * + * @coretags{secondary-only} + */ +int xnvfile_init_snapshot(const char *name, + struct xnvfile_snapshot *vfile, + struct xnvfile_directory *parent) +{ + struct proc_dir_entry *ppde, *pde; + int mode; + + XENO_BUG_ON(COBALT, vfile->tag == NULL); + + if (vfile->entry.lockops == NULL) + /* Defaults to nucleus lock */ + vfile->entry.lockops = &xnvfile_nucleus_lock.ops; + + if (parent == NULL) + parent = &sysroot; + + mode = vfile->ops->store ? 0644 : 0444; + ppde = parent->entry.pde; + pde = proc_create_data(name, mode, ppde, &vfile_snapshot_fops, vfile); + if (pde == NULL) + return -ENOMEM; + + vfile->entry.pde = pde; + + return 0; +} +EXPORT_SYMBOL_GPL(xnvfile_init_snapshot); + +static void *vfile_regular_start(struct seq_file *seq, loff_t *offp) +{ + struct xnvfile_regular_iterator *it = seq->private; + struct xnvfile_regular *vfile = it->vfile; + int ret; + + it->pos = *offp; + + if (vfile->entry.lockops) { + ret = vfile->entry.lockops->get(&vfile->entry); + if (ret) + return ERR_PTR(ret); + } + + /* + * If we have no begin() op, then we allow a single call only + * to ->show(), by returning the start token once. Otherwise, + * we are done. + */ + if (vfile->ops->begin == NULL) + return it->pos > 0 ? NULL : SEQ_START_TOKEN; + + return vfile->ops->begin(it); +} + +static void *vfile_regular_next(struct seq_file *seq, void *v, loff_t *offp) +{ + struct xnvfile_regular_iterator *it = seq->private; + struct xnvfile_regular *vfile = it->vfile; + void *data; + + it->pos = ++(*offp); + + if (vfile->ops->next == NULL) + return NULL; + + data = vfile->ops->next(it); + if (data == NULL) + return NULL; + + return data; +} + +static void vfile_regular_stop(struct seq_file *seq, void *v) +{ + struct xnvfile_regular_iterator *it = seq->private; + struct xnvfile_regular *vfile = it->vfile; + + if (vfile->entry.lockops) + vfile->entry.lockops->put(&vfile->entry); + + if (vfile->ops->end) + vfile->ops->end(it); +} + +static int vfile_regular_show(struct seq_file *seq, void *v) +{ + struct xnvfile_regular_iterator *it = seq->private; + struct xnvfile_regular *vfile = it->vfile; + void *data = v == SEQ_START_TOKEN ? NULL : v; + int ret; + + ret = vfile->ops->show(it, data); + + return ret == VFILE_SEQ_SKIP ? 
SEQ_SKIP : ret; +} + +static struct seq_operations vfile_regular_ops = { + .start = vfile_regular_start, + .next = vfile_regular_next, + .stop = vfile_regular_stop, + .show = vfile_regular_show +}; + +static int vfile_regular_open(struct inode *inode, struct file *file) +{ + struct xnvfile_regular *vfile = pde_data(inode); + struct xnvfile_regular_ops *ops = vfile->ops; + struct xnvfile_regular_iterator *it; + struct seq_file *seq; + int ret; + + if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0) + return -EBUSY; + + if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL) + return -EACCES; + + if ((file->f_mode & FMODE_READ) == 0) { + file->private_data = NULL; + return 0; + } + + it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL); + if (it == NULL) + return -ENOMEM; + + it->vfile = vfile; + it->pos = -1; + xnvfile_file(vfile) = file; + + if (ops->rewind) { + ret = ops->rewind(it); + if (ret) { + fail: + kfree(it); + return ret; + } + } + + ret = seq_open(file, &vfile_regular_ops); + if (ret) + goto fail; + + seq = file->private_data; + it->seq = seq; + seq->private = it; + xnvfile_nref(vfile)++; + + return 0; +} + +static int vfile_regular_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct xnvfile_regular_iterator *it; + + if (seq) { + it = seq->private; + if (it) { + --xnvfile_nref(it->vfile); + XENO_BUG_ON(COBALT, xnvfile_nref(it->vfile) < 0); + kfree(it); + } + + return seq_release(inode, file); + } + + return 0; +} + +ssize_t vfile_regular_write(struct file *file, const char __user *buf, + size_t size, loff_t *ppos) +{ + struct xnvfile_regular *vfile = + pde_data(file->f_path.dentry->d_inode); + struct xnvfile_input input; + ssize_t ret; + + if (vfile->entry.lockops) { + ret = vfile->entry.lockops->get(&vfile->entry); + if (ret) + return ret; + } + + input.u_buf = buf; + input.size = size; + input.vfile = &vfile->entry; + + ret = vfile->ops->store(&input); + + if (vfile->entry.lockops) + vfile->entry.lockops->put(&vfile->entry); + + return ret; +} + +static const DEFINE_PROC_OPS(vfile_regular_fops, + vfile_regular_open, + vfile_regular_release, + seq_read, + vfile_regular_write); + +/** + * @fn int xnvfile_init_regular(const char *name, struct xnvfile_regular *vfile, struct xnvfile_directory *parent) + * @brief Initialize a regular vfile. + * + * @param name The name which should appear in the pseudo-filesystem, + * identifying the vfile entry. + * + * @param vfile A pointer to a vfile descriptor to initialize + * from. The following fields in this structure should be filled in + * prior to call this routine: + * + * - .privsz is the size (in bytes) of the private data area to be + * reserved in the @ref regular_iterator "vfile iterator". A NULL + * value indicates that no private area should be reserved. + * + * - entry.lockops is a pointer to a @ref vfile_lockops "locking + * descriptor", defining the lock and unlock operations for the + * vfile. This pointer may be left to NULL, in which case no + * locking will be applied. + * + * - .ops is a pointer to an @ref regular_ops "operation descriptor". + * + * @param parent A pointer to a virtual directory descriptor; the + * vfile entry will be created into this directory. If NULL, the /proc + * root directory will be used. /proc/xenomai is mapped on the + * globally available @a cobalt_vfroot vdir. + * + * @return 0 is returned on success. Otherwise: + * + * - -ENOMEM is returned if the virtual file entry cannot be created + * in the /proc hierarchy. 
+ * + * @coretags{secondary-only} + */ +int xnvfile_init_regular(const char *name, + struct xnvfile_regular *vfile, + struct xnvfile_directory *parent) +{ + struct proc_dir_entry *ppde, *pde; + int mode; + + if (parent == NULL) + parent = &sysroot; + + mode = vfile->ops->store ? 0644 : 0444; + ppde = parent->entry.pde; + pde = proc_create_data(name, mode, ppde, &vfile_regular_fops, vfile); + if (pde == NULL) + return -ENOMEM; + + vfile->entry.pde = pde; + + return 0; +} +EXPORT_SYMBOL_GPL(xnvfile_init_regular); + +/** + * @fn int xnvfile_init_dir(const char *name, struct xnvfile_directory *vdir, struct xnvfile_directory *parent) + * @brief Initialize a virtual directory entry. + * + * @param name The name which should appear in the pseudo-filesystem, + * identifying the vdir entry. + * + * @param vdir A pointer to the virtual directory descriptor to + * initialize. + * + * @param parent A pointer to a virtual directory descriptor standing + * for the parent directory of the new vdir. If NULL, the /proc root + * directory will be used. /proc/xenomai is mapped on the globally + * available @a cobalt_vfroot vdir. + * + * @return 0 is returned on success. Otherwise: + * + * - -ENOMEM is returned if the virtual directory entry cannot be + * created in the /proc hierarchy. + * + * @coretags{secondary-only} + */ +int xnvfile_init_dir(const char *name, + struct xnvfile_directory *vdir, + struct xnvfile_directory *parent) +{ + struct proc_dir_entry *ppde, *pde; + + if (parent == NULL) + parent = &sysroot; + + ppde = parent->entry.pde; + pde = proc_mkdir(name, ppde); + if (pde == NULL) + return -ENOMEM; + + vdir->entry.pde = pde; + vdir->entry.lockops = NULL; + vdir->entry.private = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(xnvfile_init_dir); + +/** + * @fn int xnvfile_init_link(const char *from, const char *to, struct xnvfile_link *vlink, struct xnvfile_directory *parent) + * @brief Initialize a virtual link entry. + * + * @param from The name which should appear in the pseudo-filesystem, + * identifying the vlink entry. + * + * @param to The target file name which should be referred to + * symbolically by @a name. + * + * @param vlink A pointer to the virtual link descriptor to + * initialize. + * + * @param parent A pointer to a virtual directory descriptor standing + * for the parent directory of the new vlink. If NULL, the /proc root + * directory will be used. /proc/xenomai is mapped on the globally + * available @a cobalt_vfroot vdir. + * + * @return 0 is returned on success. Otherwise: + * + * - -ENOMEM is returned if the virtual link entry cannot be created + * in the /proc hierarchy. + * + * @coretags{secondary-only} + */ +int xnvfile_init_link(const char *from, + const char *to, + struct xnvfile_link *vlink, + struct xnvfile_directory *parent) +{ + struct proc_dir_entry *ppde, *pde; + + if (parent == NULL) + parent = &sysroot; + + ppde = parent->entry.pde; + pde = proc_symlink(from, ppde, to); + if (pde == NULL) + return -ENOMEM; + + vlink->entry.pde = pde; + vlink->entry.lockops = NULL; + vlink->entry.private = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(xnvfile_init_link); + +/** + * @fn void xnvfile_destroy(struct xnvfile *vfile) + * @brief Removes a virtual file entry. + * + * @param vfile A pointer to the virtual file descriptor to + * remove. 
+ * + * @coretags{secondary-only} + */ +void xnvfile_destroy(struct xnvfile *vfile) +{ + proc_remove(vfile->pde); +} +EXPORT_SYMBOL_GPL(xnvfile_destroy); + +/** + * @fn ssize_t xnvfile_get_blob(struct xnvfile_input *input, void *data, size_t size) + * @brief Read in a data bulk written to the vfile. + * + * When writing to a vfile, the associated store() handler from the + * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store + * "regular vfile" is called, with a single argument describing the + * input data. xnvfile_get_blob() retrieves this data as an untyped + * binary blob, and copies it back to the caller's buffer. + * + * @param input A pointer to the input descriptor passed to the + * store() handler. + * + * @param data The address of the destination buffer to copy the input + * data to. + * + * @param size The maximum number of bytes to copy to the destination + * buffer. If @a size is larger than the actual data size, the input + * is truncated to @a size. + * + * @return The number of bytes read and copied to the destination + * buffer upon success. Otherwise, a negative error code is returned: + * + * - -EFAULT indicates an invalid source buffer address. + * + * @coretags{secondary-only} + */ +ssize_t xnvfile_get_blob(struct xnvfile_input *input, + void *data, size_t size) +{ + ssize_t nbytes = input->size; + + if (nbytes > size) + nbytes = size; + + if (nbytes > 0 && copy_from_user(data, input->u_buf, nbytes)) + return -EFAULT; + + return nbytes; +} +EXPORT_SYMBOL_GPL(xnvfile_get_blob); + +/** + * @fn ssize_t xnvfile_get_string(struct xnvfile_input *input, char *s, size_t maxlen) + * @brief Read in a C-string written to the vfile. + * + * When writing to a vfile, the associated store() handler from the + * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store + * "regular vfile" is called, with a single argument describing the + * input data. xnvfile_get_string() retrieves this data as a + * null-terminated character string, and copies it back to the + * caller's buffer. + * + * @param input A pointer to the input descriptor passed to the + * store() handler. + * + * @param s The address of the destination string buffer to copy the + * input data to. + * + * @param maxlen The maximum number of bytes to copy to the + * destination buffer, including the ending null character. If @a + * maxlen is larger than the actual string length, the input is + * truncated to @a maxlen. + * + * @return The number of characters read upon success. Otherwise, a + * negative error code is returned: + * + * - -EFAULT indicates an invalid source buffer address. + * + * @coretags{secondary-only} + */ +ssize_t xnvfile_get_string(struct xnvfile_input *input, + char *s, size_t maxlen) +{ + ssize_t nbytes, eol; + + if (maxlen < 1) + return -EINVAL; + + nbytes = xnvfile_get_blob(input, s, maxlen - 1); + if (nbytes < 0) + return nbytes; + + eol = nbytes; + if (eol > 0 && s[eol - 1] == '\n') + eol--; + + s[eol] = '\0'; + + return nbytes; +} +EXPORT_SYMBOL_GPL(xnvfile_get_string); + +/** + * @fn ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp) + * @brief Evaluate the string written to the vfile as a long integer. + * + * When writing to a vfile, the associated store() handler from the + * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store + * "regular vfile" is called, with a single argument describing the + * input data. 
xnvfile_get_integer() retrieves and interprets this + * data as a long integer, and copies the resulting value back to @a + * valp. + * + * The long integer can be expressed in decimal, octal or hexadecimal + * bases depending on the prefix found. + * + * @param input A pointer to the input descriptor passed to the + * store() handler. + * + * @param valp The address of a long integer variable to receive the + * value. + * + * @return The number of characters read while evaluating the input as + * a long integer upon success. Otherwise, a negative error code is + * returned: + * + * - -EINVAL indicates a parse error on the input stream; the written + * text cannot be evaluated as a long integer. + * + * - -EFAULT indicates an invalid source buffer address. + * + * @coretags{secondary-only} + */ +ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp) +{ + char *end, buf[32]; + ssize_t nbytes; + long val; + + nbytes = xnvfile_get_blob(input, buf, sizeof(buf) - 1); + if (nbytes < 0) + return nbytes; + + if (nbytes == 0) + return -EINVAL; + + buf[nbytes] = '\0'; + val = simple_strtol(buf, &end, 0); + + if (*end != '\0' && !isspace(*end)) + return -EINVAL; + + *valp = val; + + return nbytes; +} +EXPORT_SYMBOL_GPL(xnvfile_get_integer); + +int __vfile_hostlock_get(struct xnvfile *vfile) +{ + struct xnvfile_hostlock_class *lc; + + lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops); + mutex_lock(&lc->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(__vfile_hostlock_get); + +void __vfile_hostlock_put(struct xnvfile *vfile) +{ + struct xnvfile_hostlock_class *lc; + + lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops); + mutex_unlock(&lc->mutex); +} +EXPORT_SYMBOL_GPL(__vfile_hostlock_put); + +static int __vfile_nklock_get(struct xnvfile *vfile) +{ + struct xnvfile_nklock_class *lc; + + lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops); + xnlock_get_irqsave(&nklock, lc->s); + + return 0; +} + +static void __vfile_nklock_put(struct xnvfile *vfile) +{ + struct xnvfile_nklock_class *lc; + + lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops); + xnlock_put_irqrestore(&nklock, lc->s); +} + +struct xnvfile_nklock_class xnvfile_nucleus_lock = { + .ops = { + .get = __vfile_nklock_get, + .put = __vfile_nklock_put, + }, +}; + +int __init xnvfile_init_root(void) +{ + struct xnvfile_directory *vdir = &cobalt_vfroot; + struct proc_dir_entry *pde; + + pde = proc_mkdir("xenomai", NULL); + if (pde == NULL) + return -ENOMEM; + + vdir->entry.pde = pde; + vdir->entry.lockops = NULL; + vdir->entry.private = NULL; + + return 0; +} + +void xnvfile_destroy_root(void) +{ + cobalt_vfroot.entry.pde = NULL; + remove_proc_entry("xenomai", NULL); +} + +/** @} */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/Kconfig new file mode 100644 index 0000000..197a48e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/Kconfig @@ -0,0 +1,35 @@ +menu "Drivers" + +config XENO_OPT_RTDM_COMPAT_DEVNODE + bool "Enable legacy pathnames for named RTDM devices" + default y + help + This compatibility option allows applications to open named + RTDM devices using the legacy naming scheme, i.e. + + fd = open("devname", ...); + or + fd = open("/dev/devname", ...); + + When such a request is received by RTDM, a warning message is + issued to the kernel log whenever XENO_OPT_DEBUG_LEGACY is + also enabled in the kernel configuration. 
+
+	  Applications should open named devices via their actual device
+	  nodes instead, i.e.
+
+	  fd = open("/dev/rtdm/devname", ...);
+
+source "drivers/xenomai/autotune/Kconfig"
+source "drivers/xenomai/serial/Kconfig"
+source "drivers/xenomai/testing/Kconfig"
+source "drivers/xenomai/can/Kconfig"
+source "drivers/xenomai/net/Kconfig"
+source "drivers/xenomai/analogy/Kconfig"
+source "drivers/xenomai/ipc/Kconfig"
+source "drivers/xenomai/udd/Kconfig"
+source "drivers/xenomai/gpio/Kconfig"
+source "drivers/xenomai/gpiopwm/Kconfig"
+source "drivers/xenomai/spi/Kconfig"
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/Makefile
new file mode 100644
index 0000000..b8fe1b3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XENOMAI) += autotune/ serial/ testing/ can/ net/ analogy/ ipc/ udd/ gpio/ gpiopwm/ spi/
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig
new file mode 100644
index 0000000..858762b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig
@@ -0,0 +1,56 @@
+menu "ANALOGY drivers"
+
+config XENO_DRIVERS_ANALOGY
+	tristate "ANALOGY interface"
+	help
+
+	ANALOGY is a framework aimed at supporting data acquisition
+	devices.
+
+config XENO_DRIVERS_ANALOGY_DEBUG
+	depends on XENO_DRIVERS_ANALOGY
+	bool "Analogy debug trace"
+	default n
+	help
+
+	Enable debugging traces in Analogy, to monitor the behaviour
+	of the Analogy core and drivers.
+
+config XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+	depends on XENO_DRIVERS_ANALOGY_DEBUG
+	bool "Analogy debug ftrace"
+	default n
+	help
+
+	Route the Analogy a4l_dbg and a4l_info statements to
+	/sys/kernel/debug/
+
+config XENO_DRIVERS_ANALOGY_DEBUG_LEVEL
+	depends on XENO_DRIVERS_ANALOGY_DEBUG
+	int "Analogy core debug level threshold"
+	default 0
+	help
+
+	Define the level above which the debugging traces will not be
+	displayed.
+
+	WARNING: this threshold only applies to the Analogy core; it
+	does not affect the drivers.
+
+config XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL
+	depends on XENO_DRIVERS_ANALOGY_DEBUG
+	int "Analogy driver debug level threshold"
+	default 0
+	help
+
+	Define the level above which the debugging traces will not be
+	displayed.
+
+	WARNING: this threshold only applies to the Analogy drivers;
+	it does not affect the core.
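+
+# For illustration only: a hypothetical .config excerpt combining the
+# options above to get core-side traces up to level 2 while keeping
+# the driver-side traces quiet:
+#
+#   CONFIG_XENO_DRIVERS_ANALOGY=m
+#   CONFIG_XENO_DRIVERS_ANALOGY_DEBUG=y
+#   CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_LEVEL=2
+#   CONFIG_XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL=0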
+ +source "drivers/xenomai/analogy/testing/Kconfig" +source "drivers/xenomai/analogy/intel/Kconfig" +source "drivers/xenomai/analogy/national_instruments/Kconfig" +source "drivers/xenomai/analogy/sensoray/Kconfig" + +endmenu diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile new file mode 100644 index 0000000..8dcb7e7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile @@ -0,0 +1,16 @@ + +ccflags-y += -I$(srctree)/drivers/xenomai/analogy + +obj-$(CONFIG_XENO_DRIVERS_ANALOGY) += xeno_analogy.o testing/ intel/ national_instruments/ sensoray/ + +xeno_analogy-y := \ + buffer.o \ + command.o \ + device.o \ + driver.o \ + driver_facilities.o \ + instruction.o \ + rtdm_helpers.o \ + subdevice.o \ + transfer.o \ + rtdm_interface.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c new file mode 100644 index 0000000..df22894 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c @@ -0,0 +1,1145 @@ +/* + * Analogy for Linux, buffer related features + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/vmalloc.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Initialization functions (init, alloc, free) --- */
+
+/* The buffer characteristics are very close to the Comedi ones: the
+   buffer is allocated with vmalloc() and the physical addresses of
+   all the pages which compose the virtual buffer are held in a table */
+
+void a4l_free_buffer(struct a4l_buffer * buf_desc)
+{
+	__a4l_dbg(1, core_dbg, "buf=%p buf->buf=%p\n", buf_desc, buf_desc->buf);
+
+	if (buf_desc->pg_list != NULL) {
+		rtdm_free(buf_desc->pg_list);
+		buf_desc->pg_list = NULL;
+	}
+
+	if (buf_desc->buf != NULL) {
+		char *vaddr, *vabase = buf_desc->buf;
+		for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+		     vaddr += PAGE_SIZE)
+			ClearPageReserved(vmalloc_to_page(vaddr));
+		vfree(buf_desc->buf);
+		buf_desc->buf = NULL;
+	}
+}
+
+int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size)
+{
+	int ret = 0;
+	char *vaddr, *vabase;
+
+	buf_desc->size = buf_size;
+	buf_desc->size = PAGE_ALIGN(buf_desc->size);
+
+	buf_desc->buf = vmalloc_32(buf_desc->size);
+	if (buf_desc->buf == NULL) {
+		ret = -ENOMEM;
+		goto out_virt_contig_alloc;
+	}
+
+	vabase = buf_desc->buf;
+
+	for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+	     vaddr += PAGE_SIZE)
+		SetPageReserved(vmalloc_to_page(vaddr));
+
+	buf_desc->pg_list = rtdm_malloc(((buf_desc->size) >> PAGE_SHIFT) *
+					sizeof(unsigned long));
+	if (buf_desc->pg_list == NULL) {
+		ret = -ENOMEM;
+		goto out_virt_contig_alloc;
+	}
+
+	for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+	     vaddr += PAGE_SIZE)
+		buf_desc->pg_list[(vaddr - vabase) >> PAGE_SHIFT] =
+			(unsigned long) page_to_phys(vmalloc_to_page(vaddr));
+
+	__a4l_dbg(1, core_dbg, "buf=%p buf->buf=%p\n", buf_desc, buf_desc->buf);
+
+out_virt_contig_alloc:
+	if (ret != 0)
+		a4l_free_buffer(buf_desc);
+
+	return ret;
+}
+
+static void a4l_reinit_buffer(struct a4l_buffer *buf_desc)
+{
+	/* No command to process yet */
+	buf_desc->cur_cmd = NULL;
+
+	/* No more (or not yet) linked with a subdevice */
+	buf_desc->subd = NULL;
+
+	/* Initializes counts and flags */
+	buf_desc->end_count = 0;
+	buf_desc->prd_count = 0;
+	buf_desc->cns_count = 0;
+	buf_desc->tmp_count = 0;
+	buf_desc->mng_count = 0;
+
+	/* Flush pending events */
+	buf_desc->flags = 0;
+	a4l_flush_sync(&buf_desc->sync);
+}
+
+void a4l_init_buffer(struct a4l_buffer *buf_desc)
+{
+	memset(buf_desc, 0, sizeof(struct a4l_buffer));
+	a4l_init_sync(&buf_desc->sync);
+	a4l_reinit_buffer(buf_desc);
+}
+
+void a4l_cleanup_buffer(struct a4l_buffer *buf_desc)
+{
+	a4l_cleanup_sync(&buf_desc->sync);
+}
+
+int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_buffer *buf_desc = cxt->buffer;
+	int i;
+
+	/* Retrieve the related subdevice */
+	buf_desc->subd = a4l_get_subd(cxt->dev, cmd->idx_subd);
+	if (buf_desc->subd == NULL) {
+		__a4l_err("a4l_setup_buffer: subdevice index "
+			  "out of range (%d)\n", cmd->idx_subd);
+		return -EINVAL;
+	}
+
+	if (test_and_set_bit(A4L_SUBD_BUSY_NR, &buf_desc->subd->status)) {
+		__a4l_err("a4l_setup_buffer: subdevice %d already busy\n",
+			  cmd->idx_subd);
+		return -EBUSY;
+	}
+
+	/* Checks if the transfer system has to work in bulk mode */
+	if (cmd->flags & A4L_CMD_BULK)
+		set_bit(A4L_BUF_BULK_NR, &buf_desc->flags);
+
+	/* Sets the working command */
+	buf_desc->cur_cmd = cmd;
+
+	/* Link the subdevice with the context's buffer */
+	buf_desc->subd->buf = buf_desc;
+
+	/* Computes the count
to reach, if need be */ + if (cmd->stop_src == TRIG_COUNT) { + for (i = 0; i < cmd->nb_chan; i++) { + struct a4l_channel *chft; + chft = a4l_get_chfeat(buf_desc->subd, + CR_CHAN(cmd->chan_descs[i])); + buf_desc->end_count += chft->nb_bits / 8; + } + buf_desc->end_count *= cmd->stop_arg; + } + + __a4l_dbg(1, core_dbg, "end_count=%lu\n", buf_desc->end_count); + + return 0; +} + +void a4l_cancel_buffer(struct a4l_device_context *cxt) +{ + struct a4l_buffer *buf_desc = cxt->buffer; + struct a4l_subdevice *subd = buf_desc->subd; + + if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return; + + /* If a "cancel" function is registered, call it + (Note: this function is called before having checked + if a command is under progress; we consider that + the "cancel" function can be used as as to (re)initialize + some component) */ + if (subd->cancel != NULL) + subd->cancel(subd); + + if (buf_desc->cur_cmd != NULL) { + a4l_free_cmddesc(buf_desc->cur_cmd); + rtdm_free(buf_desc->cur_cmd); + buf_desc->cur_cmd = NULL; + } + + a4l_reinit_buffer(buf_desc); + + clear_bit(A4L_SUBD_BUSY_NR, &subd->status); + subd->buf = NULL; +} + +/* --- Munge related function --- */ + +int a4l_get_chan(struct a4l_subdevice *subd) +{ + int i, j, tmp_count, tmp_size = 0; + struct a4l_cmd_desc *cmd; + + cmd = a4l_get_cmd(subd); + if (!cmd) + return -EINVAL; + + /* There is no need to check the channel idx, + it has already been controlled in command_test */ + + /* We assume channels can have different sizes; + so, we have to compute the global size of the channels + in this command... */ + for (i = 0; i < cmd->nb_chan; i++) { + j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? + CR_CHAN(cmd->chan_descs[i]) : 0; + tmp_size += subd->chan_desc->chans[j].nb_bits; + } + + /* Translation bits -> bytes */ + tmp_size /= 8; + + tmp_count = subd->buf->mng_count % tmp_size; + + /* Translation bytes -> bits */ + tmp_count *= 8; + + /* ...and find the channel the last munged sample + was related with */ + for (i = 0; tmp_count > 0 && i < cmd->nb_chan; i++) { + j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? 
+ CR_CHAN(cmd->chan_descs[i]) : 0; + tmp_count -= subd->chan_desc->chans[j].nb_bits; + } + + if (tmp_count == 0) + return i; + else + return -EINVAL; +} + +/* --- Transfer / copy functions --- */ + +/* The following functions are explained in the Doxygen section + "Buffer management services" in driver_facilities.c */ + +int a4l_buf_prepare_absput(struct a4l_subdevice *subd, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_input(subd)) + return -EINVAL; + + return __pre_abs_put(buf, count); +} + + +int a4l_buf_commit_absput(struct a4l_subdevice *subd, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_input(subd)) + return -EINVAL; + + return __abs_put(buf, count); +} + +int a4l_buf_prepare_put(struct a4l_subdevice *subd, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_input(subd)) + return -EINVAL; + + return __pre_put(buf, count); +} + +int a4l_buf_commit_put(struct a4l_subdevice *subd, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_input(subd)) + return -EINVAL; + + return __put(buf, count); +} + +int a4l_buf_put(struct a4l_subdevice *subd, void *bufdata, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + int err; + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_input(subd)) + return -EINVAL; + + if (__count_to_put(buf) < count) + return -EAGAIN; + + err = __produce(NULL, buf, bufdata, count); + if (err < 0) + return err; + + err = __put(buf, count); + + return err; +} + +int a4l_buf_prepare_absget(struct a4l_subdevice *subd, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_output(subd)) + return -EINVAL; + + return __pre_abs_get(buf, count); +} + +int a4l_buf_commit_absget(struct a4l_subdevice *subd, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_output(subd)) + return -EINVAL; + + return __abs_get(buf, count); +} + +int a4l_buf_prepare_get(struct a4l_subdevice *subd, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_output(subd)) + return -EINVAL; + + return __pre_get(buf, count); +} + +int a4l_buf_commit_get(struct a4l_subdevice *subd, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + + /* Basic checkings */ + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_output(subd)) + return -EINVAL; + + return __get(buf, count); +} + +int a4l_buf_get(struct a4l_subdevice *subd, void *bufdata, unsigned long count) +{ + struct a4l_buffer *buf = subd->buf; + int err; + + /* Basic checkings */ + + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (!a4l_subd_is_output(subd)) + return -EINVAL; + + if (__count_to_get(buf) < count) + return -EAGAIN; + + /* Update the counter */ + err = __consume(NULL, buf, bufdata, count); + if (err < 0) + return err; + + /* Perform the 
transfer */ + err = __get(buf, count); + + return err; +} + +int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts) +{ + struct a4l_buffer *buf = subd->buf; + int tmp; + unsigned long wake = 0, count = ULONG_MAX; + + /* Warning: here, there may be a condition race : the cancel + function is called by the user side and a4l_buf_evt and all + the a4l_buf_... functions are called by the kernel + side. Nonetheless, the driver should be in charge of such + race conditions, not the framework */ + + /* Basic checking */ + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + /* Here we save the data count available for the user side */ + if (evts == 0) { + count = a4l_subd_is_input(subd) ? + __count_to_get(buf) : __count_to_put(buf); + wake = __count_to_end(buf) < buf->wake_count ? + __count_to_end(buf) : buf->wake_count; + } else { + /* Even if it is a little more complex, atomic + operations are used so as to prevent any kind of + corner case */ + while ((tmp = ffs(evts) - 1) != -1) { + set_bit(tmp, &buf->flags); + clear_bit(tmp, &evts); + } + } + + if (count >= wake) + /* Notify the user-space side */ + a4l_signal_sync(&buf->sync); + + return 0; +} + +unsigned long a4l_buf_count(struct a4l_subdevice *subd) +{ + struct a4l_buffer *buf = subd->buf; + unsigned long ret = 0; + + /* Basic checking */ + if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) + return -ENOENT; + + if (a4l_subd_is_input(subd)) + ret = __count_to_put(buf); + else if (a4l_subd_is_output(subd)) + ret = __count_to_get(buf); + + return ret; +} + +/* --- Mmap functions --- */ + +void a4l_map(struct vm_area_struct *area) +{ + unsigned long *status = (unsigned long *)area->vm_private_data; + set_bit(A4L_BUF_MAP_NR, status); +} + +void a4l_unmap(struct vm_area_struct *area) +{ + unsigned long *status = (unsigned long *)area->vm_private_data; + clear_bit(A4L_BUF_MAP_NR, status); +} + +static struct vm_operations_struct a4l_vm_ops = { + .open = a4l_map, + .close = a4l_unmap, +}; + +int a4l_ioctl_mmap(struct a4l_device_context *cxt, void *arg) +{ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + a4l_mmap_t map_cfg; + struct a4l_device *dev; + struct a4l_buffer *buf; + int ret; + + /* The mmap operation cannot be performed in a + real-time context */ + if (rtdm_in_rt_context()) { + return -ENOSYS; + } + + dev = a4l_get_dev(cxt); + buf = cxt->buffer; + + /* Basic checkings */ + + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_ioctl_mmap: cannot mmap on " + "an unattached device\n"); + return -EINVAL; + } + + if (test_bit(A4L_BUF_MAP_NR, &buf->flags)) { + __a4l_err("a4l_ioctl_mmap: buffer already mapped\n"); + return -EBUSY; + } + + if (rtdm_safe_copy_from_user(fd, + &map_cfg, arg, sizeof(a4l_mmap_t)) != 0) + return -EFAULT; + + /* Check the size to be mapped */ + if ((map_cfg.size & ~(PAGE_MASK)) != 0 || map_cfg.size > buf->size) + return -EFAULT; + + /* All the magic is here */ + ret = rtdm_mmap_to_user(fd, + buf->buf, + map_cfg.size, + PROT_READ | PROT_WRITE, + &map_cfg.ptr, &a4l_vm_ops, &buf->flags); + + if (ret < 0) { + __a4l_err("a4l_ioctl_mmap: internal error, " + "rtdm_mmap_to_user failed (err=%d)\n", ret); + return ret; + } + + return rtdm_safe_copy_to_user(fd, + arg, &map_cfg, sizeof(a4l_mmap_t)); +} + +/* --- IOCTL / FOPS functions --- */ + +int a4l_ioctl_cancel(struct a4l_device_context * cxt, void *arg) +{ + unsigned int idx_subd = (unsigned long)arg; + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_subdevice *subd; + + /* Basically check the device */ + if 
(!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_ioctl_cancel: operation not supported on " + "an unattached device\n"); + return -EINVAL; + } + + if (cxt->buffer->subd == NULL) { + __a4l_err("a4l_ioctl_cancel: " + "no acquisition to cancel on this context\n"); + return -EINVAL; + } + + if (idx_subd >= dev->transfer.nb_subd) { + __a4l_err("a4l_ioctl_cancel: bad subdevice index\n"); + return -EINVAL; + } + + subd = dev->transfer.subds[idx_subd]; + + if (subd != cxt->buffer->subd) { + __a4l_err("a4l_ioctl_cancel: " + "current context works on another subdevice " + "(%d!=%d)\n", cxt->buffer->subd->idx, subd->idx); + return -EINVAL; + } + + a4l_cancel_buffer(cxt); + return 0; +} + +/* The ioctl BUFCFG is only useful for changing the size of the + asynchronous buffer. + (BUFCFG = free of the current buffer + allocation of a new one) */ + +int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg) +{ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_buffer *buf = cxt->buffer; + struct a4l_subdevice *subd = buf->subd; + a4l_bufcfg_t buf_cfg; + + /* As Linux API is used to allocate a virtual buffer, + the calling process must not be in primary mode */ + if (rtdm_in_rt_context()) { + return -ENOSYS; + } + + /* Basic checking */ + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_ioctl_bufcfg: unattached device\n"); + return -EINVAL; + } + + if (rtdm_safe_copy_from_user(fd, + &buf_cfg, + arg, sizeof(a4l_bufcfg_t)) != 0) + return -EFAULT; + + if (buf_cfg.buf_size > A4L_BUF_MAXSIZE) { + __a4l_err("a4l_ioctl_bufcfg: buffer size too big (<=16MB)\n"); + return -EINVAL; + } + + if (buf_cfg.idx_subd == A4L_BUF_DEFMAGIC) { + cxt->dev->transfer.default_bufsize = buf_cfg.buf_size; + return 0; + } + + if (subd && test_bit(A4L_SUBD_BUSY_NR, &subd->status)) { + __a4l_err("a4l_ioctl_bufcfg: acquisition in progress\n"); + return -EBUSY; + } + + if (test_bit(A4L_BUF_MAP, &buf->flags)) { + __a4l_err("a4l_ioctl_bufcfg: please unmap before " + "configuring buffer\n"); + return -EPERM; + } + + /* Free the buffer... */ + a4l_free_buffer(buf); + + /* ...to reallocate it */ + return a4l_alloc_buffer(buf, buf_cfg.buf_size); +} + +/* The ioctl BUFCFG2 allows the user space process to define the + minimal amount of data which should trigger a wake-up. If the ABI + could be broken, this facility would be handled by the original + BUFCFG ioctl. At the next major release, this ioctl will vanish. 
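+
+   For illustration only, a sketch of the matching user-side calls;
+   this assumes the libanalogy helpers a4l_open() and
+   a4l_set_wakesize() (the device node name below is made up):
+
+	a4l_desc_t dsc;
+
+	a4l_open(&dsc, "analogy0");
+	a4l_set_wakesize(&dsc, 512);
+
+   After this, a blocking read should return once at least 512 bytes
+   are available in the asynchronous buffer.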
*/ + +int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg) +{ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_buffer *buf = cxt->buffer; + a4l_bufcfg2_t buf_cfg; + + /* Basic checking */ + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_ioctl_bufcfg2: unattached device\n"); + return -EINVAL; + } + + if (rtdm_safe_copy_from_user(fd, + &buf_cfg, + arg, sizeof(a4l_bufcfg2_t)) != 0) + return -EFAULT; + + if (buf_cfg.wake_count > buf->size) { + __a4l_err("a4l_ioctl_bufcfg2: " + "wake-up threshold too big (> buffer size: %lu)\n", + buf->size); + return -EINVAL; + } + + buf->wake_count = buf_cfg.wake_count; + + return 0; +} + +/* The BUFINFO ioctl provides two basic roles: + - tell the user app the size of the asynchronous buffer + - display the read/write counters (how many bytes to read/write) */ + +int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg) +{ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_buffer *buf = cxt->buffer; + struct a4l_subdevice *subd = buf->subd; + a4l_bufinfo_t info; + + unsigned long tmp_cnt; + int ret; + + if (!rtdm_in_rt_context() && rtdm_rt_capable(fd)) + return -ENOSYS; + + /* Basic checking */ + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_ioctl_bufinfo: unattached device\n"); + return -EINVAL; + } + + if (rtdm_safe_copy_from_user(fd, + &info, arg, sizeof(a4l_bufinfo_t)) != 0) + return -EFAULT; + + + /* If a transfer is not occuring, simply return buffer + informations, otherwise make the transfer progress */ + if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) { + info.rw_count = 0; + goto a4l_ioctl_bufinfo_out; + } + + ret = __handle_event(buf); + + if (a4l_subd_is_input(subd)) { + + /* Updates consume count if rw_count is not null */ + if (info.rw_count != 0) + buf->cns_count += info.rw_count; + + /* Retrieves the data amount to read */ + tmp_cnt = info.rw_count = __count_to_get(buf); + + __a4l_dbg(1, core_dbg, "count to read=%lu\n", tmp_cnt); + + if ((ret < 0 && ret != -ENOENT) || + (ret == -ENOENT && tmp_cnt == 0)) { + a4l_cancel_buffer(cxt); + return ret; + } + } else if (a4l_subd_is_output(subd)) { + + if (ret < 0) { + a4l_cancel_buffer(cxt); + if (info.rw_count != 0) + return ret; + } + + /* If rw_count is not null, + there is something to write / munge */ + if (info.rw_count != 0 && info.rw_count <= __count_to_put(buf)) { + + /* Updates the production pointer */ + buf->prd_count += info.rw_count; + + /* Sets the munge count */ + tmp_cnt = info.rw_count; + } else + tmp_cnt = 0; + + /* Retrieves the data amount which is writable */ + info.rw_count = __count_to_put(buf); + + __a4l_dbg(1, core_dbg, " count to write=%lu\n", info.rw_count); + + } else { + __a4l_err("a4l_ioctl_bufinfo: inappropriate subdevice\n"); + return -EINVAL; + } + + /* Performs the munge if need be */ + if (subd->munge != NULL) { + + /* Call the munge callback */ + __munge(subd, subd->munge, buf, tmp_cnt); + + /* Updates munge count */ + buf->mng_count += tmp_cnt; + } + +a4l_ioctl_bufinfo_out: + + /* Sets the buffer size */ + info.buf_size = buf->size; + + /* Sends the structure back to user space */ + if (rtdm_safe_copy_to_user(fd, + arg, &info, sizeof(a4l_bufinfo_t)) != 0) + return -EFAULT; + + return 0; +} + +/* The ioctl BUFINFO2 tells the user application the minimal amount of +data which should trigger a wake-up. 
If the ABI could be broken, this +facility would be handled by the original BUFINFO ioctl. At the next +major release, this ioctl will vanish. */ + +int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg) +{ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_buffer *buf = cxt->buffer; + a4l_bufcfg2_t buf_cfg; + + /* Basic checking */ + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_ioctl_bufcfg2: unattached device\n"); + return -EINVAL; + } + + buf_cfg.wake_count = buf->wake_count; + + if (rtdm_safe_copy_to_user(fd, + arg, &buf_cfg, sizeof(a4l_bufcfg2_t)) != 0) + return -EFAULT; + + return 0; +} + +/* The function a4l_read_buffer can be considered as the kernel entry + point of the RTDM syscall read. This syscall is supposed to be used + only during asynchronous acquisitions */ +ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes) +{ + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_buffer *buf = cxt->buffer; + struct a4l_subdevice *subd = buf->subd; + ssize_t count = 0; + + /* Basic checkings */ + + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_read: unattached device\n"); + return -EINVAL; + } + + if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) { + __a4l_err("a4l_read: idle subdevice on this context\n"); + return -ENOENT; + } + + if (!a4l_subd_is_input(subd)) { + __a4l_err("a4l_read: operation requires an input subdevice \n"); + return -EINVAL; + } + + while (count < nbytes) { + + unsigned long tmp_cnt; + + /* Check the events */ + int ret = __handle_event(buf); + + __dump_buffer_counters(buf); + + /* Compute the data amount to copy */ + tmp_cnt = __count_to_get(buf); + + /* Check tmp_cnt count is not higher than + the global count to read */ + if (tmp_cnt > nbytes - count) + tmp_cnt = nbytes - count; + + /* We check whether there is an error */ + if (ret < 0 && ret != -ENOENT) { + __a4l_err("a4l_read: failed to handle event %d \n", ret); + a4l_cancel_buffer(cxt); + count = ret; + goto out_a4l_read; + } + + /* We check whether the acquisition is over */ + if (ret == -ENOENT && tmp_cnt == 0) { + __a4l_info("a4l_read: acquisition done - all data " + "requested by the client was delivered \n"); + a4l_cancel_buffer(cxt); + count = 0; + goto out_a4l_read; + } + + if (tmp_cnt > 0) { + + /* Performs the munge if need be */ + if (subd->munge != NULL) { + __munge(subd, subd->munge, buf, tmp_cnt); + + /* Updates munge count */ + buf->mng_count += tmp_cnt; + } + + /* Performs the copy */ + ret = __consume(cxt, buf, bufdata + count, tmp_cnt); + + if (ret < 0) { + count = ret; + goto out_a4l_read; + } + + /* Updates consume count */ + buf->cns_count += tmp_cnt; + a4l_dbg(1, core_dbg, dev, "buf->cns_cnt=%ld \n", buf->cns_count); + + /* Updates the return value */ + count += tmp_cnt; + + /* If the driver does not work in bulk mode, + we must leave this function */ + if (!test_bit(A4L_BUF_BULK, &buf->flags)) + goto out_a4l_read; + } + else { + /* If the acquisition is not over, we must not + leave the function without having read a least byte */ + ret = a4l_wait_sync(&(buf->sync), rtdm_in_rt_context()); + if (ret < 0) { + if (ret == -ERESTARTSYS) + ret = -EINTR; + count = ret; + goto out_a4l_read; + } + } + } + +out_a4l_read: + + return count; +} + +/* The function a4l_write_buffer can be considered as the kernel entry + point of the RTDM syscall write. 
This syscall is supposed to be + used only during asynchronous acquisitions */ +ssize_t a4l_write_buffer(struct a4l_device_context *cxt, const void *bufdata, size_t nbytes) +{ + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_buffer *buf = cxt->buffer; + struct a4l_subdevice *subd = buf->subd; + ssize_t count = 0; + + /* Basic checkings */ + + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_write: unattached device\n"); + return -EINVAL; + } + + if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) { + __a4l_err("a4l_write: idle subdevice on this context\n"); + return -ENOENT; + } + + if (!a4l_subd_is_output(subd)) { + __a4l_err("a4l_write: operation requires an output subdevice \n"); + return -EINVAL; + } + + while (count < nbytes) { + + unsigned long tmp_cnt; + + /* Check the events */ + int ret = __handle_event(buf); + + __dump_buffer_counters(buf); + + /* Compute the data amount to copy */ + tmp_cnt = __count_to_put(buf); + + /* Check tmp_cnt count is not higher than + the global count to write */ + if (tmp_cnt > nbytes - count) + tmp_cnt = nbytes - count; + + if (ret < 0) { + count = (ret == -ENOENT) ? -EINVAL : ret; + __a4l_err("a4l_write: failed to handle event %d \n", ret); + a4l_cancel_buffer(cxt); + goto out_a4l_write; + } + + if (tmp_cnt > 0) { + + + /* Performs the copy */ + ret = __produce(cxt, + buf, (void *)bufdata + count, tmp_cnt); + if (ret < 0) { + count = ret; + goto out_a4l_write; + } + + /* Performs the munge if need be */ + if (subd->munge != NULL) { + __munge(subd, subd->munge, buf, tmp_cnt); + + /* Updates munge count */ + buf->mng_count += tmp_cnt; + } + + /* Updates produce count */ + buf->prd_count += tmp_cnt; + a4l_dbg(1, core_dbg, dev , "buf->prd_cnt=%ld \n", buf->prd_count); + + /* Updates the return value */ + count += tmp_cnt; + + /* If the driver does not work in bulk mode, + we must leave this function */ + if (!test_bit(A4L_BUF_BULK, &buf->flags)) + goto out_a4l_write; + } else { + /* The buffer is full, we have to wait for a slot to free */ + ret = a4l_wait_sync(&(buf->sync), rtdm_in_rt_context()); + if (ret < 0) { + __a4l_err("a4l_write: failed to wait for free slot (%d)\n", ret); + if (ret == -ERESTARTSYS) + ret = -EINTR; + count = ret; + goto out_a4l_write; + } + } + } + +out_a4l_write: + + return count; +} + +int a4l_select(struct a4l_device_context *cxt, + rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned fd_index) +{ + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_buffer *buf = cxt->buffer; + struct a4l_subdevice *subd = buf->subd; + + /* Basic checkings */ + + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_select: unattached device\n"); + return -EINVAL; + } + + if (!subd || !test_bit(A4L_SUBD_BUSY, &subd->status)) { + __a4l_err("a4l_select: idle subdevice on this context\n"); + return -ENOENT; + } + + /* Check the RTDM select type + (RTDM_SELECTTYPE_EXCEPT is not supported) */ + + if(type != RTDM_SELECTTYPE_READ && + type != RTDM_SELECTTYPE_WRITE) { + __a4l_err("a4l_select: wrong select argument\n"); + return -EINVAL; + } + + if (type == RTDM_SELECTTYPE_READ && !a4l_subd_is_input(subd)) { + __a4l_err("a4l_select: current context " + "does not work with an input subdevice\n"); + return -EINVAL; + } + + if (type == RTDM_SELECTTYPE_WRITE && !a4l_subd_is_output(subd)) { + __a4l_err("a4l_select: current context " + "does not work with an input subdevice\n"); + return -EINVAL; + } + + /* Performs a bind on the Analogy synchronization element */ + return 
a4l_select_sync(&(buf->sync), selector, type, fd_index); +} + +int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg) +{ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + int ret = 0; + unsigned long tmp_cnt = 0; + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_buffer *buf = cxt->buffer; + struct a4l_subdevice *subd = buf->subd; + a4l_poll_t poll; + + if (!rtdm_in_rt_context() && rtdm_rt_capable(fd)) + return -ENOSYS; + + /* Basic checking */ + + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_poll: unattached device\n"); + return -EINVAL; + } + + if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) { + __a4l_err("a4l_poll: idle subdevice on this context\n"); + return -ENOENT; + } + + if (rtdm_safe_copy_from_user(fd, + &poll, arg, sizeof(a4l_poll_t)) != 0) + return -EFAULT; + + /* Checks the buffer events */ + a4l_flush_sync(&buf->sync); + ret = __handle_event(buf); + + /* Retrieves the data amount to compute + according to the subdevice type */ + if (a4l_subd_is_input(subd)) { + + tmp_cnt = __count_to_get(buf); + + /* Check if some error occured */ + if (ret < 0 && ret != -ENOENT) { + a4l_cancel_buffer(cxt); + return ret; + } + + /* Check whether the acquisition is over */ + if (ret == -ENOENT && tmp_cnt == 0) { + a4l_cancel_buffer(cxt); + return 0; + } + } else { + + /* If some error was detected, cancel the transfer */ + if (ret < 0) { + a4l_cancel_buffer(cxt); + return ret; + } + + tmp_cnt = __count_to_put(buf); + } + + if (poll.arg == A4L_NONBLOCK || tmp_cnt != 0) + goto out_poll; + + if (poll.arg == A4L_INFINITE) + ret = a4l_wait_sync(&(buf->sync), rtdm_in_rt_context()); + else { + unsigned long long ns = ((unsigned long long)poll.arg) * + ((unsigned long long)NSEC_PER_MSEC); + ret = a4l_timedwait_sync(&(buf->sync), rtdm_in_rt_context(), ns); + } + + if (ret == 0) { + /* Retrieves the count once more */ + if (a4l_subd_is_input(dev->transfer.subds[poll.idx_subd])) + tmp_cnt = __count_to_get(buf); + else + tmp_cnt = __count_to_put(buf); + } + else + return ret; + +out_poll: + + poll.arg = tmp_cnt; + + ret = rtdm_safe_copy_to_user(fd, + arg, &poll, sizeof(a4l_poll_t)); + + return ret; +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c new file mode 100644 index 0000000..7420bc5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c @@ -0,0 +1,392 @@ +/* + * Analogy for Linux, command related features + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/mman.h> +#include <asm/io.h> +#include <asm/errno.h> +#include <rtdm/analogy/device.h> + +/* --- Command descriptor management functions --- */ +int a4l_fill_cmddesc(struct a4l_device_context *cxt, struct a4l_cmd_desc *desc, + unsigned int **chan_descs, void *arg) +{ + unsigned int *tmpchans = NULL; + int ret = 0; + + ret = rtdm_safe_copy_from_user(rtdm_private_to_fd(cxt), + desc, arg, sizeof(struct a4l_cmd_desc)); + if (ret != 0) + goto out_cmddesc; + + + if (desc->nb_chan == 0) { + ret = -EINVAL; + goto out_cmddesc; + } + + tmpchans = rtdm_malloc(desc->nb_chan * sizeof(unsigned int)); + if (tmpchans == NULL) { + ret = -ENOMEM; + goto out_cmddesc; + } + + ret = rtdm_safe_copy_from_user(rtdm_private_to_fd(cxt), + tmpchans, + desc->chan_descs, + desc->nb_chan * sizeof(unsigned int)); + if (ret != 0) { + __a4l_err("%s invalid arguments \n", __FUNCTION__); + goto out_cmddesc; + } + + *chan_descs = desc->chan_descs; + desc->chan_descs = tmpchans; + + __a4l_dbg(1, core_dbg, "desc dump: \n"); + __a4l_dbg(1, core_dbg, "\t->idx_subd=%u\n", desc->idx_subd); + __a4l_dbg(1, core_dbg, "\t->flags=%lu\n", desc->flags); + __a4l_dbg(1, core_dbg, "\t->nb_chan=%u\n", desc->nb_chan); + __a4l_dbg(1, core_dbg, "\t->chan_descs=0x%x\n", *desc->chan_descs); + __a4l_dbg(1, core_dbg, "\t->data_len=%u\n", desc->data_len); + __a4l_dbg(1, core_dbg, "\t->pdata=0x%p\n", desc->data); + + out_cmddesc: + + if (ret != 0) { + __a4l_err("a4l_fill_cmddesc: %d \n", ret); + if (tmpchans != NULL) + rtdm_free(tmpchans); + desc->chan_descs = NULL; + } + + return ret; +} + +void a4l_free_cmddesc(struct a4l_cmd_desc * desc) +{ + if (desc->chan_descs != NULL) + rtdm_free(desc->chan_descs); +} + +int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc) +{ + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_subdevice *subd; + + if (desc->idx_subd >= dev->transfer.nb_subd) { + __a4l_err("a4l_check_cmddesc: " + "subdevice index out of range (idx=%u)\n", + desc->idx_subd); + return -EINVAL; + } + + subd = dev->transfer.subds[desc->idx_subd]; + + if ((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED) { + __a4l_err("a4l_check_cmddesc: " + "subdevice type incoherent\n"); + return -EIO; + } + + if (!(subd->flags & A4L_SUBD_CMD)) { + __a4l_err("a4l_check_cmddesc: operation not supported, " + "synchronous only subdevice\n"); + return -EIO; + } + + if (test_bit(A4L_SUBD_BUSY, &subd->status)) { + __a4l_err("a4l_check_cmddesc: subdevice busy\n"); + return -EBUSY; + } + + return a4l_check_chanlist(dev->transfer.subds[desc->idx_subd], + desc->nb_chan, desc->chan_descs); +} + +/* --- Command checking functions --- */ + +int a4l_check_generic_cmdcnt(struct a4l_cmd_desc * desc) +{ + unsigned int tmp1, tmp2; + + /* Makes sure trigger sources are trivially valid */ + tmp1 = + desc->start_src & ~(TRIG_NOW | TRIG_INT | TRIG_EXT | TRIG_FOLLOW); + tmp2 = desc->start_src & (TRIG_NOW | TRIG_INT | TRIG_EXT | TRIG_FOLLOW); + if (tmp1 != 0 || tmp2 == 0) { + __a4l_err("a4l_check_cmddesc: start_src, weird trigger\n"); + return -EINVAL; + } + + tmp1 = desc->scan_begin_src & ~(TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW); + tmp2 = desc->scan_begin_src & (TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW); + if (tmp1 != 0 || tmp2 == 0) { + __a4l_err("a4l_check_cmddesc: scan_begin_src, , weird trigger\n"); + return -EINVAL; + } + + tmp1 = desc->convert_src & ~(TRIG_TIMER | TRIG_EXT | TRIG_NOW); + tmp2 = desc->convert_src & (TRIG_TIMER | TRIG_EXT | TRIG_NOW); + if (tmp1 != 0 || tmp2 == 
0) { + __a4l_err("a4l_check_cmddesc: convert_src, weird trigger\n"); + return -EINVAL; + } + + tmp1 = desc->scan_end_src & ~(TRIG_COUNT); + if (tmp1 != 0) { + __a4l_err("a4l_check_cmddesc: scan_end_src, weird trigger\n"); + return -EINVAL; + } + + tmp1 = desc->stop_src & ~(TRIG_COUNT | TRIG_NONE); + tmp2 = desc->stop_src & (TRIG_COUNT | TRIG_NONE); + if (tmp1 != 0 || tmp2 == 0) { + __a4l_err("a4l_check_cmddesc: stop_src, weird trigger\n"); + return -EINVAL; + } + + /* Makes sure trigger sources are unique */ + if (desc->start_src != TRIG_NOW && + desc->start_src != TRIG_INT && + desc->start_src != TRIG_EXT && desc->start_src != TRIG_FOLLOW) { + __a4l_err("a4l_check_cmddesc: start_src, " + "only one trigger should be set\n"); + return -EINVAL; + } + + if (desc->scan_begin_src != TRIG_TIMER && + desc->scan_begin_src != TRIG_EXT && + desc->scan_begin_src != TRIG_FOLLOW) { + __a4l_err("a4l_check_cmddesc: scan_begin_src, " + "only one trigger should be set\n"); + return -EINVAL; + } + + if (desc->convert_src != TRIG_TIMER && + desc->convert_src != TRIG_EXT && desc->convert_src != TRIG_NOW) { + __a4l_err("a4l_check_cmddesc: convert_src, " + "only one trigger should be set\n"); + return -EINVAL; + } + + if (desc->stop_src != TRIG_COUNT && desc->stop_src != TRIG_NONE) { + __a4l_err("a4l_check_cmddesc: stop_src, " + "only one trigger should be set\n"); + return -EINVAL; + } + + /* Makes sure arguments are trivially compatible */ + tmp1 = desc->start_src & (TRIG_NOW | TRIG_FOLLOW | TRIG_INT); + tmp2 = desc->start_arg; + if (tmp1 != 0 && tmp2 != 0) { + __a4l_err("a4l_check_cmddesc: no start_arg expected\n"); + return -EINVAL; + } + + tmp1 = desc->scan_begin_src & TRIG_FOLLOW; + tmp2 = desc->scan_begin_arg; + if (tmp1 != 0 && tmp2 != 0) { + __a4l_err("a4l_check_cmddesc: no scan_begin_arg expected\n"); + return -EINVAL; + } + + tmp1 = desc->convert_src & TRIG_NOW; + tmp2 = desc->convert_arg; + if (tmp1 != 0 && tmp2 != 0) { + __a4l_err("a4l_check_cmddesc: no convert_arg expected\n"); + return -EINVAL; + } + + tmp1 = desc->stop_src & TRIG_NONE; + tmp2 = desc->stop_arg; + if (tmp1 != 0 && tmp2 != 0) { + __a4l_err("a4l_check_cmddesc: no stop_arg expected\n"); + return -EINVAL; + } + + return 0; +} + +int a4l_check_specific_cmdcnt(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc) +{ + unsigned int tmp1, tmp2; + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_cmd_desc *cmd_mask = dev->transfer.subds[desc->idx_subd]->cmd_mask; + + if (cmd_mask == NULL) + return 0; + + if (cmd_mask->start_src != 0) { + tmp1 = desc->start_src & ~(cmd_mask->start_src); + tmp2 = desc->start_src & (cmd_mask->start_src); + if (tmp1 != 0 || tmp2 == 0) { + __a4l_err("a4l_check_cmddesc: start_src, " + "trigger unsupported\n"); + return -EINVAL; + } + } + + if (cmd_mask->scan_begin_src != 0) { + tmp1 = desc->scan_begin_src & ~(cmd_mask->scan_begin_src); + tmp2 = desc->scan_begin_src & (cmd_mask->scan_begin_src); + if (tmp1 != 0 || tmp2 == 0) { + __a4l_err("a4l_check_cmddesc: scan_begin_src, " + "trigger unsupported\n"); + return -EINVAL; + } + } + + if (cmd_mask->convert_src != 0) { + tmp1 = desc->convert_src & ~(cmd_mask->convert_src); + tmp2 = desc->convert_src & (cmd_mask->convert_src); + if (tmp1 != 0 || tmp2 == 0) { + __a4l_err("a4l_check_cmddesc: convert_src, " + "trigger unsupported\n"); + return -EINVAL; + } + } + + if (cmd_mask->scan_end_src != 0) { + tmp1 = desc->scan_end_src & ~(cmd_mask->scan_end_src); + if (tmp1 != 0) { + __a4l_err("a4l_check_cmddesc: scan_end_src, " + "trigger unsupported\n"); + return 
-EINVAL; + } + } + + if (cmd_mask->stop_src != 0) { + tmp1 = desc->stop_src & ~(cmd_mask->stop_src); + tmp2 = desc->stop_src & (cmd_mask->stop_src); + if (tmp1 != 0 || tmp2 == 0) { + __a4l_err("a4l_check_cmddesc: stop_src, " + "trigger unsupported\n"); + return -EINVAL; + } + } + + return 0; +} + +/* --- IOCTL / FOPS function --- */ + +int a4l_ioctl_cmd(struct a4l_device_context * ctx, void *arg) +{ + int ret = 0, simul_flag = 0; + struct a4l_cmd_desc *cmd_desc = NULL; + struct a4l_device *dev = a4l_get_dev(ctx); + unsigned int *chan_descs, *tmp; + struct a4l_subdevice *subd; + + /* The command launching cannot be done in real-time because + of some possible buffer allocations in the drivers */ + if (rtdm_in_rt_context()) + return -ENOSYS; + + /* Basically check the device */ + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + __a4l_err("a4l_ioctl_cmd: cannot command " + "an unattached device\n"); + return -EINVAL; + } + + /* Allocates the command */ + cmd_desc = (struct a4l_cmd_desc *) rtdm_malloc(sizeof(struct a4l_cmd_desc)); + if (cmd_desc == NULL) + return -ENOMEM; + memset(cmd_desc, 0, sizeof(struct a4l_cmd_desc)); + + /* Gets the command */ + ret = a4l_fill_cmddesc(ctx, cmd_desc, &chan_descs, arg); + if (ret != 0) + goto out_ioctl_cmd; + + /* Checks the command */ + ret = a4l_check_cmddesc(ctx, cmd_desc); + if (ret != 0) + goto out_ioctl_cmd; + + ret = a4l_check_generic_cmdcnt(cmd_desc); + if (ret != 0) + goto out_ioctl_cmd; + + ret = a4l_check_specific_cmdcnt(ctx, cmd_desc); + if (ret != 0) + goto out_ioctl_cmd; + + __a4l_dbg(1, core_dbg,"1st cmd checks passed\n"); + subd = dev->transfer.subds[cmd_desc->idx_subd]; + + /* Tests the command with the cmdtest function */ + if (cmd_desc->flags & A4L_CMD_SIMUL) { + simul_flag = 1; + + if (!subd->do_cmdtest) { + __a4l_err("a4l_ioctl_cmd: driver's cmd_test NULL\n"); + ret = -EINVAL; + goto out_ioctl_cmd; + } + + ret = subd->do_cmdtest(subd, cmd_desc); + if (ret != 0) { + __a4l_err("a4l_ioctl_cmd: driver's cmd_test failed\n"); + goto out_ioctl_cmd; + } + __a4l_dbg(1, core_dbg, "driver's cmd checks passed\n"); + goto out_ioctl_cmd; + } + + + /* Gets the transfer system ready */ + ret = a4l_setup_buffer(ctx, cmd_desc); + if (ret < 0) + goto out_ioctl_cmd; + + /* Eventually launches the command */ + ret = subd->do_cmd(subd, cmd_desc); + + if (ret != 0) { + a4l_cancel_buffer(ctx); + goto out_ioctl_cmd; + } + + out_ioctl_cmd: + + if (simul_flag) { + /* copy the kernel based descriptor */ + tmp = cmd_desc->chan_descs; + /* return the user based descriptor */ + cmd_desc->chan_descs = chan_descs; + rtdm_safe_copy_to_user(rtdm_private_to_fd(ctx), arg, cmd_desc, + sizeof(struct a4l_cmd_desc)); + /* make sure we release the memory associated to the kernel */ + cmd_desc->chan_descs = tmp; + + } + + if (ret != 0 || simul_flag == 1) { + a4l_free_cmddesc(cmd_desc); + rtdm_free(cmd_desc); + } + + return ret; +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c new file mode 100644 index 0000000..69492b9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c @@ -0,0 +1,458 @@ +/* + * Analogy for Linux, device related features + * + * Copyright (C) 1997-2000 David A. 
Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/string.h> +#include <rtdm/analogy/device.h> + +#include "proc.h" + +static struct a4l_device a4l_devs[A4L_NB_DEVICES]; + +/* --- Device tab management functions --- */ + +void a4l_init_devs(void) +{ + int i; + memset(a4l_devs, 0, A4L_NB_DEVICES * sizeof(struct a4l_device)); + for (i = 0; i < A4L_NB_DEVICES; i++) { + rtdm_lock_init(&a4l_devs[i].lock); + a4l_devs[i].transfer.irq_desc.irq = A4L_IRQ_UNUSED; + } +} + +int a4l_check_cleanup_devs(void) +{ + int i, ret = 0; + + for (i = 0; i < A4L_NB_DEVICES && ret == 0; i++) + if (test_bit(A4L_DEV_ATTACHED_NR, &a4l_devs[i].flags)) + ret = -EBUSY; + + return ret; +} + +void a4l_set_dev(struct a4l_device_context *cxt) +{ + /* Retrieve the minor index */ + const int minor = a4l_get_minor(cxt); + /* Fill the dev fields accordingly */ + cxt->dev = &(a4l_devs[minor]); +} + +/* --- Device tab proc section --- */ + +#ifdef CONFIG_PROC_FS + +int a4l_rdproc_devs(struct seq_file *p, void *data) +{ + int i; + + seq_printf(p, "-- Analogy devices --\n\n"); + seq_printf(p, "| idx | status | driver\n"); + + for (i = 0; i < A4L_NB_DEVICES; i++) { + char *status, *name; + + /* Gets the device's state */ + if (a4l_devs[i].flags == 0) { + status = "Unused"; + name = "No driver"; + } else if (test_bit(A4L_DEV_ATTACHED_NR, &a4l_devs[i].flags)) { + status = "Linked"; + name = a4l_devs[i].driver->driver_name; + } else { + status = "Broken"; + name = "Unknown"; + } + + seq_printf(p, "| %02d | %s | %s\n", i, status, name); + } + return 0; +} + +static int a4l_proc_transfer_open(struct inode *inode, struct file *file) +{ + return single_open(file, a4l_rdproc_transfer, pde_data(inode)); +} + +static const DEFINE_PROC_OPS(a4l_proc_transfer_ops, + a4l_proc_transfer_open, + single_release, + seq_read, + NULL); + +int a4l_proc_attach(struct a4l_device_context * cxt) +{ + int ret = 0; + struct a4l_device *dev = a4l_get_dev(cxt); + struct proc_dir_entry *entry; + char *entry_name; + + /* Allocate the buffer for the file name */ + entry_name = rtdm_malloc(A4L_NAMELEN + 4); + if (entry_name == NULL) { + __a4l_err("a4l_proc_attach: failed to allocate buffer\n"); + return -ENOMEM; + } + + /* Create the proc file name */ + ksformat(entry_name, A4L_NAMELEN + 4, "%02d-%s", + a4l_get_minor(cxt), dev->driver->board_name); + + /* Create the proc entry */ + entry = proc_create_data(entry_name, 0444, a4l_proc_root, + &a4l_proc_transfer_ops, &dev->transfer); + if (entry == NULL) { + __a4l_err("a4l_proc_attach: " + "failed to create /proc/analogy/%s\n", + entry_name); + ret = -ENOMEM; + } + + rtdm_free(entry_name); + + return ret; +} + +void a4l_proc_detach(struct a4l_device_context * cxt) +{ + struct 
a4l_device *dev = a4l_get_dev(cxt); + char *entry_name; + + entry_name = rtdm_malloc(A4L_NAMELEN + 4); + if (entry_name == NULL) { + __a4l_err("a4l_proc_detach: " + "failed to allocate filename buffer\n"); + return; + } + + ksformat(entry_name, A4L_NAMELEN + 4, "%02d-%s", + a4l_get_minor(cxt), dev->driver->board_name); + + remove_proc_entry(entry_name, a4l_proc_root); + + rtdm_free(entry_name); +} + +#else /* !CONFIG_PROC_FS */ + +int a4l_proc_attach(struct a4l_device_context * cxt) +{ + return 0; +} + +void a4l_proc_detach(struct a4l_device_context * cxt) +{ +} + +#endif /* CONFIG_PROC_FS */ + +/* --- Attach / detach section --- */ + +int a4l_fill_lnkdesc(struct a4l_device_context * cxt, + a4l_lnkdesc_t * link_arg, void *arg) +{ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + int ret; + char *tmpname = NULL; + void *tmpopts = NULL; + + ret = rtdm_safe_copy_from_user(fd, + link_arg, arg, sizeof(a4l_lnkdesc_t)); + if (ret != 0) { + __a4l_err("a4l_fill_lnkdesc: " + "call1(copy_from_user) failed\n"); + goto out_get_lnkdesc; + } + + if (link_arg->bname_size != 0 && link_arg->bname != NULL) { + tmpname = rtdm_malloc(link_arg->bname_size + 1); + if (tmpname == NULL) { + __a4l_err("a4l_fill_lnkdesc: " + "call1(alloc) failed\n"); + ret = -ENOMEM; + goto out_get_lnkdesc; + } + tmpname[link_arg->bname_size] = 0; + + ret = rtdm_safe_copy_from_user(fd, + tmpname, + link_arg->bname, + link_arg->bname_size); + if (ret != 0) { + __a4l_err("a4l_fill_lnkdesc: " + "call2(copy_from_user) failed\n"); + goto out_get_lnkdesc; + } + } else { + __a4l_err("a4l_fill_lnkdesc: board name missing\n"); + ret = -EINVAL; + goto out_get_lnkdesc; + } + + if (link_arg->opts_size != 0 && link_arg->opts != NULL) { + tmpopts = rtdm_malloc(link_arg->opts_size); + + if (tmpopts == NULL) { + __a4l_err("a4l_fill_lnkdesc: " + "call2(alloc) failed\n"); + ret = -ENOMEM; + goto out_get_lnkdesc; + } + + ret = rtdm_safe_copy_from_user(fd, + tmpopts, + link_arg->opts, + link_arg->opts_size); + if (ret != 0) { + __a4l_err("a4l_fill_lnkdesc: " + "call3(copy_from_user) failed\n"); + goto out_get_lnkdesc; + } + } + + link_arg->bname = tmpname; + link_arg->opts = tmpopts; + + out_get_lnkdesc: + + if (tmpname == NULL) { + link_arg->bname = NULL; + link_arg->bname_size = 0; + } + + if (tmpopts == NULL) { + link_arg->opts = NULL; + link_arg->opts_size = 0; + } + + return ret; +} + +void a4l_free_lnkdesc(struct a4l_device_context * cxt, a4l_lnkdesc_t * link_arg) +{ + if (link_arg->bname != NULL) + rtdm_free(link_arg->bname); + + if (link_arg->opts != NULL) + rtdm_free(link_arg->opts); +} + +int a4l_assign_driver(struct a4l_device_context * cxt, + struct a4l_driver * drv, a4l_lnkdesc_t * link_arg) +{ + int ret = 0; + struct a4l_device *dev = a4l_get_dev(cxt); + + dev->driver = drv; + INIT_LIST_HEAD(&dev->subdvsq); + + if (drv->privdata_size == 0) + __a4l_dbg(1, core_dbg, " warning! 
" + "the field priv will not be usable\n"); + else { + dev->priv = rtdm_malloc(drv->privdata_size); + if (dev->priv == NULL) { + __a4l_err("a4l_assign_driver: " + "call(alloc) failed\n"); + ret = -ENOMEM; + goto out_assign_driver; + } + + /* Initialize the private data even if it not our role + (the driver should do it), that may prevent hard to + find bugs */ + memset(dev->priv, 0, drv->privdata_size); + } + + if ((ret = drv->attach(dev, link_arg)) != 0) + __a4l_err("a4l_assign_driver: " + "call(drv->attach) failed (ret=%d)\n", + ret); + +out_assign_driver: + + /* Increments module's count */ + if (ret == 0 && (!try_module_get(drv->owner))) { + __a4l_err("a4l_assign_driver: " + "driver's owner field wrongly set\n"); + ret = -ENODEV; + } + + if (ret != 0 && dev->priv != NULL) { + rtdm_free(dev->priv); + dev->driver = NULL; + } + + return ret; +} + +int a4l_release_driver(struct a4l_device_context * cxt) +{ + struct a4l_device *dev = a4l_get_dev(cxt); + struct a4l_subdevice *subd, *tmp; + int ret = 0; + + if ((ret = dev->driver->detach(dev)) != 0) + goto out_release_driver; + + module_put(dev->driver->owner); + + /* In case, the driver developer did not free the subdevices */ + if (!list_empty(&dev->subdvsq)) + list_for_each_entry_safe(subd, tmp, &dev->subdvsq, list) { + list_del(&subd->list); + rtdm_free(subd); + } + + /* Free the private field */ + if (dev->priv) + rtdm_free(dev->priv); + + dev->driver = NULL; + +out_release_driver: + return ret; +} + +int a4l_device_attach(struct a4l_device_context * cxt, void *arg) +{ + int ret = 0; + a4l_lnkdesc_t link_arg; + struct a4l_driver *drv = NULL; + + if ((ret = a4l_fill_lnkdesc(cxt, &link_arg, arg)) != 0) + goto out_attach; + + if ((ret = a4l_lct_drv(link_arg.bname, &drv)) != 0) { + __a4l_err("a4l_device_attach: " + "cannot find board name %s\n", link_arg.bname); + goto out_attach; + } + + if ((ret = a4l_assign_driver(cxt, drv, &link_arg)) != 0) + goto out_attach; + + out_attach: + a4l_free_lnkdesc(cxt, &link_arg); + return ret; +} + +int a4l_device_detach(struct a4l_device_context * cxt) +{ + struct a4l_device *dev = a4l_get_dev(cxt); + + if (dev->driver == NULL) { + __a4l_err("a4l_device_detach: " + "incoherent state, driver not reachable\n"); + return -ENXIO; + } + + return a4l_release_driver(cxt); +} + +/* --- IOCTL / FOPS functions --- */ + +int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg) +{ + int ret = 0; + + if (rtdm_in_rt_context()) + return -ENOSYS; + + if (arg == NULL) { + /* Basic checking */ + if (!test_bit(A4L_DEV_ATTACHED_NR, &(a4l_get_dev(cxt)->flags))) { + __a4l_err("a4l_ioctl_devcfg: " + "free device, no driver to detach\n"); + return -EINVAL; + } + /* Pre-cleanup of the transfer structure, we ensure + that nothing is busy */ + if ((ret = a4l_precleanup_transfer(cxt)) != 0) + return ret; + /* Remove the related proc file */ + a4l_proc_detach(cxt); + /* Free the device and the driver from each other */ + if ((ret = a4l_device_detach(cxt)) == 0) + clear_bit(A4L_DEV_ATTACHED_NR, + &(a4l_get_dev(cxt)->flags)); + /* Free the transfer structure and its related data */ + if ((ret = a4l_cleanup_transfer(cxt)) != 0) + return ret; + } else { + /* Basic checking */ + if (test_bit + (A4L_DEV_ATTACHED_NR, &(a4l_get_dev(cxt)->flags))) { + __a4l_err("a4l_ioctl_devcfg: " + "linked device, cannot attach more driver\n"); + return -EINVAL; + } + /* Pre-initialization of the transfer structure */ + a4l_presetup_transfer(cxt); + /* Link the device with the driver */ + if ((ret = a4l_device_attach(cxt, arg)) != 0) + return ret; + 
/* Create the transfer structure and + the related proc file */ + if ((ret = a4l_setup_transfer(cxt)) != 0 || + (ret = a4l_proc_attach(cxt)) != 0) + a4l_device_detach(cxt); + else + set_bit(A4L_DEV_ATTACHED_NR, + &(a4l_get_dev(cxt)->flags)); + } + + return ret; +} + +int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg) +{ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + a4l_dvinfo_t info; + struct a4l_device *dev = a4l_get_dev(cxt); + + memset(&info, 0, sizeof(a4l_dvinfo_t)); + + if (test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) { + int len = (strlen(dev->driver->board_name) > A4L_NAMELEN) ? + A4L_NAMELEN : strlen(dev->driver->board_name); + + memcpy(info.board_name, dev->driver->board_name, len); + + len = (strlen(dev->driver->driver_name) > A4L_NAMELEN) ? + A4L_NAMELEN : strlen(dev->driver->driver_name); + + memcpy(info.driver_name, dev->driver->driver_name, len); + + info.nb_subd = dev->transfer.nb_subd; + /* TODO: for API compatibility issue, find the first + read subdevice and write subdevice */ + } + + if (rtdm_safe_copy_to_user(fd, + arg, &info, sizeof(a4l_dvinfo_t)) != 0) + return -EFAULT; + + return 0; +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c new file mode 100644 index 0000000..a857dea --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c @@ -0,0 +1,104 @@ +/* + * Analogy for Linux, driver related features + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/module.h> +#include <linux/fs.h> +#include <rtdm/analogy/device.h> + +#include "proc.h" + +static LIST_HEAD(a4l_drvs); + +/* --- Driver list management functions --- */ + +int a4l_lct_drv(char *pin, struct a4l_driver ** pio) +{ + struct list_head *this; + int ret = -EINVAL; + + __a4l_dbg(1, core_dbg, "name=%s\n", pin); + + /* Goes through the linked list so as to find + a driver instance with the same name */ + list_for_each(this, &a4l_drvs) { + struct a4l_driver *drv = list_entry(this, struct a4l_driver, list); + + if (strcmp(drv->board_name, pin) == 0) { + /* The argument pio can be NULL + if there is no need to retrieve the pointer */ + if (pio != NULL) + *pio = drv; + ret = 0; + break; + } + } + + return ret; +} + +int a4l_register_drv(struct a4l_driver * drv) +{ + if (!rtdm_available()) + return -ENOSYS; + + __a4l_dbg(1, core_dbg, "board name=%s\n", drv->board_name); + + if (a4l_lct_drv(drv->board_name, NULL) != 0) { + list_add(&drv->list, &a4l_drvs); + return 0; + } else + return -EINVAL; +} + +int a4l_unregister_drv(struct a4l_driver * drv) +{ + __a4l_dbg(1, core_dbg, "board name=%s\n", drv->board_name); + + if (a4l_lct_drv(drv->board_name, NULL) == 0) { + /* Here, we consider the argument is pointing + to a real driver struct (not a blank structure + with only the name field properly set */ + list_del(&drv->list); + return 0; + } else + return -EINVAL; +} + +#ifdef CONFIG_PROC_FS + +/* --- Driver list proc section --- */ + +int a4l_rdproc_drvs(struct seq_file *p, void *data) +{ + int i = 0; + struct list_head *this; + + seq_printf(p, "-- Analogy drivers --\n\n"); + + seq_printf(p, "| idx | board name \n"); + + list_for_each(this, &a4l_drvs) { + struct a4l_driver *drv = list_entry(this, struct a4l_driver, list); + seq_printf(p, "| %02d | %s \n", i++, drv->board_name); + } + return 0; +} + +#endif /* CONFIG_PROC_FS */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c new file mode 100644 index 0000000..7d2d883 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c @@ -0,0 +1,608 @@ +/* + * Analogy for Linux, driver facilities + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <rtdm/analogy/device.h>
+
+/**
+ * @ingroup cobalt
+ * @defgroup analogy Analogy framework
+ * An RTDM-based interface for implementing DAQ card drivers
+ */
+
+/**
+ * @ingroup analogy
+ * @defgroup analogy_driver_facilities Driver API
+ * Programming interface provided to DAQ card drivers
+ */
+
+/* --- Driver section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_driver Driver management services
+ *
+ * Analogy driver registration / unregistration
+ *
+ * In a common Linux char driver, the developer has to register a fops
+ * structure filled with callbacks for read / write / mmap / ioctl
+ * operations.
+ *
+ * Analogy drivers do not have to implement read / write / mmap /
+ * ioctl functions; these procedures are implemented in the Analogy
+ * generic layer, which therefore already manages the transfers
+ * between user-space and kernel-space. Analogy drivers work with
+ * commands and instructions, which are more specialized kinds of
+ * read / write operations. Instead of registering a fops structure,
+ * an Analogy driver must register an a4l_driver structure.
+ *
+ * @{
+ */
+
+/**
+ * @brief Register an Analogy driver
+ *
+ * After initialising a driver structure, the driver must be made
+ * available so that devices can be attached to it.
+ *
+ * @param[in] drv Driver descriptor structure
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_register_drv(struct a4l_driver * drv);
+EXPORT_SYMBOL_GPL(a4l_register_drv);
+
+/**
+ * @brief Unregister an Analogy driver
+ *
+ * This function removes the driver descriptor from the Analogy driver
+ * list. The driver cannot be attached anymore.
+ *
+ * @param[in] drv Driver descriptor structure
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_unregister_drv(struct a4l_driver * drv);
+EXPORT_SYMBOL_GPL(a4l_unregister_drv);
+
+/** @} */
+
+/* --- Subdevice section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_subdevice Subdevice management services
+ *
+ * Subdevice declaration in a driver
+ *
+ * The subdevice structure is the most complex one in the Analogy
+ * driver layer. It contains some description fields to fill and some
+ * callbacks to declare.
+ *
+ * The description fields are:
+ * - flags: to define the subdevice type and its capabilities;
+ * - chan_desc: to describe the channels which compose the subdevice;
+ * - rng_desc: to declare the usable ranges;
+ *
+ * The callback functions are:
+ * - do_cmd() and do_cmdtest(): to perform asynchronous acquisitions
+ * driven by commands;
+ * - cancel(): to abort a working asynchronous acquisition;
+ * - munge(): to apply modifications to the data freshly acquired
+ * during an asynchronous transfer. Warning: using this feature can
+ * significantly reduce performance; if the munge operation is
+ * complex, it will cause a high CPU load, and if the acquisition
+ * device is DMA capable, many cache misses and cache replacements
+ * will occur, so the benefits of the DMA controller will vanish;
+ * - trigger(): optionally, to launch an asynchronous acquisition;
+ * - insn_read(), insn_write(), insn_bits(), insn_config(): to perform
+ * synchronous acquisition operations.
+ *
+ * Once the subdevice is filled, it must be inserted into the driver
+ * structure thanks to a4l_add_subd().
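+ *
+ * As an illustration, here is a minimal driver skeleton tying these
+ * services together; the my_* names, the subdevice type and the
+ * omitted descriptors are hypothetical and only show how the pieces
+ * fit together:
+ *
+ * @code
+ * static void my_setup_subd(struct a4l_subdevice *subd)
+ * {
+ *	subd->flags = A4L_SUBD_AI;
+ *	subd->chan_desc = &my_chan_desc;
+ *	subd->insn_read = my_insn_read;
+ * }
+ *
+ * static int my_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+ * {
+ *	struct a4l_subdevice *subd = a4l_alloc_subd(0, my_setup_subd);
+ *	int err;
+ *
+ *	if (subd == NULL)
+ *		return -ENOMEM;
+ *
+ *	err = a4l_add_subd(dev, subd);
+ *	return err < 0 ? err : 0;
+ * }
+ *
+ * static int my_detach(struct a4l_device *dev)
+ * {
+ *	return 0;
+ * }
+ *
+ * static struct a4l_driver my_drv = {
+ *	.owner = THIS_MODULE,
+ *	.board_name = "my_board",
+ *	.driver_name = "my_driver",
+ *	.attach = my_attach,
+ *	.detach = my_detach,
+ *	.privdata_size = 0,
+ * };
+ *
+ * static int __init my_init(void)
+ * {
+ *	return a4l_register_drv(&my_drv);
+ * }
+ * @endcode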
+ *
+ * @{
+ */
+
+EXPORT_SYMBOL_GPL(a4l_range_bipolar10);
+EXPORT_SYMBOL_GPL(a4l_range_bipolar5);
+EXPORT_SYMBOL_GPL(a4l_range_unipolar10);
+EXPORT_SYMBOL_GPL(a4l_range_unipolar5);
+EXPORT_SYMBOL_GPL(a4l_range_unknown);
+EXPORT_SYMBOL_GPL(a4l_range_fake);
+
+/**
+ * @brief Allocate a subdevice descriptor
+ *
+ * This is a helper function to get a suitable subdevice
+ * descriptor
+ *
+ * @param[in] sizeof_priv Size of the subdevice's private data
+ * @param[in] setup Setup function to be called after the allocation
+ *
+ * @return the allocated subdevice descriptor; NULL if the allocation
+ * failed.
+ *
+ */
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+				      void (*setup)(struct a4l_subdevice *));
+EXPORT_SYMBOL_GPL(a4l_alloc_subd);
+
+/**
+ * @brief Add a subdevice to the driver descriptor
+ *
+ * Once the driver descriptor structure is initialized, the function
+ * a4l_add_subd() must be used to add subdevices to the driver.
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the index with which the subdevice has been registered, in
+ * case of error a negative error code is returned.
+ *
+ */
+int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_add_subd);
+
+/**
+ * @brief Get a pointer to the subdevice descriptor referenced by its
+ * registration index
+ *
+ * This function is rarely useful, as all the driver's callbacks get
+ * the related subdevice descriptor as their first argument.
+ * This function is not optimized; it walks a linked list to get the
+ * proper pointer. So it must not be used in real-time context, only
+ * at initialization / cleanup time (attach / detach).
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] idx Subdevice index
+ *
+ * @return the subdevice descriptor, or NULL if the index is out of
+ * range.
+ *
+ */
+struct a4l_subdevice *a4l_get_subd(struct a4l_device *dev, int idx);
+EXPORT_SYMBOL_GPL(a4l_get_subd);
+
+/** @} */
+
+/* --- Buffer section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_buffer Buffer management services
+ *
+ * Buffer management services
+ *
+ * The buffer is the key component of the Analogy infrastructure. It
+ * manages transfers between the user-space and the Analogy drivers
+ * thanks to generic functions which are described hereafter. Thanks
+ * to the buffer subsystem, the driver developer does not have to care
+ * about the way the user program retrieves or sends data.
+ *
+ * To write a classical char driver, the developer has to fill a fops
+ * structure so as to provide transfer operations to the user program
+ * (read, write, ioctl and mmap if need be).
+ *
+ * The Analogy infrastructure manages the whole interface with the
+ * userspace; the common read, write, mmap, etc. callbacks are generic
+ * Analogy functions. These functions manage (and perform, if need be)
+ * transfers between the user-space and an asynchronous buffer thanks
+ * to lockless mechanisms.
+ *
+ * Consequently, the developer has to use the proper buffer functions
+ * in order to write / read acquired data into / from the asynchronous
+ * buffer.
+ *
+ * Here are listed the functions:
+ * - a4l_buf_prepare_(abs)put() and a4l_buf_commit_(abs)put()
+ * - a4l_buf_prepare_(abs)get() and a4l_buf_commit_(abs)get()
+ * - a4l_buf_put()
+ * - a4l_buf_get()
+ * - a4l_buf_evt().
+ *
+ * The function count might seem high; however, the developer needs
+ * only a few of them to write a driver. Having so many functions
+ * makes it possible to handle any transfer case:
+ * - If some DMA controller is available, there is no need to make the
+ * driver copy the acquired data into the asynchronous buffer; the
+ * DMA controller must directly trigger DMA shots into / from the
+ * buffer. In that case, a function a4l_buf_prepare_*() must be used
+ * to set up the DMA transfer, and a function
+ * a4l_buf_commit_*() has to be called to complete the transfer.
+ * - For DMA controllers which need to work with a global counter (the
+ * transferred data count since the beginning of the acquisition),
+ * the functions a4l_buf_*_abs_*() have been made available.
+ * - If no DMA controller is available, the driver has to perform the
+ * copy between the hardware component and the asynchronous
+ * buffer. In such cases, the functions a4l_buf_get() and
+ * a4l_buf_put() are useful.
+ *
+ * @{
+ */
+
+/**
+ * @brief Update the absolute count of data sent from the device to
+ * the buffer since the start of the acquisition and after the next
+ * DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However, some
+ * pointers still have to be updated so as to monitor the transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred during the next
+ * DMA shot plus the data count which has been copied since the start
+ * of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_absput);
+
+/**
+ * @brief Set the absolute count of data which was sent from the
+ * device to the buffer since the start of the acquisition and until
+ * the last DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count transferred to the buffer during
+ * the last DMA shot plus the data count which has been sent /
+ * retrieved since the beginning of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_absput(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_absput);
+
+/**
+ * @brief Set the count of data which is to be sent to the buffer at
+ * the next DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_put(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_put);
+
+/**
+ * @brief Set the count of data sent to the buffer during the last
+ * completed DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The amount of data transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_put(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_put);
+
+/**
+ * @brief Copy some data from the device driver to the buffer
+ *
+ * The function a4l_buf_put() must copy data coming from some
+ * acquisition device to the Analogy buffer. This ring-buffer is an
+ * intermediate area between the device driver and the user-space
+ * program, which is supposed to recover the acquired data.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] bufdata The data buffer to copy into the Analogy buffer
+ * @param[in] count The amount of data to copy
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_put(struct a4l_subdevice *subd, void *bufdata, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_put);
+
+/**
+ * @brief Update the absolute count of data sent from the buffer to
+ * the device since the start of the acquisition and after the next
+ * DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred during the next
+ * DMA shot plus the data count which has been copied since the start
+ * of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_absget);
+
+/**
+ * @brief Set the absolute count of data which was sent from the
+ * buffer to the device since the start of the acquisition and until
+ * the last DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count transferred to the device during
+ * the last DMA shot plus the data count which has been sent since
+ * the beginning of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_absget(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_absget);
+
+/**
+ * @brief Set the count of data which is to be sent from the buffer to
+ * the device at the next DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_get(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_get);
+
+/**
+ * @brief Set the count of data sent from the buffer to the device
+ * during the last completed DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The amount of data transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_get(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_get);
+
+/**
+ * @brief Copy some data from the buffer to the device driver
+ *
+ * The function a4l_buf_get() must copy data coming from the Analogy
+ * buffer to some acquisition device. This ring-buffer is an
+ * intermediate area between the device driver and the user-space
+ * program, which is supposed to provide the data to send to the
+ * device.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] bufdata The data buffer to be filled from the Analogy
+ * buffer
+ * @param[in] count The amount of data to copy
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_get(struct a4l_subdevice *subd, void *bufdata, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_get);
+
+/**
+ * @brief Signal some event(s) to a user-space program involved in
+ * some read / write operation
+ *
+ * The function a4l_buf_evt() is useful in many cases:
+ * - To wake up a process waiting for some data to read.
+ * - To wake up a process waiting for some data to write.
+ * - To notify the user process that an error has occurred during the
+ * acquisition.
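+ *
+ * For instance, a driver without DMA support typically feeds the
+ * buffer from its interrupt routine with a4l_buf_put() and then
+ * signals the event with a4l_buf_evt(); a sketch, in which the
+ * my_read_fifo() helper and the sample type are hypothetical:
+ *
+ * @code
+ * static void my_fifo_handler(struct a4l_subdevice *subd)
+ * {
+ *	uint16_t sample = my_read_fifo();
+ *
+ *	// Feed the asynchronous buffer...
+ *	a4l_buf_put(subd, &sample, sizeof(sample));
+ *	// ...and wake up the task waiting for data (no error event)
+ *	a4l_buf_evt(subd, 0);
+ * }
+ * @endcode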
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] evts Some specific event to notify:
+ * - A4L_BUF_ERROR to indicate an error has occurred during the
+ * transfer
+ * - A4L_BUF_EOA to indicate the acquisition is complete (this
+ * event is set automatically; it should not be used).
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
+EXPORT_SYMBOL_GPL(a4l_buf_evt);
+
+/**
+ * @brief Get the data amount available in the Analogy buffer
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the amount of data available in the Analogy buffer.
+ *
+ */
+unsigned long a4l_buf_count(struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_buf_count);
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @brief Get the current Analogy command descriptor
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the command descriptor.
+ *
+ */
+struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice * subd);
+
+#endif /* DOXYGEN_CPP */
+
+/**
+ * @brief Get the channel index according to its type
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the channel index.
+ *
+ */
+int a4l_get_chan(struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_get_chan);
+
+/** @} */
+
+/* --- IRQ handling section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_irq Interrupt management services
+ * @{
+ */
+
+/**
+ * @brief Get the interrupt number in use for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ *
+ * @return the line number used or A4L_IRQ_UNUSED if no interrupt
+ * is registered.
+ *
+ */
+unsigned int a4l_get_irq(struct a4l_device * dev);
+EXPORT_SYMBOL_GPL(a4l_get_irq);
+
+/**
+ * @brief Register an interrupt handler for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] irq Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags:
+ * - RTDM_IRQTYPE_SHARED: enable IRQ-sharing with other drivers
+ * (Warning: real-time drivers and non-real-time drivers cannot
+ * share an interrupt line).
+ * - RTDM_IRQTYPE_EDGE: mark IRQ as edge-triggered (Warning: this flag
+ * is meaningless in RTDM-less context).
+ * - A4L_IRQ_DISABLED: keep IRQ disabled when calling the action
+ * handler (Warning: this flag is ignored in RTDM-enabled
+ * configuration).
+ * @param[in] cookie Pointer to be passed to the interrupt handler on
+ * invocation
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_request_irq(struct a4l_device * dev,
+		    unsigned int irq,
+		    a4l_irq_hdlr_t handler,
+		    unsigned long flags, void *cookie);
+EXPORT_SYMBOL_GPL(a4l_request_irq);
+
+/**
+ * @brief Release an interrupt handler for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] irq Line number of the addressed IRQ
+ *
+ * @return 0 on success, otherwise negative error code.
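+ *
+ * @note a4l_request_irq() and a4l_free_irq() are normally paired in
+ * the attach / detach callbacks; an illustrative sketch, in which
+ * my_handler is a hypothetical handler:
+ * @code
+ * // In the attach callback:
+ * err = a4l_request_irq(dev, irq, my_handler, RTDM_IRQTYPE_SHARED, dev);
+ *
+ * // In the detach callback:
+ * if (a4l_get_irq(dev) != A4L_IRQ_UNUSED)
+ *	a4l_free_irq(dev, a4l_get_irq(dev));
+ * @endcode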
+ *
+ */
+int a4l_free_irq(struct a4l_device * dev, unsigned int irq);
+EXPORT_SYMBOL_GPL(a4l_free_irq);
+
+/** @} */
+
+/* --- Misc section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_misc Misc services
+ * @{
+ */
+
+/**
+ * @brief Get the absolute time in nanoseconds
+ *
+ * @return the absolute time expressed in nanoseconds
+ *
+ */
+unsigned long long a4l_get_time(void);
+EXPORT_SYMBOL_GPL(a4l_get_time);
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c
new file mode 100644
index 0000000..1cbdb14
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c
@@ -0,0 +1,427 @@
+/*
+ * Analogy for Linux, instruction related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/div64.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+int a4l_do_insn_gettime(struct a4l_kernel_instruction * dsc)
+{
+	nanosecs_abs_t ns;
+	uint32_t ns2;
+
+	unsigned int *data = (unsigned int *)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size != 2 * sizeof(unsigned int)) {
+		__a4l_err("a4l_do_insn_gettime: data size should be 2\n");
+		return -EINVAL;
+	}
+
+	/* Get a timestamp */
+	ns = a4l_get_time();
+
+	/* Perform the conversion */
+	ns2 = do_div(ns, 1000000000);
+	data[0] = (unsigned int) ns;
+	data[1] = (unsigned int) ns2 / 1000;
+
+	return 0;
+}
+
+int a4l_do_insn_wait(struct a4l_kernel_instruction * dsc)
+{
+	unsigned int us;
+	unsigned int *data = (unsigned int *)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size != sizeof(unsigned int)) {
+		__a4l_err("a4l_do_insn_wait: data size should be 1\n");
+		return -EINVAL;
+	}
+
+	if (data[0] > A4L_INSN_WAIT_MAX) {
+		__a4l_err("a4l_do_insn_wait: wait duration is out of range\n");
+		return -EINVAL;
+	}
+
+	/* As we use (a4l_)udelay, we have to convert the delay into
+	   microseconds */
+	us = data[0] / 1000;
+
+	/* The delay is rounded up to at least 1 microsecond */
+	if (us == 0)
+		us = 1;
+
+	/* Perform the busy wait */
+	a4l_udelay(us);
+
+	return 0;
+}
+
+int a4l_do_insn_trig(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	struct a4l_subdevice *subd;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	unsigned int trignum;
+	unsigned int *data = (unsigned int*)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size > 1) {
+		__a4l_err("a4l_do_insn_trig: data size should not be > 1\n");
+		return -EINVAL;
+	}
+
+	trignum = (dsc->data_size == sizeof(unsigned int)) ? data[0] : 0;
+
+	if (dsc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_do_insn_trig: "
+			  "subdevice index is out of range\n");
+		return -EINVAL;
+	}
+
+	subd = dev->transfer.subds[dsc->idx_subd];
+
+	/* Checks that the selected subdevice is trigger-capable */
+	if ((subd->flags & A4L_SUBD_CMD) == 0 || subd->trigger == NULL) {
+		__a4l_err("a4l_do_insn_trig: subdevice does not support "
+			  "triggering or asynchronous acquisition\n");
+		return -EINVAL;
+	}
+
+	/* Performs the trigger */
+	return subd->trigger(subd, trignum);
+}
+
+int a4l_fill_insndsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	void *tmp_data = NULL;
+
+	ret = rtdm_safe_copy_from_user(fd,
+				       dsc, arg, sizeof(a4l_insn_t));
+	if (ret != 0)
+		goto out_insndsc;
+
+	if (dsc->data_size != 0 && dsc->data == NULL) {
+		__a4l_err("a4l_fill_insndsc: no data pointer specified\n");
+		ret = -EINVAL;
+		goto out_insndsc;
+	}
+
+	if (dsc->data_size != 0 && dsc->data != NULL) {
+		tmp_data = rtdm_malloc(dsc->data_size);
+		if (tmp_data == NULL) {
+			ret = -ENOMEM;
+			goto out_insndsc;
+		}
+
+		if ((dsc->type & A4L_INSN_MASK_WRITE) != 0) {
+			ret = rtdm_safe_copy_from_user(fd,
+						       tmp_data, dsc->data,
+						       dsc->data_size);
+			if (ret < 0)
+				goto out_insndsc;
+		}
+	}
+
+	dsc->__udata = dsc->data;
+	dsc->data = tmp_data;
+
+out_insndsc:
+
+	if (ret != 0 && tmp_data != NULL)
+		rtdm_free(tmp_data);
+
+	return ret;
+}
+
+int a4l_free_insndsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+
+	if ((dsc->type & A4L_INSN_MASK_READ) != 0)
+		ret = rtdm_safe_copy_to_user(fd,
+					     dsc->__udata,
+					     dsc->data, dsc->data_size);
+
+	if (dsc->data != NULL)
+		rtdm_free(dsc->data);
+
+	return ret;
+}
+
+int a4l_do_special_insn(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	int ret = 0;
+
+	switch (dsc->type) {
+	case A4L_INSN_GTOD:
+		ret = a4l_do_insn_gettime(dsc);
+		break;
+	case A4L_INSN_WAIT:
+		ret = a4l_do_insn_wait(dsc);
+		break;
+	case A4L_INSN_INTTRIG:
+		ret = a4l_do_insn_trig(cxt, dsc);
+		break;
+	default:
+		__a4l_err("a4l_do_special_insn: "
+			  "invalid instruction code\n");
+		return -EINVAL;
+	}
+
+	if (ret < 0)
+		__a4l_err("a4l_do_special_insn: "
+			  "execution of the instruction failed (err=%d)\n",
+			  ret);
+
+	return ret;
+}
+
+int a4l_do_insn(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	int ret = 0;
+	struct a4l_subdevice *subd;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	int (*hdlr) (struct a4l_subdevice *, struct a4l_kernel_instruction *) = NULL;
+
+	/* Checks the subdevice index */
+	if (dsc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_do_insn: "
+			  "subdevice index out of range (idx=%d)\n",
+			  dsc->idx_subd);
+		return -EINVAL;
+	}
+
+	/* Recovers a pointer to the proper subdevice */
+	subd = dev->transfer.subds[dsc->idx_subd];
+
+	/* Checks the subdevice's characteristics */
+	if ((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED) {
+		__a4l_err("a4l_do_insn: wrong subdevice selected\n");
+		return -EINVAL;
+	}
+
+	/* Checks the channel descriptor */
+	if ((subd->flags & A4L_SUBD_TYPES) != A4L_SUBD_CALIB) {
+		ret = a4l_check_chanlist(dev->transfer.subds[dsc->idx_subd],
+					 1, &dsc->chan_desc);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Chooses the proper handler; we can check the pointer because
+	   the subdevice was memset to 0 at allocation time */
+	switch (dsc->type) {
+	case A4L_INSN_READ:
+		hdlr = subd->insn_read;
+		break;
+	case A4L_INSN_WRITE:
+		hdlr = subd->insn_write;
+		break;
+	case A4L_INSN_BITS:
+		hdlr = subd->insn_bits;
+		break;
+	case A4L_INSN_CONFIG:
+		hdlr = subd->insn_config;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	/* We check the instruction type */
+	if (ret < 0)
+		return ret;
+
+	/* We check whether a handler is available */
+	if (hdlr == NULL)
+		return -ENOSYS;
+
+	/* Prevents the subdevice from being used during
+	   the following operations */
+	if (test_and_set_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		ret = -EBUSY;
+		goto out_do_insn;
+	}
+
+	/* Lets the driver-specific code perform the instruction */
+	ret = hdlr(subd, dsc);
+
+	if (ret < 0)
+		__a4l_err("a4l_do_insn: "
+			  "execution of the instruction failed (err=%d)\n",
+			  ret);
+
+out_do_insn:
+
+	/* Releases the subdevice from its reserved state */
+	clear_bit(A4L_SUBD_BUSY_NR, &subd->status);
+
+	return ret;
+}
+
+int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	struct a4l_kernel_instruction insn;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_insn: unattached device\n");
+		return -EINVAL;
+	}
+
+	/* Recovers the instruction descriptor */
+	ret = a4l_fill_insndsc(cxt, &insn, arg);
+	if (ret != 0)
+		goto err_ioctl_insn;
+
+	/* Performs the instruction */
+	if ((insn.type & A4L_INSN_MASK_SPECIAL) != 0)
+		ret = a4l_do_special_insn(cxt, &insn);
+	else
+		ret = a4l_do_insn(cxt, &insn);
+
+	if (ret < 0)
+		goto err_ioctl_insn;
+
+	/* Frees the used memory and sends back some
+	   data, if need be */
+	ret = a4l_free_insndsc(cxt, &insn);
+
+	return ret;
+
+err_ioctl_insn:
+	a4l_free_insndsc(cxt, &insn);
+	return ret;
+}
+
+int a4l_fill_ilstdsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction_list * dsc, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+
+	dsc->insns = NULL;
+
+	/* Recovers the structure from user space */
+	ret = rtdm_safe_copy_from_user(fd,
+				       dsc, arg, sizeof(a4l_insnlst_t));
+	if (ret < 0)
+		return ret;
+
+	/* Some basic checking */
+	if (dsc->count == 0) {
+		__a4l_err("a4l_fill_ilstdsc: instruction list's count is 0\n");
+		return -EINVAL;
+	}
+
+	/* Keeps the user pointer in an opaque field */
+	dsc->__uinsns = (a4l_insn_t *)dsc->insns;
+
+	dsc->insns = rtdm_malloc(dsc->count * sizeof(struct a4l_kernel_instruction));
+	if (dsc->insns == NULL)
+		return -ENOMEM;
+
+	/* Recovers the instructions, one by one. This part is not
+	   optimized */
+	for (i = 0; i < dsc->count && ret == 0; i++)
+		ret = a4l_fill_insndsc(cxt,
+				       &(dsc->insns[i]),
+				       &(dsc->__uinsns[i]));
+
+	/* In case of error, frees the allocated memory */
+	if (ret < 0 && dsc->insns != NULL)
+		rtdm_free(dsc->insns);
+
+	return ret;
+}
+
+int a4l_free_ilstdsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction_list * dsc)
+{
+	int i, ret = 0;
+
+	if (dsc->insns != NULL) {
+
+		for (i = 0; i < dsc->count && ret == 0; i++)
+			ret = a4l_free_insndsc(cxt, &(dsc->insns[i]));
+
+		while (i < dsc->count) {
+			a4l_free_insndsc(cxt, &(dsc->insns[i]));
+			i++;
+		}
+
+		rtdm_free(dsc->insns);
+	}
+
+	return ret;
+}
+
+/* This function is not optimized in terms of memory footprint or
+   CPU load; however, the whole Analogy instruction system was not
+   designed with performance in mind */
+int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	struct a4l_kernel_instruction_list ilst;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_insnlist: unattached device\n");
+		return -EINVAL;
+	}
+
+	if ((ret = a4l_fill_ilstdsc(cxt, &ilst, arg)) < 0)
+		return ret;
+
+	/* Performs the instructions */
+	for (i = 0; i < ilst.count && ret == 0; i++) {
+		if ((ilst.insns[i].type & A4L_INSN_MASK_SPECIAL) != 0)
+			ret = a4l_do_special_insn(cxt, &ilst.insns[i]);
+		else
+			ret = a4l_do_insn(cxt, &ilst.insns[i]);
+	}
+
+	if (ret < 0)
+		goto err_ioctl_ilst;
+
+	return a4l_free_ilstdsc(cxt, &ilst);
+
+err_ioctl_ilst:
+	a4l_free_ilstdsc(cxt, &ilst);
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c
new file mode 100644
index 0000000..1abe250
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c
@@ -0,0 +1,331 @@
+/*
+ * Analogy subdevice driver for 8255 chip
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <rtdm/analogy/device.h> + +#include "8255.h" + +#define CALLBACK_ARG (((subd_8255_t *)subd->priv)->cb_arg) +#define CALLBACK_FUNC (((subd_8255_t *)subd->priv)->cb_func) + +/* Channels descriptor */ +static struct a4l_channels_desc chandesc_8255 = { + .mode = A4L_CHAN_GLOBAL_CHANDESC, + .length = 24, + .chans = { + {A4L_CHAN_AREF_GROUND, sizeof(sampl_t)}, + }, +}; + +/* Command options mask */ +static struct a4l_cmd_desc cmd_mask_8255 = { + .idx_subd = 0, + .start_src = TRIG_NOW, + .scan_begin_src = TRIG_EXT, + .convert_src = TRIG_FOLLOW, + .scan_end_src = TRIG_COUNT, + .stop_src = TRIG_NONE, +}; + +void a4l_subdev_8255_interrupt(struct a4l_subdevice *subd) +{ + sampl_t d; + + /* Retrieve the sample... */ + d = CALLBACK_FUNC(0, _8255_DATA, 0, CALLBACK_ARG); + d |= (CALLBACK_FUNC(0, _8255_DATA + 1, 0, CALLBACK_ARG) << 8); + + /* ...and send it */ + a4l_buf_put(subd, &d, sizeof(sampl_t)); + + a4l_buf_evt(subd, 0); +} +EXPORT_SYMBOL_GPL(a4l_subdev_8255_interrupt); + +static int subdev_8255_cb(int dir, int port, int data, unsigned long arg) +{ + unsigned long iobase = arg; + + if (dir) { + outb(data, iobase + port); + return 0; + } else { + return inb(iobase + port); + } +} + +static void do_config(struct a4l_subdevice *subd) +{ + int config; + subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv; + + config = CR_CW; + /* 1 in io_bits indicates output, 1 in config indicates input */ + if (!(subd_8255->io_bits & 0x0000ff)) + config |= CR_A_IO; + if (!(subd_8255->io_bits & 0x00ff00)) + config |= CR_B_IO; + if (!(subd_8255->io_bits & 0x0f0000)) + config |= CR_C_LO_IO; + if (!(subd_8255->io_bits & 0xf00000)) + config |= CR_C_HI_IO; + CALLBACK_FUNC(1, _8255_CR, config, CALLBACK_ARG); +} + +int subd_8255_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + /* FIXME */ + return 0; +} + +int subd_8255_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + if (cmd->start_arg != 0) { + cmd->start_arg = 0; + return -EINVAL; + } + if (cmd->scan_begin_arg != 0) { + cmd->scan_begin_arg = 0; + return -EINVAL; + } + if (cmd->convert_arg != 0) { + cmd->convert_arg = 0; + return -EINVAL; + } + if (cmd->scan_end_arg != 1) { + cmd->scan_end_arg = 1; + return -EINVAL; + } + if (cmd->stop_arg != 0) { + cmd->stop_arg = 0; + return -EINVAL; + } + + return 0; +} + +void subd_8255_cancel(struct a4l_subdevice *subd) +{ + /* FIXME */ +} + +int subd_8255_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv; + uint32_t *data = (uint32_t *)insn->data; + + if (data[0]) { + + subd_8255->status &= ~data[0]; + subd_8255->status |= (data[0] & data[1]); + + if (data[0] & 0xff) + CALLBACK_FUNC(1, _8255_DATA, + subd_8255->status & 0xff, CALLBACK_ARG); + if (data[0] & 0xff00) + CALLBACK_FUNC(1, _8255_DATA + 1, + (subd_8255->status >> 8) & 0xff, + CALLBACK_ARG); + if (data[0] & 0xff0000) + CALLBACK_FUNC(1, _8255_DATA + 2, + (subd_8255->status >> 16) & 0xff, + CALLBACK_ARG); + } + + data[1] = CALLBACK_FUNC(0, _8255_DATA, 0, CALLBACK_ARG); + data[1] |= (CALLBACK_FUNC(0, _8255_DATA + 1, 0, CALLBACK_ARG) << 8); + data[1] |= (CALLBACK_FUNC(0, _8255_DATA + 2, 0, CALLBACK_ARG) << 16); + + return 0; +} + +int subd_8255_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + unsigned int mask; + unsigned int bits; + subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv; + unsigned int *data = (unsigned int *)insn->data; + + mask = 1 
<< CR_CHAN(insn->chan_desc);
+
+	if (mask & 0x0000ff) {
+		bits = 0x0000ff;
+	} else if (mask & 0x00ff00) {
+		bits = 0x00ff00;
+	} else if (mask & 0x0f0000) {
+		bits = 0x0f0000;
+	} else {
+		bits = 0xf00000;
+	}
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subd_8255->io_bits &= ~bits;
+		break;
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subd_8255->io_bits |= bits;
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (subd_8255->io_bits & bits) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	do_config(subd);
+
+	return 0;
+}
+
+void a4l_subdev_8255_init(struct a4l_subdevice *subd)
+{
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	/* Initializes the subdevice structure */
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	/* Subdevice filling part */
+
+	subd->flags = A4L_SUBD_DIO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->chan_desc = &chandesc_8255;
+	subd->insn_bits = subd_8255_insn_bits;
+	subd->insn_config = subd_8255_insn_config;
+
+	if(subd_8255->have_irq) {
+		subd->cmd_mask = &cmd_mask_8255;
+		subd->do_cmdtest = subd_8255_cmdtest;
+		subd->do_cmd = subd_8255_cmd;
+		subd->cancel = subd_8255_cancel;
+	}
+
+	/* 8255 setting part */
+
+	if(CALLBACK_FUNC == NULL)
+		CALLBACK_FUNC = subdev_8255_cb;
+
+	do_config(subd);
+}
+EXPORT_SYMBOL_GPL(a4l_subdev_8255_init);
+
+/*
+
+  Start of the 8255 standalone device
+
+*/
+
+static int dev_8255_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	unsigned long *addrs;
+	int i, err = 0;
+
+	if(arg->opts == NULL || arg->opts_size == 0) {
+		a4l_err(dev,
+			"dev_8255_attach: unable to detect any 8255 chip; "
+			"chip addresses must be passed as attach arguments\n");
+		return -EINVAL;
+	}
+
+	addrs = (unsigned long*) arg->opts;
+
+	for(i = 0; i < (arg->opts_size / sizeof(unsigned long)); i++) {
+		struct a4l_subdevice * subd;
+		subd_8255_t *subd_8255;
+
+		subd = a4l_alloc_subd(sizeof(subd_8255_t), NULL);
+		if(subd == NULL) {
+			a4l_err(dev,
+				"dev_8255_attach: "
+				"unable to allocate subdevice\n");
+			/* There is no need to free previously allocated
+			   structure(s); the Analogy layer will do it
+			   for us */
+			err = -ENOMEM;
+			goto out_attach;
+		}
+
+		memset(subd, 0, sizeof(struct a4l_subdevice));
+		memset(subd->priv, 0, sizeof(subd_8255_t));
+
+		subd_8255 = (subd_8255_t *)subd->priv;
+
+		if(request_region(addrs[i], _8255_SIZE, "Analogy 8255") == 0) {
+			subd->flags = A4L_SUBD_UNUSED;
+			a4l_warn(dev,
+				 "dev_8255_attach: "
+				 "I/O port conflict at 0x%lx\n", addrs[i]);
+		}
+		else {
+			subd_8255->cb_arg = addrs[i];
+			a4l_subdev_8255_init(subd);
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if(err < 0) {
+			a4l_err(dev,
+				"dev_8255_attach: "
+				"a4l_add_subd() failed (err=%d)\n", err);
+			goto out_attach;
+		}
+	}
+
+out_attach:
+	return err;
+}
+
+static int dev_8255_detach(struct a4l_device *dev)
+{
+	struct a4l_subdevice *subd;
+	int i = 0;
+
+	while((subd = a4l_get_subd(dev, i++)) != NULL) {
+		subd_8255_t *subd_8255 = (subd_8255_t *) subd->priv;
+		if(subd_8255 != NULL && subd_8255->cb_arg != 0)
+			release_region(subd_8255->cb_arg, _8255_SIZE);
+	}
+
+	return 0;
+}
+
+static struct a4l_driver drv_8255 = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_8255",
+	.driver_name = "8255",
+	.attach = dev_8255_attach,
+	.detach = dev_8255_detach,
+	.privdata_size = 0,
+};
+
+static int __init drv_8255_init(void)
+{
+	return a4l_register_drv(&drv_8255);
+}
+
+static void __exit drv_8255_cleanup(void)
+{
+	a4l_unregister_drv(&drv_8255);
+}
+MODULE_DESCRIPTION("Analogy driver for 8255 chip");
+MODULE_LICENSE("GPL");
+
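+/*
+ * Reuse sketch: a board driver embedding an 8255 chip would typically
+ * set up the subdevice itself instead of relying on this standalone
+ * device, following the same steps as dev_8255_attach() above. The
+ * my_iobase / my_callback names below are hypothetical; cb_func may
+ * also be left NULL to fall back to the default port I/O callback:
+ *
+ *	struct a4l_subdevice *subd =
+ *		a4l_alloc_subd(sizeof(subd_8255_t), NULL);
+ *	subd_8255_t *s;
+ *
+ *	if (subd == NULL)
+ *		return -ENOMEM;
+ *
+ *	s = (subd_8255_t *)subd->priv;
+ *	s->cb_arg = my_iobase;
+ *	s->cb_func = my_callback;
+ *	a4l_subdev_8255_init(subd);
+ *	err = a4l_add_subd(dev, subd);
+ */
+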
+module_init(drv_8255_init); +module_exit(drv_8255_cleanup); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h new file mode 100644 index 0000000..31b1ed8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h @@ -0,0 +1,60 @@ +/* + * Hardware driver for 8255 chip + * @note Copyright (C) 1999 David A. Schleef <ds@schleef.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef __ANALOGY_8255_H__ +#define __ANALOGY_8255_H__ + +#include <rtdm/analogy/device.h> + +typedef int (*a4l_8255_cb_t)(int, int, int, unsigned long); + +typedef struct subd_8255_struct { + unsigned long cb_arg; + a4l_8255_cb_t cb_func; + unsigned int status; + int have_irq; + int io_bits; +} subd_8255_t; + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_8255) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_8255_MODULE)) + +#define _8255_SIZE 4 + +#define _8255_DATA 0 +#define _8255_CR 3 + +#define CR_C_LO_IO 0x01 +#define CR_B_IO 0x02 +#define CR_B_MODE 0x04 +#define CR_C_HI_IO 0x08 +#define CR_A_IO 0x10 +#define CR_A_MODE(a) ((a)<<5) +#define CR_CW 0x80 + +void a4l_subdev_8255_init(struct a4l_subdevice *subd); +void a4l_subdev_8255_interrupt(struct a4l_subdevice *subd); + +#else /* !CONFIG_XENO_DRIVERS_ANALOGY_8255 */ + +#define a4l_subdev_8255_init(x) do { } while(0) +#define a4l_subdev_8255_interrupt(x) do { } while(0) + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_8255 */ + +#endif /* !__ANALOGY_8255_H__ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig new file mode 100644 index 0000000..6907c83 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig @@ -0,0 +1,10 @@ + +config XENO_DRIVERS_ANALOGY_8255 + depends on XENO_DRIVERS_ANALOGY + tristate "8255 driver" + default n + +config XENO_DRIVERS_ANALOGY_PARPORT + depends on XENO_DRIVERS_ANALOGY && X86 + tristate "Standard parallel port driver" + default n diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile new file mode 100644 index 0000000..94beedc --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile @@ -0,0 +1,10 @@ + +ccflags-y += -I$(srctree)/drivers/xenomai/analogy + +obj-$(CONFIG_XENO_DRIVERS_ANALOGY_8255) += analogy_8255.o + +obj-$(CONFIG_XENO_DRIVERS_ANALOGY_PARPORT) += analogy_parport.o + +analogy_8255-y := 8255.o + +analogy_parport-y := parport.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c new file mode 100644 index 0000000..eb07434 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c @@ -0,0 +1,457 @@ +/* + * Analogy driver for standard 
parallel port
+ * Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+  A cheap and easy way to get a few more digital I/O lines. Steal
+  additional parallel ports from old computers or your neighbors'
+  computers.
+
+  Attach options list:
+  0: I/O port base for the parallel port.
+  1: IRQ
+
+  Parallel Port Lines:
+
+  pin     subdev  chan    aka
+  ---     ------  ----    ---
+  1       2       0       strobe
+  2       0       0       data 0
+  3       0       1       data 1
+  4       0       2       data 2
+  5       0       3       data 3
+  6       0       4       data 4
+  7       0       5       data 5
+  8       0       6       data 6
+  9       0       7       data 7
+  10      1       3       acknowledge
+  11      1       4       busy
+  12      1       2       output
+  13      1       1       printer selected
+  14      2       1       auto LF
+  15      1       0       error
+  16      2       2       init
+  17      2       3       select printer
+  18-25           ground
+
+  Notes:
+
+  Subdevice 0 is digital I/O, subdevice 1 is digital input, and
+  subdevice 2 is digital output. Unlike other Analogy devices,
+  subdevice 0 defaults to output.
+
+  Pins 13 and 14 are inverted once by Analogy and once by the
+  hardware, thus cancelling the effect.
+
+  Pin 1 is a strobe, and acts like one. There's no way in software
+  to change this, at least on a standard parallel port.
+
+  Subdevice 3 pretends to be a digital input subdevice, but it always
+  returns 0 when read. However, if you run a command with
+  scan_begin_src=TRIG_EXT, it uses pin 10 as an external trigger
+  pin, which can be used to wake up tasks.
+
+  see http://www.beyondlogic.org/ for information.
+ or http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html +*/ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/io.h> /* For inb/outb */ +#include <rtdm/analogy/device.h> + +#define PARPORT_SIZE 3 + +#define PARPORT_A 0 +#define PARPORT_B 1 +#define PARPORT_C 2 + +#define DEFAULT_ADDRESS 0x378 +#define DEFAULT_IRQ 7 + +typedef struct parport_subd_priv { + unsigned long io_bits; +} parport_spriv_t; + +typedef struct parport_priv { + unsigned long io_base; + unsigned int a_data; + unsigned int c_data; + int enable_irq; +} parport_priv_t; + +#define devpriv ((parport_priv_t *)(dev->priv)) + +static int parport_insn_a(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint8_t *data = (uint8_t *)insn->data; + + if (data[0]) { + devpriv->a_data &= ~data[0]; + devpriv->a_data |= (data[0] & data[1]); + + outb(devpriv->a_data, devpriv->io_base + PARPORT_A); + } + + data[1] = inb(devpriv->io_base + PARPORT_A); + + return 0; +} + +static int parport_insn_config_a(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + parport_spriv_t *spriv = (parport_spriv_t *)subd->priv; + unsigned int *data = (unsigned int *)insn->data; + + /* No need to check the channel descriptor; the input / output + setting is global for all channels */ + + switch (data[0]) { + + case A4L_INSN_CONFIG_DIO_OUTPUT: + spriv->io_bits = 0xff; + devpriv->c_data &= ~(1 << 5); + break; + + case A4L_INSN_CONFIG_DIO_INPUT: + spriv->io_bits = 0; + devpriv->c_data |= (1 << 5); + break; + + case A4L_INSN_CONFIG_DIO_QUERY: + data[1] = (spriv->io_bits == 0xff) ? + A4L_OUTPUT: A4L_INPUT; + break; + + default: + return -EINVAL; + } + + outb(devpriv->c_data, devpriv->io_base + PARPORT_C); + + return 0; +} + +static int parport_insn_b(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint8_t *data = (uint8_t *)insn->data; + + if (data[0]) { + /* should writes be ignored? 
*/
+	}
+
+	data[1] = (inb(devpriv->io_base + PARPORT_B) >> 3);
+
+	return 0;
+}
+
+static int parport_insn_c(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] &= 0x0f;
+	if (data[0]) {
+		devpriv->c_data &= ~data[0];
+		devpriv->c_data |= (data[0] & data[1]);
+
+		outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+	}
+
+	data[1] = devpriv->c_data & 0xf;
+
+	return 0;
+}
+
+static int parport_intr_insn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (insn->data_size < sizeof(uint8_t))
+		return -EINVAL;
+
+	data[1] = 0;
+	return 0;
+}
+
+static struct a4l_cmd_desc parport_intr_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_FOLLOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+static int parport_intr_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc * cmd)
+{
+	if (cmd->start_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->scan_begin_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->convert_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != 1) {
+		return -EINVAL;
+	}
+	if (cmd->stop_arg != 0) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int parport_intr_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	devpriv->c_data |= 0x10;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	devpriv->enable_irq = 1;
+
+	return 0;
+}
+
+static void parport_intr_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	a4l_info(dev, "cancel in progress\n");
+
+	devpriv->c_data &= ~0x10;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	devpriv->enable_irq = 0;
+}
+
+static int parport_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	struct a4l_subdevice *subd = a4l_get_subd(dev, 3);
+
+	if (!devpriv->enable_irq) {
+		a4l_err(dev, "parport_interrupt: bogus irq, ignored\n");
+		return IRQ_NONE;
+	}
+
+	a4l_buf_put(subd, 0, sizeof(unsigned int));
+	a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+
+/* --- Channels descriptor --- */
+
+static struct a4l_channels_desc parport_chan_desc_a = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_b = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 5,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_c = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 4,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_intr = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 1,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+/* --- Subdevice initialization functions --- */
+
+static void setup_subd_a(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DIO;
+	subd->chan_desc = &parport_chan_desc_a;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_a;
+	subd->insn_config = parport_insn_config_a;
+}
+
+static void setup_subd_b(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DI;
+	subd->chan_desc = &parport_chan_desc_b;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_b;
+}
+
+static void setup_subd_c(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DO;
+	subd->chan_desc = &parport_chan_desc_c;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits =
parport_insn_c; +} + +static void setup_subd_intr(struct a4l_subdevice *subd) +{ + subd->flags = A4L_SUBD_DI; + subd->chan_desc = &parport_chan_desc_intr; + subd->rng_desc = &range_digital; + subd->insn_bits = parport_intr_insn; + subd->cmd_mask = &parport_intr_cmd_mask; + subd->do_cmdtest = parport_intr_cmdtest; + subd->do_cmd = parport_intr_cmd; + subd->cancel = parport_intr_cancel; +} + +static void (*setup_subds[3])(struct a4l_subdevice *) = { + setup_subd_a, + setup_subd_b, + setup_subd_c +}; + +static int dev_parport_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg) +{ + int i, err = 0, irq = A4L_IRQ_UNUSED; + unsigned long io_base; + + if(arg->opts == NULL || arg->opts_size < sizeof(unsigned long)) { + + a4l_warn(dev, + "dev_parport_attach: no attach options specified, " + "taking default options (addr=0x%x, irq=%d)\n", + DEFAULT_ADDRESS, DEFAULT_IRQ); + + io_base = DEFAULT_ADDRESS; + irq = DEFAULT_IRQ; + } else { + + io_base = ((unsigned long *)arg->opts)[0]; + + if (arg->opts_size >= 2 * sizeof(unsigned long)) + irq = (int) ((unsigned long *)arg->opts)[1]; + } + + if (!request_region(io_base, PARPORT_SIZE, "analogy_parport")) { + a4l_err(dev, "dev_parport_attach: I/O port conflict"); + return -EIO; + } + + a4l_info(dev, "address = 0x%lx\n", io_base); + + for (i = 0; i < 3; i++) { + + struct a4l_subdevice *subd = a4l_alloc_subd(sizeof(parport_spriv_t), + setup_subds[i]); + if (subd == NULL) + return -ENOMEM; + + err = a4l_add_subd(dev, subd); + if (err != i) + return err; + } + + if (irq != A4L_IRQ_UNUSED) { + + struct a4l_subdevice *subd; + + a4l_info(dev, "irq = %d\n", irq); + + err = a4l_request_irq(dev, irq, parport_interrupt, 0, dev); + if (err < 0) { + a4l_err(dev, "dev_parport_attach: irq not available\n"); + return err; + } + + subd = a4l_alloc_subd(0, setup_subd_intr); + if (subd == NULL) + return -ENOMEM; + + err = a4l_add_subd(dev, subd); + if (err < 0) + return err; + } + + devpriv->io_base = io_base; + + devpriv->a_data = 0; + outb(devpriv->a_data, devpriv->io_base + PARPORT_A); + + devpriv->c_data = 0; + outb(devpriv->c_data, devpriv->io_base + PARPORT_C); + + return 0; +} + +static int dev_parport_detach(struct a4l_device *dev) +{ + int err = 0; + + if (devpriv->io_base != 0) + release_region(devpriv->io_base, PARPORT_SIZE); + + if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) { + a4l_free_irq(dev, a4l_get_irq(dev)); + } + + + return err; +} + +static struct a4l_driver drv_parport = { + .owner = THIS_MODULE, + .board_name = "analogy_parport", + .driver_name = "parport", + .attach = dev_parport_attach, + .detach = dev_parport_detach, + .privdata_size = sizeof(parport_priv_t), +}; + +static int __init drv_parport_init(void) +{ + return a4l_register_drv(&drv_parport); +} + +static void __exit drv_parport_cleanup(void) +{ + a4l_unregister_drv(&drv_parport); +} + +MODULE_DESCRIPTION("Analogy driver for standard parallel port"); +MODULE_LICENSE("GPL"); + +module_init(drv_parport_init); +module_exit(drv_parport_cleanup); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig new file mode 100644 index 0000000..bd1687a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig @@ -0,0 +1,42 @@ + +config XENO_DRIVERS_ANALOGY_NI_MITE + depends on XENO_DRIVERS_ANALOGY && PCI + tristate "NI MITE driver" + default n + +config XENO_DRIVERS_ANALOGY_NI_TIO + depends on XENO_DRIVERS_ANALOGY + tristate "NI TIO driver" + default n + +config 
XENO_DRIVERS_ANALOGY_NI_MIO + depends on XENO_DRIVERS_ANALOGY && XENO_DRIVERS_ANALOGY_NI_TIO && PCI + tristate "NI MIO driver" + default n + +config XENO_DRIVERS_ANALOGY_NI_PCIMIO + depends on XENO_DRIVERS_ANALOGY && PCI + select XENO_DRIVERS_ANALOGY_NI_MITE + select XENO_DRIVERS_ANALOGY_NI_TIO + select XENO_DRIVERS_ANALOGY_NI_MIO + select XENO_DRIVERS_ANALOGY_8255 + tristate "NI PCIMIO driver" + default n + +config XENO_DRIVERS_ANALOGY_NI_670x + depends on EXPERIMENTAL && XENO_DRIVERS_ANALOGY && PCI + select XENO_DRIVERS_ANALOGY_NI_MITE + select XENO_DRIVERS_ANALOGY_NI_TIO + select XENO_DRIVERS_ANALOGY_NI_MIO + select XENO_DRIVERS_ANALOGY_8255 + tristate "NI 670X driver (EXPERIMENTAL)" + default n + +config XENO_DRIVERS_ANALOGY_NI_660x + depends on EXPERIMENTAL && XENO_DRIVERS_ANALOGY && PCI + select XENO_DRIVERS_ANALOGY_NI_MITE + select XENO_DRIVERS_ANALOGY_NI_TIO + select XENO_DRIVERS_ANALOGY_NI_MIO + select XENO_DRIVERS_ANALOGY_8255 + tristate "NI 660X driver (EXPERIMENTAL)" + default n diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile new file mode 100644 index 0000000..b4c93d2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile @@ -0,0 +1,16 @@ + +ccflags-y += -I$(srctree)/drivers/xenomai/analogy + +obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) += analogy_ni_mite.o +obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_TIO) += analogy_ni_tio.o +obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_MIO) += analogy_ni_mio.o +obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_PCIMIO) += analogy_ni_pcimio.o +obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_670x) += analogy_ni_670x.o +obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_660x) += analogy_ni_660x.o + +analogy_ni_mite-y := mite.o +analogy_ni_tio-y := tio_common.o +analogy_ni_mio-y := mio_common.o +analogy_ni_pcimio-y := pcimio.o +analogy_ni_670x-y := ni_670x.o +analogy_ni_660x-y := ni_660x.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c new file mode 100644 index 0000000..b071adc --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c @@ -0,0 +1,5590 @@ +/* + * Hardware driver for DAQ-STC based boards + * + * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org> + * Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This code is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Description: DAQ-STC systems + * + * References: + * 340747b.pdf AT-MIO E series Register-Level Programmer Manual + * 341079b.pdf PCI E Series Register-Level Programmer Manual + * 340934b.pdf DAQ-STC reference manual + * 322080b.pdf 6711/6713/6715 User Manual + * 320945c.pdf PCI E Series User Manual + * 322138a.pdf PCI-6052E and DAQPad-6052E User Manual + * 320517c.pdf AT E Series User manual (obsolete) + * 320517f.pdf AT E Series User manual + * 320906c.pdf Maximum signal ratings + * 321066a.pdf About 16x + * 321791a.pdf Discontinuation of at-mio-16e-10 rev. c + * 321808a.pdf About at-mio-16e-10 rev P + * 321837a.pdf Discontinuation of at-mio-16de-10 rev d + * 321838a.pdf About at-mio-16de-10 rev N + * + * ISSUES: + * - The interrupt routine needs to be cleaned up + * - S-Series PCI-6143 support has been added but is not fully tested + * as yet. Terry Barnaby, BEAM Ltd. + * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include "../intel/8255.h" +#include "mite.h" +#include "ni_stc.h" +#include "ni_mio.h" + +#define NI_TIMEOUT 1000 + +/* Note: this table must match the ai_gain_* definitions */ +static const short ni_gainlkup[][16] = { + /* ai_gain_16 */ + {0, 1, 2, 3, 4, 5, 6, 7, 0x100, 0x101, 0x102, 0x103, 0x104, 0x105, + 0x106, 0x107}, + /* ai_gain_8 */ + {1, 2, 4, 7, 0x101, 0x102, 0x104, 0x107}, + /* ai_gain_14 */ + {1, 2, 3, 4, 5, 6, 7, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106, + 0x107}, + /* ai_gain_4 */ + {0, 1, 4, 7}, + /* ai_gain_611x */ + {0x00a, 0x00b, 0x001, 0x002, 0x003, 0x004, 0x005, 0x006}, + /* ai_gain_622x */ + {0, 1, 4, 5}, + /* ai_gain_628x */ + {1, 2, 3, 4, 5, 6, 7}, + /* ai_gain_6143 */ + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, +}; + +struct a4l_rngtab rng_ni_E_ai = {16, { + RANGE_V(-10, 10), + RANGE_V(-5, 5), + RANGE_V(-2.5, 2.5), + RANGE_V(-1, 1), + RANGE_V(-0.5, 0.5), + RANGE_V(-0.25, 0.25), + RANGE_V(-0.1, 0.1), + RANGE_V(-0.05, 0.05), + RANGE_V(0, 20), + RANGE_V(0, 10), + RANGE_V(0, 5), + RANGE_V(0, 2), + RANGE_V(0, 1), + RANGE_V(0, 0.5), + RANGE_V(0, 0.2), + RANGE_V(0, 0.1), +}}; +struct a4l_rngdesc a4l_range_ni_E_ai = + RNG_GLOBAL(rng_ni_E_ai); + +struct a4l_rngtab rng_ni_E_ai_limited = {8, { + RANGE_V(-10, 10), + RANGE_V(-5, 5), + RANGE_V(-1, 1), + RANGE_V(-0.1, 0.1), + RANGE_V(0, 10), + RANGE_V(0, 5), + RANGE_V(0, 1), + RANGE_V(0, 0.1), +}}; +struct a4l_rngdesc a4l_range_ni_E_ai_limited = + RNG_GLOBAL(rng_ni_E_ai_limited); + +struct a4l_rngtab rng_ni_E_ai_limited14 = {14, { + RANGE_V(-10, 10), + RANGE_V(-5, 5), + RANGE_V(-2, 2), + RANGE_V(-1, 1), + RANGE_V(-0.5, 0.5), + RANGE_V(-0.2, 0.2), + RANGE_V(-0.1, 0.1), + RANGE_V(0, 10), + RANGE_V(0, 5), + RANGE_V(0, 2), + RANGE_V(0, 1), + RANGE_V(0, 0.5), + RANGE_V(0, 0.2), + RANGE_V(0, 0.1), +}}; +struct a4l_rngdesc a4l_range_ni_E_ai_limited14 = + RNG_GLOBAL(rng_ni_E_ai_limited14); + +struct a4l_rngtab rng_ni_E_ai_bipolar4 = {4, { + RANGE_V(-10,10), + RANGE_V(-5, 5), + RANGE_V(-0.5, 0.5), + RANGE_V(-0.05, 0.05), +}}; +struct a4l_rngdesc a4l_range_ni_E_ai_bipolar4 = + RNG_GLOBAL(rng_ni_E_ai_bipolar4); + +struct a4l_rngtab rng_ni_E_ai_611x = {8, { + RANGE_V(-50, 50), + RANGE_V(-20, 20), + RANGE_V(-10, 10), + RANGE_V(-5, 5), + RANGE_V(-2, 2), + RANGE_V(-1, 1), + RANGE_V(-0.5, 0.5), + RANGE_V(-0.2, 0.2), +}}; +struct a4l_rngdesc a4l_range_ni_E_ai_611x = + RNG_GLOBAL(rng_ni_E_ai_611x); + +struct a4l_rngtab rng_ni_M_ai_622x = {4, { + RANGE_V(-10, 10), + RANGE_V(-5, 5), + RANGE_V(-1, 1), + RANGE_V(-0.2, 0.2), +}}; +struct a4l_rngdesc a4l_range_ni_M_ai_622x = + RNG_GLOBAL(rng_ni_M_ai_622x); + 
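As a user-space aside, an Analogy range entry such as RANGE_V(-10, 10) is interpreted linearly between its two bounds. The sketch below shows that conversion for an unsigned straight-binary code; the function name and parameters are illustrative only, assuming a linear range and an adbits-wide converter, which matches how these tables are consumed here:

static double raw_to_volts(unsigned int raw, double rng_min, double rng_max,
			   unsigned int adbits)
{
	/* Full-scale code for an adbits-wide converter, e.g. 0xffff for 16 bits. */
	unsigned int full_scale = (1U << adbits) - 1;

	/* Linear interpolation between the range bounds. */
	return rng_min + (rng_max - rng_min) * ((double)raw / (double)full_scale);
}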
+struct a4l_rngtab rng_ni_M_ai_628x = {7, { + RANGE_V(-10, 10), + RANGE_V(-5, 5), + RANGE_V(-2, 2), + RANGE_V(-1, 1), + RANGE_V(-0.5, 0.5), + RANGE_V(-0.2, 0.2), + RANGE_V(-0.1, 0.1), +}}; +struct a4l_rngdesc a4l_range_ni_M_ai_628x = + RNG_GLOBAL(rng_ni_M_ai_628x); + +struct a4l_rngtab rng_ni_S_ai_6143 = {1, { + RANGE_V(-5, 5), +}}; +struct a4l_rngdesc a4l_range_ni_S_ai_6143 = + RNG_GLOBAL(rng_ni_S_ai_6143); + + +struct a4l_rngtab rng_ni_E_ao_ext = {4, { + RANGE_V(-10, 10), + RANGE_V(0, 10), + RANGE_ext(-1, 1), + RANGE_ext(0, 1), +}}; +struct a4l_rngdesc a4l_range_ni_E_ao_ext = + RNG_GLOBAL(rng_ni_E_ao_ext); + +struct a4l_rngdesc *ni_range_lkup[] = { + &a4l_range_ni_E_ai, + &a4l_range_ni_E_ai_limited, + &a4l_range_ni_E_ai_limited14, + &a4l_range_ni_E_ai_bipolar4, + &a4l_range_ni_E_ai_611x, + &a4l_range_ni_M_ai_622x, + &a4l_range_ni_M_ai_628x, + &a4l_range_ni_S_ai_6143 +}; + +static const int num_adc_stages_611x = 3; + +static void ni_handle_fifo_dregs(struct a4l_subdevice *subd); +static void get_last_sample_611x(struct a4l_subdevice *subd); +static void get_last_sample_6143(struct a4l_subdevice *subd); +static void handle_cdio_interrupt(struct a4l_device *dev); +static void ni_load_channelgain_list(struct a4l_device *dev, + unsigned int n_chan, unsigned int *list); + +#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \ + !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) +static void ni_handle_fifo_half_full(struct a4l_subdevice *subd); +static int ni_ao_fifo_half_empty(struct a4l_subdevice *subd); +#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +static inline void ni_set_bitfield(struct a4l_device *dev, + int reg, + unsigned int bit_mask, + unsigned int bit_values) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->soft_reg_copy_lock, flags); + switch (reg) { + case Interrupt_A_Enable_Register: + devpriv->int_a_enable_reg &= ~bit_mask; + devpriv->int_a_enable_reg |= bit_values & bit_mask; + devpriv->stc_writew(dev, devpriv->int_a_enable_reg, + Interrupt_A_Enable_Register); + break; + case Interrupt_B_Enable_Register: + devpriv->int_b_enable_reg &= ~bit_mask; + devpriv->int_b_enable_reg |= bit_values & bit_mask; + devpriv->stc_writew(dev, devpriv->int_b_enable_reg, + Interrupt_B_Enable_Register); + break; + case IO_Bidirection_Pin_Register: + devpriv->io_bidirection_pin_reg &= ~bit_mask; + devpriv->io_bidirection_pin_reg |= bit_values & bit_mask; + devpriv->stc_writew(dev, devpriv->io_bidirection_pin_reg, + IO_Bidirection_Pin_Register); + break; + case AI_AO_Select: + devpriv->ai_ao_select_reg &= ~bit_mask; + devpriv->ai_ao_select_reg |= bit_values & bit_mask; + ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select); + break; + case G0_G1_Select: + devpriv->g0_g1_select_reg &= ~bit_mask; + devpriv->g0_g1_select_reg |= bit_values & bit_mask; + ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select); + break; + default: + a4l_err(dev, + "Warning %s() called with invalid register\n", + __FUNCTION__); + a4l_err(dev,"reg is %d\n", reg); + break; + } + + mmiowb(); + rtdm_lock_put_irqrestore(&devpriv->soft_reg_copy_lock, flags); +} + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + +static int ni_ai_drain_dma(struct a4l_subdevice *subd); + +static inline void ni_set_ai_dma_channel(struct a4l_device * dev, int channel) +{ + unsigned bitfield; + + if (channel >= 0) { + bitfield = + (ni_stc_dma_channel_select_bitfield(channel) << + AI_DMA_Select_Shift) & AI_DMA_Select_Mask; + } else { + bitfield = 0; + } + 
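+ /* A negative channel clears the AI DMA routing; otherwise the selected MITE channel is encoded into the shared AI_AO_Select shadow register below. */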
ni_set_bitfield(dev, AI_AO_Select, AI_DMA_Select_Mask, bitfield); +} + +static inline void ni_set_ao_dma_channel(struct a4l_device * dev, int channel) +{ + unsigned bitfield; + + if (channel >= 0) { + bitfield = + (ni_stc_dma_channel_select_bitfield(channel) << + AO_DMA_Select_Shift) & AO_DMA_Select_Mask; + } else { + bitfield = 0; + } + ni_set_bitfield(dev, AI_AO_Select, AO_DMA_Select_Mask, bitfield); +} + +static inline void ni_set_gpct_dma_channel(struct a4l_device * dev, + unsigned gpct_index, int mite_channel) +{ + unsigned bitfield; + + if (mite_channel >= 0) { + bitfield = GPCT_DMA_Select_Bits(gpct_index, mite_channel); + } else { + bitfield = 0; + } + ni_set_bitfield(dev, G0_G1_Select, GPCT_DMA_Select_Mask(gpct_index), + bitfield); +} + +static inline void ni_set_cdo_dma_channel(struct a4l_device * dev, int mite_channel) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->soft_reg_copy_lock, flags); + devpriv->cdio_dma_select_reg &= ~CDO_DMA_Select_Mask; + if (mite_channel >= 0) { + /*XXX just guessing + ni_stc_dma_channel_select_bitfield() returns the right + bits, under the assumption the cdio dma selection + works just like ai/ao/gpct. Definitely works for dma + channels 0 and 1. */ + devpriv->cdio_dma_select_reg |= + (ni_stc_dma_channel_select_bitfield(mite_channel) << + CDO_DMA_Select_Shift) & CDO_DMA_Select_Mask; + } + ni_writeb(devpriv->cdio_dma_select_reg, M_Offset_CDIO_DMA_Select); + mmiowb(); + rtdm_lock_put_irqrestore(&devpriv->soft_reg_copy_lock, flags); +} + +static int ni_request_ai_mite_channel(struct a4l_device * dev) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + BUG_ON(devpriv->ai_mite_chan); + devpriv->ai_mite_chan = + mite_request_channel(devpriv->mite, devpriv->ai_mite_ring); + if (devpriv->ai_mite_chan == NULL) { + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, + flags); + a4l_err(dev, + "ni_request_ai_mite_channel: " + "failed to reserve mite dma channel for analog input."); + return -EBUSY; + } + devpriv->ai_mite_chan->dir = A4L_INPUT; + ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel); + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + return 0; +} + +static int ni_request_ao_mite_channel(struct a4l_device * dev) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + BUG_ON(devpriv->ao_mite_chan); + devpriv->ao_mite_chan = + mite_request_channel(devpriv->mite, devpriv->ao_mite_ring); + if (devpriv->ao_mite_chan == NULL) { + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, + flags); + a4l_err(dev, + "ni_request_ao_mite_channel: " + "failed to reserve mite dma channel for analog output."); + return -EBUSY; + } + devpriv->ao_mite_chan->dir = A4L_OUTPUT; + ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel); + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + return 0; +} + +static int ni_request_gpct_mite_channel(struct a4l_device * dev, + unsigned gpct_index, int direction) +{ + unsigned long flags; + struct mite_channel *mite_chan; + + BUG_ON(gpct_index >= NUM_GPCT); + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + BUG_ON(devpriv->counter_dev->counters[gpct_index]->mite_chan); + mite_chan = mite_request_channel(devpriv->mite, + devpriv->gpct_mite_ring[gpct_index]); + if (mite_chan == NULL) { + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, + flags); + a4l_err(dev, + "ni_request_gpct_mite_channel: " + "failed to reserve mite dma channel for counter."); + return -EBUSY; + } + mite_chan->dir = 
direction; + a4l_ni_tio_set_mite_channel(devpriv->counter_dev->counters[gpct_index], + mite_chan); + ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel); + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + return 0; +} + +static int ni_request_cdo_mite_channel(struct a4l_device *dev) +{ + unsigned long flags; + int err = 0; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + + /* No channel should be allocated... */ + BUG_ON(devpriv->cdo_mite_chan); + /* ...until now */ + devpriv->cdo_mite_chan = + mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring); + + if (devpriv->cdo_mite_chan) { + devpriv->cdo_mite_chan->dir = A4L_OUTPUT; + ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel); + } else { + err = -EBUSY; + a4l_err(dev, + "ni_request_cdo_mite_channel: " + "failed to reserve mite dma channel " + "for correlated digital output."); + } + + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + + return err; +} + +void ni_release_ai_mite_channel(struct a4l_device *dev) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + if (devpriv->ai_mite_chan) { + ni_set_ai_dma_channel(dev, -1); + a4l_mite_release_channel(devpriv->ai_mite_chan); + devpriv->ai_mite_chan = NULL; + } + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + +} + +void ni_release_ao_mite_channel(struct a4l_device *dev) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + if (devpriv->ao_mite_chan) { + ni_set_ao_dma_channel(dev, -1); + a4l_mite_release_channel(devpriv->ao_mite_chan); + devpriv->ao_mite_chan = NULL; + } + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + +} + +void ni_release_gpct_mite_channel(struct a4l_device *dev, unsigned gpct_index) +{ + unsigned long flags; + + BUG_ON(gpct_index >= NUM_GPCT); + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + if (devpriv->counter_dev->counters[gpct_index]->mite_chan) { + struct mite_channel *mite_chan = + devpriv->counter_dev->counters[gpct_index]->mite_chan; + + ni_set_gpct_dma_channel(dev, gpct_index, -1); + a4l_ni_tio_set_mite_channel(devpriv->counter_dev-> + counters[gpct_index], NULL); + a4l_mite_release_channel(mite_chan); + } + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + +} + +void ni_release_cdo_mite_channel(struct a4l_device *dev) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + if (devpriv->cdo_mite_chan) { + ni_set_cdo_dma_channel(dev, -1); + a4l_mite_release_channel(devpriv->cdo_mite_chan); + devpriv->cdo_mite_chan = NULL; + } + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + +} + +void ni_sync_ai_dma(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + if (devpriv->ai_mite_chan) + a4l_mite_sync_input_dma(devpriv->ai_mite_chan, subd); + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); +} + +void mite_handle_b_linkc(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + if (devpriv->ao_mite_chan) + a4l_mite_sync_output_dma(devpriv->ao_mite_chan, subd); + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); +} + +static int ni_ao_wait_for_dma_load(struct a4l_subdevice *subd) +{ + static const int timeout = 10000; + + struct a4l_device *dev = subd->dev; + struct a4l_buffer *buf = subd->buf; + + 
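+ /* Poll below until the AO FIFO reports at least half full or the buffer reaches end-of-acquisition, i.e. until DMA has preloaded enough samples for a safe start. */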
int i; + + for (i = 0; i < timeout; i++) { + + int buffer_filled; + unsigned short b_status; + + b_status = devpriv->stc_readw(dev, AO_Status_1_Register); + + buffer_filled = test_bit(A4L_BUF_EOA_NR, &buf->flags); + buffer_filled |= (b_status & AO_FIFO_Half_Full_St); + + if (buffer_filled) + break; + + /* If we poll too often, the pci bus activity seems + to slow the dma transfer down */ + a4l_udelay(10); + } + + if (i == timeout) { + a4l_err(dev, + "ni_ao_wait_for_dma_load: " + "timed out waiting for dma load"); + return -EPIPE; + } + + return 0; +} + + +#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +static inline int ni_ai_drain_dma(struct a4l_subdevice *subd) +{ + return -ENOTSUPP; +} + +static inline int ni_request_ai_mite_channel(struct a4l_device * dev) +{ + return -ENOTSUPP; +} + +static inline int ni_request_ao_mite_channel(struct a4l_device * dev) +{ + return -ENOTSUPP; +} + +static inline +int ni_request_gpct_mite_channel(struct a4l_device * dev, + unsigned gpct_index, int direction) +{ + return -ENOTSUPP; +} + +static inline int ni_request_cdo_mite_channel(struct a4l_device *dev) +{ + return -ENOTSUPP; +} + +#define ni_release_ai_mite_channel(x) do { } while (0) +#define ni_release_ao_mite_channel(x) do { } while (0) +#define ni_release_gpct_mite_channel(x) do { } while (0) +#define ni_release_cdo_mite_channel(x) do { } while (0) +#define ni_sync_ai_dma(x) do { } while (0) +#define mite_handle_b_linkc(x) do { } while (0) + +static inline int ni_ao_wait_for_dma_load(struct a4l_subdevice *subd) +{ + return -ENOTSUPP; +} + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +/* E-series boards use the second irq signals to generate dma requests + for their counters */ +void ni_e_series_enable_second_irq(struct a4l_device *dev, + unsigned gpct_index, short enable) +{ + if (boardtype.reg_type & ni_reg_m_series_mask) + return; + switch (gpct_index) { + case 0: + if (enable) { + devpriv->stc_writew(dev, G0_Gate_Second_Irq_Enable, + Second_IRQ_A_Enable_Register); + } else { + devpriv->stc_writew(dev, 0, + Second_IRQ_A_Enable_Register); + } + break; + case 1: + if (enable) { + devpriv->stc_writew(dev, G1_Gate_Second_Irq_Enable, + Second_IRQ_B_Enable_Register); + } else { + devpriv->stc_writew(dev, 0, + Second_IRQ_B_Enable_Register); + } + break; + default: + BUG(); + break; + } +} + +void ni_clear_ai_fifo(struct a4l_device *dev) +{ + if (boardtype.reg_type == ni_reg_6143) { + /* Flush the 6143 data FIFO */ + ni_writel(0x10, AIFIFO_Control_6143); /* Flush fifo */ + ni_writel(0x00, AIFIFO_Control_6143); /* Flush fifo */ + while (ni_readl(AIFIFO_Status_6143) & 0x10); /* Wait for complete */ + } else { + devpriv->stc_writew(dev, 1, ADC_FIFO_Clear); + if (boardtype.reg_type == ni_reg_625x) { + ni_writeb(0, M_Offset_Static_AI_Control(0)); + ni_writeb(1, M_Offset_Static_AI_Control(0)); + } + } +} + +#define ao_win_out(data, addr) ni_ao_win_outw(dev, data, addr) +static inline void ni_ao_win_outw(struct a4l_device *dev, uint16_t data, int addr) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->window_lock, flags); + ni_writew(addr, AO_Window_Address_611x); + ni_writew(data, AO_Window_Data_611x); + rtdm_lock_put_irqrestore(&devpriv->window_lock, flags); +} + +static inline void ni_ao_win_outl(struct a4l_device *dev, uint32_t data, int addr) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->window_lock, flags); + ni_writew(addr, AO_Window_Address_611x); + ni_writel(data, AO_Window_Data_611x); + rtdm_lock_put_irqrestore(&devpriv->window_lock, flags); +} + +static inline 
unsigned short ni_ao_win_inw(struct a4l_device *dev, int addr) +{ + unsigned long flags; + unsigned short data; + + rtdm_lock_get_irqsave(&devpriv->window_lock, flags); + ni_writew(addr, AO_Window_Address_611x); + data = ni_readw(AO_Window_Data_611x); + rtdm_lock_put_irqrestore(&devpriv->window_lock, flags); + return data; +} + +/* + * ni_set_bits( ) allows different parts of the ni_mio_common driver + * to share registers (such as Interrupt_A_Register) without interfering + * with each other. + * + * NOTE: the switch/case statements are optimized out for a constant + * argument so this is actually quite fast--- If you must wrap another + * function around this make it inline to avoid a large speed penalty. + * + * value should only be 1 or 0. + */ + +static inline void ni_set_bits(struct a4l_device *dev, + int reg, unsigned bits, unsigned value) +{ + unsigned bit_values; + + if (value) + bit_values = bits; + else + bit_values = 0; + + ni_set_bitfield(dev, reg, bits, bit_values); +} + +static void shutdown_ai_command(struct a4l_subdevice *subd) +{ + ni_ai_drain_dma(subd); + ni_handle_fifo_dregs(subd); + get_last_sample_611x(subd); + get_last_sample_6143(subd); + + /* TODO: stop the acquisition */ +} + +static void ni_handle_eos(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + + if (devpriv->aimode == AIMODE_SCAN) { + static const int timeout = 10; + int i; + + for (i = 0; i < timeout; i++) { + ni_sync_ai_dma(subd); + /* TODO: stop when the transfer is really over */ + a4l_udelay(1); + } + } + + /* Handle special case of single scan using AI_End_On_End_Of_Scan */ + if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) { + shutdown_ai_command(subd); + } +} + +static void ni_event(struct a4l_subdevice * subd) +{ + /* Temporary hack */ + struct a4l_buffer *buf = subd->buf; + + if(test_bit(A4L_BUF_ERROR_NR, &buf->flags)) { + if (subd->cancel != NULL) + subd->cancel(subd); + } + + a4l_buf_evt(subd, 0); + +} + +static void handle_gpct_interrupt(struct a4l_device *dev, unsigned short counter_index) +{ +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + struct ni_gpct *counter = devpriv->counter_dev->counters[counter_index]; + a4l_ni_tio_handle_interrupt(counter, dev); +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ +} + +#ifdef CONFIG_DEBUG_MIO_COMMON +static const char *const status_a_strings[] = { + "passthru0", "fifo", "G0_gate", "G0_TC", + "stop", "start", "sc_tc", "start1", + "start2", "sc_tc_error", "overflow", "overrun", + "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_a" +}; + +static void ni_mio_print_status_a(int status) +{ + int i; + + __a4l_info("A status:"); + for (i = 15; i >= 0; i--) { + if (status & (1 << i)) { + __a4l_info(" %s", status_a_strings[i]); + } + } + __a4l_info("\n"); +} + +static const char *const status_b_strings[] = { + "passthru1", "fifo", "G1_gate", "G1_TC", + "UI2_TC", "UPDATE", "UC_TC", "BC_TC", + "start1", "overrun", "start", "bc_tc_error", + "fifo_empty", "fifo_half_full", "fifo_full", "interrupt_b" +}; + +static void ni_mio_print_status_b(int status) +{ + int i; + + __a4l_info("B status:"); + for (i = 15; i >= 0; i--) { + if (status & (1 << i)) { + __a4l_info(" %s", status_b_strings[i]); + } + } + __a4l_info("\n"); +} + +#else /* !CONFIG_DEBUG_MIO_COMMON */ + +#define ni_mio_print_status_a(x) +#define ni_mio_print_status_b(x) + +#endif /* CONFIG_DEBUG_MIO_COMMON */ + +static void ack_a_interrupt(struct a4l_device *dev, unsigned short a_status) +{ + unsigned short ack = 0; + 
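+ /* Collect the write-one-to-acknowledge bits matching each latched AI condition; a single write to Interrupt_A_Ack_Register below clears them all at once. */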
+ if (a_status & AI_SC_TC_St) { + ack |= AI_SC_TC_Interrupt_Ack; + } + if (a_status & AI_START1_St) { + ack |= AI_START1_Interrupt_Ack; + } + if (a_status & AI_START_St) { + ack |= AI_START_Interrupt_Ack; + } + if (a_status & AI_STOP_St) { + /* not sure why we used to ack the START here also, + instead of doing it independently. Frank Hess + 2007-07-06 */ + ack |= AI_STOP_Interrupt_Ack; + } + if (ack) + devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register); +} + +static void handle_a_interrupt(struct a4l_device *dev, + unsigned short status,unsigned int ai_mite_status) +{ + + struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AI_SUBDEV); + + /* 67xx boards don't have ai subdevice, but their gpct0 + might generate an a interrupt. */ + + if((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED) + return; + + a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: " + "a_status=%04x ai_mite_status=%08x\n",status, ai_mite_status); + ni_mio_print_status_a(status); + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + if (ai_mite_status & CHSR_LINKC) + ni_sync_ai_dma(subd); + + if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY | + CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR | + CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) { + a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: " + "unknown mite interrupt, ack! (ai_mite_status=%08x)\n", + ai_mite_status); + a4l_buf_evt(subd, A4L_BUF_ERROR); + } +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + /* Test for all uncommon interrupt events at the same time */ + if (status & (AI_Overrun_St | AI_Overflow_St | AI_SC_TC_Error_St | + AI_SC_TC_St | AI_START1_St)) { + if (status == 0xffff) { + a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: " + "a_status=0xffff. Card removed?\n"); + /* TODO: we probably aren't even running a command now, + so it's a good idea to be careful. + we should check the transfer status */ + a4l_buf_evt(subd, A4L_BUF_ERROR); + ni_event(subd); + return; + } + if (status & (AI_Overrun_St | AI_Overflow_St | + AI_SC_TC_Error_St)) { + a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: " + "ai error a_status=%04x\n", status); + ni_mio_print_status_a(status); + + shutdown_ai_command(subd); + + a4l_buf_evt(subd, A4L_BUF_ERROR); + ni_event(subd); + + return; + } + if (status & AI_SC_TC_St) { + a4l_dbg(1, drv_dbg, dev, "ni_mio_common: SC_TC interrupt\n"); + if (!devpriv->ai_continuous) { + shutdown_ai_command(subd); + } + } + } + +#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \ + !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + + if (status & AI_FIFO_Half_Full_St) { + int i; + static const int timeout = 10; + /* PCMCIA cards (at least 6036) seem to stop producing + interrupts if we fail to get the fifo less than half + full, so loop to be sure. */ + for (i = 0; i < timeout; ++i) { + ni_handle_fifo_half_full(subd); + if ((devpriv->stc_readw(dev, AI_Status_1_Register) & + AI_FIFO_Half_Full_St) == 0) + break; + } + } +#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + if ((status & AI_STOP_St)) { + ni_handle_eos(subd); + } + + ni_event(subd); + + status = devpriv->stc_readw(dev, AI_Status_1_Register); + if (status & Interrupt_A_St) + a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: " + " didn't clear interrupt? 
status=0x%x\n", status); +} + +static void ack_b_interrupt(struct a4l_device *dev, unsigned short b_status) +{ + unsigned short ack = 0; + if (b_status & AO_BC_TC_St) { + ack |= AO_BC_TC_Interrupt_Ack; + } + if (b_status & AO_Overrun_St) { + ack |= AO_Error_Interrupt_Ack; + } + if (b_status & AO_START_St) { + ack |= AO_START_Interrupt_Ack; + } + if (b_status & AO_START1_St) { + ack |= AO_START1_Interrupt_Ack; + } + if (b_status & AO_UC_TC_St) { + ack |= AO_UC_TC_Interrupt_Ack; + } + if (b_status & AO_UI2_TC_St) { + ack |= AO_UI2_TC_Interrupt_Ack; + } + if (b_status & AO_UPDATE_St) { + ack |= AO_UPDATE_Interrupt_Ack; + } + if (ack) + devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register); +} + +static void handle_b_interrupt(struct a4l_device * dev, + unsigned short b_status, unsigned int ao_mite_status) +{ + + struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AO_SUBDEV); + + a4l_dbg(1, drv_dbg, dev, + "ni_mio_common: interrupt: b_status=%04x m1_status=%08x\n", + b_status, ao_mite_status); + + ni_mio_print_status_b(b_status); + + if (b_status == 0xffff) + return; + + if (b_status & AO_Overrun_St) { + a4l_err(dev, + "ni_mio_common: interrupt: " + "AO FIFO underrun status=0x%04x status2=0x%04x\n", + b_status, + devpriv->stc_readw(dev, AO_Status_2_Register)); + a4l_buf_evt(subd, A4L_BUF_ERROR); + } + + if (b_status & AO_BC_TC_St) { + a4l_dbg(1, drv_dbg, dev, + "ni_mio_common: interrupt: " + "AO BC_TC status=0x%04x status2=0x%04x\n", + b_status, devpriv->stc_readw(dev, AO_Status_2_Register)); + a4l_buf_evt(subd, A4L_BUF_EOA); + } + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + + if (ao_mite_status & CHSR_STOPS) { + a4l_dbg(1, drv_dbg, dev, + "ni_mio_common: interrupt: MITE transfer stopped\n"); + } else if (ao_mite_status & CHSR_LINKC) { + /* Currently, mite.c requires us to handle LINKC */ + mite_handle_b_linkc(subd); + } + + if (ao_mite_status & + ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY | + CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR | + CHSR_SABORT | CHSR_STOPS | CHSR_XFERR | CHSR_LxERR_mask)) { + a4l_err(dev, + "unknown mite interrupt, ack! (ao_mite_status=%08x)\n", + ao_mite_status); + a4l_buf_evt(subd, A4L_BUF_ERROR); + } +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \ + !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + + if (b_status & AO_FIFO_Request_St) { + int ret; + + ret = ni_ao_fifo_half_empty(subd); + if (!ret) { + a4l_err(dev, + "ni_mio_common: " + "interrupt: AO buffer underrun\n"); + ni_set_bits(dev, Interrupt_B_Enable_Register, + AO_FIFO_Interrupt_Enable | + AO_Error_Interrupt_Enable, 0); + a4l_buf_evt(subd, A4L_BUF_ERROR); + } + } +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + ni_event(subd); +} + +int a4l_ni_E_interrupt(unsigned int irq, void *d) +{ + struct a4l_device *dev = d; + unsigned short a_status; + unsigned short b_status; + unsigned int ai_mite_status = 0; + unsigned int ao_mite_status = 0; + unsigned long flags; + struct mite_struct *mite = devpriv->mite; + + /* Make sure dev->attached is checked before handler does + anything else. 
*/ + smp_mb(); + + /* lock to avoid race with a4l_poll */ + rtdm_lock_get_irqsave(&dev->lock, flags); + a_status = devpriv->stc_readw(dev, AI_Status_1_Register); + b_status = devpriv->stc_readw(dev, AO_Status_1_Register); + if (mite) { +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + rtdm_lock_get(&devpriv->mite_channel_lock); + if (devpriv->ai_mite_chan) { + ai_mite_status = a4l_mite_get_status(devpriv->ai_mite_chan); + if (ai_mite_status & CHSR_LINKC) + writel(CHOR_CLRLC, + devpriv->mite->mite_io_addr + + MITE_CHOR(devpriv->ai_mite_chan->channel)); + } + if (devpriv->ao_mite_chan) { + ao_mite_status = a4l_mite_get_status(devpriv->ao_mite_chan); + if (ao_mite_status & CHSR_LINKC) + writel(CHOR_CLRLC, + mite->mite_io_addr + + MITE_CHOR(devpriv->ao_mite_chan->channel)); + } + rtdm_lock_put(&devpriv->mite_channel_lock); +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + } + ack_a_interrupt(dev, a_status); + ack_b_interrupt(dev, b_status); + if ((a_status & Interrupt_A_St) || (ai_mite_status & CHSR_INT)) + handle_a_interrupt(dev, a_status, ai_mite_status); + if ((b_status & Interrupt_B_St) || (ao_mite_status & CHSR_INT)) + handle_b_interrupt(dev, b_status, ao_mite_status); + handle_gpct_interrupt(dev, 0); + handle_gpct_interrupt(dev, 1); + handle_cdio_interrupt(dev); + + rtdm_lock_put_irqrestore(&dev->lock, flags); + return 0; +} + +#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \ + !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + +static void ni_ao_fifo_load(struct a4l_subdevice *subd, int n) +{ + struct a4l_device *dev = subd->dev; + sampl_t d; + u32 packed_data; + int i, err = 1; + + for (i = 0; i < n; i++) { + err = a4l_buf_get(subd, &d, sizeof(sampl_t)); + if (err != 0) + break; + + if (boardtype.reg_type & ni_reg_6xxx_mask) { + packed_data = d & 0xffff; + /* 6711 only has 16 bit wide ao fifo */ + if (boardtype.reg_type != ni_reg_6711) { + err = a4l_buf_get(subd, &d, sizeof(sampl_t)); + if (err != 0) + break; + i++; + packed_data |= (d << 16) & 0xffff0000; + } + ni_writel(packed_data, DAC_FIFO_Data_611x); + } else { + ni_writew(d, DAC_FIFO_Data); + } + } + if (err != 0) { + a4l_buf_evt(subd, A4L_BUF_ERROR); + } +} + +/* + * There's a small problem if the FIFO gets really low and we + * don't have the data to fill it. Basically, if after we fill + * the FIFO with all the data available, the FIFO is _still_ + * less than half full, we never clear the interrupt. If the + * IRQ is in edge mode, we never get another interrupt, because + * this one wasn't cleared. If in level mode, we get flooded + * with interrupts that we can't fulfill, because nothing ever + * gets put into the buffer. + * + * This kind of situation is recoverable, but it is easier to + * just pretend we had a FIFO underrun, since there is a good + * chance it will happen anyway. This is _not_ the case for + * RT code, as RT code might purposely be running close to the + * metal. Needs to be fixed eventually. 
+ */ +static int ni_ao_fifo_half_empty(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + int n; + + n = a4l_buf_count(subd); + if (n == 0) { + a4l_buf_evt(subd, A4L_BUF_ERROR); + return 0; + } + + n /= sizeof(sampl_t); + if (n > boardtype.ao_fifo_depth / 2) + n = boardtype.ao_fifo_depth / 2; + + ni_ao_fifo_load(subd, n); + + return 1; +} + +static int ni_ao_prep_fifo(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + int n; + + /* Reset fifo */ + devpriv->stc_writew(dev, 1, DAC_FIFO_Clear); + if (boardtype.reg_type & ni_reg_6xxx_mask) + ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x); + + /* Load some data */ + n = a4l_buf_count(subd); + if (n == 0) + return 0; + + n /= sizeof(sampl_t); + if (n > boardtype.ao_fifo_depth) + n = boardtype.ao_fifo_depth; + + ni_ao_fifo_load(subd, n); + + return n; +} + +static void ni_ai_fifo_read(struct a4l_subdevice *subd, int n) +{ + struct a4l_device *dev = subd->dev; + int i; + + if (boardtype.reg_type == ni_reg_611x) { + sampl_t data[2]; + u32 dl; + + for (i = 0; i < n / 2; i++) { + dl = ni_readl(ADC_FIFO_Data_611x); + /* This may get the hi/lo data in the wrong order */ + data[0] = (dl >> 16) & 0xffff; + data[1] = dl & 0xffff; + a4l_buf_put(subd, data, sizeof(sampl_t) * 2); + } + /* Check if there's a single sample stuck in the FIFO */ + if (n % 2) { + dl = ni_readl(ADC_FIFO_Data_611x); + data[0] = dl & 0xffff; + a4l_buf_put(subd, &data[0], sizeof(sampl_t)); + } + } else if (boardtype.reg_type == ni_reg_6143) { + sampl_t data[2]; + u32 dl; + + /* This just reads the FIFO assuming the data is + present, no checks on the FIFO status are performed */ + for (i = 0; i < n / 2; i++) { + dl = ni_readl(AIFIFO_Data_6143); + + data[0] = (dl >> 16) & 0xffff; + data[1] = dl & 0xffff; + a4l_buf_put(subd, data, sizeof(sampl_t) * 2); + } + if (n % 2) { + /* Assume there is a single sample stuck in the FIFO. + Get stranded sample into FIFO */ + ni_writel(0x01, AIFIFO_Control_6143); + dl = ni_readl(AIFIFO_Data_6143); + data[0] = (dl >> 16) & 0xffff; + a4l_buf_put(subd, &data[0], sizeof(sampl_t)); + } + } else { + if (n > sizeof(devpriv->ai_fifo_buffer) / + sizeof(devpriv->ai_fifo_buffer[0])) { + a4l_err(dev, + "ni_ai_fifo_read: " + "bug! 
ai_fifo_buffer too small"); + a4l_buf_evt(subd, A4L_BUF_ERROR); + return; + } + for (i = 0; i < n; i++) { + devpriv->ai_fifo_buffer[i] = + ni_readw(ADC_FIFO_Data_Register); + } + a4l_buf_put(subd, + devpriv->ai_fifo_buffer, + n * sizeof(devpriv->ai_fifo_buffer[0])); + } +} + +static void ni_handle_fifo_half_full(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + ni_ai_fifo_read(subd, boardtype.ai_fifo_depth / 2); +} + +#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + +static int ni_ai_drain_dma(struct a4l_subdevice *subd) +{ + int i; + static const int timeout = 10000; + unsigned long flags; + int retval = 0; + struct a4l_device *dev = subd->dev; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + if (devpriv->ai_mite_chan) { + for (i = 0; i < timeout; i++) { + if ((devpriv->stc_readw(dev, + AI_Status_1_Register) & + AI_FIFO_Empty_St) + && a4l_mite_bytes_in_transit(devpriv-> + ai_mite_chan) == 0) + break; + a4l_udelay(5); + } + if (i == timeout) { + a4l_info(dev, "wait for dma drain timed out\n"); + + a4l_info(dev, "a4l_mite_bytes_in_transit=%i, " + "AI_Status1_Register=0x%x\n", + a4l_mite_bytes_in_transit(devpriv->ai_mite_chan), + devpriv->stc_readw(dev, AI_Status_1_Register)); + retval = -1; + } + } + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + + ni_sync_ai_dma(subd); + + return retval; +} + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +/* Empties the AI fifo */ +static void ni_handle_fifo_dregs(struct a4l_subdevice *subd) +{ + sampl_t data[2]; + u32 dl; + short fifo_empty; + int i; + struct a4l_device *dev = subd->dev; + + if (boardtype.reg_type == ni_reg_611x) { + while ((devpriv->stc_readw(dev, + AI_Status_1_Register) & + AI_FIFO_Empty_St) == 0) { + dl = ni_readl(ADC_FIFO_Data_611x); + + /* This may get the hi/lo data in the wrong order */ + data[0] = (dl >> 16); + data[1] = (dl & 0xffff); + a4l_buf_put(subd, data, sizeof(sampl_t) * 2); + } + } else if (boardtype.reg_type == ni_reg_6143) { + i = 0; + while (ni_readl(AIFIFO_Status_6143) & 0x04) { + dl = ni_readl(AIFIFO_Data_6143); + + /* This may get the hi/lo data in the wrong order */ + data[0] = (dl >> 16); + data[1] = (dl & 0xffff); + a4l_buf_put(subd, data, sizeof(sampl_t) * 2); + i += 2; + } + // Check if stranded sample is present + if (ni_readl(AIFIFO_Status_6143) & 0x01) { + ni_writel(0x01, AIFIFO_Control_6143); // Get stranded sample into FIFO + dl = ni_readl(AIFIFO_Data_6143); + data[0] = (dl >> 16) & 0xffff; + a4l_buf_put(subd, &data[0], sizeof(sampl_t)); + } + + } else { + fifo_empty = + devpriv->stc_readw(dev, + AI_Status_1_Register) & AI_FIFO_Empty_St; + while (fifo_empty == 0) { + for (i = 0; + i < + sizeof(devpriv->ai_fifo_buffer) / + sizeof(devpriv->ai_fifo_buffer[0]); i++) { + fifo_empty = + devpriv->stc_readw(dev, + AI_Status_1_Register) & + AI_FIFO_Empty_St; + if (fifo_empty) + break; + devpriv->ai_fifo_buffer[i] = + ni_readw(ADC_FIFO_Data_Register); + } + a4l_buf_put(subd, + devpriv->ai_fifo_buffer, + i * sizeof(devpriv->ai_fifo_buffer[0])); + } + } +} + +static void get_last_sample_611x(struct a4l_subdevice *subd) +{ + sampl_t data; + u32 dl; + struct a4l_device *dev = subd->dev; + + if (boardtype.reg_type != ni_reg_611x) + return; + + /* Check if there's a single sample stuck in the FIFO */ + if (ni_readb(XXX_Status) & 0x80) { + dl = ni_readl(ADC_FIFO_Data_611x); + data = (dl & 0xffff); + a4l_buf_put(subd, &data, sizeof(sampl_t)); + } +} + 
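A note ahead of the munge helpers that follow: for bipolar ranges, ai_offset holds half the full-scale code, so adding it converts a two's-complement ADC sample into the straight-binary value the upper layers expect. A minimal sketch under that assumption, with illustrative names only:

#include <stdint.h>

/* Two's-complement bipolar code -> straight binary for a 16-bit converter
 * whose offset is 1 << 15: 0x8000 (negative full scale) wraps to 0x0000,
 * 0x0000 (mid scale) becomes 0x8000, and 0x7fff becomes 0xffff. */
static uint16_t munge_sample(int16_t raw, uint16_t offset)
{
	return (uint16_t)raw + offset;
}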
+static void get_last_sample_6143(struct a4l_subdevice *subd) +{ + sampl_t data; + u32 dl; + struct a4l_device *dev = subd->dev; + + if (boardtype.reg_type != ni_reg_6143) + return; + + /* Check if there's a single sample stuck in the FIFO */ + if (ni_readl(AIFIFO_Status_6143) & 0x01) { + /* Get stranded sample into FIFO */ + ni_writel(0x01, AIFIFO_Control_6143); + dl = ni_readl(AIFIFO_Data_6143); + + /* This may get the hi/lo data in the wrong order */ + data = (dl >> 16) & 0xffff; + a4l_buf_put(subd, &data, sizeof(sampl_t)); + } +} + +static void ni_ai_munge16(struct a4l_subdevice *subd, void *buf, unsigned long size) +{ + struct a4l_device *dev = subd->dev; + struct a4l_cmd_desc *cmd = a4l_get_cmd(subd); + int chan_idx = a4l_get_chan(subd); + unsigned int i; + sampl_t *array = buf; + + for (i = 0; i < size / sizeof(sampl_t); i++) { +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + array[i] = le16_to_cpu(array[i]); +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + array[i] += devpriv->ai_offset[chan_idx]; + chan_idx++; + chan_idx %= cmd->nb_chan; + } +} + +static void ni_ai_munge32(struct a4l_subdevice *subd, void *buf, unsigned long size) +{ + struct a4l_device *dev = subd->dev; + struct a4l_cmd_desc *cmd = a4l_get_cmd(subd); + int chan_idx = a4l_get_chan(subd); + unsigned int i; + lsampl_t *larray = buf; + + for (i = 0; i < size / sizeof(lsampl_t); i++) { +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + larray[i] = le32_to_cpu(larray[i]); +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + larray[i] += devpriv->ai_offset[chan_idx]; + chan_idx++; + chan_idx %= cmd->nb_chan; + } +} + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + +static int ni_ai_setup_MITE_dma(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + unsigned long flags; + int err; + + err = ni_request_ai_mite_channel(dev); + if (err < 0) + return err; + + err = a4l_mite_buf_change(devpriv->ai_mite_chan->ring, subd); + if (err < 0) + return err; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + + switch (boardtype.reg_type) { + case ni_reg_611x: + case ni_reg_6143: + a4l_mite_prep_dma(devpriv->ai_mite_chan, 32, 16); + break; + case ni_reg_628x: + a4l_mite_prep_dma(devpriv->ai_mite_chan, 32, 32); + break; + default: + a4l_mite_prep_dma(devpriv->ai_mite_chan, 16, 16); + break; + }; + + /* start the MITE */ + a4l_mite_dma_arm(devpriv->ai_mite_chan); + + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + + return 0; +} + +static int ni_ao_setup_MITE_dma(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + unsigned long flags; + int err; + + err = ni_request_ao_mite_channel(dev); + if (err < 0) + return err; + + err = a4l_mite_buf_change(devpriv->ao_mite_chan->ring, subd); + if (err < 0) + return err; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + + if (devpriv->ao_mite_chan) { + + if (boardtype.reg_type & (ni_reg_611x | ni_reg_6713)) { + a4l_mite_prep_dma(devpriv->ao_mite_chan, 32, 32); + } else { + /* Doing 32 instead of 16 bit wide transfers + from memory makes the mite do 32 bit pci + transfers, doubling pci bandwidth. 
*/ + a4l_mite_prep_dma(devpriv->ao_mite_chan, 16, 32); + } + a4l_mite_dma_arm(devpriv->ao_mite_chan); + } else + err = -EIO; + + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + + return err; +} + +static int ni_cdo_setup_MITE_dma(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + unsigned long flags; + int err; + + err = ni_request_cdo_mite_channel(dev); + if (err < 0) + return err; + + /* No need to get a lock to setup the ring buffer */ + err = a4l_mite_buf_change(devpriv->cdo_mite_chan->ring, subd); + if (err < 0) + return err; + + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + + /* This test should be useless but one never knows */ + if (devpriv->cdo_mite_chan) { + /* Configure the DMA transfer */ + a4l_mite_prep_dma(devpriv->cdo_mite_chan, 32, 32); + a4l_mite_dma_arm(devpriv->cdo_mite_chan); + } else + err = -EIO; + + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + + return err; +} + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +static void ni_ai_reset(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + + ni_release_ai_mite_channel(dev); + + /* ai configuration */ + devpriv->stc_writew(dev, AI_Configuration_Start | AI_Reset, + Joint_Reset_Register); + + ni_set_bits(dev, Interrupt_A_Enable_Register, + AI_SC_TC_Interrupt_Enable | AI_START1_Interrupt_Enable | + AI_START2_Interrupt_Enable | AI_START_Interrupt_Enable | + AI_STOP_Interrupt_Enable | AI_Error_Interrupt_Enable | + AI_FIFO_Interrupt_Enable, 0); + + ni_clear_ai_fifo(dev); + + if (boardtype.reg_type != ni_reg_6143) + ni_writeb(0, Misc_Command); + + devpriv->stc_writew(dev, AI_Disarm, AI_Command_1_Register); /* reset pulses */ + devpriv->stc_writew(dev, + AI_Start_Stop | AI_Mode_1_Reserved /*| AI_Trigger_Once */ , + AI_Mode_1_Register); + devpriv->stc_writew(dev, 0x0000, AI_Mode_2_Register); + /* generate FIFO interrupts on non-empty */ + devpriv->stc_writew(dev, (0 << 6) | 0x0000, AI_Mode_3_Register); + if (boardtype.reg_type == ni_reg_611x) { + devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width | + AI_SOC_Polarity | + AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register); + devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) | + AI_EXTMUX_CLK_Output_Select(0) | + AI_LOCALMUX_CLK_Output_Select(2) | + AI_SC_TC_Output_Select(3) | + AI_CONVERT_Output_Select(AI_CONVERT_Output_Enable_High), + AI_Output_Control_Register); + } else if (boardtype.reg_type == ni_reg_6143) { + devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width | + AI_SOC_Polarity | + AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register); + devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) | + AI_EXTMUX_CLK_Output_Select(0) | + AI_LOCALMUX_CLK_Output_Select(2) | + AI_SC_TC_Output_Select(3) | + AI_CONVERT_Output_Select(AI_CONVERT_Output_Enable_Low), + AI_Output_Control_Register); + } else { + unsigned int ai_output_control_bits; + devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width | + AI_SOC_Polarity | + AI_CONVERT_Pulse_Width | + AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register); + ai_output_control_bits = AI_SCAN_IN_PROG_Output_Select(3) | + AI_EXTMUX_CLK_Output_Select(0) | + AI_LOCALMUX_CLK_Output_Select(2) | + AI_SC_TC_Output_Select(3); + if (boardtype.reg_type == ni_reg_622x) + ai_output_control_bits |= + AI_CONVERT_Output_Select + (AI_CONVERT_Output_Enable_High); + else + ai_output_control_bits |= + AI_CONVERT_Output_Select + (AI_CONVERT_Output_Enable_Low); + devpriv->stc_writew(dev, ai_output_control_bits, + AI_Output_Control_Register); + } + + /* the following registers should 
not be changed, because there + * are no backup registers in devpriv. If you want to change + * any of these, add a backup register and other appropriate code: + * AI_Mode_1_Register + * AI_Mode_3_Register + * AI_Personal_Register + * AI_Output_Control_Register + */ + + /* clear interrupts */ + devpriv->stc_writew(dev, AI_SC_TC_Error_Confirm | AI_START_Interrupt_Ack | + AI_START2_Interrupt_Ack | AI_START1_Interrupt_Ack | + AI_SC_TC_Interrupt_Ack | AI_Error_Interrupt_Ack | + AI_STOP_Interrupt_Ack, Interrupt_A_Ack_Register); + + devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register); +} + +static int ni_ai_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + const unsigned int mask = (1 << boardtype.adbits) - 1; + int i, n; + unsigned int signbits; + unsigned short d; + unsigned long dl; + uint16_t *data = (uint16_t *)insn->data; + + ni_load_channelgain_list(dev, 1, &insn->chan_desc); + + ni_clear_ai_fifo(dev); + + signbits = devpriv->ai_offset[0]; + if (boardtype.reg_type == ni_reg_611x) { + for (n = 0; n < num_adc_stages_611x; n++) { + devpriv->stc_writew(dev, AI_CONVERT_Pulse, + AI_Command_1_Register); + a4l_udelay(1); + } + for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) { + devpriv->stc_writew(dev, AI_CONVERT_Pulse, + AI_Command_1_Register); + /* The 611x has screwy 32-bit FIFOs. */ + d = 0; + for (i = 0; i < NI_TIMEOUT; i++) { + if (ni_readb(XXX_Status) & 0x80) { + d = (ni_readl(ADC_FIFO_Data_611x) >> 16) + & 0xffff; + break; + } + if (!(devpriv->stc_readw(dev, + AI_Status_1_Register) & + AI_FIFO_Empty_St)) { + d = ni_readl(ADC_FIFO_Data_611x) & + 0xffff; + break; + } + } + if (i == NI_TIMEOUT) { + a4l_warn(dev, + "ni_mio_common: " + "timeout in 611x ni_ai_insn_read\n"); + return -ETIME; + } + d += signbits; + data[n] = d; + } + } else if (boardtype.reg_type == ni_reg_6143) { + for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) { + devpriv->stc_writew(dev, AI_CONVERT_Pulse, + AI_Command_1_Register); + + /* The 6143 has 32-bit FIFOs. 
+ You need to strobe a bit to move a single + 16bit stranded sample into the FIFO */ + dl = 0; + for (i = 0; i < NI_TIMEOUT; i++) { + if (ni_readl(AIFIFO_Status_6143) & 0x01) { + ni_writel(0x01, AIFIFO_Control_6143); // Get stranded sample into FIFO + dl = ni_readl(AIFIFO_Data_6143); + break; + } + } + if (i == NI_TIMEOUT) { + a4l_warn(dev, + "ni_mio_common: " + "timeout in 6143 ni_ai_insn_read\n"); + return -ETIME; + } + data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF; + } + } else { + for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) { + devpriv->stc_writew(dev, AI_CONVERT_Pulse, + AI_Command_1_Register); + for (i = 0; i < NI_TIMEOUT; i++) { + if (!(devpriv->stc_readw(dev, + AI_Status_1_Register) & + AI_FIFO_Empty_St)) + break; + } + if (i == NI_TIMEOUT) { + a4l_warn(dev, + "ni_mio_common: " + "timeout in ni_ai_insn_read\n"); + return -ETIME; + } + if (boardtype.reg_type & ni_reg_m_series_mask) { + data[n] = ni_readl(M_Offset_AI_FIFO_Data) & mask; + } else { + d = ni_readw(ADC_FIFO_Data_Register); + /* subtle: needs to be short addition */ + d += signbits; + data[n] = d; + } + } + } + return 0; +} + +void ni_prime_channelgain_list(struct a4l_device *dev) +{ + int i; + devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register); + for (i = 0; i < NI_TIMEOUT; ++i) { + if (!(devpriv->stc_readw(dev, + AI_Status_1_Register) & + AI_FIFO_Empty_St)) { + devpriv->stc_writew(dev, 1, ADC_FIFO_Clear); + return; + } + a4l_udelay(1); + } + a4l_warn(dev, "ni_mio_common: timeout loading channel/gain list\n"); +} + +static void ni_m_series_load_channelgain_list(struct a4l_device *dev, + unsigned int n_chan, + unsigned int *list) +{ + unsigned int chan, range, aref; + unsigned int i; + unsigned offset; + unsigned int dither; + unsigned range_code; + + devpriv->stc_writew(dev, 1, Configuration_Memory_Clear); + + if ((list[0] & CR_ALT_SOURCE)) { + unsigned bypass_bits; + chan = CR_CHAN(list[0]); + range = CR_RNG(list[0]); + range_code = ni_gainlkup[boardtype.gainlkup][range]; + dither = ((list[0] & CR_ALT_FILTER) != 0); + bypass_bits = MSeries_AI_Bypass_Config_FIFO_Bit; + bypass_bits |= chan; + bypass_bits |= + (devpriv-> + ai_calib_source) & (MSeries_AI_Bypass_Cal_Sel_Pos_Mask | + MSeries_AI_Bypass_Cal_Sel_Neg_Mask | + MSeries_AI_Bypass_Mode_Mux_Mask | + MSeries_AO_Bypass_AO_Cal_Sel_Mask); + bypass_bits |= MSeries_AI_Bypass_Gain_Bits(range_code); + if (dither) + bypass_bits |= MSeries_AI_Bypass_Dither_Bit; + // don't use 2's complement encoding + bypass_bits |= MSeries_AI_Bypass_Polarity_Bit; + ni_writel(bypass_bits, M_Offset_AI_Config_FIFO_Bypass); + } else { + ni_writel(0, M_Offset_AI_Config_FIFO_Bypass); + } + offset = 0; + for (i = 0; i < n_chan; i++) { + unsigned config_bits = 0; + chan = CR_CHAN(list[i]); + aref = CR_AREF(list[i]); + range = CR_RNG(list[i]); + dither = ((list[i] & CR_ALT_FILTER) != 0); + + range_code = ni_gainlkup[boardtype.gainlkup][range]; + devpriv->ai_offset[i] = offset; + switch (aref) { + case AREF_DIFF: + config_bits |= + MSeries_AI_Config_Channel_Type_Differential_Bits; + break; + case AREF_COMMON: + config_bits |= + MSeries_AI_Config_Channel_Type_Common_Ref_Bits; + break; + case AREF_GROUND: + config_bits |= + MSeries_AI_Config_Channel_Type_Ground_Ref_Bits; + break; + case AREF_OTHER: + break; + } + config_bits |= MSeries_AI_Config_Channel_Bits(chan); + config_bits |= + MSeries_AI_Config_Bank_Bits(boardtype.reg_type, chan); + config_bits |= MSeries_AI_Config_Gain_Bits(range_code); + if (i == n_chan - 1) + config_bits |= MSeries_AI_Config_Last_Channel_Bit; 
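+ /* The final list entry is flagged so the hardware knows where the channel configuration ends (and, presumably, where a scan wraps back to the first entry). */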
+ if (dither) + config_bits |= MSeries_AI_Config_Dither_Bit; + // don't use 2's complement encoding + config_bits |= MSeries_AI_Config_Polarity_Bit; + ni_writew(config_bits, M_Offset_AI_Config_FIFO_Data); + } + ni_prime_channelgain_list(dev); +} + +/* + * Notes on the 6110 and 6111: + * These boards are slightly different from the rest of the series, since + * they have multiple A/D converters. + * From the driver side, the configuration memory is a + * little different. + * Configuration Memory Low: + * bits 15-9: same + * bit 8: unipolar/bipolar (should be 0 for bipolar) + * bits 0-3: gain. This is 4 bits instead of 3 for the other boards + * 1001 gain=0.1 (+/- 50) + * 1010 0.2 + * 1011 0.1 + * 0001 1 + * 0010 2 + * 0011 5 + * 0100 10 + * 0101 20 + * 0110 50 + * Configuration Memory High: + * bits 12-14: Channel Type + * 001 for differential + * 000 for calibration + * bit 11: coupling (this is not currently handled) + * 1 AC coupling + * 0 DC coupling + * bits 0-2: channel + * valid channels are 0-3 + */ +static void ni_load_channelgain_list(struct a4l_device *dev, + unsigned int n_chan, unsigned int *list) +{ + unsigned int chan, range, aref; + unsigned int i; + unsigned int hi, lo; + unsigned offset; + unsigned int dither; + + if (boardtype.reg_type & ni_reg_m_series_mask) { + ni_m_series_load_channelgain_list(dev, n_chan, list); + return; + } + if (n_chan == 1 && (boardtype.reg_type != ni_reg_611x) + && (boardtype.reg_type != ni_reg_6143)) { + if (devpriv->changain_state + && devpriv->changain_spec == list[0]) { + /* ready to go. */ + return; + } + devpriv->changain_state = 1; + devpriv->changain_spec = list[0]; + } else { + devpriv->changain_state = 0; + } + + devpriv->stc_writew(dev, 1, Configuration_Memory_Clear); + + /* Set up Calibration mode if required */ + if (boardtype.reg_type == ni_reg_6143) { + if ((list[0] & CR_ALT_SOURCE) + && !devpriv->ai_calib_source_enabled) { + /* Strobe Relay enable bit */ + ni_writew(devpriv-> + ai_calib_source | + Calibration_Channel_6143_RelayOn, + Calibration_Channel_6143); + ni_writew(devpriv->ai_calib_source, + Calibration_Channel_6143); + devpriv->ai_calib_source_enabled = 1; + /* Allow relays to change */ + if(rtdm_in_rt_context()) + rtdm_task_sleep(100*1000000); + else + msleep_interruptible(100); + } else if (!(list[0] & CR_ALT_SOURCE) + && devpriv->ai_calib_source_enabled) { + /* Strobe Relay disable bit */ + ni_writew(devpriv-> + ai_calib_source | + Calibration_Channel_6143_RelayOff, + Calibration_Channel_6143); + ni_writew(devpriv->ai_calib_source, + Calibration_Channel_6143); + devpriv->ai_calib_source_enabled = 0; + /* Allow relays to change */ + if(rtdm_in_rt_context()) + rtdm_task_sleep(100*1000000); + else + msleep_interruptible(100); + } + } + + offset = 1 << (boardtype.adbits - 1); + for (i = 0; i < n_chan; i++) { + if ((boardtype.reg_type != ni_reg_6143) + && (list[i] & CR_ALT_SOURCE)) { + chan = devpriv->ai_calib_source; + } else { + chan = CR_CHAN(list[i]); + } + aref = CR_AREF(list[i]); + range = CR_RNG(list[i]); + dither = ((list[i] & CR_ALT_FILTER) != 0); + + /* fix the external/internal range differences */ + range = ni_gainlkup[boardtype.gainlkup][range]; + if (boardtype.reg_type == ni_reg_611x) + devpriv->ai_offset[i] = offset; + else + devpriv->ai_offset[i] = (range & 0x100) ? 
0 : offset; + + hi = 0; + if ((list[i] & CR_ALT_SOURCE)) { + if (boardtype.reg_type == ni_reg_611x) + ni_writew(CR_CHAN(list[i]) & 0x0003, + Calibration_Channel_Select_611x); + } else { + if (boardtype.reg_type == ni_reg_611x) + aref = AREF_DIFF; + else if (boardtype.reg_type == ni_reg_6143) + aref = AREF_OTHER; + switch (aref) { + case AREF_DIFF: + hi |= AI_DIFFERENTIAL; + break; + case AREF_COMMON: + hi |= AI_COMMON; + break; + case AREF_GROUND: + hi |= AI_GROUND; + break; + case AREF_OTHER: + break; + } + } + hi |= AI_CONFIG_CHANNEL(chan); + + ni_writew(hi, Configuration_Memory_High); + + if (boardtype.reg_type != ni_reg_6143) { + lo = range; + if (i == n_chan - 1) + lo |= AI_LAST_CHANNEL; + if (dither) + lo |= AI_DITHER; + + ni_writew(lo, Configuration_Memory_Low); + } + } + + /* prime the channel/gain list */ + if ((boardtype.reg_type != ni_reg_611x) + && (boardtype.reg_type != ni_reg_6143)) { + ni_prime_channelgain_list(dev); + } +} + +static int ni_ns_to_timer(const struct a4l_device *dev, + unsigned int nanosec, int round_mode) +{ + int divider; + switch (round_mode) { + case TRIG_ROUND_NEAREST: + default: + divider = (nanosec + devpriv->clock_ns / 2) / devpriv->clock_ns; + break; + case TRIG_ROUND_DOWN: + divider = (nanosec) / devpriv->clock_ns; + break; + case TRIG_ROUND_UP: + divider = (nanosec + devpriv->clock_ns - 1) / devpriv->clock_ns; + break; + } + return divider - 1; +} + +static unsigned int ni_timer_to_ns(const struct a4l_device *dev, int timer) +{ + return devpriv->clock_ns * (timer + 1); +} + +static unsigned int ni_min_ai_scan_period_ns(struct a4l_device *dev, + unsigned int num_channels) +{ + switch (boardtype.reg_type) { + case ni_reg_611x: + case ni_reg_6143: + /* simultaneously-sampled inputs */ + return boardtype.ai_speed; + break; + default: + /* multiplexed inputs */ + break; + }; + return boardtype.ai_speed * num_channels; +} + +static struct a4l_cmd_desc mio_ai_cmd_mask = { + .idx_subd = 0, + .start_src = TRIG_NOW | TRIG_INT | TRIG_EXT, + .scan_begin_src = TRIG_TIMER | TRIG_EXT, + .convert_src = TRIG_TIMER | TRIG_EXT | TRIG_NOW, + .scan_end_src = TRIG_COUNT, + .stop_src = TRIG_COUNT | TRIG_NONE, +}; + +int ni_ai_inttrig(struct a4l_subdevice *subd, lsampl_t trignum) +{ + struct a4l_device *dev = subd->dev; + + if (trignum != 0) + return -EINVAL; + + devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2, + AI_Command_2_Register); + + return 1; +} + +#define cfc_check_trigger_arg_is(a,b) __cfc_check_trigger_arg_is(a,b, dev, __LINE__) +static inline int __cfc_check_trigger_arg_is(unsigned int *arg, + unsigned int val, + struct a4l_device *dev, + unsigned int line) +{ + if (*arg != val) { + a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) != val (%d) \n", + line, *arg, val); + *arg = val; + return -EINVAL; + } + return 0; +} + +#define cfc_check_trigger_is_unique(a) __cfc_check_trigger_is_unique(a, dev, __LINE__) +static inline int __cfc_check_trigger_is_unique(unsigned int src, + struct a4l_device *dev, + unsigned int line) +{ + /* this test is true if more than one _src bit is set */ + if ((src & (src - 1)) != 0) { + a4l_dbg(1, drv_dbg, dev, "line %d: src (%d) \n", line, src); + return -EINVAL; + } + return 0; +} + +#define cfc_check_trigger_src(a,b) __cfc_check_trigger_src(a,b, dev, __LINE__) +static inline int __cfc_check_trigger_src(unsigned int *src, + unsigned int flags, + struct a4l_device *dev, + unsigned int line) +{ + unsigned int orig_src = *src; + + *src = orig_src & flags; + if (*src == 0 || *src != orig_src){ + a4l_dbg(1, drv_dbg, dev, "line %d: 
*src (%d) orig_src (%d) flags(%d) \n", + line, *src, orig_src, flags); + return -EINVAL; + } + + return 0; +} + +#define cfc_check_trigger_arg_min(a,b) __cfc_check_trigger_arg_min(a,b, dev, __LINE__) +static inline int __cfc_check_trigger_arg_min(unsigned int *arg, + unsigned int val, + struct a4l_device *dev, + unsigned int line) +{ + if (*arg < val) { + a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) < val (%d) \n", + line, *arg, val); + *arg = val; + return -EINVAL; + } + return 0; +} + +#define cfc_check_trigger_arg_max(a,b) __cfc_check_trigger_arg_max(a,b, dev, __LINE__) +static inline int __cfc_check_trigger_arg_max(unsigned int *arg, + unsigned int val, + struct a4l_device *dev, + unsigned int line) +{ + if (*arg > val) { + a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) > val (%d) \n", + line, *arg, val); + *arg = val; + return -EINVAL; + } + return 0; +} + +static int ni_ai_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + struct a4l_device *dev = subd->dev; + unsigned int sources; + int tmp, err = 0; + + /* Step 1 : check if triggers are trivially valid */ + err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT | TRIG_EXT); + err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER | TRIG_EXT); + + sources = TRIG_TIMER | TRIG_EXT; + if (boardtype.reg_type == ni_reg_611x || boardtype.reg_type == ni_reg_6143) + sources |= TRIG_NOW; + + err |= cfc_check_trigger_src(&cmd->convert_src, sources); + err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); + err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); + + if (err) { + if (cmd->valid_simul_stages & BIT(1)) + return 0; + + a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 1 \n"); + return -EINVAL; + } + + /* Step 2a : make sure trigger sources are unique */ + err |= cfc_check_trigger_is_unique(cmd->start_src); + err |= cfc_check_trigger_is_unique(cmd->scan_begin_src); + err |= cfc_check_trigger_is_unique(cmd->convert_src); + err |= cfc_check_trigger_is_unique(cmd->stop_src); + + /* Step 2b : and mutually compatible */ + + if (err) { + if (cmd->valid_simul_stages & BIT(2)) + return 0; + + a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 2 \n"); + return -EINVAL; + } + + /* Step 3: check if arguments are trivially valid */ + + if (cmd->start_src == TRIG_EXT) { + /* external trigger */ + unsigned int tmp = CR_CHAN(cmd->start_arg); + if (tmp > 16) + tmp = 16; + tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE)); + err |= cfc_check_trigger_arg_is(&cmd->start_arg, tmp); + + } else { + /* true for both TRIG_NOW and TRIG_INT */ + err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0); + } + + if (cmd->scan_begin_src == TRIG_TIMER) { + err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, + ni_min_ai_scan_period_ns(dev, cmd->nb_chan)); + + err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, + devpriv->clock_ns * 0xffffff); + } else if (cmd->scan_begin_src == TRIG_EXT) { + /* external trigger */ + unsigned int tmp = CR_CHAN(cmd->scan_begin_arg); + + if (tmp > 16) + tmp = 16; + tmp |= (cmd->scan_begin_arg & (CR_INVERT | CR_EDGE)); + err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, tmp); + + } else { /* TRIG_OTHER */ + err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0); + + } + + if (cmd->convert_src == TRIG_TIMER) { + if ((boardtype.reg_type == ni_reg_611x) + || (boardtype.reg_type == ni_reg_6143)) { + err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0); + + } else { + err |= cfc_check_trigger_arg_min(&cmd->convert_arg, + boardtype.ai_speed); + err |= cfc_check_trigger_arg_max(&cmd->convert_arg, + 
devpriv->clock_ns * 0xffff); + } + } else if (cmd->convert_src == TRIG_EXT) { + /* external trigger */ + unsigned int tmp = CR_CHAN(cmd->convert_arg); + + if (tmp > 16) + tmp = 16; + tmp |= (cmd->convert_arg & (CR_ALT_FILTER | CR_INVERT)); + err |= cfc_check_trigger_arg_is(&cmd->convert_arg, tmp); + } else if (cmd->convert_src == TRIG_NOW) { + err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0); + } + + err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->nb_chan); + + if (cmd->stop_src == TRIG_COUNT) { + unsigned int max_count = 0x01000000; + + if (boardtype.reg_type == ni_reg_611x) + max_count -= num_adc_stages_611x; + err |= cfc_check_trigger_arg_max(&cmd->stop_arg, max_count); + err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1); + + } else { + /* TRIG_NONE */ + err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0); + } + + if (err) { + if (cmd->valid_simul_stages & BIT(3)) + return 0; + + a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 3 \n"); + return 3; + } + + /* step 4: fix up any arguments */ + if (cmd->scan_begin_src == TRIG_TIMER) { + tmp = cmd->scan_begin_arg; + cmd->scan_begin_arg = + ni_timer_to_ns(dev, ni_ns_to_timer(dev, + cmd->scan_begin_arg, + cmd->flags & + TRIG_ROUND_MASK)); + if (tmp != cmd->scan_begin_arg) + err++; + } + if (cmd->convert_src == TRIG_TIMER) { + if ((boardtype.reg_type != ni_reg_611x) + && (boardtype.reg_type != ni_reg_6143)) { + tmp = cmd->convert_arg; + cmd->convert_arg = + ni_timer_to_ns(dev, ni_ns_to_timer(dev, + cmd->convert_arg, + cmd-> + flags & + TRIG_ROUND_MASK)); + if (tmp != cmd->convert_arg) + err++; + if (cmd->scan_begin_src == TRIG_TIMER && + cmd->scan_begin_arg < + cmd->convert_arg * cmd->scan_end_arg) { + cmd->scan_begin_arg = + cmd->convert_arg * cmd->scan_end_arg; + err++; + } + } + } + + if (err) { + if (cmd->valid_simul_stages & BIT(4)) + return 0; + + a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 4 \n"); + return -EINVAL; + } + + return 0; + + +} + +static int ni_ai_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + struct a4l_device *dev = subd->dev; + int timer; + int mode1 = 0; /* mode1 is needed for both stop and convert */ + int mode2 = 0; + int start_stop_select = 0; + unsigned int stop_count; + int interrupt_a_enable = 0; + + a4l_info(dev, "start\n"); + + if (a4l_get_irq(dev) == A4L_IRQ_UNUSED) { + a4l_err(dev, "ni_ai_cmd: cannot run command without an irq"); + return -EIO; + } + ni_clear_ai_fifo(dev); + + ni_load_channelgain_list(dev, cmd->nb_chan, cmd->chan_descs); + + /* start configuration */ + devpriv->stc_writew(dev, AI_Configuration_Start, Joint_Reset_Register); + + /* disable analog triggering for now, since it + * interferes with the use of pfi0 */ + devpriv->an_trig_etc_reg &= ~Analog_Trigger_Enable; + devpriv->stc_writew(dev, devpriv->an_trig_etc_reg, + Analog_Trigger_Etc_Register); + + switch (cmd->start_src) { + case TRIG_INT: + case TRIG_NOW: + devpriv->stc_writew(dev, AI_START2_Select(0) | + AI_START1_Sync | AI_START1_Edge | AI_START1_Select(0), + AI_Trigger_Select_Register); + break; + case TRIG_EXT: + { + int chan = CR_CHAN(cmd->start_arg); + unsigned int bits = AI_START2_Select(0) | + AI_START1_Sync | AI_START1_Select(chan + 1); + + if (cmd->start_arg & CR_INVERT) + bits |= AI_START1_Polarity; + if (cmd->start_arg & CR_EDGE) + bits |= AI_START1_Edge; + devpriv->stc_writew(dev, bits, + AI_Trigger_Select_Register); + break; + } + } + + mode2 &= ~AI_Pre_Trigger; + mode2 &= ~AI_SC_Initial_Load_Source; + mode2 &= ~AI_SC_Reload_Mode; + devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); + + if (cmd->nb_chan 
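/*
 * Step 4 above detects quantization by a round-trip through the timer
 * conversion helpers; note that the step-3 failure path, unlike the
 * other steps, still returns the bare step number 3 rather than
 * -EINVAL.  Below is a sketch of the round-trip idea, assuming (as the
 * fixup code implies) that ni_ns_to_timer()/ni_timer_to_ns() quantize
 * to whole ticks of a fixed timebase; CLOCK_NS and both helpers here
 * are stand-ins.
 */
#include <stdio.h>

#define CLOCK_NS 50u	/* illustrative 20 MHz timebase */

static unsigned int ns_to_timer(unsigned int ns) { return ns / CLOCK_NS; }
static unsigned int timer_to_ns(unsigned int t)  { return t * CLOCK_NS; }

int main(void)
{
	unsigned int req = 1234;	/* not a whole number of ticks */
	unsigned int fixed = timer_to_ns(ns_to_timer(req));

	if (fixed != req)	/* mirrors the "tmp != ..." test above */
		printf("adjusted %u ns -> %u ns\n", req, fixed);
	return 0;
}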
== 1 || (boardtype.reg_type == ni_reg_611x) + || (boardtype.reg_type == ni_reg_6143)) { + start_stop_select |= AI_STOP_Polarity; + start_stop_select |= AI_STOP_Select(31);/* logic low */ + start_stop_select |= AI_STOP_Sync; + } else { + start_stop_select |= AI_STOP_Select(19);/* ai configuration memory */ + } + devpriv->stc_writew(dev, start_stop_select, + AI_START_STOP_Select_Register); + + devpriv->ai_cmd2 = 0; + switch (cmd->stop_src) { + case TRIG_COUNT: + stop_count = cmd->stop_arg - 1; + + if (boardtype.reg_type == ni_reg_611x) { + /* have to take 3 stage adc pipeline into account */ + stop_count += num_adc_stages_611x; + } + /* stage number of scans */ + devpriv->stc_writel(dev, stop_count, AI_SC_Load_A_Registers); + + mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Trigger_Once; + devpriv->stc_writew(dev, mode1, AI_Mode_1_Register); + /* load SC (Scan Count) */ + devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register); + + devpriv->ai_continuous = 0; + if (stop_count == 0) { + devpriv->ai_cmd2 |= AI_End_On_End_Of_Scan; + interrupt_a_enable |= AI_STOP_Interrupt_Enable; + /* this is required to get the last sample + for nb_chan > 1, not sure why */ + if (cmd->nb_chan > 1) + start_stop_select |= + AI_STOP_Polarity | AI_STOP_Edge; + } + break; + case TRIG_NONE: + /* stage number of scans */ + devpriv->stc_writel(dev, 0, AI_SC_Load_A_Registers); + + mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Continuous; + devpriv->stc_writew(dev, mode1, AI_Mode_1_Register); + + /* load SC (Scan Count) */ + devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register); + + devpriv->ai_continuous = 1; + + break; + } + + switch (cmd->scan_begin_src) { + case TRIG_TIMER: + /* + stop bits for non 611x boards + AI_SI_Special_Trigger_Delay=0 + AI_Pre_Trigger=0 + AI_START_STOP_Select_Register: + AI_START_Polarity=0 (?) rising edge + AI_START_Edge=1 edge triggered + AI_START_Sync=1 (?) + AI_START_Select=0 SI_TC + AI_STOP_Polarity=0 rising edge + AI_STOP_Edge=0 level + AI_STOP_Sync=1 + AI_STOP_Select=19 external pin (configuration mem) + */ + start_stop_select |= AI_START_Edge | AI_START_Sync; + devpriv->stc_writew(dev, start_stop_select, + AI_START_STOP_Select_Register); + + mode2 |= AI_SI_Reload_Mode(0); + /* AI_SI_Initial_Load_Source=A */ + mode2 &= ~AI_SI_Initial_Load_Source; + + devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); + + /* load SI */ + timer = ni_ns_to_timer(dev, cmd->scan_begin_arg, + TRIG_ROUND_NEAREST); + devpriv->stc_writel(dev, timer, AI_SI_Load_A_Registers); + devpriv->stc_writew(dev, AI_SI_Load, AI_Command_1_Register); + break; + case TRIG_EXT: + if (cmd->scan_begin_arg & CR_EDGE) + start_stop_select |= AI_START_Edge; + /* AI_START_Polarity==1 is falling edge */ + if (cmd->scan_begin_arg & CR_INVERT) + start_stop_select |= AI_START_Polarity; + if (cmd->scan_begin_src != cmd->convert_src || + (cmd->scan_begin_arg & ~CR_EDGE) != + (cmd->convert_arg & ~CR_EDGE)) + start_stop_select |= AI_START_Sync; + start_stop_select |= + AI_START_Select(1 + CR_CHAN(cmd->scan_begin_arg)); + devpriv->stc_writew(dev, start_stop_select, + AI_START_STOP_Select_Register); + break; + } + + switch (cmd->convert_src) { + case TRIG_TIMER: + case TRIG_NOW: + if (cmd->convert_arg == 0 || cmd->convert_src == TRIG_NOW) + timer = 1; + else + timer = ni_ns_to_timer(dev, cmd->convert_arg, + TRIG_ROUND_NEAREST); + devpriv->stc_writew(dev, 1, AI_SI2_Load_A_Register); /* 0,0 does not work. 
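/*
 * Sketch of the TRIG_COUNT scan-count staging above: the SC counter is
 * loaded with stop_arg - 1 (terminal count included), and on 611x
 * boards the 3-stage ADC pipeline holds extra conversions that must be
 * added so the last real sample drains out.  The boardtype test is
 * reduced to a flag here; the value 3 comes from the driver's comment.
 */
#include <stdio.h>

static unsigned int stage_stop_count(unsigned int stop_arg, int is_611x)
{
	unsigned int stop_count = stop_arg - 1;

	if (is_611x)
		stop_count += 3;	/* num_adc_stages_611x */
	return stop_count;
}

int main(void)
{
	printf("%u %u\n", stage_stop_count(100, 0),	/* 99 */
			  stage_stop_count(100, 1));	/* 102 */
	return 0;
}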
*/ + devpriv->stc_writew(dev, timer, AI_SI2_Load_B_Register); + + /* AI_SI2_Reload_Mode = alternate */ + /* AI_SI2_Initial_Load_Source = A */ + mode2 &= ~AI_SI2_Initial_Load_Source; + mode2 |= AI_SI2_Reload_Mode; + devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); + + /* AI_SI2_Load */ + devpriv->stc_writew(dev, AI_SI2_Load, AI_Command_1_Register); + + mode2 |= AI_SI2_Reload_Mode; /* alternate */ + mode2 |= AI_SI2_Initial_Load_Source; /* B */ + + devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); + break; + case TRIG_EXT: + mode1 |= AI_CONVERT_Source_Select(1 + cmd->convert_arg); + if ((cmd->convert_arg & CR_INVERT) == 0) + mode1 |= AI_CONVERT_Source_Polarity; + devpriv->stc_writew(dev, mode1, AI_Mode_1_Register); + + mode2 |= AI_Start_Stop_Gate_Enable | AI_SC_Gate_Enable; + devpriv->stc_writew(dev, mode2, AI_Mode_2_Register); + + break; + } + + if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) { + + /* interrupt on FIFO, errors, SC_TC */ + interrupt_a_enable |= AI_Error_Interrupt_Enable | + AI_SC_TC_Interrupt_Enable; + +#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \ + !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + interrupt_a_enable |= AI_FIFO_Interrupt_Enable; +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + if (cmd->flags & TRIG_WAKE_EOS + || (devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) { + /* wake on end-of-scan */ + devpriv->aimode = AIMODE_SCAN; + } else { + devpriv->aimode = AIMODE_HALF_FULL; + } + + switch (devpriv->aimode) { + case AIMODE_HALF_FULL: + /* generate FIFO interrupts and DMA requests on half-full */ +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + devpriv->stc_writew(dev, AI_FIFO_Mode_HF_to_E, + AI_Mode_3_Register); +#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + devpriv->stc_writew(dev, AI_FIFO_Mode_HF, + AI_Mode_3_Register); +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + break; + case AIMODE_SAMPLE: + /* generate FIFO interrupts on non-empty */ + devpriv->stc_writew(dev, AI_FIFO_Mode_NE, + AI_Mode_3_Register); + break; + case AIMODE_SCAN: +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + devpriv->stc_writew(dev, AI_FIFO_Mode_NE, + AI_Mode_3_Register); +#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + devpriv->stc_writew(dev, AI_FIFO_Mode_HF, + AI_Mode_3_Register); +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + interrupt_a_enable |= AI_STOP_Interrupt_Enable; + break; + default: + break; + } + + /* Clear interrupts */ + devpriv->stc_writew(dev, + AI_Error_Interrupt_Ack | AI_STOP_Interrupt_Ack | + AI_START_Interrupt_Ack | AI_START2_Interrupt_Ack | + AI_START1_Interrupt_Ack | AI_SC_TC_Interrupt_Ack | + AI_SC_TC_Error_Confirm, Interrupt_A_Ack_Register); /* clear interrupts */ + + ni_set_bits(dev, Interrupt_A_Enable_Register, + interrupt_a_enable, 1); + + a4l_info(dev, "Interrupt_A_Enable_Register = 0x%04x\n", + devpriv->int_a_enable_reg); + } else { + /* interrupt on nothing */ + ni_set_bits(dev, Interrupt_A_Enable_Register, ~0, 0); + + /* XXX start polling if necessary */ + a4l_warn(dev, "ni_ai_cmd: interrupting on nothing\n"); + } + + /* end configuration */ + devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register); + + switch (cmd->scan_begin_src) { + case TRIG_TIMER: + devpriv->stc_writew(dev, + AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm, + AI_Command_1_Register); + break; + case TRIG_EXT: + /* XXX AI_SI_Arm? 
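/*
 * Condensed sketch of the FIFO wakeup policy selected above: wake the
 * reader at each end of scan when the command requests it
 * (TRIG_WAKE_EOS) or when the command ends on end-of-scan anyway;
 * otherwise batch wakeups at the FIFO half-full mark.  The two flag
 * values below are stand-ins; only the decision shape is the driver's.
 */
#include <stdio.h>

enum aimode { AIMODE_HALF_FULL, AIMODE_SAMPLE, AIMODE_SCAN };

#define TRIG_WAKE_EOS		0x1	/* illustrative values */
#define AI_END_ON_END_OF_SCAN	0x2

static enum aimode pick_aimode(unsigned int flags, unsigned int ai_cmd2)
{
	if ((flags & TRIG_WAKE_EOS) || (ai_cmd2 & AI_END_ON_END_OF_SCAN))
		return AIMODE_SCAN;	/* low latency, per-scan wakeups */
	return AIMODE_HALF_FULL;	/* throughput, half-FIFO wakeups */
}

int main(void)
{
	printf("%d %d\n", pick_aimode(TRIG_WAKE_EOS, 0),	/* 2 */
			  pick_aimode(0, 0));			/* 0 */
	return 0;
}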
*/ + devpriv->stc_writew(dev, + AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm, + AI_Command_1_Register); + break; + } + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + { + int retval = ni_ai_setup_MITE_dma(subd); + if (retval) + return retval; + } + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + switch (cmd->start_src) { + case TRIG_NOW: + /* AI_START1_Pulse */ + devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2, + AI_Command_2_Register); + break; + case TRIG_EXT: + /* TODO: set trigger callback field to NULL */ + break; + case TRIG_INT: + /* TODO: set trigger callback field to ni_ai_inttrig */ + break; + } + + a4l_info(dev, "exit\n"); + + return 0; +} + +int ni_ai_config_analog_trig(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int a, b, modebits; + int err = 0; + uint32_t *data = (uint32_t *)insn->data; + + /* data[1] is flags + * data[2] is analog line + * data[3] is set level + * data[4] is reset level */ + if (!boardtype.has_analog_trig) + return -EINVAL; + + if ((data[1] & 0xffff0000) != A4L_EV_SCAN_BEGIN) { + data[1] &= (A4L_EV_SCAN_BEGIN | 0xffff); + err++; + } + if (data[2] >= boardtype.n_adchan) { + data[2] = boardtype.n_adchan - 1; + err++; + } + if (data[3] > 255) { /* a */ + data[3] = 255; + err++; + } + if (data[4] > 255) { /* b */ + data[4] = 255; + err++; + } + /* + * 00 ignore + * 01 set + * 10 reset + * + * modes: + * 1 level: +b- +a- + * high mode 00 00 01 10 + * low mode 00 00 10 01 + * 2 level: (a<b) + * hysteresis low mode 10 00 00 01 + * hysteresis high mode 01 00 00 10 + * middle mode 10 01 01 10 + */ + + a = data[3]; + b = data[4]; + modebits = data[1] & 0xff; + if (modebits & 0xf0) { + /* two level mode */ + if (b < a) { + /* swap order */ + a = data[4]; + b = data[3]; + modebits = ((data[1] & 0xf) << 4) | + ((data[1] & 0xf0) >> 4); + } + devpriv->atrig_low = a; + devpriv->atrig_high = b; + switch (modebits) { + case 0x81: /* low hysteresis mode */ + devpriv->atrig_mode = 6; + break; + case 0x42: /* high hysteresis mode */ + devpriv->atrig_mode = 3; + break; + case 0x96: /* middle window mode */ + devpriv->atrig_mode = 2; + break; + default: + data[1] &= ~0xff; + err++; + } + } else { + /* one level mode */ + if (b != 0) { + data[4] = 0; + err++; + } + switch (modebits) { + case 0x06: /* high window mode */ + devpriv->atrig_high = a; + devpriv->atrig_mode = 0; + break; + case 0x09: /* low window mode */ + devpriv->atrig_low = a; + devpriv->atrig_mode = 1; + break; + default: + data[1] &= ~0xff; + err++; + } + } + + if (err) + return -EAGAIN; + + return 0; +} + +int ni_ai_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int *data = (unsigned int *)insn->data; + + if (insn->data_size < sizeof(unsigned int)) + return -EINVAL; + + switch (data[0]) { + case A4L_INSN_CONFIG_ANALOG_TRIG: + return ni_ai_config_analog_trig(subd, insn); + case A4L_INSN_CONFIG_ALT_SOURCE: + if (boardtype.reg_type & ni_reg_m_series_mask) { + if (data[1] & ~(MSeries_AI_Bypass_Cal_Sel_Pos_Mask | + MSeries_AI_Bypass_Cal_Sel_Neg_Mask | + MSeries_AI_Bypass_Mode_Mux_Mask | + MSeries_AO_Bypass_AO_Cal_Sel_Mask)) { + return -EINVAL; + } + devpriv->ai_calib_source = data[1]; + } else if (boardtype.reg_type == ni_reg_6143) { + unsigned int calib_source; + + calib_source = data[1] & 0xf; + + if (calib_source > 0xF) + return -EINVAL; + + devpriv->ai_calib_source = 
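/*
 * The two-level branch of the analog-trigger decode above in isolation:
 * modebits packs four 2-bit actions (00 ignore, 01 set, 10 reset) for
 * the crossings +b- +a-, and when the caller passes the levels in the
 * wrong order the driver swaps a/b and mirrors the two nibbles before
 * matching.  The returned numbers are what the driver stores in
 * atrig_mode.
 */
#include <stdio.h>

static int decode_two_level(unsigned int modebits,
			    unsigned int *a, unsigned int *b)
{
	if (*b < *a) {	/* swap levels, mirror the action nibbles */
		unsigned int t = *a; *a = *b; *b = t;
		modebits = ((modebits & 0x0f) << 4) | ((modebits & 0xf0) >> 4);
	}
	switch (modebits) {
	case 0x81: return 6;	/* low hysteresis */
	case 0x42: return 3;	/* high hysteresis */
	case 0x96: return 2;	/* middle window */
	default:   return -1;	/* unsupported combination */
	}
}

int main(void)
{
	unsigned int a = 200, b = 100;	/* given out of order */
	int mode = decode_two_level(0x18, &a, &b);

	printf("mode=%d a=%u b=%u\n", mode, a, b);	/* mode=6 a=100 b=200 */
	return 0;
}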
calib_source; + ni_writew(calib_source, Calibration_Channel_6143); + } else { + unsigned int calib_source; + unsigned int calib_source_adjust; + + calib_source = data[1] & 0xf; + calib_source_adjust = (data[1] >> 4) & 0xff; + + if (calib_source >= 8) + return -EINVAL; + devpriv->ai_calib_source = calib_source; + if (boardtype.reg_type == ni_reg_611x) { + ni_writeb(calib_source_adjust, + Cal_Gain_Select_611x); + } + } + return 0; + default: + break; + } + + return -EINVAL; +} + +/* munge data from unsigned to 2's complement for analog output bipolar modes */ +static void ni_ao_munge(struct a4l_subdevice *subd, void *buf, unsigned long size) +{ + struct a4l_device *dev = subd->dev; + struct a4l_cmd_desc *cmd = a4l_get_cmd(subd); + int chan_idx = a4l_get_chan(subd); + uint16_t *array = buf; + unsigned int i, range, offset; + + offset = 1 << (boardtype.aobits - 1); + for (i = 0; i < size / sizeof(uint16_t); i++) { + + range = CR_RNG(cmd->chan_descs[chan_idx]); + if (boardtype.ao_unipolar == 0 || (range & 1) == 0) + array[i] -= offset; + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + array[i] = cpu_to_le16(array[i]); +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + chan_idx++; + chan_idx %= cmd->nb_chan; + } +} + +static int ni_m_series_ao_config_chan_descs(struct a4l_subdevice *subd, + unsigned int chanspec[], + unsigned int n_chans, int timed) +{ + unsigned int range; + unsigned int chan; + unsigned int conf; + int i, invert = 0; + struct a4l_device *dev = subd->dev; + + for (i = 0; i < boardtype.n_aochan; ++i) { + ni_writeb(0xf, M_Offset_AO_Waveform_Order(i)); + } + for (i = 0; i < n_chans; i++) { + struct a4l_range *rng; + int idx; + chan = CR_CHAN(chanspec[i]); + range = CR_RNG(chanspec[i]); + + /* TODO: this a huge hack! + Something is missing in the kernel API. We must + allow access on the proper range descriptor */ + idx = (subd->rng_desc->mode != + A4L_RNG_GLOBAL_RNGDESC) ? chan : 0; + rng = &(subd->rng_desc->rngtabs[idx]->rngs[range]); + + invert = 0; + conf = 0; + switch (rng->max - rng->min) { + case 20000000: + conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits; + ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan)); + break; + case 10000000: + conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits; + ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan)); + break; + case 4000000: + conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits; + ni_writeb(MSeries_Attenuate_x5_Bit, + M_Offset_AO_Reference_Attenuation(chan)); + break; + case 2000000: + conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits; + ni_writeb(MSeries_Attenuate_x5_Bit, + M_Offset_AO_Reference_Attenuation(chan)); + break; + default: + a4l_err(subd->dev, + "%s: bug! unhandled ao reference voltage\n", + __FUNCTION__); + break; + } + switch (rng->max + rng->min) { + case 0: + conf |= MSeries_AO_DAC_Offset_0V_Bits; + break; + case 10000000: + conf |= MSeries_AO_DAC_Offset_5V_Bits; + break; + default: + a4l_err(subd->dev, + "%s: bug! 
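/*
 * ni_ao_munge above converts offset-binary samples to two's complement
 * for bipolar ranges by subtracting the half-scale offset
 * 1 << (aobits - 1); for a 16-bit DAC that is the same as flipping the
 * top bit in unsigned arithmetic.  Quick numeric check of the mapping:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int aobits = 16;
	uint16_t offset = 1u << (aobits - 1);

	printf("%04x %04x %04x\n",
	       (uint16_t)(0x8000 - offset),	/* mid scale  -> 0000 */
	       (uint16_t)(0xffff - offset),	/* full scale -> 7fff */
	       (uint16_t)(0x0000 - offset));	/* zero       -> 8000 */
	return 0;
}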
unhandled ao offset voltage\n", + __FUNCTION__); + break; + } + if (timed) + conf |= MSeries_AO_Update_Timed_Bit; + ni_writeb(conf, M_Offset_AO_Config_Bank(chan)); + devpriv->ao_conf[chan] = conf; + ni_writeb(i, M_Offset_AO_Waveform_Order(chan)); + } + return invert; +} + +static int ni_old_ao_config_chan_descs(struct a4l_subdevice *subd, + unsigned int chanspec[], + unsigned int n_chans) +{ + struct a4l_device *dev = subd->dev; + unsigned int range; + unsigned int chan; + unsigned int conf; + int i, invert = 0; + + for (i = 0; i < n_chans; i++) { + chan = CR_CHAN(chanspec[i]); + range = CR_RNG(chanspec[i]); + conf = AO_Channel(chan); + + if (boardtype.ao_unipolar) { + if ((range & 1) == 0) { + conf |= AO_Bipolar; + invert = (1 << (boardtype.aobits - 1)); + } else { + invert = 0; + } + if (range & 2) + conf |= AO_Ext_Ref; + } else { + conf |= AO_Bipolar; + invert = (1 << (boardtype.aobits - 1)); + } + + /* not all boards can deglitch, but this shouldn't hurt */ + if (chanspec[i] & CR_DEGLITCH) + conf |= AO_Deglitch; + + /* analog reference */ + /* AREF_OTHER connects AO ground to AI ground, i think */ + conf |= (CR_AREF(chanspec[i]) == + AREF_OTHER) ? AO_Ground_Ref : 0; + + ni_writew(conf, AO_Configuration); + devpriv->ao_conf[chan] = conf; + } + return invert; +} + +static int ni_ao_config_chan_descs(struct a4l_subdevice *subd, + unsigned int chanspec[], + unsigned int n_chans, int timed) +{ + struct a4l_device *dev = subd->dev; + + if (boardtype.reg_type & ni_reg_m_series_mask) + return ni_m_series_ao_config_chan_descs(subd, + chanspec, + n_chans, timed); + else + return ni_old_ao_config_chan_descs(subd, chanspec, n_chans); +} + +int ni_ao_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint16_t *data = (uint16_t *)insn->data; + + data[0] = devpriv->ao[CR_CHAN(insn->chan_desc)]; + + return 0; +} + +int ni_ao_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int chan = CR_CHAN(insn->chan_desc); + uint16_t *data = (uint16_t *)insn->data; + unsigned int invert; + + invert = ni_ao_config_chan_descs(subd, + &insn->chan_desc, 1, 0); + + devpriv->ao[chan] = data[0]; + + if (boardtype.reg_type & ni_reg_m_series_mask) { + ni_writew(data[0], M_Offset_DAC_Direct_Data(chan)); + } else + ni_writew(data[0] ^ invert, + (chan) ? DAC1_Direct_Data : DAC0_Direct_Data); + + return 0; +} + +int ni_ao_insn_write_671x(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int chan = CR_CHAN(insn->chan_desc); + uint16_t *data = (uint16_t *)insn->data; + unsigned int invert; + + ao_win_out(1 << chan, AO_Immediate_671x); + invert = 1 << (boardtype.aobits - 1); + + ni_ao_config_chan_descs(subd, &insn->chan_desc, 1, 0); + + devpriv->ao[chan] = data[0]; + ao_win_out(data[0] ^ invert, DACx_Direct_Data_671x(chan)); + + return 0; +} + +int ni_ao_inttrig(struct a4l_subdevice *subd, lsampl_t trignum) +{ + struct a4l_device *dev = subd->dev; + int ret, interrupt_b_bits, i; + static const int timeout = 1000; + + if (trignum != 0) + return -EINVAL; + + /* TODO: disable trigger until a command is recorded. 
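/*
 * The legacy (pre-M-series) range decode above, reduced to its two
 * decisions: bit 0 of the range index selects unipolar on boards that
 * support it, bit 1 selects the external reference, and every bipolar
 * range gets the half-scale invert that ni_ao_insn_write later XORs
 * into the sample.  AO_BIPOLAR/AO_EXT_REF stand in for the real
 * register bits.
 */
#include <stdio.h>

#define AO_BIPOLAR (1u << 0)	/* illustrative bit positions */
#define AO_EXT_REF (1u << 1)

static unsigned int old_ao_conf(unsigned int range, int board_unipolar,
				unsigned int aobits, unsigned int *invert)
{
	unsigned int conf = 0;

	*invert = 0;
	if (!board_unipolar || (range & 1) == 0) {	/* bipolar range */
		conf |= AO_BIPOLAR;
		*invert = 1u << (aobits - 1);
	}
	if (board_unipolar && (range & 2))
		conf |= AO_EXT_REF;
	return conf;
}

int main(void)
{
	unsigned int invert;
	unsigned int conf = old_ao_conf(0, 1, 12, &invert);

	printf("conf=%#x invert=%#x\n", conf, invert);	/* 0x1, 0x800 */
	return 0;
}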
+ Null trig at beginning prevent ao start trigger from executing + more than once per command (and doing things like trying to + allocate the ao dma channel multiple times) */ + + ni_set_bits(dev, Interrupt_B_Enable_Register, + AO_FIFO_Interrupt_Enable | AO_Error_Interrupt_Enable, 0); + interrupt_b_bits = AO_Error_Interrupt_Enable; + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + devpriv->stc_writew(dev, 1, DAC_FIFO_Clear); + if (boardtype.reg_type & ni_reg_6xxx_mask) + ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x); + ret = ni_ao_setup_MITE_dma(subd); + if (ret) + return ret; + ret = ni_ao_wait_for_dma_load(subd); + if (ret < 0) + return ret; +#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + ret = ni_ao_prep_fifo(subd); + if (ret == 0) + return -EPIPE; + + interrupt_b_bits |= AO_FIFO_Interrupt_Enable; +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + devpriv->stc_writew(dev, devpriv->ao_mode3 | AO_Not_An_UPDATE, + AO_Mode_3_Register); + devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register); + /* wait for DACs to be loaded */ + for (i = 0; i < timeout; i++) { + a4l_udelay(1); + if ((devpriv->stc_readw(dev,Joint_Status_2_Register) & + AO_TMRDACWRs_In_Progress_St) == 0) + break; + } + if (i == timeout) { + a4l_err(dev, + "ni_ao_inttrig: timed out " + "waiting for AO_TMRDACWRs_In_Progress_St to clear"); + return -EIO; + } + /* stc manual says we are need to clear error interrupt after + AO_TMRDACWRs_In_Progress_St clears */ + devpriv->stc_writew(dev, AO_Error_Interrupt_Ack, + Interrupt_B_Ack_Register); + + ni_set_bits(dev, Interrupt_B_Enable_Register, interrupt_b_bits, 1); + + devpriv->stc_writew(dev, + devpriv->ao_cmd1 | + AO_UI_Arm | AO_UC_Arm | + AO_BC_Arm | AO_DAC1_Update_Mode | + AO_DAC0_Update_Mode, + AO_Command_1_Register); + + devpriv->stc_writew(dev, + devpriv->ao_cmd2 | AO_START1_Pulse, + AO_Command_2_Register); + + return 0; +} + +int ni_ao_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + struct a4l_device *dev = subd->dev; + + int bits; + int i; + unsigned trigvar; + + if (a4l_get_irq(dev) == A4L_IRQ_UNUSED) { + a4l_err(dev, "ni_ao_cmd: cannot run command without an irq"); + return -EIO; + } + + devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register); + + devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register); + + if (boardtype.reg_type & ni_reg_6xxx_mask) { + ao_win_out(CLEAR_WG, AO_Misc_611x); + + bits = 0; + for (i = 0; i < cmd->nb_chan; i++) { + int chan; + + chan = CR_CHAN(cmd->chan_descs[i]); + bits |= 1 << chan; + ao_win_out(chan, AO_Waveform_Generation_611x); + } + ao_win_out(bits, AO_Timed_611x); + } + + ni_ao_config_chan_descs(subd, cmd->chan_descs, cmd->nb_chan, 1); + + if (cmd->stop_src == TRIG_NONE) { + devpriv->ao_mode1 |= AO_Continuous; + devpriv->ao_mode1 &= ~AO_Trigger_Once; + } else { + devpriv->ao_mode1 &= ~AO_Continuous; + devpriv->ao_mode1 |= AO_Trigger_Once; + } + devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); + devpriv->ao_trigger_select &= + ~(AO_START1_Polarity | AO_START1_Select(-1)); + devpriv->ao_trigger_select |= AO_START1_Edge | AO_START1_Sync; + devpriv->stc_writew(dev, devpriv->ao_trigger_select, + AO_Trigger_Select_Register); + devpriv->ao_mode3 &= ~AO_Trigger_Length; + devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register); + + devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); + devpriv->ao_mode2 &= ~AO_BC_Initial_Load_Source; + devpriv->stc_writew(dev, devpriv->ao_mode2, 
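/*
 * The DAC-load wait above is a bounded busy-poll: spin on a status bit
 * with a one-microsecond delay per pass and give up after a fixed
 * iteration count instead of hanging.  Skeleton of the idiom, with
 * read_status()/delay_us() standing in for the STC register read and
 * a4l_udelay():
 */
#include <stdio.h>

#define BUSY_BIT 0x1
#define TIMEOUT  1000

static unsigned int read_status(void)
{
	static int n;			/* pretend the hardware clears */
	return ++n < 5 ? BUSY_BIT : 0;	/* the bit after a few reads   */
}

static void delay_us(unsigned int us) { (void)us; }

int main(void)
{
	int i;

	for (i = 0; i < TIMEOUT; i++) {
		delay_us(1);
		if (!(read_status() & BUSY_BIT))
			break;
	}
	if (i == TIMEOUT)
		printf("timed out\n");	/* the -EIO path above */
	else
		printf("done after %d polls\n", i);
	return 0;
}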
AO_Mode_2_Register); + if (cmd->stop_src == TRIG_NONE) { + devpriv->stc_writel(dev, 0xffffff, AO_BC_Load_A_Register); + } else { + devpriv->stc_writel(dev, 0, AO_BC_Load_A_Register); + } + devpriv->stc_writew(dev, AO_BC_Load, AO_Command_1_Register); + devpriv->ao_mode2 &= ~AO_UC_Initial_Load_Source; + devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); + switch (cmd->stop_src) { + case TRIG_COUNT: + devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register); + devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register); + devpriv->stc_writel(dev, cmd->stop_arg - 1, + AO_UC_Load_A_Register); + break; + case TRIG_NONE: + devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register); + devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register); + devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register); + break; + default: + devpriv->stc_writel(dev, 0, AO_UC_Load_A_Register); + devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register); + devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register); + } + + devpriv->ao_mode1 &= + ~(AO_UI_Source_Select(0x1f) | AO_UI_Source_Polarity | + AO_UPDATE_Source_Select(0x1f) | AO_UPDATE_Source_Polarity); + switch (cmd->scan_begin_src) { + case TRIG_TIMER: + devpriv->ao_cmd2 &= ~AO_BC_Gate_Enable; + trigvar = + ni_ns_to_timer(dev, cmd->scan_begin_arg, + TRIG_ROUND_NEAREST); + devpriv->stc_writel(dev, 1, AO_UI_Load_A_Register); + devpriv->stc_writew(dev, AO_UI_Load, AO_Command_1_Register); + devpriv->stc_writel(dev, trigvar, AO_UI_Load_A_Register); + break; + case TRIG_EXT: + devpriv->ao_mode1 |= + AO_UPDATE_Source_Select(cmd->scan_begin_arg); + if (cmd->scan_begin_arg & CR_INVERT) + devpriv->ao_mode1 |= AO_UPDATE_Source_Polarity; + devpriv->ao_cmd2 |= AO_BC_Gate_Enable; + break; + default: + BUG(); + break; + } + devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register); + devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); + devpriv->ao_mode2 &= + ~(AO_UI_Reload_Mode(3) | AO_UI_Initial_Load_Source); + devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); + + if ((boardtype.reg_type & ni_reg_6xxx_mask) == 0) { + if (cmd->scan_end_arg > 1) { + devpriv->ao_mode1 |= AO_Multiple_Channels; + devpriv->stc_writew(dev, + AO_Number_Of_Channels(cmd->scan_end_arg - 1) | + AO_UPDATE_Output_Select + (AO_Update_Output_High_Z), + AO_Output_Control_Register); + } else { + unsigned int bits; + devpriv->ao_mode1 &= ~AO_Multiple_Channels; + bits = AO_UPDATE_Output_Select(AO_Update_Output_High_Z); + if (boardtype.reg_type & ni_reg_m_series_mask) { + bits |= AO_Number_Of_Channels(0); + } else { + bits |= AO_Number_Of_Channels(CR_CHAN(cmd-> + chan_descs[0])); + } + devpriv->stc_writew(dev, bits, + AO_Output_Control_Register); + } + devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); + } + + devpriv->stc_writew(dev, AO_DAC0_Update_Mode | AO_DAC1_Update_Mode, + AO_Command_1_Register); + + devpriv->ao_mode3 |= AO_Stop_On_Overrun_Error; + devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register); + + devpriv->ao_mode2 &= ~AO_FIFO_Mode_Mask; + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + devpriv->ao_mode2 |= AO_FIFO_Mode_HF_to_F; +#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + devpriv->ao_mode2 |= AO_FIFO_Mode_HF; +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + devpriv->ao_mode2 &= ~AO_FIFO_Retransmit_Enable; + devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); + + bits = AO_BC_Source_Select | AO_UPDATE_Pulse_Width | + 
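/*
 * The trigger-select update above relies on the FIELD(-1) mask idiom:
 * if AO_START1_Select(x) masks and shifts x into a bitfield, passing -1
 * sets every bit of that field, so ~AO_START1_Select(-1) clears the
 * whole field before the new selector is ORed in.  The shift and width
 * below are made up; only the idiom itself is taken from the driver.
 */
#include <stdio.h>

#define START1_SELECT(x) (((x) & 0x1f) << 3)	/* illustrative field */

int main(void)
{
	unsigned int reg = START1_SELECT(9) | 0x4;	/* old selector + other bits */

	reg &= ~START1_SELECT(-1);	/* wipe the 5-bit field */
	reg |= START1_SELECT(2);	/* install the new selector */
	printf("%#x\n", reg);		/* 0x14: selector 2, 0x4 preserved */
	return 0;
}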
AO_TMRDACWR_Pulse_Width; + if (boardtype.ao_fifo_depth) + bits |= AO_FIFO_Enable; + else + bits |= AO_DMA_PIO_Control; +#if 0 + /* F Hess: windows driver does not set AO_Number_Of_DAC_Packages bit for 6281, + verified with bus analyzer. */ + if (boardtype.reg_type & ni_reg_m_series_mask) + bits |= AO_Number_Of_DAC_Packages; +#endif + devpriv->stc_writew(dev, bits, AO_Personal_Register); + /* enable sending of ao dma requests */ + devpriv->stc_writew(dev, AO_AOFREQ_Enable, AO_Start_Select_Register); + + devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register); + + if (cmd->stop_src == TRIG_COUNT) { + devpriv->stc_writew(dev, AO_BC_TC_Interrupt_Ack, + Interrupt_B_Ack_Register); + ni_set_bits(dev, Interrupt_B_Enable_Register, + AO_BC_TC_Interrupt_Enable, 1); + } + + return 0; +} + +struct a4l_cmd_desc mio_ao_cmd_mask = { + .idx_subd = 0, + .start_src = TRIG_INT, + .scan_begin_src = TRIG_TIMER | TRIG_EXT, + .convert_src = TRIG_NOW, + .scan_end_src = TRIG_COUNT, + .stop_src = TRIG_COUNT | TRIG_NONE, +}; + +int ni_ao_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + struct a4l_device *dev = subd->dev; + + /* Make sure trigger sources are unique and mutually compatible */ + + if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) + return -EINVAL; + + /* Make sure arguments are trivially compatible */ + + if (cmd->start_arg != 0) { + cmd->start_arg = 0; + return -EINVAL; + } + + if (cmd->scan_begin_src == TRIG_TIMER) { + if (cmd->scan_begin_arg < boardtype.ao_speed) { + cmd->scan_begin_arg = boardtype.ao_speed; + return -EINVAL; + } + if (cmd->scan_begin_arg > devpriv->clock_ns * 0xffffff) { + /* XXX check */ + cmd->scan_begin_arg = devpriv->clock_ns * 0xffffff; + return -EINVAL; + } + } + + if (cmd->convert_arg != 0) { + cmd->convert_arg = 0; + return -EINVAL; + } + if (cmd->scan_end_arg != cmd->nb_chan) { + cmd->scan_end_arg = cmd->nb_chan; + return -EINVAL; + } + if (cmd->stop_src == TRIG_COUNT) { + /* XXX check */ + if (cmd->stop_arg > 0x00ffffff) { + cmd->stop_arg = 0x00ffffff; + return -EINVAL; + } + } else { + /* TRIG_NONE */ + if (cmd->stop_arg != 0) { + cmd->stop_arg = 0; + return -EINVAL; + } + } + + /* step 4: fix up any arguments */ + if (cmd->scan_begin_src == TRIG_TIMER) { + + if(cmd->scan_begin_arg != + ni_timer_to_ns(dev, + ni_ns_to_timer(dev, + cmd->scan_begin_arg, + cmd->flags & TRIG_ROUND_MASK))) + return -EINVAL; + } + + return 0; +} + +void ni_ao_reset(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + + ni_release_ao_mite_channel(dev); + + devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register); + devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register); + ni_set_bits(dev, Interrupt_B_Enable_Register, ~0, 0); + devpriv->stc_writew(dev, AO_BC_Source_Select, AO_Personal_Register); + devpriv->stc_writew(dev, 0x3f98, Interrupt_B_Ack_Register); + devpriv->stc_writew(dev, AO_BC_Source_Select | AO_UPDATE_Pulse_Width | + AO_TMRDACWR_Pulse_Width, AO_Personal_Register); + devpriv->stc_writew(dev, 0, AO_Output_Control_Register); + devpriv->stc_writew(dev, 0, AO_Start_Select_Register); + devpriv->ao_cmd1 = 0; + devpriv->stc_writew(dev, devpriv->ao_cmd1, AO_Command_1_Register); + devpriv->ao_cmd2 = 0; + devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register); + devpriv->ao_mode1 = 0; + devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register); + devpriv->ao_mode2 = 0; + devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register); + if (boardtype.reg_type & ni_reg_m_series_mask) + 
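/*
 * ni_ao_cmdtest above follows the usual Comedi-style contract: when an
 * argument is out of range, write back the nearest legal value and
 * fail, so a caller can iterate test/fix/test until the command passes
 * unchanged.  Hedged user-space illustration with one made-up limit:
 */
#include <stdio.h>

#define AO_SPEED_NS 10000u	/* illustrative board minimum period */

struct cmd { unsigned int scan_begin_arg; };

static int cmdtest(struct cmd *c)
{
	if (c->scan_begin_arg < AO_SPEED_NS) {
		c->scan_begin_arg = AO_SPEED_NS;	/* repair... */
		return -1;				/* ...but still fail */
	}
	return 0;
}

int main(void)
{
	struct cmd c = { .scan_begin_arg = 100 };

	while (cmdtest(&c))
		;	/* second pass succeeds with the repaired value */
	printf("final period %u ns\n", c.scan_begin_arg);
	return 0;
}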
devpriv->ao_mode3 = AO_Last_Gate_Disable; + else + devpriv->ao_mode3 = 0; + devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register); + devpriv->ao_trigger_select = 0; + devpriv->stc_writew(dev, devpriv->ao_trigger_select, + AO_Trigger_Select_Register); + if (boardtype.reg_type & ni_reg_6xxx_mask) { + ao_win_out(0x3, AO_Immediate_671x); + ao_win_out(CLEAR_WG, AO_Misc_611x); + } + devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register); +} + +/* digital io */ + +int ni_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int *data = (unsigned int *)insn->data; + +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "chan=%d io=%d\n", CR_CHAN(insn->chan_desc), data[0]); +#endif /* CONFIG_DEBUG_DIO */ + + switch (data[0]) { + case A4L_INSN_CONFIG_DIO_OUTPUT: + devpriv->io_bits |= 1 << CR_CHAN(insn->chan_desc); + break; + case A4L_INSN_CONFIG_DIO_INPUT: + devpriv->io_bits &= ~(1 << CR_CHAN(insn->chan_desc)); + break; + case A4L_INSN_CONFIG_DIO_QUERY: + data[1] = (devpriv->io_bits & + (1 << CR_CHAN(insn->chan_desc))) ? + A4L_OUTPUT : A4L_INPUT; + return 0; + break; + default: + return -EINVAL; + } + + devpriv->dio_control &= ~DIO_Pins_Dir_Mask; + devpriv->dio_control |= DIO_Pins_Dir(devpriv->io_bits); + devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); + + return 1; +} + +int ni_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint8_t *data = (uint8_t *)insn->data; + +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]); +#endif + + if (insn->data_size != 2 * sizeof(uint8_t)) + return -EINVAL; + + if (data[0]) { + /* Perform check to make sure we're not using the + serial part of the dio */ + if ((data[0] & (DIO_SDIN | DIO_SDOUT)) + && devpriv->serial_interval_ns) + return -EBUSY; + + devpriv->dio_state &= ~data[0]; + devpriv->dio_state |= (data[0] & data[1]); + devpriv->dio_output &= ~DIO_Parallel_Data_Mask; + devpriv->dio_output |= + DIO_Parallel_Data_Out(devpriv->dio_state); + devpriv->stc_writew(dev, devpriv->dio_output, + DIO_Output_Register); + } + + data[1] = (uint8_t) + devpriv->stc_readw(dev, DIO_Parallel_Input_Register); + + return 0; +} + +int ni_m_series_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int *data = (unsigned int *)insn->data; + +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "chan=%d io=%d\n", CR_CHAN(insn->chan_desc), data[0]); +#endif + switch (data[0]) { + case A4L_INSN_CONFIG_DIO_OUTPUT: + devpriv->io_bits |= 1 << CR_CHAN(insn->chan_desc); + break; + case A4L_INSN_CONFIG_DIO_INPUT: + devpriv->io_bits &= ~(1 << CR_CHAN(insn->chan_desc)); + break; + case A4L_INSN_CONFIG_DIO_QUERY: + data[1] = (devpriv->io_bits & + (1 << CR_CHAN(insn->chan_desc))) ? 
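/*
 * The DIO bits handler above uses the common mask/bits calling
 * convention: data[0] selects which lines to drive, data[1] carries
 * their new levels, and the port state is read back into data[1].  The
 * state update on its own:
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t dio_update(uint8_t state, uint8_t mask, uint8_t bits)
{
	state &= ~mask;		/* clear the selected lines */
	state |= (mask & bits);	/* set their new values */
	return state;
}

int main(void)
{
	uint8_t state = dio_update(0xF0, 0x0F, 0x05);

	printf("%#x\n", state);	/* 0xf5: low nibble driven to 0101 */
	return 0;
}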
+ A4L_OUTPUT : A4L_INPUT; + return 0; + break; + default: + return -EINVAL; + } + + ni_writel(devpriv->io_bits, M_Offset_DIO_Direction); + + return 0; +} + +int ni_m_series_dio_insn_bits_8(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint8_t *data = (uint8_t *)insn->data; + +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]); +#endif + + if (insn->data_size != 2 * sizeof(uint8_t)) + return -EINVAL; + + if (data[0]) { + devpriv->dio_state &= ~data[0]; + devpriv->dio_state |= (data[0] & data[1]); + ni_writel(devpriv->dio_state, M_Offset_Static_Digital_Output); + } + + data[1] = (uint8_t) ni_readl(M_Offset_Static_Digital_Input); + + return 0; +} + +int ni_m_series_dio_insn_bits_32(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint32_t *data = (uint32_t *)insn->data; + +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]); +#endif + + if (insn->data_size != 2 * sizeof(uint32_t)) + return -EINVAL; + + if (data[0]) { + devpriv->dio_state &= ~data[0]; + devpriv->dio_state |= (data[0] & data[1]); + ni_writel(devpriv->dio_state, M_Offset_Static_Digital_Output); + } + + data[1] = ni_readl(M_Offset_Static_Digital_Input); + + return 0; +} + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + +struct a4l_cmd_desc mio_dio_cmd_mask = { + .idx_subd = 0, + .start_src = TRIG_INT, + .scan_begin_src = TRIG_EXT, + .convert_src = TRIG_NOW, + .scan_end_src = TRIG_COUNT, + .stop_src = TRIG_NONE, +}; + +int ni_cdio_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + unsigned int i; + + /* Make sure arguments are trivially compatible */ + + if (cmd->start_arg != 0) { + cmd->start_arg = 0; + return -EINVAL; + } + + if ((cmd->scan_begin_arg & + PACK_FLAGS(CDO_Sample_Source_Select_Mask, 0, 0, CR_INVERT)) != + cmd->scan_begin_arg) + return -EINVAL; + + if (cmd->convert_arg != 0) { + cmd->convert_arg = 0; + return -EINVAL; + } + + if (cmd->scan_end_arg != cmd->nb_chan) { + cmd->scan_end_arg = cmd->nb_chan; + return -EINVAL; + } + + if (cmd->stop_arg != 0) { + cmd->stop_arg = 0; + return -EINVAL; + } + + /* Check chan_descs */ + + for (i = 0; i < cmd->nb_chan; ++i) { + if (cmd->chan_descs[i] != i) + return -EINVAL; + } + + return 0; +} + +int ni_cdio_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + struct a4l_device *dev = subd->dev; + unsigned cdo_mode_bits = CDO_FIFO_Mode_Bit | CDO_Halt_On_Error_Bit; + + ni_writel(CDO_Reset_Bit, M_Offset_CDIO_Command); + switch (cmd->scan_begin_src) { + case TRIG_EXT: + cdo_mode_bits |= + CR_CHAN(cmd->scan_begin_arg) & + CDO_Sample_Source_Select_Mask; + break; + default: + BUG(); + break; + } + if (cmd->scan_begin_arg & CR_INVERT) + cdo_mode_bits |= CDO_Polarity_Bit; + ni_writel(cdo_mode_bits, M_Offset_CDO_Mode); + + if (devpriv->io_bits) { + ni_writel(devpriv->dio_state, M_Offset_CDO_FIFO_Data); + ni_writel(CDO_SW_Update_Bit, M_Offset_CDIO_Command); + ni_writel(devpriv->io_bits, M_Offset_CDO_Mask_Enable); + } else { + a4l_err(dev, + "ni_cdio_cmd: attempted to run digital " + "output command with no lines configured as outputs"); + return -EIO; + } + + return 0; +} + +void ni_cdio_cancel(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + ni_writel(CDO_Disarm_Bit | CDO_Error_Interrupt_Enable_Clear_Bit | + CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit | + CDO_FIFO_Request_Interrupt_Enable_Clear_Bit, 
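/*
 * The CDO scan_begin_arg test above uses the "restrict to the legal
 * bits and compare" pattern: an argument is valid iff masking it with
 * the permitted bits leaves it unchanged.  Generic form:
 */
#include <stdio.h>

static int only_allowed_bits(unsigned int arg, unsigned int allowed)
{
	return (arg & allowed) == arg;	/* 1 = valid, 0 = stray bits set */
}

int main(void)
{
	printf("%d %d\n", only_allowed_bits(0x12, 0x1f),	/* 1 */
			  only_allowed_bits(0x42, 0x1f));	/* 0 */
	return 0;
}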
+ M_Offset_CDIO_Command); + + ni_writel(0, M_Offset_CDO_Mask_Enable); + ni_release_cdo_mite_channel(dev); +} + +int ni_cdo_inttrig(struct a4l_subdevice *subd, lsampl_t trignum) +{ + struct a4l_device *dev = subd->dev; + int err; + unsigned i; + const unsigned timeout = 1000; + + /* TODO: disable trigger until a command is recorded. + Null trig at beginning prevent ao start trigger from executing + more than once per command (and doing things like trying to + allocate the ao dma channel multiple times) */ + + err = ni_cdo_setup_MITE_dma(subd); + if (err < 0) + return err; + + /* wait for dma to fill output fifo */ + for (i = 0; i < timeout; ++i) { + if (ni_readl(M_Offset_CDIO_Status) & CDO_FIFO_Full_Bit) + break; + a4l_udelay(10); + } + + if (i == timeout) { + a4l_err(dev, "ni_cdo_inttrig: dma failed to fill cdo fifo!"); + ni_cdio_cancel(subd); + return -EIO; + } + + ni_writel(CDO_Arm_Bit | + CDO_Error_Interrupt_Enable_Set_Bit | + CDO_Empty_FIFO_Interrupt_Enable_Set_Bit, + M_Offset_CDIO_Command); + + return 0; +} + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +static void handle_cdio_interrupt(struct a4l_device *dev) +{ +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + unsigned cdio_status; + unsigned long flags; + struct a4l_subdevice *subd = a4l_get_subd(dev, NI_DIO_SUBDEV); + + if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) { + return; + } + rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags); + if (devpriv->cdo_mite_chan) { + unsigned cdo_mite_status = + a4l_mite_get_status(devpriv->cdo_mite_chan); + if (cdo_mite_status & CHSR_LINKC) { + writel(CHOR_CLRLC, + devpriv->mite->mite_io_addr + + MITE_CHOR(devpriv->cdo_mite_chan->channel)); + } + a4l_mite_sync_output_dma(devpriv->cdo_mite_chan, subd); + } + rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags); + + cdio_status = ni_readl(M_Offset_CDIO_Status); + if (cdio_status & (CDO_Overrun_Bit | CDO_Underflow_Bit)) { + /* XXX just guessing this is needed and does something useful */ + ni_writel(CDO_Error_Interrupt_Confirm_Bit, M_Offset_CDIO_Command); + a4l_buf_evt(subd, A4L_BUF_ERROR); + } + if (cdio_status & CDO_FIFO_Empty_Bit) { + ni_writel(CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit, + M_Offset_CDIO_Command); + } + a4l_buf_evt(subd, 0); +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ +} + +static int ni_serial_hw_readwrite8(struct a4l_device * dev, + unsigned char data_out, unsigned char *data_in) +{ + unsigned int status1; + int err = 0, count = 20; + +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "outputting 0x%x\n", data_out); +#endif + + devpriv->dio_output &= ~DIO_Serial_Data_Mask; + devpriv->dio_output |= DIO_Serial_Data_Out(data_out); + devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register); + + status1 = devpriv->stc_readw(dev, Joint_Status_1_Register); + if (status1 & DIO_Serial_IO_In_Progress_St) { + err = -EBUSY; + goto Error; + } + + devpriv->dio_control |= DIO_HW_Serial_Start; + devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); + devpriv->dio_control &= ~DIO_HW_Serial_Start; + + /* Wait until STC says we're done, but don't loop infinitely. */ + while ((status1 = + devpriv->stc_readw(dev, + Joint_Status_1_Register)) & + DIO_Serial_IO_In_Progress_St) { + /* Delay one bit per loop */ + a4l_udelay((devpriv->serial_interval_ns + 999) / 1000); + if (--count < 0) { + a4l_err(dev, + "ni_serial_hw_readwrite8: " + "SPI serial I/O didn't finish in time!\n"); + err = -ETIME; + goto Error; + } + } + + /* Delay for last bit. 
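/*
 * The per-bit delays in the serial transfer above convert a nanosecond
 * interval to whole microseconds with a ceiling division,
 * (ns + 999) / 1000, so the wait is never shorter than one bit time:
 */
#include <stdio.h>

static unsigned int ns_to_us_ceil(unsigned int ns)
{
	return (ns + 999) / 1000;
}

int main(void)
{
	printf("%u %u %u\n", ns_to_us_ceil(600),	/* 1 */
			     ns_to_us_ceil(1000),	/* 1 */
			     ns_to_us_ceil(1200));	/* 2 */
	return 0;
}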
This delay is absolutely necessary, because + DIO_Serial_IO_In_Progress_St goes high one bit too early. */ + a4l_udelay((devpriv->serial_interval_ns + 999) / 1000); + + if (data_in != NULL) { + *data_in = devpriv->stc_readw(dev, DIO_Serial_Input_Register); +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "inputted 0x%x\n", *data_in); +#endif + } + +Error: + devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register); + + return err; +} + +static int ni_serial_sw_readwrite8(struct a4l_device * dev, + unsigned char data_out, unsigned char *data_in) +{ + unsigned char mask, input = 0; + +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "outputting 0x%x\n", data_out); +#endif + + /* Wait for one bit before transfer */ + a4l_udelay((devpriv->serial_interval_ns + 999) / 1000); + + for (mask = 0x80; mask; mask >>= 1) { + /* Output current bit; note that we cannot touch devpriv->dio_state + because it is a per-subdevice field, and serial is + a separate subdevice from DIO. */ + devpriv->dio_output &= ~DIO_SDOUT; + if (data_out & mask) { + devpriv->dio_output |= DIO_SDOUT; + } + devpriv->stc_writew(dev, devpriv->dio_output, + DIO_Output_Register); + + /* Assert SDCLK (active low, inverted), wait for half of + the delay, deassert SDCLK, and wait for the other half. */ + devpriv->dio_control |= DIO_Software_Serial_Control; + devpriv->stc_writew(dev, devpriv->dio_control, + DIO_Control_Register); + + a4l_udelay((devpriv->serial_interval_ns + 999) / 2000); + + devpriv->dio_control &= ~DIO_Software_Serial_Control; + devpriv->stc_writew(dev, devpriv->dio_control, + DIO_Control_Register); + + a4l_udelay((devpriv->serial_interval_ns + 999) / 2000); + + /* Input current bit */ + if (devpriv->stc_readw(dev, + DIO_Parallel_Input_Register) & DIO_SDIN) { + input |= mask; + } + } +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "inputted 0x%x\n", input); +#endif + if (data_in) + *data_in = input; + + return 0; +} + +int ni_serial_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + int err = 0; + unsigned char byte_out, byte_in = 0; + unsigned int *data = (unsigned int *)insn->data; + + if (insn->data_size != 2 * sizeof(unsigned int)) + return -EINVAL; + + switch (data[0]) { + case A4L_INSN_CONFIG_SERIAL_CLOCK: + +#ifdef CONFIG_DEBUG_DIO + a4l_info(dev, "SPI serial clock Config %d\n", data[1]); +#endif + + devpriv->serial_hw_mode = 1; + devpriv->dio_control |= DIO_HW_Serial_Enable; + + if (data[1] == SERIAL_DISABLED) { + devpriv->serial_hw_mode = 0; + devpriv->dio_control &= ~(DIO_HW_Serial_Enable | + DIO_Software_Serial_Control); + data[1] = SERIAL_DISABLED; + devpriv->serial_interval_ns = data[1]; + } else if (data[1] <= SERIAL_600NS) { + /* Warning: this clock speed is too fast to reliably + control SCXI. */ + devpriv->dio_control &= ~DIO_HW_Serial_Timebase; + devpriv->clock_and_fout |= Slow_Internal_Timebase; + devpriv->clock_and_fout &= ~DIO_Serial_Out_Divide_By_2; + data[1] = SERIAL_600NS; + devpriv->serial_interval_ns = data[1]; + } else if (data[1] <= SERIAL_1_2US) { + devpriv->dio_control &= ~DIO_HW_Serial_Timebase; + devpriv->clock_and_fout |= Slow_Internal_Timebase | + DIO_Serial_Out_Divide_By_2; + data[1] = SERIAL_1_2US; + devpriv->serial_interval_ns = data[1]; + } else if (data[1] <= SERIAL_10US) { + devpriv->dio_control |= DIO_HW_Serial_Timebase; + devpriv->clock_and_fout |= Slow_Internal_Timebase | + DIO_Serial_Out_Divide_By_2; + /* Note: DIO_Serial_Out_Divide_By_2 only affects + 600ns/1.2us. 
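/*
 * ni_serial_sw_readwrite8 above is a textbook MSB-first bit-banged SPI
 * transfer: for each mask bit, present the output bit on SDOUT, pulse
 * the (inverted) clock for half a bit time per phase, then sample SDIN
 * into the same mask position.  Data-path skeleton with the register
 * I/O reduced to two stand-in callbacks:
 */
#include <stdio.h>

static void set_sdout(int level) { (void)level; }	/* stand-ins for */
static int  get_sdin(void)       { return 1; }		/* the DIO lines */

static unsigned char spi_xfer8(unsigned char out)
{
	unsigned char mask, in = 0;

	for (mask = 0x80; mask; mask >>= 1) {
		set_sdout(!!(out & mask));	/* output current bit */
		/* assert clock, wait half period, deassert, wait again */
		if (get_sdin())
			in |= mask;		/* sample input bit */
	}
	return in;
}

int main(void)
{
	printf("%#x\n", spi_xfer8(0xA5));	/* 0xff with the dummy input */
	return 0;
}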
If you turn divide_by_2 off with the + slow clock, you will still get 10us, except then + all your delays are wrong. */ + data[1] = SERIAL_10US; + devpriv->serial_interval_ns = data[1]; + } else { + devpriv->dio_control &= ~(DIO_HW_Serial_Enable | + DIO_Software_Serial_Control); + devpriv->serial_hw_mode = 0; + data[1] = (data[1] / 1000) * 1000; + devpriv->serial_interval_ns = data[1]; + } + + devpriv->stc_writew(dev, devpriv->dio_control, + DIO_Control_Register); + devpriv->stc_writew(dev, devpriv->clock_and_fout, + Clock_and_FOUT_Register); + return 0; + + break; + + case A4L_INSN_CONFIG_BIDIRECTIONAL_DATA: + + if (devpriv->serial_interval_ns == 0) { + return -EINVAL; + } + + byte_out = data[1] & 0xFF; + + if (devpriv->serial_hw_mode) { + err = ni_serial_hw_readwrite8(dev, byte_out, &byte_in); + } else if (devpriv->serial_interval_ns > 0) { + err = ni_serial_sw_readwrite8(dev, byte_out, &byte_in); + } else { + a4l_err(dev, + "ni_serial_insn_config: serial disabled!\n"); + return -EINVAL; + } + if (err < 0) + return err; + data[1] = byte_in & 0xFF; + return 0; + + break; + default: + return -EINVAL; + } + + return -EINVAL; +} + +void mio_common_detach(struct a4l_device * dev) +{ + if (dev->priv) { + if (devpriv->counter_dev) { + a4l_ni_gpct_device_destroy(devpriv->counter_dev); + } + } +} + +static void init_ao_67xx(struct a4l_device * dev) +{ + struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AO_SUBDEV); + int i; + + if (subd == NULL) { + a4l_err(dev, "%s: unable to find AO subdevice\n", __FUNCTION__); + return; + } + + for (i = 0; i < subd->chan_desc->length; i++) + ni_ao_win_outw(dev, AO_Channel(i) | 0x0, + AO_Configuration_2_67xx); +} + +static unsigned int ni_gpct_to_stc_register(enum ni_gpct_register reg) +{ + unsigned stc_register; + switch (reg) { + case NITIO_G0_Autoincrement_Reg: + stc_register = G_Autoincrement_Register(0); + break; + case NITIO_G1_Autoincrement_Reg: + stc_register = G_Autoincrement_Register(1); + break; + case NITIO_G0_Command_Reg: + stc_register = G_Command_Register(0); + break; + case NITIO_G1_Command_Reg: + stc_register = G_Command_Register(1); + break; + case NITIO_G0_HW_Save_Reg: + stc_register = G_HW_Save_Register(0); + break; + case NITIO_G1_HW_Save_Reg: + stc_register = G_HW_Save_Register(1); + break; + case NITIO_G0_SW_Save_Reg: + stc_register = G_Save_Register(0); + break; + case NITIO_G1_SW_Save_Reg: + stc_register = G_Save_Register(1); + break; + case NITIO_G0_Mode_Reg: + stc_register = G_Mode_Register(0); + break; + case NITIO_G1_Mode_Reg: + stc_register = G_Mode_Register(1); + break; + case NITIO_G0_LoadA_Reg: + stc_register = G_Load_A_Register(0); + break; + case NITIO_G1_LoadA_Reg: + stc_register = G_Load_A_Register(1); + break; + case NITIO_G0_LoadB_Reg: + stc_register = G_Load_B_Register(0); + break; + case NITIO_G1_LoadB_Reg: + stc_register = G_Load_B_Register(1); + break; + case NITIO_G0_Input_Select_Reg: + stc_register = G_Input_Select_Register(0); + break; + case NITIO_G1_Input_Select_Reg: + stc_register = G_Input_Select_Register(1); + break; + case NITIO_G01_Status_Reg: + stc_register = G_Status_Register; + break; + case NITIO_G01_Joint_Reset_Reg: + stc_register = Joint_Reset_Register; + break; + case NITIO_G01_Joint_Status1_Reg: + stc_register = Joint_Status_1_Register; + break; + case NITIO_G01_Joint_Status2_Reg: + stc_register = Joint_Status_2_Register; + break; + case NITIO_G0_Interrupt_Acknowledge_Reg: + stc_register = Interrupt_A_Ack_Register; + break; + case NITIO_G1_Interrupt_Acknowledge_Reg: + stc_register = 
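/*
 * The clock-source branch above snaps a requested bit interval up to
 * the nearest supported hardware speed and falls back to software
 * bit-banging, rounded down to whole microseconds, for anything slower.
 * The numeric values of SERIAL_600NS/SERIAL_1_2US/SERIAL_10US below are
 * assumed from their names; only the bucketing logic is the driver's.
 */
#include <stdio.h>

#define SERIAL_DISABLED	0
#define SERIAL_600NS	600
#define SERIAL_1_2US	1200
#define SERIAL_10US	10000

static unsigned int snap_interval(unsigned int ns, int *hw_mode)
{
	*hw_mode = 1;
	if (ns == SERIAL_DISABLED) { *hw_mode = 0; return SERIAL_DISABLED; }
	if (ns <= SERIAL_600NS)	return SERIAL_600NS;
	if (ns <= SERIAL_1_2US)	return SERIAL_1_2US;
	if (ns <= SERIAL_10US)	return SERIAL_10US;
	*hw_mode = 0;			/* software serial */
	return (ns / 1000) * 1000;	/* whole microseconds */
}

int main(void)
{
	int hw;

	printf("%u\n", snap_interval(800, &hw));	/* 1200, hw mode */
	printf("%u\n", snap_interval(12345, &hw));	/* 12000, sw mode */
	return 0;
}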
Interrupt_B_Ack_Register; + break; + case NITIO_G0_Status_Reg: + stc_register = AI_Status_1_Register; + break; + case NITIO_G1_Status_Reg: + stc_register = AO_Status_1_Register; + break; + case NITIO_G0_Interrupt_Enable_Reg: + stc_register = Interrupt_A_Enable_Register; + break; + case NITIO_G1_Interrupt_Enable_Reg: + stc_register = Interrupt_B_Enable_Register; + break; + default: + __a4l_err("%s: unhandled register 0x%x in switch.\n", + __FUNCTION__, reg); + BUG(); + return 0; + break; + } + return stc_register; +} + +static void ni_gpct_write_register(struct ni_gpct *counter, + unsigned int bits, enum ni_gpct_register reg) +{ + struct a4l_device *dev = counter->counter_dev->dev; + unsigned stc_register; + /* bits in the join reset register which are relevant to counters */ + static const unsigned gpct_joint_reset_mask = G0_Reset | G1_Reset; + static const unsigned gpct_interrupt_a_enable_mask = + G0_Gate_Interrupt_Enable | G0_TC_Interrupt_Enable; + static const unsigned gpct_interrupt_b_enable_mask = + G1_Gate_Interrupt_Enable | G1_TC_Interrupt_Enable; + + switch (reg) { + /* m-series-only registers */ + case NITIO_G0_Counting_Mode_Reg: + ni_writew(bits, M_Offset_G0_Counting_Mode); + break; + case NITIO_G1_Counting_Mode_Reg: + ni_writew(bits, M_Offset_G1_Counting_Mode); + break; + case NITIO_G0_Second_Gate_Reg: + ni_writew(bits, M_Offset_G0_Second_Gate); + break; + case NITIO_G1_Second_Gate_Reg: + ni_writew(bits, M_Offset_G1_Second_Gate); + break; + case NITIO_G0_DMA_Config_Reg: + ni_writew(bits, M_Offset_G0_DMA_Config); + break; + case NITIO_G1_DMA_Config_Reg: + ni_writew(bits, M_Offset_G1_DMA_Config); + break; + case NITIO_G0_ABZ_Reg: + ni_writew(bits, M_Offset_G0_MSeries_ABZ); + break; + case NITIO_G1_ABZ_Reg: + ni_writew(bits, M_Offset_G1_MSeries_ABZ); + break; + + /* 32 bit registers */ + case NITIO_G0_LoadA_Reg: + case NITIO_G1_LoadA_Reg: + case NITIO_G0_LoadB_Reg: + case NITIO_G1_LoadB_Reg: + stc_register = ni_gpct_to_stc_register(reg); + devpriv->stc_writel(dev, bits, stc_register); + break; + + /* 16 bit registers */ + case NITIO_G0_Interrupt_Enable_Reg: + BUG_ON(bits & ~gpct_interrupt_a_enable_mask); + ni_set_bitfield(dev, Interrupt_A_Enable_Register, + gpct_interrupt_a_enable_mask, bits); + break; + case NITIO_G1_Interrupt_Enable_Reg: + BUG_ON(bits & ~gpct_interrupt_b_enable_mask); + ni_set_bitfield(dev, Interrupt_B_Enable_Register, + gpct_interrupt_b_enable_mask, bits); + break; + case NITIO_G01_Joint_Reset_Reg: + BUG_ON(bits & ~gpct_joint_reset_mask); + fallthrough; + default: + stc_register = ni_gpct_to_stc_register(reg); + devpriv->stc_writew(dev, bits, stc_register); + } +} + +static unsigned int ni_gpct_read_register(struct ni_gpct *counter, + enum ni_gpct_register reg) +{ + struct a4l_device *dev = counter->counter_dev->dev; + unsigned int stc_register; + switch (reg) { + /* m-series only registers */ + case NITIO_G0_DMA_Status_Reg: + return ni_readw(M_Offset_G0_DMA_Status); + break; + case NITIO_G1_DMA_Status_Reg: + return ni_readw(M_Offset_G1_DMA_Status); + break; + + /* 32 bit registers */ + case NITIO_G0_HW_Save_Reg: + case NITIO_G1_HW_Save_Reg: + case NITIO_G0_SW_Save_Reg: + case NITIO_G1_SW_Save_Reg: + stc_register = ni_gpct_to_stc_register(reg); + return devpriv->stc_readl(dev, stc_register); + break; + + /* 16 bit registers */ + default: + stc_register = ni_gpct_to_stc_register(reg); + return devpriv->stc_readw(dev, stc_register); + break; + } + return 0; +} + +int ni_freq_out_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct 
a4l_device *dev = subd->dev; + uint8_t *data = (uint8_t *)insn->data; + + data[0] = FOUT_Divider(devpriv->clock_and_fout); + + return 0; +} + +int ni_freq_out_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint8_t *data = (uint8_t *)insn->data; + + devpriv->clock_and_fout &= ~FOUT_Enable; + devpriv->stc_writew(dev, devpriv->clock_and_fout, + Clock_and_FOUT_Register); + devpriv->clock_and_fout &= ~FOUT_Divider_mask; + devpriv->clock_and_fout |= FOUT_Divider(data[0]); + devpriv->clock_and_fout |= FOUT_Enable; + devpriv->stc_writew(dev, devpriv->clock_and_fout, + Clock_and_FOUT_Register); + + return 0; +} + +static int ni_set_freq_out_clock(struct a4l_device * dev, lsampl_t clock_source) +{ + switch (clock_source) { + case NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC: + devpriv->clock_and_fout &= ~FOUT_Timebase_Select; + break; + case NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC: + devpriv->clock_and_fout |= FOUT_Timebase_Select; + break; + default: + return -EINVAL; + } + devpriv->stc_writew(dev, devpriv->clock_and_fout, + Clock_and_FOUT_Register); + + return 0; +} + +static void ni_get_freq_out_clock(struct a4l_device * dev, + unsigned int * clock_source, + unsigned int * clock_period_ns) +{ + if (devpriv->clock_and_fout & FOUT_Timebase_Select) { + *clock_source = NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC; + *clock_period_ns = TIMEBASE_2_NS; + } else { + *clock_source = NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC; + *clock_period_ns = TIMEBASE_1_NS * 2; + } +} + +int ni_freq_out_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int *data = (unsigned int *)insn->data; + + switch (data[0]) { + case A4L_INSN_CONFIG_SET_CLOCK_SRC: + return ni_set_freq_out_clock(dev, data[1]); + break; + case A4L_INSN_CONFIG_GET_CLOCK_SRC: + ni_get_freq_out_clock(dev, &data[1], &data[2]); + return 0; + default: + break; + } + + return -EINVAL; +} + +static int ni_8255_callback(int dir, int port, int data, unsigned long arg) +{ + struct a4l_device *dev = (struct a4l_device *) arg; + + if (dir) { + ni_writeb(data, Port_A + 2 * port); + return 0; + } else { + return ni_readb(Port_A + 2 * port); + } +} + +/* + reads bytes out of eeprom +*/ + +static int ni_read_eeprom(struct a4l_device *dev, int addr) +{ + int bit; + int bitstring; + + bitstring = 0x0300 | ((addr & 0x100) << 3) | (addr & 0xff); + ni_writeb(0x04, Serial_Command); + for (bit = 0x8000; bit; bit >>= 1) { + ni_writeb(0x04 | ((bit & bitstring) ? 0x02 : 0), + Serial_Command); + ni_writeb(0x05 | ((bit & bitstring) ? 0x02 : 0), + Serial_Command); + } + bitstring = 0; + for (bit = 0x80; bit; bit >>= 1) { + ni_writeb(0x04, Serial_Command); + ni_writeb(0x05, Serial_Command); + bitstring |= ((ni_readb(XXX_Status) & PROMOUT) ? 
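/*
 * ni_read_eeprom above bit-bangs what is presumably a 93Cxx-style
 * serial EEPROM: a 16-bit read command (start/opcode bits plus a 9-bit
 * address) is shifted out MSB first, then eight data bits are sampled
 * back from the PROMOUT status bit.  Command assembly in isolation:
 */
#include <stdio.h>

static int eeprom_read_cmd(int addr)
{
	/* 0x0300 = start/opcode; address bit 8 rides in command bit 11 */
	return 0x0300 | ((addr & 0x100) << 3) | (addr & 0xff);
}

int main(void)
{
	printf("%#06x\n", eeprom_read_cmd(0x1a3));	/* 0x0ba3 */
	return 0;
}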
bit : 0); + } + ni_writeb(0x00, Serial_Command); + + return bitstring; +} + +/* + presents the EEPROM as a subdevice +*/ + +static int ni_eeprom_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint8_t *data = (uint8_t *)insn->data; + + data[0] = ni_read_eeprom(dev, CR_CHAN(insn->chan_desc)); + + return 0; +} + + +static int ni_m_series_eeprom_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint8_t *data = (uint8_t *)insn->data; + + data[0] = devpriv->eeprom_buffer[CR_CHAN(insn->chan_desc)]; + + return 0; +} + +static int ni_get_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int *data = (unsigned int*)insn->data; + + data[1] = devpriv->pwm_up_count * devpriv->clock_ns; + data[2] = devpriv->pwm_down_count * devpriv->clock_ns; + + return 0; +} + +static int ni_m_series_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int up_count, down_count; + unsigned int *data = (unsigned int*)insn->data; + + switch (data[0]) { + case A4L_INSN_CONFIG_PWM_OUTPUT: + switch (data[1]) { + case TRIG_ROUND_NEAREST: + up_count = + (data[2] + + devpriv->clock_ns / 2) / devpriv->clock_ns; + break; + case TRIG_ROUND_DOWN: + up_count = data[2] / devpriv->clock_ns; + break; + case TRIG_ROUND_UP: + up_count =(data[2] + devpriv->clock_ns - 1) / + devpriv->clock_ns; + break; + default: + return -EINVAL; + break; + } + switch (data[3]) { + case TRIG_ROUND_NEAREST: + down_count = (data[4] + devpriv->clock_ns / 2) / + devpriv->clock_ns; + break; + case TRIG_ROUND_DOWN: + down_count = data[4] / devpriv->clock_ns; + break; + case TRIG_ROUND_UP: + down_count = + (data[4] + devpriv->clock_ns - 1) / + devpriv->clock_ns; + break; + default: + return -EINVAL; + break; + } + if (up_count * devpriv->clock_ns != data[2] || + down_count * devpriv->clock_ns != data[4]) { + data[2] = up_count * devpriv->clock_ns; + data[4] = down_count * devpriv->clock_ns; + return -EAGAIN; + } + ni_writel(MSeries_Cal_PWM_High_Time_Bits(up_count) | + MSeries_Cal_PWM_Low_Time_Bits(down_count), + M_Offset_Cal_PWM); + devpriv->pwm_up_count = up_count; + devpriv->pwm_down_count = down_count; + return 0; + break; + case A4L_INSN_CONFIG_GET_PWM_OUTPUT: + return ni_get_pwm_config(subd, insn); + break; + default: + return -EINVAL; + break; + } + return 0; +} + +static int ni_6143_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int *data = (unsigned int*)insn->data; + + unsigned up_count, down_count; + switch (data[0]) { + case A4L_INSN_CONFIG_PWM_OUTPUT: + switch (data[1]) { + case TRIG_ROUND_NEAREST: + up_count = + (data[2] + devpriv->clock_ns / 2) / + devpriv->clock_ns; + break; + case TRIG_ROUND_DOWN: + up_count = data[2] / devpriv->clock_ns; + break; + case TRIG_ROUND_UP: + up_count = (data[2] + devpriv->clock_ns - 1) / + devpriv->clock_ns; + break; + default: + return -EINVAL; + break; + } + switch (data[3]) { + case TRIG_ROUND_NEAREST: + down_count = (data[4] + devpriv->clock_ns / 2) / + devpriv->clock_ns; + break; + case TRIG_ROUND_DOWN: + down_count = data[4] / devpriv->clock_ns; + break; + case TRIG_ROUND_UP: + down_count = (data[4] + devpriv->clock_ns - 1) / + devpriv->clock_ns; + break; + default: + return -EINVAL; + break; + } + if (up_count * devpriv->clock_ns != data[2] || + 
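/*
 * The PWM configuration above quantizes the requested high/low times to
 * clock ticks with the three classic integer roundings; for quantum q:
 * nearest = (x + q/2) / q, down = x / q, up = (x + q - 1) / q.  When
 * count * q no longer reproduces the request, the driver writes the
 * quantized times back and returns -EAGAIN so the caller sees what it
 * actually got.
 */
#include <stdio.h>

static unsigned int rnd_nearest(unsigned int x, unsigned int q) { return (x + q / 2) / q; }
static unsigned int rnd_down(unsigned int x, unsigned int q)    { return x / q; }
static unsigned int rnd_up(unsigned int x, unsigned int q)      { return (x + q - 1) / q; }

int main(void)
{
	unsigned int q = 50;	/* illustrative clock_ns */

	printf("%u %u %u\n", rnd_nearest(1234, q),	/* 25 */
			     rnd_down(1234, q),		/* 24 */
			     rnd_up(1234, q));		/* 25 */
	return 0;
}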
down_count * devpriv->clock_ns != data[4]) { + data[2] = up_count * devpriv->clock_ns; + data[4] = down_count * devpriv->clock_ns; + return -EAGAIN; + } + ni_writel(up_count, Calibration_HighTime_6143); + devpriv->pwm_up_count = up_count; + ni_writel(down_count, Calibration_LowTime_6143); + devpriv->pwm_down_count = down_count; + return 0; + break; + case A4L_INSN_CONFIG_GET_PWM_OUTPUT: + return ni_get_pwm_config(subd, insn); + default: + return -EINVAL; + break; + } + return 0; +} + +static int pack_mb88341(int addr, int val, int *bitstring) +{ + /* + Fujitsu MB 88341 + Note that address bits are reversed. Thanks to + Ingo Keen for noticing this. + + Note also that the 88341 expects address values from + 1-12, whereas we use channel numbers 0-11. The NI + docs use 1-12, also, so be careful here. + */ + addr++; + *bitstring = ((addr & 0x1) << 11) | + ((addr & 0x2) << 9) | + ((addr & 0x4) << 7) | ((addr & 0x8) << 5) | (val & 0xff); + return 12; +} + +static int pack_dac8800(int addr, int val, int *bitstring) +{ + *bitstring = ((addr & 0x7) << 8) | (val & 0xff); + return 11; +} + +static int pack_dac8043(int addr, int val, int *bitstring) +{ + *bitstring = val & 0xfff; + return 12; +} + +static int pack_ad8522(int addr, int val, int *bitstring) +{ + *bitstring = (val & 0xfff) | (addr ? 0xc000 : 0xa000); + return 16; +} + +static int pack_ad8804(int addr, int val, int *bitstring) +{ + *bitstring = ((addr & 0xf) << 8) | (val & 0xff); + return 12; +} + +static int pack_ad8842(int addr, int val, int *bitstring) +{ + *bitstring = ((addr + 1) << 8) | (val & 0xff); + return 12; +} + +struct caldac_struct { + int n_chans; + int n_bits; + int (*packbits) (int, int, int *); +}; + +static struct caldac_struct caldacs[] = { + [mb88341] = {12, 8, pack_mb88341}, + [dac8800] = {8, 8, pack_dac8800}, + [dac8043] = {1, 12, pack_dac8043}, + [ad8522] = {2, 12, pack_ad8522}, + [ad8804] = {12, 8, pack_ad8804}, + [ad8842] = {8, 8, pack_ad8842}, + [ad8804_debug] = {16, 8, pack_ad8804}, +}; + +static void ni_write_caldac(struct a4l_device * dev, int addr, int val) +{ + unsigned int loadbit = 0, bits = 0, bit, bitstring = 0; + int i; + int type; + + if (devpriv->caldacs[addr] == val) + return; + devpriv->caldacs[addr] = val; + + for (i = 0; i < 3; i++) { + type = boardtype.caldac[i]; + if (type == caldac_none) + break; + if (addr < caldacs[type].n_chans) { + bits = caldacs[type].packbits(addr, val, &bitstring); + loadbit = SerDacLd(i); + break; + } + addr -= caldacs[type].n_chans; + } + + for (bit = 1 << (bits - 1); bit; bit >>= 1) { + ni_writeb(((bit & bitstring) ? 0x02 : 0), Serial_Command); + a4l_udelay(1); + ni_writeb(1 | ((bit & bitstring) ? 0x02 : 0), Serial_Command); + a4l_udelay(1); + } + ni_writeb(loadbit, Serial_Command); + a4l_udelay(1); + ni_writeb(0, Serial_Command); +} + +static void caldac_setup(struct a4l_device *dev, struct a4l_subdevice *subd) +{ + int i, j; + int n_dacs; + int n_chans = 0; + int n_bits; + int diffbits = 0; + int type; + int chan; + + type = boardtype.caldac[0]; + if (type == caldac_none) + return; + n_bits = caldacs[type].n_bits; + for (i = 0; i < 3; i++) { + type = boardtype.caldac[i]; + if (type == caldac_none) + break; + if (caldacs[type].n_bits != n_bits) + diffbits = 1; + n_chans += caldacs[type].n_chans; + } + n_dacs = i; + + if (diffbits) { + + if (n_chans > MAX_N_CALDACS) { + a4l_err(dev, "BUG! 
MAX_N_CALDACS too small\n"); + } + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + n_chans * sizeof(struct a4l_channel), GFP_KERNEL); + + memset(subd->chan_desc, + 0, + sizeof(struct a4l_channels_desc) + n_chans * sizeof(struct a4l_channel)); + + subd->chan_desc->length = n_chans; + subd->chan_desc->mode = A4L_CHAN_PERCHAN_CHANDESC; + + chan = 0; + for (i = 0; i < n_dacs; i++) { + type = boardtype.caldac[i]; + for (j = 0; j < caldacs[type].n_chans; j++) { + + subd->chan_desc->chans[chan].nb_bits = + caldacs[type].n_bits; + + chan++; + } + } + + for (chan = 0; chan < n_chans; chan++) { + unsigned long tmp = + (1 << subd->chan_desc->chans[chan].nb_bits) / 2; + ni_write_caldac(dev, chan, tmp); + } + } else { + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + + memset(subd->chan_desc, + 0, sizeof(struct a4l_channels_desc) + sizeof(struct a4l_channel)); + + subd->chan_desc->length = n_chans; + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + + type = boardtype.caldac[0]; + + subd->chan_desc->chans[0].nb_bits = caldacs[type].n_bits; + + for (chan = 0; chan < n_chans; chan++) + ni_write_caldac(dev, + chan, + (1 << subd->chan_desc->chans[0].nb_bits) / 2); + } +} + +static int ni_calib_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint16_t *data = (uint16_t *)insn->data; + + ni_write_caldac(dev, CR_CHAN(insn->chan_desc), data[0]); + return 0; +} + +static int ni_calib_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint16_t *data = (uint16_t *)insn->data; + + data[0] = devpriv->caldacs[CR_CHAN(insn->chan_desc)]; + + return 0; +} + +static int ni_gpct_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct ni_gpct *counter = (struct ni_gpct *)subd->priv; + return a4l_ni_tio_insn_config(counter, insn); +} + +static int ni_gpct_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct ni_gpct *counter = (struct ni_gpct *)subd->priv; + return a4l_ni_tio_rinsn(counter, insn); +} + +static int ni_gpct_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct ni_gpct *counter = (struct ni_gpct *)subd->priv; + return a4l_ni_tio_winsn(counter, insn); +} + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + +static int ni_gpct_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + int retval; + struct a4l_device *dev = subd->dev; + struct ni_gpct *counter = (struct ni_gpct *)subd->priv; + struct mite_dma_descriptor_ring *ring; + + retval = ni_request_gpct_mite_channel(dev, + counter->counter_index, + A4L_INPUT); + if (retval) { + a4l_err(dev, + "ni_gpct_cmd: " + "no dma channel available for use by counter\n"); + return retval; + } + + ring = devpriv->gpct_mite_ring[counter->counter_index]; + retval = a4l_mite_buf_change(ring, subd); + if (retval) { + a4l_err(dev, + "ni_gpct_cmd: " + "dma ring configuration failed\n"); + return retval; + + } + + a4l_ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL); + ni_e_series_enable_second_irq(dev, counter->counter_index, 1); + retval = a4l_ni_tio_cmd(counter, cmd); + + return retval; +} + +static int ni_gpct_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + struct ni_gpct *counter = (struct ni_gpct *)subd->priv; + return a4l_ni_tio_cmdtest(counter, cmd); +} + 
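+/* + * Note on the command path above: ni_gpct_cmd() reserves a MITE DMA + * channel for the counter, binds the subdevice buffer to the channel's + * descriptor ring via a4l_mite_buf_change(), acknowledges stale counter + * interrupts and enables the second IRQ line, then delegates to + * a4l_ni_tio_cmd(); ni_gpct_cancel() below releases the same resources + * in reverse order. + */ +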
+static void ni_gpct_cancel(struct a4l_subdevice *subd) +{ + struct a4l_device *dev = subd->dev; + struct ni_gpct *counter = (struct ni_gpct *)subd->priv; + + a4l_ni_tio_cancel(counter); + ni_e_series_enable_second_irq(dev, counter->counter_index, 0); + ni_release_gpct_mite_channel(dev, counter->counter_index); +} + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + +/* + * + * Programmable Function Inputs + * + */ + +static int ni_m_series_set_pfi_routing(struct a4l_device *dev, + unsigned int chan, unsigned int source) +{ + unsigned int pfi_reg_index; + unsigned int array_offset; + + if ((source & 0x1f) != source) + return -EINVAL; + pfi_reg_index = 1 + chan / 3; + array_offset = pfi_reg_index - 1; + devpriv->pfi_output_select_reg[array_offset] &= + ~MSeries_PFI_Output_Select_Mask(chan); + devpriv->pfi_output_select_reg[array_offset] |= + MSeries_PFI_Output_Select_Bits(chan, source); + ni_writew(devpriv->pfi_output_select_reg[array_offset], + M_Offset_PFI_Output_Select(pfi_reg_index)); + return 2; +} + +static unsigned int ni_old_get_pfi_routing(struct a4l_device *dev, + unsigned int chan) +{ + /* pre-m-series boards have fixed signals on pfi pins */ + + switch (chan) { + case 0: + return NI_PFI_OUTPUT_AI_START1; + break; + case 1: + return NI_PFI_OUTPUT_AI_START2; + break; + case 2: + return NI_PFI_OUTPUT_AI_CONVERT; + break; + case 3: + return NI_PFI_OUTPUT_G_SRC1; + break; + case 4: + return NI_PFI_OUTPUT_G_GATE1; + break; + case 5: + return NI_PFI_OUTPUT_AO_UPDATE_N; + break; + case 6: + return NI_PFI_OUTPUT_AO_START1; + break; + case 7: + return NI_PFI_OUTPUT_AI_START_PULSE; + break; + case 8: + return NI_PFI_OUTPUT_G_SRC0; + break; + case 9: + return NI_PFI_OUTPUT_G_GATE0; + break; + default: + __a4l_err("%s: bug, unhandled case in switch.\n", + __FUNCTION__); + break; + } + return 0; +} + +static int ni_old_set_pfi_routing(struct a4l_device *dev, + unsigned int chan, unsigned int source) +{ + /* pre-m-series boards have fixed signals on pfi pins */ + if (source != ni_old_get_pfi_routing(dev, chan)) + return -EINVAL; + + return 2; +} + +static int ni_set_pfi_routing(struct a4l_device *dev, + unsigned int chan, unsigned int source) +{ + if (boardtype.reg_type & ni_reg_m_series_mask) + return ni_m_series_set_pfi_routing(dev, chan, source); + else + return ni_old_set_pfi_routing(dev, chan, source); +} + +static unsigned int ni_m_series_get_pfi_routing(struct a4l_device *dev, + unsigned int chan) +{ + const unsigned int array_offset = chan / 3; + return MSeries_PFI_Output_Select_Source(chan, + devpriv->pfi_output_select_reg[array_offset]); +} + +static unsigned int ni_get_pfi_routing(struct a4l_device *dev, unsigned int chan) +{ + if (boardtype.reg_type & ni_reg_m_series_mask) + return ni_m_series_get_pfi_routing(dev, chan); + else + return ni_old_get_pfi_routing(dev, chan); +} + +static int ni_config_filter(struct a4l_device *dev, + unsigned int pfi_channel, int filter) +{ + unsigned int bits; + if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) { + return -ENOTSUPP; + } + bits = ni_readl(M_Offset_PFI_Filter); + bits &= ~MSeries_PFI_Filter_Select_Mask(pfi_channel); + bits |= MSeries_PFI_Filter_Select_Bits(pfi_channel, filter); + ni_writel(bits, M_Offset_PFI_Filter); + return 0; +} + +static int ni_pfi_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint16_t *data = (uint16_t *)insn->data; + + if (data[0]) { + devpriv->pfi_state &= ~data[0]; + devpriv->pfi_state |= (data[0] & data[1]); + 
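/* insn_bits convention: data[0] is the mask of PFI lines to update + and data[1] their new values; only masked bits of the cached output + state change before the register write below */ +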
ni_writew(devpriv->pfi_state, M_Offset_PFI_DO); + } + + data[1] = ni_readw(M_Offset_PFI_DI); + + return 0; +} + +static int ni_pfi_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + unsigned int chan, *data = (unsigned int *)insn->data; + + if (insn->data_size < sizeof(unsigned int)) + return -EINVAL; + + chan = CR_CHAN(insn->chan_desc); + + switch (data[0]) { + case A4L_OUTPUT: + ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 1); + break; + case A4L_INPUT: + ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 0); + break; + case A4L_INSN_CONFIG_DIO_QUERY: + data[1] = (devpriv->io_bidirection_pin_reg & (1 << chan)) ? + A4L_OUTPUT : A4L_INPUT; + return 0; + break; + case A4L_INSN_CONFIG_SET_ROUTING: + return ni_set_pfi_routing(dev, chan, data[1]); + break; + case A4L_INSN_CONFIG_GET_ROUTING: + data[1] = ni_get_pfi_routing(dev, chan); + break; + case A4L_INSN_CONFIG_FILTER: + return ni_config_filter(dev, chan, data[1]); + break; + default: + return -EINVAL; + } + return 0; +} + +/* + * + * RTSI Bus Functions + * + */ + +/* Find best multiplier/divider to try and get the PLL running at 80 MHz + * given an arbitrary frequency input clock */ +static int ni_mseries_get_pll_parameters(unsigned int reference_period_ns, + unsigned int *freq_divider, + unsigned int *freq_multiplier, + unsigned int *actual_period_ns) +{ + unsigned div; + unsigned best_div = 1; + static const unsigned max_div = 0x10; + unsigned mult; + unsigned best_mult = 1; + static const unsigned max_mult = 0x100; + static const unsigned pico_per_nano = 1000; + + const unsigned reference_picosec = reference_period_ns * pico_per_nano; + /* m-series wants the phased-locked loop to output 80MHz, which is divided by 4 to + * 20 MHz for most timing clocks */ + static const unsigned target_picosec = 12500; + static const unsigned fudge_factor_80_to_20Mhz = 4; + int best_period_picosec = 0; + for (div = 1; div <= max_div; ++div) { + for (mult = 1; mult <= max_mult; ++mult) { + unsigned new_period_ps = + (reference_picosec * div) / mult; + if (abs(new_period_ps - target_picosec) < + abs(best_period_picosec - target_picosec)) { + best_period_picosec = new_period_ps; + best_div = div; + best_mult = mult; + } + } + } + if (best_period_picosec == 0) { + __a4l_err("%s: bug, failed to find pll parameters\n", + __FUNCTION__); + return -EIO; + } + *freq_divider = best_div; + *freq_multiplier = best_mult; + *actual_period_ns = + (best_period_picosec * fudge_factor_80_to_20Mhz + + (pico_per_nano / 2)) / pico_per_nano; + return 0; +} + +static int ni_mseries_set_pll_master_clock(struct a4l_device * dev, + unsigned int source, + unsigned int period_ns) +{ + static const unsigned min_period_ns = 50; + static const unsigned max_period_ns = 1000; + static const unsigned timeout = 1000; + unsigned pll_control_bits; + unsigned freq_divider; + unsigned freq_multiplier; + unsigned i; + int retval; + if (source == NI_MIO_PLL_PXI10_CLOCK) + period_ns = 100; + /* These limits are somewhat arbitrary, but NI advertises 1 to + 20MHz range so we'll use that */ + if (period_ns < min_period_ns || period_ns > max_period_ns) { + a4l_err(dev, + "%s: you must specify an input clock frequency " + "between %i and %i nanosec " + "for the phased-lock loop.\n", + __FUNCTION__, min_period_ns, max_period_ns); + return -EINVAL; + } + devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit; + devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg, + RTSI_Trig_Direction_Register); + 
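/* + * Worked example (illustrative note, not from the original driver): + * for the PXI 10 MHz backplane clock (period 100 ns = 100000 ps), + * ni_mseries_get_pll_parameters() in the switch below settles on + * divider 1 and multiplier 8, since 100000 ps / 8 = 12500 ps hits the + * 80 MHz PLL target exactly; the reported timebase period is then + * (12500 * 4 + 500) / 1000 = 50 ns, i.e. the usual 20 MHz. + */ +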
pll_control_bits = + MSeries_PLL_Enable_Bit | MSeries_PLL_VCO_Mode_75_150MHz_Bits; + devpriv->clock_and_fout2 |= + MSeries_Timebase1_Select_Bit | MSeries_Timebase3_Select_Bit; + devpriv->clock_and_fout2 &= ~MSeries_PLL_In_Source_Select_Mask; + switch (source) { + case NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK: + devpriv->clock_and_fout2 |= + MSeries_PLL_In_Source_Select_Star_Trigger_Bits; + retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider, + &freq_multiplier, &devpriv->clock_ns); + if (retval < 0) + return retval; + break; + case NI_MIO_PLL_PXI10_CLOCK: + /* pxi clock is 10MHz */ + devpriv->clock_and_fout2 |= + MSeries_PLL_In_Source_Select_PXI_Clock10; + retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider, + &freq_multiplier, &devpriv->clock_ns); + if (retval < 0) + return retval; + break; + default: + { + unsigned rtsi_channel; + static const unsigned max_rtsi_channel = 7; + for (rtsi_channel = 0; rtsi_channel <= max_rtsi_channel; + ++rtsi_channel) { + if (source == + NI_MIO_PLL_RTSI_CLOCK(rtsi_channel)) { + devpriv->clock_and_fout2 |= + MSeries_PLL_In_Source_Select_RTSI_Bits + (rtsi_channel); + break; + } + } + if (rtsi_channel > max_rtsi_channel) + return -EINVAL; + retval = ni_mseries_get_pll_parameters(period_ns, + &freq_divider, &freq_multiplier, + &devpriv->clock_ns); + if (retval < 0) + return retval; + } + break; + } + ni_writew(devpriv->clock_and_fout2, M_Offset_Clock_and_Fout2); + pll_control_bits |= + MSeries_PLL_Divisor_Bits(freq_divider) | + MSeries_PLL_Multiplier_Bits(freq_multiplier); + ni_writew(pll_control_bits, M_Offset_PLL_Control); + devpriv->clock_source = source; + /* It seems to typically take a few hundred microseconds for PLL to lock */ + for (i = 0; i < timeout; ++i) { + if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) { + break; + } + udelay(1); + } + if (i == timeout) { + a4l_err(dev, + "%s: timed out waiting for PLL to lock " + "to reference clock source %i with period %i ns.\n", + __FUNCTION__, source, period_ns); + return -ETIMEDOUT; + } + return 3; +} + +static int ni_set_master_clock(struct a4l_device *dev, + unsigned int source, unsigned int period_ns) +{ + if (source == NI_MIO_INTERNAL_CLOCK) { + devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit; + devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg, + RTSI_Trig_Direction_Register); + devpriv->clock_ns = TIMEBASE_1_NS; + if (boardtype.reg_type & ni_reg_m_series_mask) { + devpriv->clock_and_fout2 &= + ~(MSeries_Timebase1_Select_Bit | + MSeries_Timebase3_Select_Bit); + ni_writew(devpriv->clock_and_fout2, + M_Offset_Clock_and_Fout2); + ni_writew(0, M_Offset_PLL_Control); + } + devpriv->clock_source = source; + } else { + if (boardtype.reg_type & ni_reg_m_series_mask) { + return ni_mseries_set_pll_master_clock(dev, source, + period_ns); + } else { + if (source == NI_MIO_RTSI_CLOCK) { + devpriv->rtsi_trig_direction_reg |= + Use_RTSI_Clock_Bit; + devpriv->stc_writew(dev, + devpriv->rtsi_trig_direction_reg, + RTSI_Trig_Direction_Register); + if (devpriv->clock_ns == 0) { + a4l_err(dev, + "%s: we don't handle an " + "unspecified clock period " + "correctly yet, returning error.\n", + __FUNCTION__); + return -EINVAL; + } else { + devpriv->clock_ns = period_ns; + } + devpriv->clock_source = source; + } else + return -EINVAL; + } + } + return 3; +} + +static void ni_rtsi_init(struct a4l_device * dev) +{ + /* Initialise the RTSI bus signal switch to a default state */ + + /* Set clock mode to internal */ + devpriv->clock_and_fout2 = MSeries_RTSI_10MHz_Bit; + if 
(ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) { + a4l_err(dev, "ni_set_master_clock failed, bug?"); + } + + /* Default internal lines routing to RTSI bus lines */ + devpriv->rtsi_trig_a_output_reg = + RTSI_Trig_Output_Bits(0, NI_RTSI_OUTPUT_ADR_START1) | + RTSI_Trig_Output_Bits(1, NI_RTSI_OUTPUT_ADR_START2) | + RTSI_Trig_Output_Bits(2, NI_RTSI_OUTPUT_SCLKG) | + RTSI_Trig_Output_Bits(3, NI_RTSI_OUTPUT_DACUPDN); + devpriv->stc_writew(dev, devpriv->rtsi_trig_a_output_reg, + RTSI_Trig_A_Output_Register); + devpriv->rtsi_trig_b_output_reg = + RTSI_Trig_Output_Bits(4, NI_RTSI_OUTPUT_DA_START1) | + RTSI_Trig_Output_Bits(5, NI_RTSI_OUTPUT_G_SRC0) | + RTSI_Trig_Output_Bits(6, NI_RTSI_OUTPUT_G_GATE0); + + if (boardtype.reg_type & ni_reg_m_series_mask) + devpriv->rtsi_trig_b_output_reg |= + RTSI_Trig_Output_Bits(7, NI_RTSI_OUTPUT_RTSI_OSC); + devpriv->stc_writew(dev, devpriv->rtsi_trig_b_output_reg, + RTSI_Trig_B_Output_Register); +} + +int a4l_ni_E_init(struct a4l_device *dev) +{ + int ret; + unsigned int j, counter_variant; + struct a4l_subdevice *subd; + + if (boardtype.n_aochan > MAX_N_AO_CHAN) { + a4l_err(dev, "bug! boardtype.n_aochan > MAX_N_AO_CHAN\n"); + return -EINVAL; + } + + /* analog input subdevice */ + + a4l_dbg(1, drv_dbg, dev, "mio_common: starting attach procedure...\n"); + + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, "mio_common: registering AI subdevice...\n"); + + if (boardtype.n_adchan) { + + a4l_dbg(1, drv_dbg, dev, + "mio_common: AI: %d channels\n", boardtype.n_adchan); + + subd->flags = A4L_SUBD_AI | A4L_SUBD_CMD | A4L_SUBD_MMAP; + subd->rng_desc = ni_range_lkup[boardtype.gainlkup]; + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->length = boardtype.n_adchan; + subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_DIFF; + if (boardtype.reg_type != ni_reg_611x) + subd->chan_desc->chans[0].flags |= A4L_CHAN_AREF_GROUND | + A4L_CHAN_AREF_COMMON | A4L_CHAN_AREF_OTHER; + subd->chan_desc->chans[0].nb_bits = boardtype.adbits; + + subd->insn_read = ni_ai_insn_read; + subd->insn_config = ni_ai_insn_config; + subd->do_cmdtest = ni_ai_cmdtest; + subd->do_cmd = ni_ai_cmd; + subd->cancel = ni_ai_reset; + subd->trigger = ni_ai_inttrig; + + subd->munge = (boardtype.adbits > 16) ? 
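+ /* samples wider than 16 bits are stored as 32-bit words, so pick the matching munge routine */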
+ ni_ai_munge32 : ni_ai_munge16; + + subd->cmd_mask = &mio_ai_cmd_mask; + } else { + a4l_dbg(1, drv_dbg, dev, + "mio_common: AI subdevice not present\n"); + subd->flags = A4L_SUBD_UNUSED; + } + + ret = a4l_add_subd(dev, subd); + if(ret != NI_AI_SUBDEV) + return ret; + + a4l_dbg(1, drv_dbg, dev, "mio_common: AI subdevice registered\n"); + + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, "mio_common: registering AO subdevice...\n"); + + /* analog output subdevice */ + if (boardtype.n_aochan) { + + a4l_dbg(1, drv_dbg, dev, + "mio_common: AO: %d channels\n", boardtype.n_aochan); + + subd->flags = A4L_SUBD_AO; + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->length = boardtype.n_aochan; + subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_GROUND; + subd->chan_desc->chans[0].nb_bits = boardtype.aobits; + + subd->rng_desc = boardtype.ao_range_table; + + subd->insn_read = ni_ao_insn_read; + if (boardtype.reg_type & ni_reg_6xxx_mask) + subd->insn_write = &ni_ao_insn_write_671x; + else + subd->insn_write = &ni_ao_insn_write; + + + if (boardtype.ao_fifo_depth) { + subd->flags |= A4L_SUBD_CMD | A4L_SUBD_MMAP; + subd->do_cmd = &ni_ao_cmd; + subd->cmd_mask = &mio_ao_cmd_mask; + subd->do_cmdtest = &ni_ao_cmdtest; + subd->trigger = ni_ao_inttrig; + if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) + subd->munge = &ni_ao_munge; + } + + subd->cancel = &ni_ao_reset; + + } else { + a4l_dbg(1, drv_dbg, dev, + "mio_common: AO subdevice not present\n"); + subd->flags = A4L_SUBD_UNUSED; + } + + ret = a4l_add_subd(dev, subd); + if(ret != NI_AO_SUBDEV) + return ret; + + a4l_dbg(1, drv_dbg, dev, "mio_common: AO subdevice registered\n"); + + if ((boardtype.reg_type & ni_reg_67xx_mask)) + init_ao_67xx(dev); + + /* digital i/o subdevice */ + + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, "mio_common: registering DIO subdevice...\n"); + a4l_dbg(1, drv_dbg, dev, + "mio_common: DIO: %d channels\n", + boardtype.num_p0_dio_channels); + + subd->flags = A4L_SUBD_DIO; + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->length = boardtype.num_p0_dio_channels; + subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_GROUND; + subd->chan_desc->chans[0].nb_bits = 1; + devpriv->io_bits = 0; /* all bits input */ + + subd->rng_desc = &range_digital; + + if (boardtype.reg_type & ni_reg_m_series_mask) { + + if (subd->chan_desc->length == 8) + subd->insn_bits = ni_m_series_dio_insn_bits_8; + else + subd->insn_bits = ni_m_series_dio_insn_bits_32; + + subd->insn_config = ni_m_series_dio_insn_config; + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + + a4l_dbg(1, drv_dbg, dev, + "mio_common: DIO: command feature available\n"); + + subd->flags |= A4L_SUBD_CMD; + subd->do_cmd = ni_cdio_cmd; + subd->do_cmdtest = ni_cdio_cmdtest; + subd->cmd_mask = &mio_dio_cmd_mask; + subd->cancel = ni_cdio_cancel; + subd->trigger = ni_cdo_inttrig; + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + ni_writel(CDO_Reset_Bit | CDI_Reset_Bit, M_Offset_CDIO_Command); + ni_writel(devpriv->io_bits, M_Offset_DIO_Direction); + } else { + + subd->insn_bits = ni_dio_insn_bits; + subd->insn_config = ni_dio_insn_config; + devpriv->dio_control = 
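/* io_bits is still zero here, so every DIO pin starts as an input */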
DIO_Pins_Dir(devpriv->io_bits); + ni_writew(devpriv->dio_control, DIO_Control_Register); + } + + ret = a4l_add_subd(dev, subd); + if(ret != NI_DIO_SUBDEV) + return ret; + + a4l_dbg(1, drv_dbg, dev, "mio_common: DIO subdevice registered\n"); + + /* 8255 device */ + subd = a4l_alloc_subd(sizeof(subd_8255_t), NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, "mio_common: registering 8255 subdevice...\n"); + + if (boardtype.has_8255) { + devpriv->subd_8255.cb_arg = (unsigned long)dev; + devpriv->subd_8255.cb_func = ni_8255_callback; + a4l_subdev_8255_init(subd); + } else { + a4l_dbg(1, drv_dbg, dev, + "mio_common: 8255 subdevice not present\n"); + subd->flags = A4L_SUBD_UNUSED; + } + + ret = a4l_add_subd(dev, subd); + if(ret != NI_8255_DIO_SUBDEV) + return ret; + + a4l_dbg(1, drv_dbg, dev, "mio_common: 8255 subdevice registered\n"); + + /* formerly general purpose counter/timer device, but no longer used */ + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + + subd->flags = A4L_SUBD_UNUSED; + ret = a4l_add_subd(dev, subd); + if(ret != NI_UNUSED_SUBDEV) + return ret; + + /* calibration subdevice -- ai and ao */ + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, "mio_common: registering calib subdevice...\n"); + + subd->flags = A4L_SUBD_CALIB; + if (boardtype.reg_type & ni_reg_m_series_mask) { + /* internal PWM analog output + used for AI nonlinearity calibration */ + a4l_dbg(1, drv_dbg, dev, + "mio_common: calib: M series calibration"); + subd->insn_config = ni_m_series_pwm_config; + ni_writel(0x0, M_Offset_Cal_PWM); + } else if (boardtype.reg_type == ni_reg_6143) { + /* internal PWM analog output + used for AI nonlinearity calibration */ + a4l_dbg(1, drv_dbg, dev, + "mio_common: calib: 6143 calibration"); + subd->insn_config = ni_6143_pwm_config; + } else { + a4l_dbg(1, drv_dbg, dev, + "mio_common: calib: common calibration"); + subd->insn_read = ni_calib_insn_read; + subd->insn_write = ni_calib_insn_write; + caldac_setup(dev, subd); + } + + ret = a4l_add_subd(dev, subd); + if(ret != NI_CALIBRATION_SUBDEV) + return ret; + + a4l_dbg(1, drv_dbg, dev, "mio_common: calib subdevice registered\n"); + + /* EEPROM */ + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, + "mio_common: registering EEPROM subdevice...\n"); + + subd->flags = A4L_SUBD_MEMORY; + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->chans[0].flags = 0; + subd->chan_desc->chans[0].nb_bits = 8; + + if (boardtype.reg_type & ni_reg_m_series_mask) { + subd->chan_desc->length = M_SERIES_EEPROM_SIZE; + subd->insn_read = ni_m_series_eeprom_insn_read; + } else { + subd->chan_desc->length = 512; + subd->insn_read = ni_eeprom_insn_read; + } + + a4l_dbg(1, drv_dbg, dev, + "mio_common: EEPROM: size = %lu\n", subd->chan_desc->length); + + ret = a4l_add_subd(dev, subd); + if(ret != NI_EEPROM_SUBDEV) + return ret; + + a4l_dbg(1, drv_dbg, dev, "mio_common: EEPROM subdevice registered\n"); + + /* PFI */ + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, + "mio_common: registering PFI(DIO) subdevice...\n"); + + subd->flags = A4L_SUBD_DIO; + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->chans[0].flags = 0; + 
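/* each PFI pin is exposed as a single-bit DIO channel; the channel + count set below is 16 on M series boards and 10 on older ones */ +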
subd->chan_desc->chans[0].nb_bits = 1; + + if (boardtype.reg_type & ni_reg_m_series_mask) { + unsigned int i; + subd->chan_desc->length = 16; + ni_writew(devpriv->dio_state, M_Offset_PFI_DO); + for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) { + ni_writew(devpriv->pfi_output_select_reg[i], + M_Offset_PFI_Output_Select(i + 1)); + } + } else + subd->chan_desc->length = 10; + + a4l_dbg(1, drv_dbg, dev, + "mio_common: PFI: %lu bits...\n", subd->chan_desc->length); + + if (boardtype.reg_type & ni_reg_m_series_mask) { + subd->insn_bits = ni_pfi_insn_bits; + } + + subd->insn_config = ni_pfi_insn_config; + ni_set_bits(dev, IO_Bidirection_Pin_Register, ~0, 0); + + ret = a4l_add_subd(dev, subd); + if(ret != NI_PFI_DIO_SUBDEV) + return ret; + + a4l_dbg(1, drv_dbg, dev, "mio_common: PFI subdevice registered\n"); + + /* cs5529 calibration adc */ + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + +#if 0 /* TODO: add subdevices callbacks */ + subd->flags = A4L_SUBD_AI; + + if (boardtype.reg_type & ni_reg_67xx_mask) { + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->length = boardtype.n_aochan; + subd->chan_desc->chans[0].flags = 0; + subd->chan_desc->chans[0].nb_bits = 16; + + /* one channel for each analog output channel */ + subd->rng_desc = &a4l_range_unknown; /* XXX */ + s->insn_read = cs5529_ai_insn_read; + init_cs5529(dev); + } else +#endif /* TODO: add subdevices callbacks */ + subd->flags = A4L_SUBD_UNUSED; + + ret = a4l_add_subd(dev, subd); + if(ret != NI_CS5529_CALIBRATION_SUBDEV) + return ret; + + /* Serial */ + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, + "mio_common: registering serial subdevice...\n"); + + subd->flags = A4L_SUBD_SERIAL; + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->length = 1; + subd->chan_desc->chans[0].flags = 0; + subd->chan_desc->chans[0].nb_bits = 8; + + subd->insn_config = ni_serial_insn_config; + + devpriv->serial_interval_ns = 0; + devpriv->serial_hw_mode = 0; + + ret = a4l_add_subd(dev, subd); + if(ret != NI_SERIAL_SUBDEV) + return ret; + + a4l_dbg(1, drv_dbg, dev, "mio_common: serial subdevice registered\n"); + + /* RTSI */ + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + +#if 1 /* TODO: add RTSI subdevice */ + subd->flags = A4L_SUBD_UNUSED; + ni_rtsi_init(dev); + +#else /* TODO: add RTSI subdevice */ + subd->flags = A4L_SUBD_DIO; + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->length = 8; + subd->chan_desc->chans[0].flags = 0; + subd->chan_desc->chans[0].nb_bits = 1; + + subd->insn_bits = ni_rtsi_insn_bits; + subd->insn_config = ni_rtsi_insn_config; + ni_rtsi_init(dev); + +#endif /* TODO: add RTSI subdevice */ + + ret = a4l_add_subd(dev, subd); + if(ret != NI_RTSI_SUBDEV) + return ret; + + if (boardtype.reg_type & ni_reg_m_series_mask) { + counter_variant = ni_gpct_variant_m_series; + } else { + counter_variant = ni_gpct_variant_e_series; + } + devpriv->counter_dev = + a4l_ni_gpct_device_construct(dev, + &ni_gpct_write_register, + &ni_gpct_read_register, + counter_variant, NUM_GPCT); + + /* General purpose counters */ + for (j = 0; j < NUM_GPCT; ++j) { + struct ni_gpct 
*counter; + + subd = a4l_alloc_subd(sizeof(struct ni_gpct), NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, + "mio_common: registering GPCT[%d] subdevice...\n", j); + + subd->flags = A4L_SUBD_COUNTER; + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->length = 3; + subd->chan_desc->chans[0].flags = 0; + + if (boardtype.reg_type & ni_reg_m_series_mask) + subd->chan_desc->chans[0].nb_bits = 32; + else + subd->chan_desc->chans[0].nb_bits = 24; + + a4l_dbg(1, drv_dbg, dev, + "mio_common: GPCT[%d]: %lu bits\n", + j, subd->chan_desc->chans[0].nb_bits); + + subd->insn_read = ni_gpct_insn_read; + subd->insn_write = ni_gpct_insn_write; + subd->insn_config = ni_gpct_insn_config; + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + + a4l_dbg(1, drv_dbg, dev, + "mio_common: GPCT[%d]: command feature available\n", j); + subd->flags |= A4L_SUBD_CMD; + subd->cmd_mask = &a4l_ni_tio_cmd_mask; + subd->do_cmd = ni_gpct_cmd; + subd->do_cmdtest = ni_gpct_cmdtest; + subd->cancel = ni_gpct_cancel; +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + + counter = (struct ni_gpct *)subd->priv; + rtdm_lock_init(&counter->lock); + counter->chip_index = 0; + counter->counter_index = j; + counter->counter_dev = devpriv->counter_dev; + devpriv->counter_dev->counters[j] = counter; + + a4l_ni_tio_init_counter(counter); + + ret = a4l_add_subd(dev, subd); + if(ret != NI_GPCT_SUBDEV(j)) + return ret; + + a4l_dbg(1, drv_dbg, dev, + "mio_common: GPCT[%d] subdevice registered\n", j); + } + + /* Frequency output */ + subd = a4l_alloc_subd(0, NULL); + if(subd == NULL) + return -ENOMEM; + + a4l_dbg(1, drv_dbg, dev, + "mio_common: registering counter subdevice...\n"); + + subd->flags = A4L_SUBD_COUNTER; + + subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) + + sizeof(struct a4l_channel), GFP_KERNEL); + subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC; + subd->chan_desc->length = 1; + subd->chan_desc->chans[0].flags = 0; + subd->chan_desc->chans[0].nb_bits = 4; + + subd->insn_read = ni_freq_out_insn_read; + subd->insn_write = ni_freq_out_insn_write; + subd->insn_config = ni_freq_out_insn_config; + + ret = a4l_add_subd(dev, subd); + if(ret != NI_FREQ_OUT_SUBDEV) + return ret; + + a4l_dbg(1, drv_dbg, dev, + "mio_common: counter subdevice registered\n"); + + a4l_dbg(1, drv_dbg, dev, "mio_common: initializing AI...\n"); + + /* ai configuration */ + ni_ai_reset(a4l_get_subd(dev, NI_AI_SUBDEV)); + if ((boardtype.reg_type & ni_reg_6xxx_mask) == 0) { + // BEAM is this needed for PCI-6143 ?? + devpriv->clock_and_fout = + Slow_Internal_Time_Divide_By_2 | + Slow_Internal_Timebase | + Clock_To_Board_Divide_By_2 | + Clock_To_Board | + AI_Output_Divide_By_2 | AO_Output_Divide_By_2; + } else { + devpriv->clock_and_fout = + Slow_Internal_Time_Divide_By_2 | + Slow_Internal_Timebase | + Clock_To_Board_Divide_By_2 | Clock_To_Board; + } + devpriv->stc_writew(dev, devpriv->clock_and_fout, + Clock_and_FOUT_Register); + + a4l_dbg(1, drv_dbg, dev, "mio_common: AI initialization OK\n"); + + a4l_dbg(1, drv_dbg, dev, "mio_common: initializing AO...\n"); + + /* analog output configuration */ + ni_ao_reset(a4l_get_subd(dev, NI_AO_SUBDEV)); + + if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) { + devpriv->stc_writew(dev, + (devpriv->irq_polarity ?
Interrupt_Output_Polarity : 0) | + (Interrupt_Output_On_3_Pins & 0) | Interrupt_A_Enable | + Interrupt_B_Enable | + Interrupt_A_Output_Select(devpriv->irq_pin) | + Interrupt_B_Output_Select(devpriv->irq_pin), + Interrupt_Control_Register); + } + + a4l_dbg(1, drv_dbg, dev, "mio_common: AO initialization OK\n"); + + /* DMA setup */ + + a4l_dbg(1, drv_dbg, dev, "mio_common: DMA setup\n"); + + ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select); + ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select); + + if (boardtype.reg_type & ni_reg_6xxx_mask) { + ni_writeb(0, Magic_611x); + } else if (boardtype.reg_type & ni_reg_m_series_mask) { + int channel; + for (channel = 0; channel < boardtype.n_aochan; ++channel) { + ni_writeb(0xf, M_Offset_AO_Waveform_Order(channel)); + ni_writeb(0x0, + M_Offset_AO_Reference_Attenuation(channel)); + } + ni_writeb(0x0, M_Offset_AO_Calibration); + } + + a4l_dbg(1, drv_dbg, dev, "mio_common: attach procedure complete\n"); + + return 0; +} + +MODULE_DESCRIPTION("Analogy support for NI DAQ-STC based boards"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai); +EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_limited); +EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_limited14); +EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_bipolar4); +EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_611x); +EXPORT_SYMBOL_GPL(a4l_range_ni_M_ai_622x); +EXPORT_SYMBOL_GPL(a4l_range_ni_M_ai_628x); +EXPORT_SYMBOL_GPL(a4l_range_ni_S_ai_6143); +EXPORT_SYMBOL_GPL(a4l_range_ni_E_ao_ext); +EXPORT_SYMBOL_GPL(a4l_ni_E_interrupt); +EXPORT_SYMBOL_GPL(a4l_ni_E_init); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c new file mode 100644 index 0000000..4eb9eee --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c @@ -0,0 +1,839 @@ +/* + * Hardware driver for NI Mite PCI interface chip + * + * Copyright (C) 1999 David A. Schleef <ds@schleef.org> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This code is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The NI Mite driver was originally written by Tomasz Motylewski + * <...>, and ported to comedi by ds. + * + * References for specifications: + * + * 321747b.pdf Register Level Programmer Manual (obsolete) + * 321747c.pdf Register Level Programmer Manual (new) + * DAQ-STC reference manual + * + * Other possibly relevant info: + * + * 320517c.pdf User manual (obsolete) + * 320517f.pdf User manual (new) + * 320889a.pdf delete + * 320906c.pdf maximum signal ratings + * 321066a.pdf about 16x + * 321791a.pdf discontinuation of at-mio-16e-10 rev. c + * 321808a.pdf about at-mio-16e-10 rev P + * 321837a.pdf discontinuation of at-mio-16de-10 rev d + * 321838a.pdf about at-mio-16de-10 rev N + * + * ISSUES: + */ + +#include <linux/module.h> +#include "mite.h" + +#ifdef CONFIG_DEBUG_MITE +#define MDPRINTK(fmt, args...)
rtdm_printk(fmt, ##args) +#else /* !CONFIG_DEBUG_MITE */ +#define MDPRINTK(fmt, args...) +#endif /* CONFIG_DEBUG_MITE */ + +static LIST_HEAD(mite_devices); + +static struct pci_device_id mite_id[] = { + {PCI_DEVICE(PCI_VENDOR_ID_NATINST, PCI_ANY_ID), }, + {0, } +}; + +static int mite_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int i, err = 0; + struct mite_struct *mite; + + mite = kmalloc(sizeof(struct mite_struct), GFP_KERNEL); + if(mite == NULL) + return -ENOMEM; + + memset(mite, 0, sizeof(struct mite_struct)); + + rtdm_lock_init(&mite->lock); + + mite->pcidev = dev; + if (pci_enable_device(dev) < 0) { + __a4l_err("error enabling mite\n"); + err = -EIO; + goto out; + } + + for(i = 0; i < MAX_MITE_DMA_CHANNELS; i++) { + mite->channels[i].mite = mite; + mite->channels[i].channel = i; + mite->channels[i].done = 1; + } + + list_add(&mite->list, &mite_devices); + +out: + if (err < 0) + kfree(mite); + + return err; +} + +static void mite_remove(struct pci_dev *dev) +{ + struct list_head *this; + + list_for_each(this, &mite_devices) { + struct mite_struct *mite = + list_entry(this, struct mite_struct, list); + + if(mite->pcidev == dev) { + list_del(this); + kfree(mite); + break; + } + } +} + +static struct pci_driver mite_driver = { + .name = "analogy_mite", + .id_table = mite_id, + .probe = mite_probe, + .remove = mite_remove, +}; + +int a4l_mite_setup(struct mite_struct *mite, int use_iodwbsr_1) +{ + unsigned long length; + resource_size_t addr; + int i; + u32 csigr_bits; + unsigned unknown_dma_burst_bits; + + __a4l_dbg(1, drv_dbg, "starting setup...\n"); + + pci_set_master(mite->pcidev); + + if (pci_request_regions(mite->pcidev, "mite")) { + __a4l_err("failed to request mite io regions\n"); + return -EIO; + } + + /* The PCI BAR0 is the Mite */ + addr = pci_resource_start(mite->pcidev, 0); + length = pci_resource_len(mite->pcidev, 0); + mite->mite_phys_addr = addr; + mite->mite_io_addr = ioremap(addr, length); + if (!mite->mite_io_addr) { + __a4l_err("failed to remap mite io memory address\n"); + pci_release_regions(mite->pcidev); + return -ENOMEM; + } + + __a4l_dbg(1, drv_dbg, "bar0(mite) 0x%08llx mapped to %p\n", + (unsigned long long)mite->mite_phys_addr, + mite->mite_io_addr); + + + /* The PCI BAR1 is the DAQ */ + addr = pci_resource_start(mite->pcidev, 1); + length = pci_resource_len(mite->pcidev, 1); + mite->daq_phys_addr = addr; + mite->daq_io_addr = ioremap(mite->daq_phys_addr, length); + if (!mite->daq_io_addr) { + __a4l_err("failed to remap daq io memory address\n"); + pci_release_regions(mite->pcidev); + return -ENOMEM; + } + + __a4l_dbg(1, drv_dbg, "bar1(daq) 0x%08llx mapped to %p\n", + (unsigned long long)mite->daq_phys_addr, + mite->daq_io_addr); + + if (use_iodwbsr_1) { + __a4l_dbg(1, drv_dbg, "using I/O Window Base Size register 1\n"); + writel(0, mite->mite_io_addr + MITE_IODWBSR); + writel(mite-> + daq_phys_addr | WENAB | + MITE_IODWBSR_1_WSIZE_bits(length), + mite->mite_io_addr + MITE_IODWBSR_1); + writel(0, mite->mite_io_addr + MITE_IODWCR_1); + } else { + writel(mite->daq_phys_addr | WENAB, + mite->mite_io_addr + MITE_IODWBSR); + } + + /* Make sure dma bursts work. I got this from running a bus analyzer + on a pxi-6281 and a pxi-6713. 6713 powered up with register value + of 0x61f and bursts worked. 6281 powered up with register value of + 0x1f and bursts didn't work. The NI windows driver reads the register, + then does a bitwise-or of 0x600 with it and writes it back.
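+ The read-modify-write just below mirrors that behaviour, or'ing + UNKNOWN_DMA_BURST_ENABLE_BITS into whatever value powered up.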
+ */ + unknown_dma_burst_bits = + readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG); + unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS; + writel(unknown_dma_burst_bits, + mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG); + + csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR); + mite->num_channels = mite_csigr_dmac(csigr_bits); + if (mite->num_channels > MAX_MITE_DMA_CHANNELS) { + __a4l_err("MITE: bug? chip claims to have %i dma channels. " + "Setting to %i.\n", + mite->num_channels, MAX_MITE_DMA_CHANNELS); + mite->num_channels = MAX_MITE_DMA_CHANNELS; + } + + __a4l_dbg(1, drv_dbg, " version = %i, type = %i, mite mode = %i, " + "interface mode = %i\n", + mite_csigr_version(csigr_bits), + mite_csigr_type(csigr_bits), + mite_csigr_mmode(csigr_bits), + mite_csigr_imode(csigr_bits)); + __a4l_dbg(1, drv_dbg, " num channels = %i, write post fifo depth = %i, " + "wins = %i, iowins = %i\n", + mite_csigr_dmac(csigr_bits), + mite_csigr_wpdep(csigr_bits), + mite_csigr_wins(csigr_bits), + mite_csigr_iowins(csigr_bits)); + + for (i = 0; i < mite->num_channels; i++) { + /* Registers the channel as a free one */ + mite->channel_allocated[i] = 0; + /* Reset the channel */ + writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i)); + /* Disable interrupts */ + writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE | + CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE | + CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE, + mite->mite_io_addr + MITE_CHCR(i)); + + __a4l_dbg(1, drv_dbg, "channel[%d] initialized\n", i); + } + + mite->used = 1; + + return 0; +} + +void a4l_mite_unsetup(struct mite_struct *mite) +{ + if (!mite) + return; + + if (mite->mite_io_addr) { + iounmap(mite->mite_io_addr); + mite->mite_io_addr = NULL; + } + + if (mite->daq_io_addr) { + iounmap(mite->daq_io_addr); + mite->daq_io_addr = NULL; + } + + if(mite->used) + pci_release_regions( mite->pcidev ); + + mite->used = 0; +} + +void a4l_mite_list_devices(void) +{ + struct list_head *this; + + printk("Analogy: MITE: Available NI device IDs:"); + list_for_each(this, &mite_devices) { + struct mite_struct *mite = + list_entry(this, struct mite_struct, list); + + printk(" 0x%04x", mite_device_id(mite)); + if(mite->used) + printk("(used)"); + } + + printk("\n"); +} + + + +struct mite_struct * a4l_mite_find_device(int bus, + int slot, unsigned short device_id) +{ + struct list_head *this; + + list_for_each(this, &mite_devices) { + struct mite_struct *mite = + list_entry(this, struct mite_struct, list); + + if(mite->pcidev->device != device_id) + continue; + + if((bus <= 0 && slot <= 0) || + (bus == mite->pcidev->bus->number && + slot == PCI_SLOT(mite->pcidev->devfn))) + return mite; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(a4l_mite_find_device); + +struct mite_channel * +a4l_mite_request_channel_in_range(struct mite_struct *mite, + struct mite_dma_descriptor_ring *ring, + unsigned min_channel, unsigned max_channel) +{ + int i; + unsigned long flags; + struct mite_channel *channel = NULL; + + __a4l_dbg(1, drv_dbg, " min_channel = %u, max_channel = %u\n", + min_channel, max_channel); + + /* spin lock so a4l_mite_release_channel can be called safely + from interrupts */ + rtdm_lock_get_irqsave(&mite->lock, flags); + for (i = min_channel; i <= max_channel; ++i) { + + __a4l_dbg(1, drv_dbg, " channel[%d] allocated = %d\n", + i, mite->channel_allocated[i]); + + if (mite->channel_allocated[i] == 0) { + mite->channel_allocated[i] = 1; + channel = &mite->channels[i]; + channel->ring = ring; + break; + } + } + 
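/* NULL means every channel in the range is busy; callers must + check for that before arming DMA */ +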
rtdm_lock_put_irqrestore(&mite->lock, flags); + return channel; +} + +void a4l_mite_release_channel(struct mite_channel *mite_chan) +{ + struct mite_struct *mite = mite_chan->mite; + unsigned long flags; + + /* Spin lock to prevent races with mite_request_channel */ + rtdm_lock_get_irqsave(&mite->lock, flags); + if (mite->channel_allocated[mite_chan->channel]) { + /* disable all channel's interrupts */ + writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | + CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE | + CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE | + CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE, + mite->mite_io_addr + MITE_CHCR(mite_chan->channel)); + a4l_mite_dma_disarm(mite_chan); + mite_dma_reset(mite_chan); + mite->channel_allocated[mite_chan->channel] = 0; + mite_chan->ring = NULL; + mmiowb(); + } + rtdm_lock_put_irqrestore(&mite->lock, flags); +} + +void a4l_mite_dma_arm(struct mite_channel *mite_chan) +{ + struct mite_struct *mite = mite_chan->mite; + int chor; + unsigned long flags; + + MDPRINTK("a4l_mite_dma_arm ch%i\n", mite_chan->channel); + /* Memory barrier is intended to insure any twiddling with the buffer + is done before writing to the mite to arm dma transfer */ + smp_mb(); + /* arm */ + chor = CHOR_START; + rtdm_lock_get_irqsave(&mite->lock, flags); + mite_chan->done = 0; + writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); + mmiowb(); + rtdm_lock_put_irqrestore(&mite->lock, flags); +} + +void a4l_mite_dma_disarm(struct mite_channel *mite_chan) +{ + struct mite_struct *mite = mite_chan->mite; + unsigned chor; + + /* disarm */ + chor = CHOR_ABORT; + writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); +} + +int a4l_mite_buf_change(struct mite_dma_descriptor_ring *ring, struct a4l_subdevice *subd) +{ + struct a4l_buffer *buf = subd->buf; + unsigned int n_links; + int i; + + if (ring->descriptors) { + dma_free_coherent(&ring->pcidev->dev, + ring->n_links * sizeof(struct mite_dma_descriptor), + ring->descriptors, ring->descriptors_dma_addr); + } + ring->descriptors = NULL; + ring->descriptors_dma_addr = 0; + ring->n_links = 0; + + if (buf->size == 0) { + return 0; + } + n_links = buf->size >> PAGE_SHIFT; + + MDPRINTK("ring->pcidev=%p, n_links=0x%04x\n", ring->pcidev, n_links); + + ring->descriptors = + dma_alloc_coherent(&ring->pcidev->dev, + n_links * sizeof(struct mite_dma_descriptor), + &ring->descriptors_dma_addr, GFP_ATOMIC); + if (!ring->descriptors) { + printk("MITE: ring buffer allocation failed\n"); + return -ENOMEM; + } + ring->n_links = n_links; + + for (i = 0; i < n_links; i++) { + ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE); + ring->descriptors[i].addr = cpu_to_le32(buf->pg_list[i]); + ring->descriptors[i].next = + cpu_to_le32(ring->descriptors_dma_addr + + (i + 1) * sizeof(struct mite_dma_descriptor)); + } + + ring->descriptors[n_links - 1].next = + cpu_to_le32(ring->descriptors_dma_addr); + + /* Barrier is meant to insure that all the writes to the dma descriptors + have completed before the dma controller is commanded to read them */ + smp_wmb(); + + return 0; +} + +void a4l_mite_prep_dma(struct mite_channel *mite_chan, + unsigned int num_device_bits, unsigned int num_memory_bits) +{ + unsigned int chor, chcr, mcr, dcr, lkcr; + struct mite_struct *mite = mite_chan->mite; + + MDPRINTK("a4l_mite_prep_dma ch%i\n", mite_chan->channel); + + /* reset DMA and FIFO */ + chor = CHOR_DMARESET | CHOR_FRESET; + writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); + + /* short link chaining mode */ + chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | 
CHCR_SET_DONE_IE | + CHCR_BURSTEN; + /* + * Link Complete Interrupt: interrupt every time a link + * in MITE_RING is completed. This can generate a lot of + * extra interrupts, but right now we update the values + * of buf_int_ptr and buf_int_count at each interrupt. A + * better method is to poll the MITE before each user + * "read()" to calculate the number of bytes available. + */ + chcr |= CHCR_SET_LC_IE; + if (num_memory_bits == 32 && num_device_bits == 16) { + /* Doing a combined 32 and 16 bit byteswap gets the 16 + bit samples into the fifo in the right order. + Tested doing 32 bit memory to 16 bit device + transfers to the analog out of a pxi-6281, which + has mite version = 1, type = 4. This also works + for dma reads from the counters on e-series boards. + */ + chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY; + } + + if (mite_chan->dir == A4L_INPUT) { + chcr |= CHCR_DEV_TO_MEM; + } + writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel)); + + /* to/from memory */ + mcr = CR_RL(64) | CR_ASEQUP; + switch (num_memory_bits) { + case 8: + mcr |= CR_PSIZE8; + break; + case 16: + mcr |= CR_PSIZE16; + break; + case 32: + mcr |= CR_PSIZE32; + break; + default: + __a4l_err("MITE: bug! " + "invalid mem bit width for dma transfer\n"); + break; + } + writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel)); + + /* from/to device */ + dcr = CR_RL(64) | CR_ASEQUP; + dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel); + switch (num_device_bits) { + case 8: + dcr |= CR_PSIZE8; + break; + case 16: + dcr |= CR_PSIZE16; + break; + case 32: + dcr |= CR_PSIZE32; + break; + default: + __a4l_info("MITE: bug! " + "invalid dev bit width for dma transfer\n"); + break; + } + writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel)); + + /* reset the DAR */ + writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel)); + + /* the link is 32bits */ + lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32; + writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel)); + + /* starting address for link chaining */ + writel(mite_chan->ring->descriptors_dma_addr, + mite->mite_io_addr + MITE_LKAR(mite_chan->channel)); + + MDPRINTK("exit a4l_mite_prep_dma\n"); +} + +u32 mite_device_bytes_transferred(struct mite_channel *mite_chan) +{ + struct mite_struct *mite = mite_chan->mite; + return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel)); +} + +u32 a4l_mite_bytes_in_transit(struct mite_channel * mite_chan) +{ + struct mite_struct *mite = mite_chan->mite; + return readl(mite->mite_io_addr + + MITE_FCR(mite_chan->channel)) & 0x000000FF; +} + +/* Returns lower bound for number of bytes transferred from device to memory */ +u32 a4l_mite_bytes_written_to_memory_lb(struct mite_channel * mite_chan) +{ + u32 device_byte_count; + + device_byte_count = mite_device_bytes_transferred(mite_chan); + return device_byte_count - a4l_mite_bytes_in_transit(mite_chan); +} + +/* Returns upper bound for number of bytes transferred from device to memory */ +u32 a4l_mite_bytes_written_to_memory_ub(struct mite_channel * mite_chan) +{ + u32 in_transit_count; + + in_transit_count = a4l_mite_bytes_in_transit(mite_chan); + return mite_device_bytes_transferred(mite_chan) - in_transit_count; +} + +/* Returns lower bound for number of bytes read from memory for transfer to device */ +u32 a4l_mite_bytes_read_from_memory_lb(struct mite_channel * mite_chan) +{ + u32 device_byte_count; + + device_byte_count = mite_device_bytes_transferred(mite_chan); + return device_byte_count + 
a4l_mite_bytes_in_transit(mite_chan); +} + +/* Returns upper bound for number of bytes read from memory for transfer to device */ +u32 a4l_mite_bytes_read_from_memory_ub(struct mite_channel * mite_chan) +{ + u32 in_transit_count; + + in_transit_count = a4l_mite_bytes_in_transit(mite_chan); + return mite_device_bytes_transferred(mite_chan) + in_transit_count; +} + +int a4l_mite_sync_input_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd) +{ + unsigned int nbytes_lb, nbytes_ub; + + nbytes_lb = a4l_mite_bytes_written_to_memory_lb(mite_chan); + nbytes_ub = a4l_mite_bytes_written_to_memory_ub(mite_chan); + + if(a4l_buf_prepare_absput(subd, nbytes_ub) != 0) { + __a4l_err("MITE: DMA overwrite of free area\n"); + return -EPIPE; + } + + return a4l_buf_commit_absput(subd, nbytes_lb); +} + +int a4l_mite_sync_output_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd) +{ + struct a4l_buffer *buf = subd->buf; + unsigned int nbytes_ub, nbytes_lb; + int err; + + nbytes_lb = a4l_mite_bytes_read_from_memory_lb(mite_chan); + nbytes_ub = a4l_mite_bytes_read_from_memory_ub(mite_chan); + + err = a4l_buf_prepare_absget(subd, nbytes_ub); + if(err < 0) { + __a4l_info("MITE: DMA underrun\n"); + return -EPIPE; + } + + err = a4l_buf_commit_absget(subd, nbytes_lb); + + /* If the MITE has already transfered more than required, we + can disable it */ + if (test_bit(A4L_BUF_EOA_NR, &buf->flags)) + writel(CHOR_STOP, + mite_chan->mite->mite_io_addr + + MITE_CHOR(mite_chan->channel)); + + return err; +} + +u32 a4l_mite_get_status(struct mite_channel *mite_chan) +{ + struct mite_struct *mite = mite_chan->mite; + u32 status; + unsigned long flags; + + rtdm_lock_get_irqsave(&mite->lock, flags); + status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel)); + if (status & CHSR_DONE) { + mite_chan->done = 1; + writel(CHOR_CLRDONE, + mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); + } + mmiowb(); + rtdm_lock_put_irqrestore(&mite->lock, flags); + return status; +} + +int a4l_mite_done(struct mite_channel *mite_chan) +{ + struct mite_struct *mite = mite_chan->mite; + unsigned long flags; + int done; + + a4l_mite_get_status(mite_chan); + rtdm_lock_get_irqsave(&mite->lock, flags); + done = mite_chan->done; + rtdm_lock_put_irqrestore(&mite->lock, flags); + return done; +} + +#ifdef CONFIG_DEBUG_MITE + +static void a4l_mite_decode(const char *const bit_str[], unsigned int bits); + +/* names of bits in mite registers */ + +static const char *const mite_CHOR_strings[] = { + "start", "cont", "stop", "abort", + "freset", "clrlc", "clrrb", "clrdone", + "clr_lpause", "set_lpause", "clr_send_tc", + "set_send_tc", "12", "13", "14", + "15", "16", "17", "18", + "19", "20", "21", "22", + "23", "24", "25", "26", + "27", "28", "29", "30", + "dmareset", +}; + +static const char *const mite_CHCR_strings[] = { + "continue", "ringbuff", "2", "3", + "4", "5", "6", "7", + "8", "9", "10", "11", + "12", "13", "bursten", "fifodis", + "clr_cont_rb_ie", "set_cont_rb_ie", "clr_lc_ie", "set_lc_ie", + "clr_drdy_ie", "set_drdy_ie", "clr_mrdy_ie", "set_mrdy_ie", + "clr_done_ie", "set_done_ie", "clr_sar_ie", "set_sar_ie", + "clr_linkp_ie", "set_linkp_ie", "clr_dma_ie", "set_dma_ie", +}; + +static const char *const mite_MCR_strings[] = { + "amdevice", "1", "2", "3", + "4", "5", "portio", "portvxi", + "psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "11", + "12", "13", "blocken", "berhand", + "reqsintlim/reqs0", "reqs1", "reqs2", "rd32", + "rd512", "rl1", "rl2", "rl8", + "24", "25", "26", "27", + "28", "29", "30", 
"stopen", +}; + +static const char *const mite_DCR_strings[] = { + "amdevice", "1", "2", "3", + "4", "5", "portio", "portvxi", + "psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "aseqxp2", + "aseqxp8", "13", "blocken", "berhand", + "reqsintlim", "reqs1", "reqs2", "rd32", + "rd512", "rl1", "rl2", "rl8", + "23", "24", "25", "27", + "28", "wsdevc", "wsdevs", "rwdevpack", +}; + +static const char *const mite_LKCR_strings[] = { + "amdevice", "1", "2", "3", + "4", "5", "portio", "portvxi", + "psizebyte", "psizehalf (byte & half = word)", "asequp", "aseqdown", + "12", "13", "14", "berhand", + "16", "17", "18", "rd32", + "rd512", "rl1", "rl2", "rl8", + "24", "25", "26", "27", + "28", "29", "30", "chngend", +}; + +static const char *const mite_CHSR_strings[] = { + "d.err0", "d.err1", "m.err0", "m.err1", + "l.err0", "l.err1", "drq0", "drq1", + "end", "xferr", "operr0", "operr1", + "stops", "habort", "sabort", "error", + "16", "conts_rb", "18", "linkc", + "20", "drdy", "22", "mrdy", + "24", "done", "26", "sars", + "28", "lpauses", "30", "int", +}; + +void a4l_mite_dump_regs(struct mite_channel *mite_chan) +{ + unsigned long mite_io_addr = + (unsigned long)mite_chan->mite->mite_io_addr; + unsigned long addr = 0; + unsigned long temp = 0; + + printk("a4l_mite_dump_regs ch%i\n", mite_chan->channel); + printk("mite address is =0x%08lx\n", mite_io_addr); + + addr = mite_io_addr + MITE_CHOR(mite_chan->channel); + printk("mite status[CHOR]at 0x%08lx =0x%08lx\n", addr, temp = + readl((void *)addr)); + a4l_mite_decode(mite_CHOR_strings, temp); + addr = mite_io_addr + MITE_CHCR(mite_chan->channel); + printk("mite status[CHCR]at 0x%08lx =0x%08lx\n", addr, temp = + readl((void *)addr)); + a4l_mite_decode(mite_CHCR_strings, temp); + addr = mite_io_addr + MITE_TCR(mite_chan->channel); + printk("mite status[TCR] at 0x%08lx =0x%08x\n", addr, + readl((void *)addr)); + addr = mite_io_addr + MITE_MCR(mite_chan->channel); + printk("mite status[MCR] at 0x%08lx =0x%08lx\n", addr, temp = + readl((void *)addr)); + a4l_mite_decode(mite_MCR_strings, temp); + + addr = mite_io_addr + MITE_MAR(mite_chan->channel); + printk("mite status[MAR] at 0x%08lx =0x%08x\n", addr, + readl((void *)addr)); + addr = mite_io_addr + MITE_DCR(mite_chan->channel); + printk("mite status[DCR] at 0x%08lx =0x%08lx\n", addr, temp = + readl((void *)addr)); + a4l_mite_decode(mite_DCR_strings, temp); + addr = mite_io_addr + MITE_DAR(mite_chan->channel); + printk("mite status[DAR] at 0x%08lx =0x%08x\n", addr, + readl((void *)addr)); + addr = mite_io_addr + MITE_LKCR(mite_chan->channel); + printk("mite status[LKCR]at 0x%08lx =0x%08lx\n", addr, temp = + readl((void *)addr)); + a4l_mite_decode(mite_LKCR_strings, temp); + addr = mite_io_addr + MITE_LKAR(mite_chan->channel); + printk("mite status[LKAR]at 0x%08lx =0x%08x\n", addr, + readl((void *)addr)); + + addr = mite_io_addr + MITE_CHSR(mite_chan->channel); + printk("mite status[CHSR]at 0x%08lx =0x%08lx\n", addr, temp = + readl((void *)addr)); + a4l_mite_decode(mite_CHSR_strings, temp); + addr = mite_io_addr + MITE_FCR(mite_chan->channel); + printk("mite status[FCR] at 0x%08lx =0x%08x\n\n", addr, + readl((void *)addr)); +} + + +static void a4l_mite_decode(const char *const bit_str[], unsigned int bits) +{ + int i; + + for (i = 31; i >= 0; i--) { + if (bits & (1 << i)) { + printk(" %s", bit_str[i]); + } + } + printk("\n"); +} + +#endif /* CONFIG_DEBUG_MITE */ + + +static int __init mite_init(void) +{ + int err; + + /* Register the mite's PCI driver */ + err = pci_register_driver(&mite_driver); + + 
if(err == 0) + a4l_mite_list_devices(); + + return err; +} + +static void __exit mite_cleanup(void) +{ + + /* Unregister the PCI structure driver */ + pci_unregister_driver(&mite_driver); + + /* Just paranoia... */ + while(&mite_devices != mite_devices.next) { + struct list_head *this = mite_devices.next; + struct mite_struct *mite = + list_entry(this, struct mite_struct, list); + + list_del(this); + kfree(mite); + } +} + +MODULE_LICENSE("GPL"); +module_init(mite_init); +module_exit(mite_cleanup); + +EXPORT_SYMBOL_GPL(a4l_mite_dma_arm); +EXPORT_SYMBOL_GPL(a4l_mite_dma_disarm); +EXPORT_SYMBOL_GPL(a4l_mite_sync_input_dma); +EXPORT_SYMBOL_GPL(a4l_mite_sync_output_dma); +EXPORT_SYMBOL_GPL(a4l_mite_setup); +EXPORT_SYMBOL_GPL(a4l_mite_unsetup); +EXPORT_SYMBOL_GPL(a4l_mite_list_devices); +EXPORT_SYMBOL_GPL(a4l_mite_request_channel_in_range); +EXPORT_SYMBOL_GPL(a4l_mite_release_channel); +EXPORT_SYMBOL_GPL(a4l_mite_prep_dma); +EXPORT_SYMBOL_GPL(a4l_mite_buf_change); +EXPORT_SYMBOL_GPL(a4l_mite_bytes_written_to_memory_lb); +EXPORT_SYMBOL_GPL(a4l_mite_bytes_written_to_memory_ub); +EXPORT_SYMBOL_GPL(a4l_mite_bytes_read_from_memory_lb); +EXPORT_SYMBOL_GPL(a4l_mite_bytes_read_from_memory_ub); +EXPORT_SYMBOL_GPL(a4l_mite_bytes_in_transit); +EXPORT_SYMBOL_GPL(a4l_mite_get_status); +EXPORT_SYMBOL_GPL(a4l_mite_done); +#ifdef CONFIG_DEBUG_MITE +EXPORT_SYMBOL_GPL(a4l_mite_decode); +EXPORT_SYMBOL_GPL(a4l_mite_dump_regs); +#endif /* CONFIG_DEBUG_MITE */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h new file mode 100644 index 0000000..3b9aaba --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h @@ -0,0 +1,435 @@ +/* + * Hardware driver for NI Mite PCI interface chip + * @note Copyright (C) 1999 David A. Schleef <ds@schleef.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef __ANALOGY_NI_MITE_H__ +#define __ANALOGY_NI_MITE_H__ + +#include <linux/pci.h> +#include <linux/slab.h> +#include <rtdm/analogy/device.h> + +#define PCI_VENDOR_ID_NATINST 0x1093 +#define PCI_MITE_SIZE 4096 +#define PCI_DAQ_SIZE 4096 +#define PCI_DAQ_SIZE_660X 8192 +#define PCIMIO_COMPAT +#define MAX_MITE_DMA_CHANNELS 8 + +#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK))) + +struct mite_dma_descriptor { + u32 count; + u32 addr; + u32 next; + u32 dar; +}; + +struct mite_dma_descriptor_ring { + struct pci_dev *pcidev; + u32 n_links; + struct mite_dma_descriptor *descriptors; + dma_addr_t descriptors_dma_addr; +}; + +struct mite_channel { + struct mite_struct *mite; + u32 channel; + u32 dir; + u32 done; + struct mite_dma_descriptor_ring *ring; +}; + +struct mite_struct { + struct list_head list; + rtdm_lock_t lock; + u32 used; + u32 num_channels; + + struct mite_channel channels[MAX_MITE_DMA_CHANNELS]; + u32 channel_allocated[MAX_MITE_DMA_CHANNELS]; + + struct pci_dev *pcidev; + resource_size_t mite_phys_addr; + void *mite_io_addr; + resource_size_t daq_phys_addr; + void *daq_io_addr; +}; + +static inline +struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite) +{ + struct mite_dma_descriptor_ring *ring = + kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_DMA); + + if (ring == NULL) + return ring; + + memset(ring, 0, sizeof(struct mite_dma_descriptor_ring)); + + ring->pcidev = mite->pcidev; + if (ring->pcidev == NULL) { + kfree(ring); + return NULL; + } + + return ring; +}; + +static inline void mite_free_ring(struct mite_dma_descriptor_ring *ring) +{ + if (ring) { + if (ring->descriptors) { + dma_free_coherent( + &ring->pcidev->dev, + ring->n_links * + sizeof(struct mite_dma_descriptor), + ring->descriptors, ring->descriptors_dma_addr); + } + kfree(ring); + } +}; + +static inline unsigned int mite_irq(struct mite_struct *mite) +{ + return mite->pcidev->irq; +}; +static inline unsigned int mite_device_id(struct mite_struct *mite) +{ + return mite->pcidev->device; +}; + +int a4l_mite_setup(struct mite_struct *mite, int use_iodwbsr_1); +void a4l_mite_unsetup(struct mite_struct *mite); +void a4l_mite_list_devices(void); +struct mite_struct * a4l_mite_find_device(int bus, + int slot, unsigned short device_id); +struct mite_channel * +a4l_mite_request_channel_in_range(struct mite_struct *mite, + struct mite_dma_descriptor_ring *ring, + unsigned min_channel, unsigned max_channel); +static inline struct mite_channel *mite_request_channel(struct mite_struct + *mite, struct mite_dma_descriptor_ring *ring) +{ + return a4l_mite_request_channel_in_range(mite, ring, 0, + mite->num_channels - 1); +} +void a4l_mite_release_channel(struct mite_channel *mite_chan); + +void a4l_mite_dma_arm(struct mite_channel *mite_chan); +void a4l_mite_dma_disarm(struct mite_channel *mite_chan); +int a4l_mite_sync_input_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd); +int a4l_mite_sync_output_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd); +u32 a4l_mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan); +u32 a4l_mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan); +u32 a4l_mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan); +u32 a4l_mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan); +u32 a4l_mite_bytes_in_transit(struct mite_channel *mite_chan); +u32 a4l_mite_get_status(struct mite_channel *mite_chan); +int a4l_mite_done(struct mite_channel *mite_chan); +void a4l_mite_prep_dma(struct mite_channel *mite_chan, + 
unsigned int num_device_bits, unsigned int num_memory_bits); +int a4l_mite_buf_change(struct mite_dma_descriptor_ring *ring, struct a4l_subdevice *subd); + +#ifdef CONFIG_DEBUG_MITE +void mite_print_chsr(unsigned int chsr); +void a4l_mite_dump_regs(struct mite_channel *mite_chan); +#endif + +static inline int CHAN_OFFSET(int channel) +{ + return 0x500 + 0x100 * channel; +}; + +enum mite_registers { + /* The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be + written and read back. The bits 0x1f always read as 1. + The rest always read as zero. */ + MITE_UNKNOWN_DMA_BURST_REG = 0x28, + MITE_IODWBSR = 0xc0, //IO Device Window Base Size Register + MITE_IODWBSR_1 = 0xc4, // IO Device Window Base Size Register 1 + MITE_IODWCR_1 = 0xf4, + MITE_PCI_CONFIG_OFFSET = 0x300, + MITE_CSIGR = 0x460 //chip signature +}; +static inline int MITE_CHOR(int channel) // channel operation +{ + return CHAN_OFFSET(channel) + 0x0; +}; +static inline int MITE_CHCR(int channel) // channel control +{ + return CHAN_OFFSET(channel) + 0x4; +}; +static inline int MITE_TCR(int channel) // transfer count +{ + return CHAN_OFFSET(channel) + 0x8; +}; +static inline int MITE_MCR(int channel) // memory configuration +{ + return CHAN_OFFSET(channel) + 0xc; +}; +static inline int MITE_MAR(int channel) // memory address +{ + return CHAN_OFFSET(channel) + 0x10; +}; +static inline int MITE_DCR(int channel) // device configuration +{ + return CHAN_OFFSET(channel) + 0x14; +}; +static inline int MITE_DAR(int channel) // device address +{ + return CHAN_OFFSET(channel) + 0x18; +}; +static inline int MITE_LKCR(int channel) // link configuration +{ + return CHAN_OFFSET(channel) + 0x1c; +}; +static inline int MITE_LKAR(int channel) // link address +{ + return CHAN_OFFSET(channel) + 0x20; +}; +static inline int MITE_LLKAR(int channel) // see mite section of tnt5002 manual +{ + return CHAN_OFFSET(channel) + 0x24; +}; +static inline int MITE_BAR(int channel) // base address +{ + return CHAN_OFFSET(channel) + 0x28; +}; +static inline int MITE_BCR(int channel) // base count +{ + return CHAN_OFFSET(channel) + 0x2c; +}; +static inline int MITE_SAR(int channel) // ? address +{ + return CHAN_OFFSET(channel) + 0x30; +}; +static inline int MITE_WSCR(int channel) // ? +{ + return CHAN_OFFSET(channel) + 0x34; +}; +static inline int MITE_WSER(int channel) // ? 
+{ + return CHAN_OFFSET(channel) + 0x38; +}; +static inline int MITE_CHSR(int channel) // channel status +{ + return CHAN_OFFSET(channel) + 0x3c; +}; +static inline int MITE_FCR(int channel) // fifo count +{ + return CHAN_OFFSET(channel) + 0x40; +}; + +enum MITE_IODWBSR_bits { + WENAB = 0x80, // window enable +}; + +static inline unsigned MITE_IODWBSR_1_WSIZE_bits(unsigned size) +{ + unsigned order = 0; + while (size >>= 1) + ++order; + BUG_ON(order < 1); + return (order - 1) & 0x1f; +} + +enum MITE_UNKNOWN_DMA_BURST_bits { + UNKNOWN_DMA_BURST_ENABLE_BITS = 0x600 +}; + +static inline int mite_csigr_version(u32 csigr_bits) +{ + return csigr_bits & 0xf; +}; +static inline int mite_csigr_type(u32 csigr_bits) +{ // original mite = 0, minimite = 1 + return (csigr_bits >> 4) & 0xf; +}; +static inline int mite_csigr_mmode(u32 csigr_bits) +{ // mite mode, minimite = 1 + return (csigr_bits >> 8) & 0x3; +}; +static inline int mite_csigr_imode(u32 csigr_bits) +{ // cpu port interface mode, pci = 0x3 + return (csigr_bits >> 12) & 0x3; +}; +static inline int mite_csigr_dmac(u32 csigr_bits) +{ // number of dma channels + return (csigr_bits >> 16) & 0xf; +}; +static inline int mite_csigr_wpdep(u32 csigr_bits) +{ // write post fifo depth + unsigned int wpdep_bits = (csigr_bits >> 20) & 0x7; + if (wpdep_bits == 0) + return 0; + else + return 1 << (wpdep_bits - 1); +}; +static inline int mite_csigr_wins(u32 csigr_bits) +{ + return (csigr_bits >> 24) & 0x1f; +}; +static inline int mite_csigr_iowins(u32 csigr_bits) +{ // number of io windows + return (csigr_bits >> 29) & 0x7; +}; + +enum MITE_MCR_bits { + MCRPON = 0, +}; + +enum MITE_DCR_bits { + DCR_NORMAL = (1 << 29), + DCRPON = 0, +}; + +enum MITE_CHOR_bits { + CHOR_DMARESET = (1 << 31), + CHOR_SET_SEND_TC = (1 << 11), + CHOR_CLR_SEND_TC = (1 << 10), + CHOR_SET_LPAUSE = (1 << 9), + CHOR_CLR_LPAUSE = (1 << 8), + CHOR_CLRDONE = (1 << 7), + CHOR_CLRRB = (1 << 6), + CHOR_CLRLC = (1 << 5), + CHOR_FRESET = (1 << 4), + CHOR_ABORT = (1 << 3), /* stop without emptying fifo */ + CHOR_STOP = (1 << 2), /* stop after emptying fifo */ + CHOR_CONT = (1 << 1), + CHOR_START = (1 << 0), + CHOR_PON = (CHOR_CLR_SEND_TC | CHOR_CLR_LPAUSE), +}; + +enum MITE_CHCR_bits { + CHCR_SET_DMA_IE = (1 << 31), + CHCR_CLR_DMA_IE = (1 << 30), + CHCR_SET_LINKP_IE = (1 << 29), + CHCR_CLR_LINKP_IE = (1 << 28), + CHCR_SET_SAR_IE = (1 << 27), + CHCR_CLR_SAR_IE = (1 << 26), + CHCR_SET_DONE_IE = (1 << 25), + CHCR_CLR_DONE_IE = (1 << 24), + CHCR_SET_MRDY_IE = (1 << 23), + CHCR_CLR_MRDY_IE = (1 << 22), + CHCR_SET_DRDY_IE = (1 << 21), + CHCR_CLR_DRDY_IE = (1 << 20), + CHCR_SET_LC_IE = (1 << 19), + CHCR_CLR_LC_IE = (1 << 18), + CHCR_SET_CONT_RB_IE = (1 << 17), + CHCR_CLR_CONT_RB_IE = (1 << 16), + CHCR_FIFODIS = (1 << 15), + CHCR_FIFO_ON = 0, + CHCR_BURSTEN = (1 << 14), + CHCR_NO_BURSTEN = 0, + CHCR_BYTE_SWAP_DEVICE = (1 << 6), + CHCR_BYTE_SWAP_MEMORY = (1 << 4), + CHCR_DIR = (1 << 3), + CHCR_DEV_TO_MEM = CHCR_DIR, + CHCR_MEM_TO_DEV = 0, + CHCR_NORMAL = (0 << 0), + CHCR_CONTINUE = (1 << 0), + CHCR_RINGBUFF = (2 << 0), + CHCR_LINKSHORT = (4 << 0), + CHCR_LINKLONG = (5 << 0), + CHCRPON = + (CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE | + CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE | + CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE), +}; + +enum ConfigRegister_bits { + CR_REQS_MASK = 0x7 << 16, + CR_ASEQDONT = 0x0 << 10, + CR_ASEQUP = 0x1 << 10, + CR_ASEQDOWN = 0x2 << 10, + CR_ASEQ_MASK = 0x3 << 10, + CR_PSIZE8 = (1 << 8), + CR_PSIZE16 = (2 << 8), + CR_PSIZE32 = (3 << 8), + CR_PORTCPU = (0 << 
6), + CR_PORTIO = (1 << 6), + CR_PORTVXI = (2 << 6), + CR_PORTMXI = (3 << 6), + CR_AMDEVICE = (1 << 0), +}; +static inline int CR_REQS(int source) +{ + return (source & 0x7) << 16; +}; +static inline int CR_REQSDRQ(unsigned drq_line) +{ + /* This also works on m-series when + using channels (drq_line) 4 or 5. */ + return CR_REQS((drq_line & 0x3) | 0x4); +} +static inline int CR_RL(unsigned int retry_limit) +{ + int value = 0; + + while (retry_limit) { + retry_limit >>= 1; + value++; + } + if (value > 0x7) + __a4l_err("bug! retry_limit too large\n"); + + return (value & 0x7) << 21; +} + +enum CHSR_bits { + CHSR_INT = (1 << 31), + CHSR_LPAUSES = (1 << 29), + CHSR_SARS = (1 << 27), + CHSR_DONE = (1 << 25), + CHSR_MRDY = (1 << 23), + CHSR_DRDY = (1 << 21), + CHSR_LINKC = (1 << 19), + CHSR_CONTS_RB = (1 << 17), + CHSR_ERROR = (1 << 15), + CHSR_SABORT = (1 << 14), + CHSR_HABORT = (1 << 13), + CHSR_STOPS = (1 << 12), + CHSR_OPERR_mask = (3 << 10), + CHSR_OPERR_NOERROR = (0 << 10), + CHSR_OPERR_FIFOERROR = (1 << 10), + CHSR_OPERR_LINKERROR = (1 << 10), /* ??? */ + CHSR_XFERR = (1 << 9), + CHSR_END = (1 << 8), + CHSR_DRQ1 = (1 << 7), + CHSR_DRQ0 = (1 << 6), + CHSR_LxERR_mask = (3 << 4), + CHSR_LBERR = (1 << 4), + CHSR_LRERR = (2 << 4), + CHSR_LOERR = (3 << 4), + CHSR_MxERR_mask = (3 << 2), + CHSR_MBERR = (1 << 2), + CHSR_MRERR = (2 << 2), + CHSR_MOERR = (3 << 2), + CHSR_DxERR_mask = (3 << 0), + CHSR_DBERR = (1 << 0), + CHSR_DRERR = (2 << 0), + CHSR_DOERR = (3 << 0), +}; + +static inline void mite_dma_reset(struct mite_channel *mite_chan) +{ + writel(CHOR_DMARESET | CHOR_FRESET, + mite_chan->mite->mite_io_addr + MITE_CHOR(mite_chan->channel)); +}; + +#endif /* !__ANALOGY_NI_MITE_H__ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c new file mode 100644 index 0000000..40a4b26 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c @@ -0,0 +1,1481 @@ +/* + * comedi/drivers/ni_660x.c + * Hardware driver for NI 660x devices + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + * Driver: ni_660x + * Description: National Instruments 660x counter/timer boards + * Devices: + * [National Instruments] PCI-6601 (ni_660x), PCI-6602, PXI-6602, + * PXI-6608 + * Author: J.P. Mellor <jpmellor@rose-hulman.edu>, + * Herman.Bruyninckx@mech.kuleuven.ac.be, + * Wim.Meeussen@mech.kuleuven.ac.be, + * Klaas.Gadeyne@mech.kuleuven.ac.be, + * Frank Mori Hess <fmhess@users.sourceforge.net> + * Updated: Thu Oct 18 12:56:06 EDT 2007 + * Status: experimental + + * Encoders work. PulseGeneration (both single pulse and pulse train) + * works. Buffered commands work for input but not output. 
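+ * + * Illustrative sketch (editorial addition, not from the original notes): + * assuming the board is attached as "analogy0" and the first counter + * subdevice has index 2 (NI_660X_GPCT_SUBDEV_0 below), a one-shot + * synchronous read of counter channel 0 through libanalogy could be: + * + * a4l_desc_t dsc; + * unsigned int counter_value; + * if (a4l_open(&dsc, "analogy0") == 0) { + * a4l_sync_read(&dsc, 2, 0, 0, &counter_value, sizeof(counter_value)); + * a4l_close(&dsc); + * }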
+ + * References: + * DAQ 660x Register-Level Programmer Manual (NI 370505A-01) + * DAQ 6601/6602 User Manual (NI 322137B-01) + */ + +/* + * Integration with Xenomai/Analogy layer based on the + * comedi driver. Adaptation made by + * Julien Delange <julien.delange@esa.int> + */ + +#include <linux/interrupt.h> + +#include <linux/module.h> +#include <rtdm/analogy/device.h> + +#include "../intel/8255.h" +#include "ni_stc.h" +#include "ni_mio.h" +#include "ni_tio.h" +#include "mite.h" + +enum io_direction { + DIRECTION_INPUT = 0, + DIRECTION_OUTPUT = 1, + DIRECTION_OPENDRAIN = 2 +}; + + +enum ni_660x_constants { + min_counter_pfi_chan = 8, + max_dio_pfi_chan = 31, + counters_per_chip = 4 +}; + +struct ni_660x_subd_priv { + int io_bits; + unsigned int state; + uint16_t readback[2]; + uint16_t config; + struct ni_gpct* counter; +}; + +#define NUM_PFI_CHANNELS 40 +/* Really there are only up to 3 dma channels, but the register layout + allows for 4 */ +#define MAX_DMA_CHANNEL 4 + +static struct a4l_channels_desc chandesc_ni660x = { + .mode = A4L_CHAN_GLOBAL_CHANDESC, + .length = NUM_PFI_CHANNELS, + .chans = { + {A4L_CHAN_AREF_GROUND, sizeof(sampl_t)}, + }, +}; + +#define subdev_priv ((struct ni_660x_subd_priv*)s->priv) + +/* See Register-Level Programmer Manual page 3.1 */ +enum NI_660x_Register { + G0InterruptAcknowledge, + G0StatusRegister, + G1InterruptAcknowledge, + G1StatusRegister, + G01StatusRegister, + G0CommandRegister, + STCDIOParallelInput, + G1CommandRegister, + G0HWSaveRegister, + G1HWSaveRegister, + STCDIOOutput, + STCDIOControl, + G0SWSaveRegister, + G1SWSaveRegister, + G0ModeRegister, + G01JointStatus1Register, + G1ModeRegister, + STCDIOSerialInput, + G0LoadARegister, + G01JointStatus2Register, + G0LoadBRegister, + G1LoadARegister, + G1LoadBRegister, + G0InputSelectRegister, + G1InputSelectRegister, + G0AutoincrementRegister, + G1AutoincrementRegister, + G01JointResetRegister, + G0InterruptEnable, + G1InterruptEnable, + G0CountingModeRegister, + G1CountingModeRegister, + G0SecondGateRegister, + G1SecondGateRegister, + G0DMAConfigRegister, + G0DMAStatusRegister, + G1DMAConfigRegister, + G1DMAStatusRegister, + G2InterruptAcknowledge, + G2StatusRegister, + G3InterruptAcknowledge, + G3StatusRegister, + G23StatusRegister, + G2CommandRegister, + G3CommandRegister, + G2HWSaveRegister, + G3HWSaveRegister, + G2SWSaveRegister, + G3SWSaveRegister, + G2ModeRegister, + G23JointStatus1Register, + G3ModeRegister, + G2LoadARegister, + G23JointStatus2Register, + G2LoadBRegister, + G3LoadARegister, + G3LoadBRegister, + G2InputSelectRegister, + G3InputSelectRegister, + G2AutoincrementRegister, + G3AutoincrementRegister, + G23JointResetRegister, + G2InterruptEnable, + G3InterruptEnable, + G2CountingModeRegister, + G3CountingModeRegister, + G3SecondGateRegister, + G2SecondGateRegister, + G2DMAConfigRegister, + G2DMAStatusRegister, + G3DMAConfigRegister, + G3DMAStatusRegister, + DIO32Input, + DIO32Output, + ClockConfigRegister, + GlobalInterruptStatusRegister, + DMAConfigRegister, + GlobalInterruptConfigRegister, + IOConfigReg0_1, + IOConfigReg2_3, + IOConfigReg4_5, + IOConfigReg6_7, + IOConfigReg8_9, + IOConfigReg10_11, + IOConfigReg12_13, + IOConfigReg14_15, + IOConfigReg16_17, + IOConfigReg18_19, + IOConfigReg20_21, + IOConfigReg22_23, + IOConfigReg24_25, + IOConfigReg26_27, + IOConfigReg28_29, + IOConfigReg30_31, + IOConfigReg32_33, + IOConfigReg34_35, + IOConfigReg36_37, + IOConfigReg38_39, + NumRegisters, +}; + +static inline unsigned IOConfigReg(unsigned pfi_channel) +{ + unsigned reg = 
IOConfigReg0_1 + pfi_channel / 2; + BUG_ON(reg > IOConfigReg38_39); + return reg; +} + +enum ni_660x_register_width { + DATA_1B, + DATA_2B, + DATA_4B +}; + +enum ni_660x_register_direction { + NI_660x_READ, + NI_660x_WRITE, + NI_660x_READ_WRITE +}; + +enum ni_660x_pfi_output_select { + pfi_output_select_high_Z = 0, + pfi_output_select_counter = 1, + pfi_output_select_do = 2, + num_pfi_output_selects +}; + +enum ni_660x_subdevices { + NI_660X_DIO_SUBDEV = 1, + NI_660X_GPCT_SUBDEV_0 = 2 +}; + +static inline unsigned NI_660X_GPCT_SUBDEV(unsigned index) +{ + return NI_660X_GPCT_SUBDEV_0 + index; +} + +struct NI_660xRegisterData { + + const char *name; /* Register Name */ + int offset; /* Offset from base address from GPCT chip */ + enum ni_660x_register_direction direction; + enum ni_660x_register_width size; /* 1 byte, 2 bytes, or 4 bytes */ +}; + +static const struct NI_660xRegisterData registerData[NumRegisters] = { + {"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B}, + {"G0 Status Register", 0x004, NI_660x_READ, DATA_2B}, + {"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B}, + {"G1 Status Register", 0x006, NI_660x_READ, DATA_2B}, + {"G01 Status Register ", 0x008, NI_660x_READ, DATA_2B}, + {"G0 Command Register", 0x00C, NI_660x_WRITE, DATA_2B}, + {"STC DIO Parallel Input", 0x00E, NI_660x_READ, DATA_2B}, + {"G1 Command Register", 0x00E, NI_660x_WRITE, DATA_2B}, + {"G0 HW Save Register", 0x010, NI_660x_READ, DATA_4B}, + {"G1 HW Save Register", 0x014, NI_660x_READ, DATA_4B}, + {"STC DIO Output", 0x014, NI_660x_WRITE, DATA_2B}, + {"STC DIO Control", 0x016, NI_660x_WRITE, DATA_2B}, + {"G0 SW Save Register", 0x018, NI_660x_READ, DATA_4B}, + {"G1 SW Save Register", 0x01C, NI_660x_READ, DATA_4B}, + {"G0 Mode Register", 0x034, NI_660x_WRITE, DATA_2B}, + {"G01 Joint Status 1 Register", 0x036, NI_660x_READ, DATA_2B}, + {"G1 Mode Register", 0x036, NI_660x_WRITE, DATA_2B}, + {"STC DIO Serial Input", 0x038, NI_660x_READ, DATA_2B}, + {"G0 Load A Register", 0x038, NI_660x_WRITE, DATA_4B}, + {"G01 Joint Status 2 Register", 0x03A, NI_660x_READ, DATA_2B}, + {"G0 Load B Register", 0x03C, NI_660x_WRITE, DATA_4B}, + {"G1 Load A Register", 0x040, NI_660x_WRITE, DATA_4B}, + {"G1 Load B Register", 0x044, NI_660x_WRITE, DATA_4B}, + {"G0 Input Select Register", 0x048, NI_660x_WRITE, DATA_2B}, + {"G1 Input Select Register", 0x04A, NI_660x_WRITE, DATA_2B}, + {"G0 Autoincrement Register", 0x088, NI_660x_WRITE, DATA_2B}, + {"G1 Autoincrement Register", 0x08A, NI_660x_WRITE, DATA_2B}, + {"G01 Joint Reset Register", 0x090, NI_660x_WRITE, DATA_2B}, + {"G0 Interrupt Enable", 0x092, NI_660x_WRITE, DATA_2B}, + {"G1 Interrupt Enable", 0x096, NI_660x_WRITE, DATA_2B}, + {"G0 Counting Mode Register", 0x0B0, NI_660x_WRITE, DATA_2B}, + {"G1 Counting Mode Register", 0x0B2, NI_660x_WRITE, DATA_2B}, + {"G0 Second Gate Register", 0x0B4, NI_660x_WRITE, DATA_2B}, + {"G1 Second Gate Register", 0x0B6, NI_660x_WRITE, DATA_2B}, + {"G0 DMA Config Register", 0x0B8, NI_660x_WRITE, DATA_2B}, + {"G0 DMA Status Register", 0x0B8, NI_660x_READ, DATA_2B}, + {"G1 DMA Config Register", 0x0BA, NI_660x_WRITE, DATA_2B}, + {"G1 DMA Status Register", 0x0BA, NI_660x_READ, DATA_2B}, + {"G2 Interrupt Acknowledge", 0x104, NI_660x_WRITE, DATA_2B}, + {"G2 Status Register", 0x104, NI_660x_READ, DATA_2B}, + {"G3 Interrupt Acknowledge", 0x106, NI_660x_WRITE, DATA_2B}, + {"G3 Status Register", 0x106, NI_660x_READ, DATA_2B}, + {"G23 Status Register", 0x108, NI_660x_READ, DATA_2B}, + {"G2 Command Register", 0x10C, NI_660x_WRITE, DATA_2B}, + {"G3 
Command Register", 0x10E, NI_660x_WRITE, DATA_2B}, + {"G2 HW Save Register", 0x110, NI_660x_READ, DATA_4B}, + {"G3 HW Save Register", 0x114, NI_660x_READ, DATA_4B}, + {"G2 SW Save Register", 0x118, NI_660x_READ, DATA_4B}, + {"G3 SW Save Register", 0x11C, NI_660x_READ, DATA_4B}, + {"G2 Mode Register", 0x134, NI_660x_WRITE, DATA_2B}, + {"G23 Joint Status 1 Register", 0x136, NI_660x_READ, DATA_2B}, + {"G3 Mode Register", 0x136, NI_660x_WRITE, DATA_2B}, + {"G2 Load A Register", 0x138, NI_660x_WRITE, DATA_4B}, + {"G23 Joint Status 2 Register", 0x13A, NI_660x_READ, DATA_2B}, + {"G2 Load B Register", 0x13C, NI_660x_WRITE, DATA_4B}, + {"G3 Load A Register", 0x140, NI_660x_WRITE, DATA_4B}, + {"G3 Load B Register", 0x144, NI_660x_WRITE, DATA_4B}, + {"G2 Input Select Register", 0x148, NI_660x_WRITE, DATA_2B}, + {"G3 Input Select Register", 0x14A, NI_660x_WRITE, DATA_2B}, + {"G2 Autoincrement Register", 0x188, NI_660x_WRITE, DATA_2B}, + {"G3 Autoincrement Register", 0x18A, NI_660x_WRITE, DATA_2B}, + {"G23 Joint Reset Register", 0x190, NI_660x_WRITE, DATA_2B}, + {"G2 Interrupt Enable", 0x192, NI_660x_WRITE, DATA_2B}, + {"G3 Interrupt Enable", 0x196, NI_660x_WRITE, DATA_2B}, + {"G2 Counting Mode Register", 0x1B0, NI_660x_WRITE, DATA_2B}, + {"G3 Counting Mode Register", 0x1B2, NI_660x_WRITE, DATA_2B}, + {"G3 Second Gate Register", 0x1B6, NI_660x_WRITE, DATA_2B}, + {"G2 Second Gate Register", 0x1B4, NI_660x_WRITE, DATA_2B}, + {"G2 DMA Config Register", 0x1B8, NI_660x_WRITE, DATA_2B}, + {"G2 DMA Status Register", 0x1B8, NI_660x_READ, DATA_2B}, + {"G3 DMA Config Register", 0x1BA, NI_660x_WRITE, DATA_2B}, + {"G3 DMA Status Register", 0x1BA, NI_660x_READ, DATA_2B}, + {"32 bit Digital Input", 0x414, NI_660x_READ, DATA_4B}, + {"32 bit Digital Output", 0x510, NI_660x_WRITE, DATA_4B}, + {"Clock Config Register", 0x73C, NI_660x_WRITE, DATA_4B}, + {"Global Interrupt Status Register", 0x754, NI_660x_READ, DATA_4B}, + {"DMA Configuration Register", 0x76C, NI_660x_WRITE, DATA_4B}, + {"Global Interrupt Config Register", 0x770, NI_660x_WRITE, DATA_4B}, + {"IO Config Register 0-1", 0x77C, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 2-3", 0x77E, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 4-5", 0x780, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 6-7", 0x782, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 8-9", 0x784, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 10-11", 0x786, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 12-13", 0x788, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 14-15", 0x78A, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 16-17", 0x78C, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 18-19", 0x78E, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 20-21", 0x790, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 22-23", 0x792, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 24-25", 0x794, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 26-27", 0x796, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 28-29", 0x798, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 30-31", 0x79A, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 32-33", 0x79C, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 34-35", 0x79E, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 36-37", 0x7A0, NI_660x_READ_WRITE, DATA_2B}, + {"IO Config Register 38-39", 0x7A2, NI_660x_READ_WRITE, DATA_2B} +}; + +/* kind of ENABLE for the second counter */ +enum clock_config_register_bits { + CounterSwap = 0x1 << 21 +}; + +/* ioconfigreg */ +static 
inline unsigned ioconfig_bitshift(unsigned pfi_channel) +{ + if (pfi_channel % 2) + return 0; + else + return 8; +} + +static inline unsigned pfi_output_select_mask(unsigned pfi_channel) +{ + return 0x3 << ioconfig_bitshift(pfi_channel); +} + +static inline unsigned pfi_output_select_bits(unsigned pfi_channel, + unsigned output_select) +{ + return (output_select & 0x3) << ioconfig_bitshift(pfi_channel); +} + +static inline unsigned pfi_input_select_mask(unsigned pfi_channel) +{ + return 0x7 << (4 + ioconfig_bitshift(pfi_channel)); +} + +static inline unsigned pfi_input_select_bits(unsigned pfi_channel, + unsigned input_select) +{ + return (input_select & 0x7) << (4 + ioconfig_bitshift(pfi_channel)); +} + +/* DMA configuration register bits */ +static inline unsigned dma_select_mask(unsigned dma_channel) +{ + BUG_ON(dma_channel >= MAX_DMA_CHANNEL); + return 0x1f << (8 * dma_channel); +} + +enum dma_selection { + dma_selection_none = 0x1f, +}; + +static inline unsigned dma_selection_counter(unsigned counter_index) +{ + BUG_ON(counter_index >= counters_per_chip); + return counter_index; +} + +static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection) +{ + BUG_ON(dma_channel >= MAX_DMA_CHANNEL); + return (selection << (8 * dma_channel)) & dma_select_mask(dma_channel); +} + +static inline unsigned dma_reset_bit(unsigned dma_channel) +{ + BUG_ON(dma_channel >= MAX_DMA_CHANNEL); + return 0x80 << (8 * dma_channel); +} + +enum global_interrupt_status_register_bits { + Counter_0_Int_Bit = 0x100, + Counter_1_Int_Bit = 0x200, + Counter_2_Int_Bit = 0x400, + Counter_3_Int_Bit = 0x800, + Cascade_Int_Bit = 0x20000000, + Global_Int_Bit = 0x80000000 +}; + +enum global_interrupt_config_register_bits { + Cascade_Int_Enable_Bit = 0x20000000, + Global_Int_Polarity_Bit = 0x40000000, + Global_Int_Enable_Bit = 0x80000000 +}; + +/* Offset of the GPCT chips from the base-address of the card: + First chip is at base-address +0x00, etc. 
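Worked example (editorial): combining GPCT_OFFSET[] below with the registerData[] map above, the G2 Command Register (offset 0x10C) of the second TIO chip lives at daq_io_addr + 0x800 + 0x10C = daq_io_addr + 0x90C; cf. ni_660x_write_register(). 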
*/ +static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 }; + +/* Board description */ +struct ni_660x_board { + unsigned short dev_id; /* `lspci` will show you this */ + const char *name; + unsigned n_chips; /* total number of TIO chips */ +}; + +static const struct ni_660x_board ni_660x_boards[] = { + { + .dev_id = 0x2c60, + .name = "PCI-6601", + .n_chips = 1, + }, + { + .dev_id = 0x1310, + .name = "PCI-6602", + .n_chips = 2, + }, + { + .dev_id = 0x1360, + .name = "PXI-6602", + .n_chips = 2, + }, + { + .dev_id = 0x2cc0, + .name = "PXI-6608", + .n_chips = 2, + }, +}; + +#define NI_660X_MAX_NUM_CHIPS 2 +#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip) + +static const struct pci_device_id ni_660x_pci_table[] = { + { + PCI_VENDOR_ID_NATINST, 0x2c60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { + PCI_VENDOR_ID_NATINST, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { + PCI_VENDOR_ID_NATINST, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { + PCI_VENDOR_ID_NATINST, 0x2cc0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { + 0} +}; + +MODULE_DEVICE_TABLE(pci, ni_660x_pci_table); + +struct ni_660x_private { + struct mite_struct *mite; + struct ni_gpct_device *counter_dev; + uint64_t pfi_direction_bits; + + struct mite_dma_descriptor_ring + *mite_rings[NI_660X_MAX_NUM_CHIPS][counters_per_chip]; + + rtdm_lock_t mite_channel_lock; + /* Interrupt_lock prevents races between interrupt and + comedi_poll */ + rtdm_lock_t interrupt_lock; + unsigned int dma_configuration_soft_copies[NI_660X_MAX_NUM_CHIPS]; + rtdm_lock_t soft_reg_copy_lock; + unsigned short pfi_output_selects[NUM_PFI_CHANNELS]; + + struct ni_660x_board *board_ptr; +}; + +#undef devpriv +#define devpriv ((struct ni_660x_private *)dev->priv) + +static inline struct ni_660x_private *private(struct a4l_device *dev) +{ + return (struct ni_660x_private*) dev->priv; +} + +/* Initialized in ni_660x_find_device() */ +static inline const struct ni_660x_board *board(struct a4l_device *dev) +{ + return ((struct ni_660x_private*)dev->priv)->board_ptr; +} + +#define n_ni_660x_boards ARRAY_SIZE(ni_660x_boards) + +static int ni_660x_attach(struct a4l_device *dev, + a4l_lnkdesc_t *arg); +static int ni_660x_detach(struct a4l_device *dev); +static void init_tio_chip(struct a4l_device *dev, int chipset); +static void ni_660x_select_pfi_output(struct a4l_device *dev, + unsigned pfi_channel, + unsigned output_select); + +static struct a4l_driver ni_660x_drv = { + .board_name = "analogy_ni_660x", + .driver_name = "ni_660x", + .owner = THIS_MODULE, + .attach = ni_660x_attach, + .detach = ni_660x_detach, + .privdata_size = sizeof(struct ni_660x_private), +}; + +static int ni_660x_set_pfi_routing(struct a4l_device *dev, unsigned chan, + unsigned source); + +/* Possible instructions for a GPCT */ +static int ni_660x_GPCT_rinsn( + struct a4l_subdevice *s, + struct a4l_kernel_instruction *insn); +static int ni_660x_GPCT_insn_config( + struct a4l_subdevice *s, + struct a4l_kernel_instruction *insn); +static int ni_660x_GPCT_winsn( + struct a4l_subdevice *s, + struct a4l_kernel_instruction *insn); + +/* Possible instructions for Digital IO */ +static int ni_660x_dio_insn_config( + struct a4l_subdevice *s, + struct a4l_kernel_instruction *insn); +static int ni_660x_dio_insn_bits( + struct a4l_subdevice *s, + struct a4l_kernel_instruction *insn); + +static inline unsigned ni_660x_num_counters(struct a4l_device *dev) +{ + return board(dev)->n_chips * counters_per_chip; +} + +static enum NI_660x_Register ni_gpct_to_660x_register(enum ni_gpct_register reg) +{ + + enum 
NI_660x_Register ni_660x_register; + switch (reg) { + case NITIO_G0_Autoincrement_Reg: + ni_660x_register = G0AutoincrementRegister; + break; + case NITIO_G1_Autoincrement_Reg: + ni_660x_register = G1AutoincrementRegister; + break; + case NITIO_G2_Autoincrement_Reg: + ni_660x_register = G2AutoincrementRegister; + break; + case NITIO_G3_Autoincrement_Reg: + ni_660x_register = G3AutoincrementRegister; + break; + case NITIO_G0_Command_Reg: + ni_660x_register = G0CommandRegister; + break; + case NITIO_G1_Command_Reg: + ni_660x_register = G1CommandRegister; + break; + case NITIO_G2_Command_Reg: + ni_660x_register = G2CommandRegister; + break; + case NITIO_G3_Command_Reg: + ni_660x_register = G3CommandRegister; + break; + case NITIO_G0_HW_Save_Reg: + ni_660x_register = G0HWSaveRegister; + break; + case NITIO_G1_HW_Save_Reg: + ni_660x_register = G1HWSaveRegister; + break; + case NITIO_G2_HW_Save_Reg: + ni_660x_register = G2HWSaveRegister; + break; + case NITIO_G3_HW_Save_Reg: + ni_660x_register = G3HWSaveRegister; + break; + case NITIO_G0_SW_Save_Reg: + ni_660x_register = G0SWSaveRegister; + break; + case NITIO_G1_SW_Save_Reg: + ni_660x_register = G1SWSaveRegister; + break; + case NITIO_G2_SW_Save_Reg: + ni_660x_register = G2SWSaveRegister; + break; + case NITIO_G3_SW_Save_Reg: + ni_660x_register = G3SWSaveRegister; + break; + case NITIO_G0_Mode_Reg: + ni_660x_register = G0ModeRegister; + break; + case NITIO_G1_Mode_Reg: + ni_660x_register = G1ModeRegister; + break; + case NITIO_G2_Mode_Reg: + ni_660x_register = G2ModeRegister; + break; + case NITIO_G3_Mode_Reg: + ni_660x_register = G3ModeRegister; + break; + case NITIO_G0_LoadA_Reg: + ni_660x_register = G0LoadARegister; + break; + case NITIO_G1_LoadA_Reg: + ni_660x_register = G1LoadARegister; + break; + case NITIO_G2_LoadA_Reg: + ni_660x_register = G2LoadARegister; + break; + case NITIO_G3_LoadA_Reg: + ni_660x_register = G3LoadARegister; + break; + case NITIO_G0_LoadB_Reg: + ni_660x_register = G0LoadBRegister; + break; + case NITIO_G1_LoadB_Reg: + ni_660x_register = G1LoadBRegister; + break; + case NITIO_G2_LoadB_Reg: + ni_660x_register = G2LoadBRegister; + break; + case NITIO_G3_LoadB_Reg: + ni_660x_register = G3LoadBRegister; + break; + case NITIO_G0_Input_Select_Reg: + ni_660x_register = G0InputSelectRegister; + break; + case NITIO_G1_Input_Select_Reg: + ni_660x_register = G1InputSelectRegister; + break; + case NITIO_G2_Input_Select_Reg: + ni_660x_register = G2InputSelectRegister; + break; + case NITIO_G3_Input_Select_Reg: + ni_660x_register = G3InputSelectRegister; + break; + case NITIO_G01_Status_Reg: + ni_660x_register = G01StatusRegister; + break; + case NITIO_G23_Status_Reg: + ni_660x_register = G23StatusRegister; + break; + case NITIO_G01_Joint_Reset_Reg: + ni_660x_register = G01JointResetRegister; + break; + case NITIO_G23_Joint_Reset_Reg: + ni_660x_register = G23JointResetRegister; + break; + case NITIO_G01_Joint_Status1_Reg: + ni_660x_register = G01JointStatus1Register; + break; + case NITIO_G23_Joint_Status1_Reg: + ni_660x_register = G23JointStatus1Register; + break; + case NITIO_G01_Joint_Status2_Reg: + ni_660x_register = G01JointStatus2Register; + break; + case NITIO_G23_Joint_Status2_Reg: + ni_660x_register = G23JointStatus2Register; + break; + case NITIO_G0_Counting_Mode_Reg: + ni_660x_register = G0CountingModeRegister; + break; + case NITIO_G1_Counting_Mode_Reg: + ni_660x_register = G1CountingModeRegister; + break; + case NITIO_G2_Counting_Mode_Reg: + ni_660x_register = G2CountingModeRegister; + break; + case 
NITIO_G3_Counting_Mode_Reg: + ni_660x_register = G3CountingModeRegister; + break; + case NITIO_G0_Second_Gate_Reg: + ni_660x_register = G0SecondGateRegister; + break; + case NITIO_G1_Second_Gate_Reg: + ni_660x_register = G1SecondGateRegister; + break; + case NITIO_G2_Second_Gate_Reg: + ni_660x_register = G2SecondGateRegister; + break; + case NITIO_G3_Second_Gate_Reg: + ni_660x_register = G3SecondGateRegister; + break; + case NITIO_G0_DMA_Config_Reg: + ni_660x_register = G0DMAConfigRegister; + break; + case NITIO_G0_DMA_Status_Reg: + ni_660x_register = G0DMAStatusRegister; + break; + case NITIO_G1_DMA_Config_Reg: + ni_660x_register = G1DMAConfigRegister; + break; + case NITIO_G1_DMA_Status_Reg: + ni_660x_register = G1DMAStatusRegister; + break; + case NITIO_G2_DMA_Config_Reg: + ni_660x_register = G2DMAConfigRegister; + break; + case NITIO_G2_DMA_Status_Reg: + ni_660x_register = G2DMAStatusRegister; + break; + case NITIO_G3_DMA_Config_Reg: + ni_660x_register = G3DMAConfigRegister; + break; + case NITIO_G3_DMA_Status_Reg: + ni_660x_register = G3DMAStatusRegister; + break; + case NITIO_G0_Interrupt_Acknowledge_Reg: + ni_660x_register = G0InterruptAcknowledge; + break; + case NITIO_G1_Interrupt_Acknowledge_Reg: + ni_660x_register = G1InterruptAcknowledge; + break; + case NITIO_G2_Interrupt_Acknowledge_Reg: + ni_660x_register = G2InterruptAcknowledge; + break; + case NITIO_G3_Interrupt_Acknowledge_Reg: + ni_660x_register = G3InterruptAcknowledge; + break; + case NITIO_G0_Status_Reg: + ni_660x_register = G0StatusRegister; + break; + case NITIO_G1_Status_Reg: + ni_660x_register = G1StatusRegister; + break; + case NITIO_G2_Status_Reg: + ni_660x_register = G2StatusRegister; + break; + case NITIO_G3_Status_Reg: + ni_660x_register = G3StatusRegister; + break; + case NITIO_G0_Interrupt_Enable_Reg: + ni_660x_register = G0InterruptEnable; + break; + case NITIO_G1_Interrupt_Enable_Reg: + ni_660x_register = G1InterruptEnable; + break; + case NITIO_G2_Interrupt_Enable_Reg: + ni_660x_register = G2InterruptEnable; + break; + case NITIO_G3_Interrupt_Enable_Reg: + ni_660x_register = G3InterruptEnable; + break; + default: + __a4l_err("%s: unhandled register 0x%x in switch.\n", + __FUNCTION__, reg); + BUG(); + return 0; + break; + } + return ni_660x_register; +} + +static inline void ni_660x_write_register(struct a4l_device *dev, + unsigned chip_index, unsigned bits, + enum NI_660x_Register reg) +{ + void *const write_address = + private(dev)->mite->daq_io_addr + GPCT_OFFSET[chip_index] + + registerData[reg].offset; + + switch (registerData[reg].size) { + case DATA_2B: + writew(bits, write_address); + break; + case DATA_4B: + writel(bits, write_address); + break; + default: + __a4l_err("%s: %s: bug! unhandled case (reg=0x%x) in switch.\n", + __FILE__, __FUNCTION__, reg); + BUG(); + break; + } +} + +static inline unsigned ni_660x_read_register(struct a4l_device *dev, + unsigned chip_index, + enum NI_660x_Register reg) +{ + void *const read_address = + private(dev)->mite->daq_io_addr + GPCT_OFFSET[chip_index] + + registerData[reg].offset; + + switch (registerData[reg].size) { + case DATA_2B: + return readw(read_address); + break; + case DATA_4B: + return readl(read_address); + break; + default: + __a4l_err("%s: %s: bug! 
unhandled case (reg=0x%x) in switch.\n", + __FILE__, __FUNCTION__, reg); + BUG(); + break; + } + return 0; +} + +static void ni_gpct_write_register(struct ni_gpct *counter, + unsigned int bits, enum ni_gpct_register reg) +{ + struct a4l_device *dev = counter->counter_dev->dev; + enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg); + + ni_660x_write_register(dev, counter->chip_index, bits, + ni_660x_register); +} + +static unsigned ni_gpct_read_register(struct ni_gpct *counter, + enum ni_gpct_register reg) +{ + struct a4l_device *dev = counter->counter_dev->dev; + enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg); + + return ni_660x_read_register(dev, counter->chip_index, + ni_660x_register); +} + +static inline +struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private *priv, + struct ni_gpct *counter) +{ + + return priv->mite_rings[counter->chip_index][counter->counter_index]; +} + +static inline +void ni_660x_set_dma_channel(struct a4l_device *dev, + unsigned int mite_channel, struct ni_gpct *counter) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&private(dev)->soft_reg_copy_lock, flags); + private(dev)->dma_configuration_soft_copies[counter->chip_index] &= + ~dma_select_mask(mite_channel); + private(dev)->dma_configuration_soft_copies[counter->chip_index] |= + dma_select_bits(mite_channel, + dma_selection_counter(counter->counter_index)); + ni_660x_write_register(dev, counter->chip_index, + private(dev)-> + dma_configuration_soft_copies + [counter->chip_index] | + dma_reset_bit(mite_channel), DMAConfigRegister); + mmiowb(); + rtdm_lock_put_irqrestore(&private(dev)->soft_reg_copy_lock, flags); +} + +static inline +void ni_660x_unset_dma_channel(struct a4l_device *dev, + unsigned int mite_channel, + struct ni_gpct *counter) +{ + unsigned long flags; + rtdm_lock_get_irqsave(&private(dev)->soft_reg_copy_lock, flags); + private(dev)->dma_configuration_soft_copies[counter->chip_index] &= + ~dma_select_mask(mite_channel); + private(dev)->dma_configuration_soft_copies[counter->chip_index] |= + dma_select_bits(mite_channel, dma_selection_none); + ni_660x_write_register(dev, counter->chip_index, + private(dev)-> + dma_configuration_soft_copies + [counter->chip_index], DMAConfigRegister); + mmiowb(); + rtdm_lock_put_irqrestore(&private(dev)->soft_reg_copy_lock, flags); +} + +static int ni_660x_request_mite_channel(struct a4l_device *dev, + struct ni_gpct *counter, + enum io_direction direction) +{ + unsigned long flags; + struct mite_channel *mite_chan; + + rtdm_lock_get_irqsave(&private(dev)->mite_channel_lock, flags); + BUG_ON(counter->mite_chan); + mite_chan = mite_request_channel(private(dev)->mite, + mite_ring(private(dev), counter)); + if (mite_chan == NULL) { + rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags); + a4l_err(dev, + "%s: failed to reserve mite dma channel for counter.\n", + __FUNCTION__); + return -EBUSY; + } + mite_chan->dir = direction; + a4l_ni_tio_set_mite_channel(counter, mite_chan); + ni_660x_set_dma_channel(dev, mite_chan->channel, counter); + rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags); + return 0; +} + +void ni_660x_release_mite_channel(struct a4l_device *dev, + struct ni_gpct *counter) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&private(dev)->mite_channel_lock, flags); + if (counter->mite_chan) { + struct mite_channel *mite_chan = counter->mite_chan; + + ni_660x_unset_dma_channel(dev, mite_chan->channel, counter); + a4l_ni_tio_set_mite_channel(counter, NULL); + 
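/* Descriptive note (editorial): the DMA selector is routed back to + "none" and the counter's channel pointer cleared before the channel + itself is handed back, so nothing is left referencing a stale mite + channel. */ + 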
a4l_mite_release_channel(mite_chan); + } + rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags); +} + +static int ni_660x_cmd(struct a4l_subdevice *s, struct a4l_cmd_desc* cmd) +{ + int retval; + + struct ni_gpct *counter = subdev_priv->counter; + + retval = ni_660x_request_mite_channel(s->dev, counter, A4L_INPUT); + if (retval) { + a4l_err(s->dev, + "%s: no dma channel available for use by counter\n", + __FUNCTION__); + return retval; + } + + a4l_ni_tio_acknowledge_and_confirm (counter, NULL, NULL, NULL, NULL); + retval = a4l_ni_tio_cmd(counter, cmd); + + return retval; +} + +static int ni_660x_cmdtest(struct a4l_subdevice *s, struct a4l_cmd_desc *cmd) +{ + struct ni_gpct *counter = subdev_priv->counter; + return a4l_ni_tio_cmdtest(counter, cmd); +} + +static int ni_660x_cancel(struct a4l_subdevice *s) +{ + struct ni_gpct *counter = subdev_priv->counter; + int retval; + + retval = a4l_ni_tio_cancel(counter); + ni_660x_release_mite_channel(s->dev, counter); + return retval; +} + +static void set_tio_counterswap(struct a4l_device *dev, int chipset) +{ + /* See P. 3.5 of the Register-Level Programming manual. The + CounterSwap bit has to be set on the second chip, otherwise + it will try to use the same pins as the first chip. + */ + + if (chipset) + ni_660x_write_register(dev, + chipset, + CounterSwap, ClockConfigRegister); + else + ni_660x_write_register(dev, + chipset, 0, ClockConfigRegister); +} + +static void ni_660x_handle_gpct_interrupt(struct a4l_device *dev, + struct a4l_subdevice *s) +{ + struct a4l_buffer *buf = s->buf; + + a4l_ni_tio_handle_interrupt(subdev_priv->counter, dev); + if (test_bit(A4L_BUF_EOA_NR, &buf->flags) || + test_bit(A4L_BUF_ERROR_NR, &buf->flags)) + ni_660x_cancel(s); + else + a4l_buf_evt(s, 0); +} + +static int ni_660x_interrupt(unsigned int irq, void *d) +{ + struct a4l_device *dev = d; + struct list_head *this; + unsigned long flags; + + if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) + return -ENOENT; + + /* Lock to avoid race with comedi_poll */ + rtdm_lock_get_irqsave(&private(dev)->interrupt_lock, flags); + smp_mb(); + + list_for_each(this, &dev->subdvsq) { + struct a4l_subdevice *tmp = list_entry(this, struct a4l_subdevice, list); + ni_660x_handle_gpct_interrupt(dev, tmp); + } + + rtdm_lock_put_irqrestore(&private(dev)->interrupt_lock, flags); + return 0; +} + +static int ni_660x_alloc_mite_rings(struct a4l_device *dev) +{ + unsigned int i; + unsigned int j; + + for (i = 0; i < board(dev)->n_chips; ++i) { + for (j = 0; j < counters_per_chip; ++j) { + private(dev)->mite_rings[i][j] = + mite_alloc_ring(private(dev)->mite); + if (private(dev)->mite_rings[i][j] == NULL) + return -ENOMEM; + } + } + + return 0; +} + +static void ni_660x_free_mite_rings(struct a4l_device *dev) +{ + unsigned int i; + unsigned int j; + + for (i = 0; i < board(dev)->n_chips; ++i) + for (j = 0; j < counters_per_chip; ++j) + mite_free_ring(private(dev)->mite_rings[i][j]); +} + + +static int __init driver_ni_660x_init_module(void) +{ + return a4l_register_drv (&ni_660x_drv); +} + +static void __exit driver_ni_660x_cleanup_module(void) +{ + a4l_unregister_drv (&ni_660x_drv); +} + +module_init(driver_ni_660x_init_module); +module_exit(driver_ni_660x_cleanup_module); + +static int ni_660x_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg) +{ + struct a4l_subdevice *s; + int ret; + int err; + int bus, slot; + unsigned i; + int nsubdev = 0; + unsigned global_interrupt_config_bits; + struct mite_struct 
*mitedev; + struct ni_660x_board* boardptr = NULL; + + ret = 0; + bus = slot = 0; + mitedev = NULL; + nsubdev = 0; + + if(arg->opts == NULL || arg->opts_size == 0) + bus = slot = 0; + else { + bus = arg->opts_size >= sizeof(unsigned long) ? + ((unsigned long *)arg->opts)[0] : 0; + slot = arg->opts_size >= sizeof(unsigned long) * 2 ? + ((unsigned long *)arg->opts)[1] : 0; + } + + for (i = 0; ( i < n_ni_660x_boards ) && ( mitedev == NULL ); i++) { + mitedev = a4l_mite_find_device(bus, slot, + ni_660x_boards[i].dev_id); + boardptr = (struct ni_660x_board*) &ni_660x_boards[i]; + } + + + if(mitedev == NULL) { + a4l_info(dev, "mite device not found\n"); + return -ENOENT; + } + + a4l_info(dev, "Board found (name=%s), continuing initialization ...\n", + boardptr->name); + + private(dev)->mite = mitedev; + private(dev)->board_ptr = boardptr; + + rtdm_lock_init(&private(dev)->mite_channel_lock); + rtdm_lock_init(&private(dev)->interrupt_lock); + rtdm_lock_init(&private(dev)->soft_reg_copy_lock); + for (i = 0; i < NUM_PFI_CHANNELS; ++i) { + private(dev)->pfi_output_selects[i] = pfi_output_select_counter; + } + + ret = a4l_mite_setup(private(dev)->mite, 1); + if (ret < 0) { + a4l_err(dev, "%s: error setting up mite\n", __FUNCTION__); + return ret; + } + + ret = ni_660x_alloc_mite_rings(dev); + if (ret < 0) { + a4l_err(dev, "%s: error setting up mite rings\n", __FUNCTION__); + return ret; + } + + /* Setup first subdevice */ + s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL); + if (s == NULL) + return -ENOMEM; + + s->flags = A4L_SUBD_UNUSED; + + err = a4l_add_subd(dev, s); + if (err != nsubdev) { + a4l_info(dev, "cannot add first subdevice, returns %d, expect %d\n", err, nsubdev); + return err; + } + + nsubdev++; + + /* Setup second subdevice */ + s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL); + if (s == NULL) { + a4l_info(dev, "cannot allocate second subdevice\n"); + return -ENOMEM; + } + + s->flags = A4L_SUBD_DIO; + s->flags |= A4L_SUBD_CMD; + s->chan_desc = &chandesc_ni660x; + s->rng_desc = &range_digital; + s->insn_bits = ni_660x_dio_insn_bits; + s->insn_config = ni_660x_dio_insn_config; + s->dev = dev; + subdev_priv->io_bits = 0; + ni_660x_write_register(dev, 0, 0, STCDIOControl); + + err = a4l_add_subd(dev, s); + if (err != nsubdev) + return err; + + nsubdev++; + + private(dev)->counter_dev = + a4l_ni_gpct_device_construct(dev, + &ni_gpct_write_register, + &ni_gpct_read_register, + ni_gpct_variant_660x, + ni_660x_num_counters (dev)); + if (private(dev)->counter_dev == NULL) + return -ENOMEM; + + for (i = 0; i < ni_660x_num_counters(dev); ++i) { + /* TODO: check why there are kmalloc here... 
and in pcimio */ + private(dev)->counter_dev->counters[i] = + kmalloc(sizeof(struct ni_gpct), GFP_KERNEL); + private(dev)->counter_dev->counters[i]->counter_dev = + private(dev)->counter_dev; + rtdm_lock_init(&(private(dev)->counter_dev->counters[i]->lock)); + } + + for (i = 0; i < NI_660X_MAX_NUM_COUNTERS; ++i) { + if (i < ni_660x_num_counters(dev)) { + /* Setup other subdevice */ + s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL); + + if (s == NULL) + return -ENOMEM; + + s->flags = A4L_SUBD_COUNTER; + s->chan_desc = rtdm_malloc (sizeof (struct a4l_channels_desc)); + s->chan_desc->length = 3; + s->insn_read = ni_660x_GPCT_rinsn; + s->insn_write = ni_660x_GPCT_winsn; + s->insn_config = ni_660x_GPCT_insn_config; + s->do_cmd = &ni_660x_cmd; + s->do_cmdtest = &ni_660x_cmdtest; + s->cancel = &ni_660x_cancel; + + subdev_priv->counter = private(dev)->counter_dev->counters[i]; + + private(dev)->counter_dev->counters[i]->chip_index = + i / counters_per_chip; + private(dev)->counter_dev->counters[i]->counter_index = + i % counters_per_chip; + } else { + s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL); + if (s == NULL) + return -ENOMEM; + s->flags = A4L_SUBD_UNUSED; + } + + err = a4l_add_subd(dev, s); + + if (err != nsubdev) + return err; + + nsubdev++; + } + + for (i = 0; i < board(dev)->n_chips; ++i) + init_tio_chip(dev, i); + + for (i = 0; i < ni_660x_num_counters(dev); ++i) + a4l_ni_tio_init_counter(private(dev)->counter_dev->counters[i]); + + for (i = 0; i < NUM_PFI_CHANNELS; ++i) { + if (i < min_counter_pfi_chan) + ni_660x_set_pfi_routing(dev, i, pfi_output_select_do); + else + ni_660x_set_pfi_routing(dev, i, + pfi_output_select_counter); + ni_660x_select_pfi_output(dev, i, pfi_output_select_high_Z); + } + + + /* To be safe, set counterswap bits on tio chips after all the + counter outputs have been set to high impedance mode */ + + for (i = 0; i < board(dev)->n_chips; ++i) + set_tio_counterswap(dev, i); + + ret = a4l_request_irq(dev, + mite_irq(private(dev)->mite), + ni_660x_interrupt, RTDM_IRQTYPE_SHARED, dev); + + if (ret < 0) { + a4l_err(dev, "%s: IRQ not available\n", __FUNCTION__); + return ret; + } + + global_interrupt_config_bits = Global_Int_Enable_Bit; + if (board(dev)->n_chips > 1) + global_interrupt_config_bits |= Cascade_Int_Enable_Bit; + + ni_660x_write_register(dev, 0, global_interrupt_config_bits, + GlobalInterruptConfigRegister); + + a4l_info(dev, "attach succeed, ready to be used\n"); + + return 0; +} + +static int ni_660x_detach(struct a4l_device *dev) +{ + int i; + + a4l_info(dev, "begin to detach the driver ..."); + + /* Free irq */ + if(a4l_get_irq(dev)!=A4L_IRQ_UNUSED) + a4l_free_irq(dev,a4l_get_irq(dev)); + + if (dev->priv) { + + if (private(dev)->counter_dev) { + + for (i = 0; i < ni_660x_num_counters(dev); ++i) + if ((private(dev)->counter_dev->counters[i]) != NULL) + kfree (private(dev)->counter_dev->counters[i]); + + a4l_ni_gpct_device_destroy(private(dev)->counter_dev); + } + + if (private(dev)->mite) { + ni_660x_free_mite_rings(dev); + a4l_mite_unsetup(private(dev)->mite); + } + } + + a4l_info(dev, "driver detached !\n"); + + return 0; +} + +static int ni_660x_GPCT_rinsn(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn) +{ + return a4l_ni_tio_rinsn(subdev_priv->counter, insn); +} + +static void init_tio_chip(struct a4l_device *dev, int chipset) +{ + unsigned int i; + + /* Init dma configuration register */ + private(dev)->dma_configuration_soft_copies[chipset] = 0; + for (i = 0; i < MAX_DMA_CHANNEL; ++i) { + 
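/* Editorial comment: fill each per-channel DMA selector slot with + dma_selection_none (0x1f), i.e. no counter is routed until + ni_660x_set_dma_channel() is called for that slot. */ + 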
private(dev)->dma_configuration_soft_copies[chipset] |= + dma_select_bits(i, dma_selection_none) & dma_select_mask(i); + } + + ni_660x_write_register(dev, chipset, + private(dev)-> + dma_configuration_soft_copies[chipset], + DMAConfigRegister); + + for (i = 0; i < NUM_PFI_CHANNELS; ++i) + ni_660x_write_register(dev, chipset, 0, IOConfigReg(i)); +} + +static int ni_660x_GPCT_insn_config(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn) +{ + return a4l_ni_tio_insn_config (subdev_priv->counter, insn); +} + +static int ni_660x_GPCT_winsn(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn) +{ + return a4l_ni_tio_winsn(subdev_priv->counter, insn); +} + +static int ni_660x_dio_insn_bits(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn) +{ + unsigned int* data = (unsigned int*) insn->data; + unsigned int base_bitfield_channel = CR_CHAN(insn->chan_desc); + + /* Check if we have to write some bits */ + if (data[0]) { + subdev_priv->state &= ~(data[0] << base_bitfield_channel); + subdev_priv->state |= (data[0] & data[1]) << base_bitfield_channel; + /* Write out the new digital output lines */ + ni_660x_write_register(s->dev, 0, subdev_priv->state, DIO32Output); + } + + /* On return, data[1] contains the value of the digital input + and output lines. */ + data[1] = ni_660x_read_register(s->dev, 0,DIO32Input) >> + base_bitfield_channel; + + return 0; +} + +static void ni_660x_select_pfi_output(struct a4l_device *dev, + unsigned pfi_channel, + unsigned output_select) +{ + static const unsigned counter_4_7_first_pfi = 8; + static const unsigned counter_4_7_last_pfi = 23; + unsigned active_chipset = 0; + unsigned idle_chipset = 0; + unsigned active_bits; + unsigned idle_bits; + + if (board(dev)->n_chips > 1) { + if (output_select == pfi_output_select_counter && + pfi_channel >= counter_4_7_first_pfi && + pfi_channel <= counter_4_7_last_pfi) { + active_chipset = 1; + idle_chipset = 0; + } else { + active_chipset = 0; + idle_chipset = 1; + } + } + + if (idle_chipset != active_chipset) { + + idle_bits =ni_660x_read_register(dev, idle_chipset, + IOConfigReg(pfi_channel)); + idle_bits &= ~pfi_output_select_mask(pfi_channel); + idle_bits |= + pfi_output_select_bits(pfi_channel, + pfi_output_select_high_Z); + ni_660x_write_register(dev, idle_chipset, idle_bits, + IOConfigReg(pfi_channel)); + } + + active_bits = + ni_660x_read_register(dev, active_chipset, + IOConfigReg(pfi_channel)); + active_bits &= ~pfi_output_select_mask(pfi_channel); + active_bits |= pfi_output_select_bits(pfi_channel, output_select); + ni_660x_write_register(dev, active_chipset, active_bits, + IOConfigReg(pfi_channel)); +} + +static int ni_660x_set_pfi_routing(struct a4l_device *dev, unsigned chan, + unsigned source) +{ + BUG_ON(chan >= NUM_PFI_CHANNELS); + + if (source > num_pfi_output_selects) + return -EINVAL; + if (source == pfi_output_select_high_Z) + return -EINVAL; + if (chan < min_counter_pfi_chan) { + if (source == pfi_output_select_counter) + return -EINVAL; + } else if (chan > max_dio_pfi_chan) { + if (source == pfi_output_select_do) + return -EINVAL; + } + BUG_ON(chan >= NUM_PFI_CHANNELS); + + private(dev)->pfi_output_selects[chan] = source; + if (private(dev)->pfi_direction_bits & (((uint64_t) 1) << chan)) + ni_660x_select_pfi_output(dev, chan, + private(dev)-> + pfi_output_selects[chan]); + return 0; +} + +static unsigned ni_660x_get_pfi_routing(struct a4l_device *dev, + unsigned chan) +{ + BUG_ON(chan >= NUM_PFI_CHANNELS); + return private(dev)->pfi_output_selects[chan]; +} + +static void 
ni660x_config_filter(struct a4l_device *dev, + unsigned pfi_channel, + int filter) +{ + unsigned int bits; + + bits = ni_660x_read_register(dev, 0, IOConfigReg(pfi_channel)); + bits &= ~pfi_input_select_mask(pfi_channel); + bits |= pfi_input_select_bits(pfi_channel, filter); + ni_660x_write_register(dev, 0, bits, IOConfigReg(pfi_channel)); +} + +static int ni_660x_dio_insn_config(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn) +{ + unsigned int* data = insn->data; + int chan = CR_CHAN(insn->chan_desc); + struct a4l_device* dev = s->dev; + + if (data == NULL) + return -EINVAL; + + /* The input or output configuration of each digital line is + * configured by a special insn_config instruction. chanspec + * contains the channel to be changed, and data[0] contains the + * value COMEDI_INPUT or COMEDI_OUTPUT. */ + + switch (data[0]) { + case A4L_INSN_CONFIG_DIO_OUTPUT: + private(dev)->pfi_direction_bits |= ((uint64_t) 1) << chan; + ni_660x_select_pfi_output(dev, chan, + private(dev)-> + pfi_output_selects[chan]); + break; + case A4L_INSN_CONFIG_DIO_INPUT: + private(dev)->pfi_direction_bits &= ~(((uint64_t) 1) << chan); + ni_660x_select_pfi_output(dev, chan, pfi_output_select_high_Z); + break; + case A4L_INSN_CONFIG_DIO_QUERY: + data[1] = + (private(dev)->pfi_direction_bits & + (((uint64_t) 1) << chan)) ? A4L_OUTPUT : A4L_INPUT; + return 0; + case A4L_INSN_CONFIG_SET_ROUTING: + return ni_660x_set_pfi_routing(dev, chan, data[1]); + break; + case A4L_INSN_CONFIG_GET_ROUTING: + data[1] = ni_660x_get_pfi_routing(dev, chan); + break; + case A4L_INSN_CONFIG_FILTER: + ni660x_config_filter(dev, chan, data[1]); + break; + default: + return -EINVAL; + break; + }; + + return 0; +} + + +MODULE_DESCRIPTION("Analogy driver for NI660x series cards"); +MODULE_LICENSE("GPL"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c new file mode 100644 index 0000000..35749be --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c @@ -0,0 +1,443 @@ +/* + comedi/drivers/ni_670x.c + Hardware driver for NI 670x devices + + COMEDI - Linux Control and Measurement Device Interface + Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +*/ +/* +Driver: ni_670x +Description: National Instruments 670x +Author: Bart Joris <bjoris@advalvas.be> +Updated: Wed, 11 Dec 2002 18:25:35 -0800 +Devices: [National Instruments] PCI-6703 (ni_670x), PCI-6704 +Status: unknown + +Commands are not supported. +*/ + +/* + Bart Joris <bjoris@advalvas.be> Last updated on 20/08/2001 + + Manuals: + + 322110a.pdf PCI/PXI-6704 User Manual + 322110b.pdf PCI/PXI-6703/6704 User Manual +*/ + +/* + * Integration with Xenomai/Analogy layer based on the + * comedi driver. 
Adaptation made by + * Julien Delange <julien.delange@esa.int> + */ + +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <rtdm/analogy/device.h> + +#include "../intel/8255.h" +#include "ni_mio.h" +#include "mite.h" + +#define PCIMIO_IRQ_POLARITY 1 + +#define AO_VALUE_OFFSET 0x00 +#define AO_CHAN_OFFSET 0x0c +#define AO_STATUS_OFFSET 0x10 +#define AO_CONTROL_OFFSET 0x10 +#define DIO_PORT0_DIR_OFFSET 0x20 +#define DIO_PORT0_DATA_OFFSET 0x24 +#define DIO_PORT1_DIR_OFFSET 0x28 +#define DIO_PORT1_DATA_OFFSET 0x2c +#define MISC_STATUS_OFFSET 0x14 +#define MISC_CONTROL_OFFSET 0x14 + +/* Board description*/ + +struct ni_670x_board { + unsigned short device_id; + const char *name; + unsigned short ao_chans; + unsigned short ao_bits; +}; + +#define thisboard ((struct ni_670x_board *)dev->board_ptr) + +struct ni_670x_private { + struct mite_struct *mite; + int boardtype; + int dio; + unsigned int ao_readback[32]; + + /* + * Added when porting to xenomai + */ + int irq_polarity; + int irq_pin; + int irq; + struct ni_670x_board *board_ptr; + /* + * END OF ADDED when porting to xenomai + */ +}; + +struct ni_670x_subd_priv { + int io_bits; + unsigned int state; + uint16_t readback[2]; + uint16_t config; + void* counter; +}; + +static int ni_670x_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn); +static int ni_670x_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn); +static int ni_670x_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn); +static int ni_670x_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn); + +static struct a4l_channels_desc ni_670x_desc_dio = { + .mode = A4L_CHAN_GLOBAL_CHANDESC, + .length = 8, + .chans = { + {A4L_CHAN_AREF_GROUND, 1}, + }, +}; + +static struct a4l_channels_desc ni_670x_desc_ao = { + .mode = A4L_CHAN_GLOBAL_CHANDESC, + .length = 0, /* initialized later according to the board found */ + .chans = { + {A4L_CHAN_AREF_GROUND, 16}, + }, +}; + + +static struct a4l_rngtab range_0_20mA = { 1, {RANGE_mA(0, 20)} }; +static struct a4l_rngtab rng_bipolar10 = { 1, {RANGE_V(-10, 10) }}; + +struct a4l_rngtab *range_table_list[32] = { + &rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10, + &rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10, + &rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10, + &rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10, + &range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA, + &range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA, + &range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA, + &range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA}; + +static A4L_RNGDESC(32) ni670x_ao_desc; + +static void setup_subd_ao(struct a4l_subdevice *subd) +{ + int i; + int nchans; + + nchans = ((struct ni_670x_private*)(subd->dev->priv))->board_ptr->ao_chans; + subd->flags = A4L_SUBD_AO; + subd->chan_desc = &ni_670x_desc_ao; + subd->chan_desc->length = nchans; + if (nchans == 32) { + + subd->rng_desc = (struct a4l_rngdesc*) &ni670x_ao_desc; + subd->rng_desc->mode = A4L_RNG_PERCHAN_RNGDESC; + for (i = 0 ; i < 16 ; i++) { + subd->rng_desc->rngtabs[i] =&rng_bipolar10; + subd->rng_desc->rngtabs[16+i] =&range_0_20mA; + } + } else + subd->rng_desc = &a4l_range_bipolar10; + + subd->insn_write = &ni_670x_ao_winsn; + subd->insn_read = &ni_670x_ao_rinsn; +} + +static void setup_subd_dio(struct a4l_subdevice *s) +{ + /* Digital i/o subdevice */ + s->flags = A4L_SUBD_DIO; + s->chan_desc = 
&ni_670x_desc_dio; + s->rng_desc = &range_digital; + s->insn_bits = ni_670x_dio_insn_bits; + s->insn_config = ni_670x_dio_insn_config; +} + +struct setup_subd { + void (*setup_func) (struct a4l_subdevice *); + int sizeof_priv; +}; + +static struct setup_subd setup_subds[2] = { + { + .setup_func = setup_subd_ao, + .sizeof_priv = sizeof(struct ni_670x_subd_priv), + }, + { + .setup_func = setup_subd_dio, + .sizeof_priv = sizeof(struct ni_670x_subd_priv), + }, +}; + +static const struct ni_670x_board ni_670x_boards[] = { + { + .device_id = 0x2c90, + .name = "PCI-6703", + .ao_chans = 16, + .ao_bits = 16, + }, + { + .device_id = 0x1920, + .name = "PXI-6704", + .ao_chans = 32, + .ao_bits = 16, + }, + { + .device_id = 0x1290, + .name = "PCI-6704", + .ao_chans = 32, + .ao_bits = 16, + }, +}; + +#define n_ni_670x_boards ((sizeof(ni_670x_boards)/sizeof(ni_670x_boards[0]))) + +static const struct pci_device_id ni_670x_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2c90)}, + {PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1920)}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, ni_670x_pci_table); + +#define devpriv ((struct ni_670x_private *)dev->priv) + +static inline struct ni_670x_private *private(struct a4l_device *dev) +{ + return (struct ni_670x_private*) dev->priv; +} + + +static int ni_670x_attach (struct a4l_device *dev, a4l_lnkdesc_t *arg); +static int ni_670x_detach(struct a4l_device *dev); + +static struct a4l_driver ni_670x_drv = { + .owner = THIS_MODULE, + .board_name = "analogy_ni_670x", + .driver_name = "ni_670x", + .attach = ni_670x_attach, + .detach = ni_670x_detach, + .privdata_size = sizeof(struct ni_670x_private), +}; + +static int __init driver_ni_670x_init_module(void) +{ + return a4l_register_drv (&ni_670x_drv); +} + +static void __exit driver_ni_670x_cleanup_module(void) +{ + a4l_unregister_drv (&ni_670x_drv); +} + +module_init(driver_ni_670x_init_module); +module_exit(driver_ni_670x_cleanup_module); + +static int ni_670x_attach (struct a4l_device *dev, a4l_lnkdesc_t *arg) +{ + int ret, bus, slot, i, irq; + struct mite_struct *mite; + struct ni_670x_board* board = NULL; + int err; + + if(arg->opts == NULL || arg->opts_size == 0) + bus = slot = 0; + else { + bus = arg->opts_size >= sizeof(unsigned long) ? + ((unsigned long *)arg->opts)[0] : 0; + slot = arg->opts_size >= sizeof(unsigned long) * 2 ? 
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	a4l_info(dev, "ni670x attach procedure started (bus=%d/slot=%d)...\n",
+		 bus, slot);
+
+	mite = NULL;
+
+	for (i = 0; i < n_ni_670x_boards && mite == NULL; i++) {
+		mite = a4l_mite_find_device(bus,
+					    slot, ni_670x_boards[i].device_id);
+		board = (struct ni_670x_board *)&ni_670x_boards[i];
+	}
+
+	if (mite == NULL) {
+		a4l_err(dev, "%s: cannot find the MITE device\n", __FUNCTION__);
+		return -ENOENT;
+	}
+
+	/* The loop above has already incremented i past the matching
+	   entry, so report the board through the saved pointer. */
+	a4l_info(dev, "Found device %s\n", board->name);
+
+	devpriv->irq_polarity = PCIMIO_IRQ_POLARITY;
+	devpriv->irq_pin = 0;
+
+	devpriv->mite = mite;
+	devpriv->board_ptr = board;
+
+	ret = a4l_mite_setup(devpriv->mite, 0);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite\n", __FUNCTION__);
+		return ret;
+	}
+
+	irq = mite_irq(devpriv->mite);
+	devpriv->irq = irq;
+
+	a4l_info(dev, "found %s board\n", board->name);
+
+	for (i = 0; i < 2; i++) {
+		struct a4l_subdevice *subd =
+			a4l_alloc_subd(setup_subds[i].sizeof_priv, NULL);
+
+		if (subd == NULL) {
+			a4l_err(dev,
+				"%s: cannot allocate subdevice\n",
+				__FUNCTION__);
+			return -ENOMEM;
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if (err != i) {
+			a4l_err(dev,
+				"%s: cannot add subdevice\n",
+				__FUNCTION__);
+			return err;
+		}
+
+		setup_subds[i].setup_func(subd);
+	}
+
+	/* Config of misc registers */
+	writel(0x10, devpriv->mite->daq_io_addr + MISC_CONTROL_OFFSET);
+	/* Config of ao registers */
+	writel(0x00, devpriv->mite->daq_io_addr + AO_CONTROL_OFFSET);
+
+	a4l_info(dev, "ni670x attached\n");
+
+	return 0;
+}
+
+static int ni_670x_detach(struct a4l_device *dev)
+{
+	a4l_info(dev, "ni670x detach procedure started...\n");
+
+	if (dev->priv != NULL && devpriv->mite != NULL)
+		a4l_mite_unsetup(devpriv->mite);
+
+	a4l_info(dev, "ni670x detach procedure succeeded...\n");
+
+	return 0;
+}
+
+static int ni_670x_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+	int chan = CR_CHAN(insn->chan_desc);
+	struct ni_670x_subd_priv *subdpriv =
+		(struct ni_670x_subd_priv *)subd->priv;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subdpriv->io_bits |= 1 << chan;
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subdpriv->io_bits &= ~(1 << chan);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (subdpriv->io_bits & (1 << chan)) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	writel(subdpriv->io_bits,
+	       devpriv->mite->daq_io_addr + DIO_PORT0_DIR_OFFSET);
+
+	return 0;
+}
+
+static int ni_670x_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	int i;
+	unsigned int tmp;
+	unsigned int *dtmp;
+	int chan;
+
+	dtmp = (unsigned int *)insn->data;
+	chan = CR_CHAN(insn->chan_desc);
+
+	/* Channel number mapping :
+
+	   NI 6703/ NI 6704 | NI 6704 Only
+	   ----------------------------------------------------
+	   vch(0)  :  0     | ich(16) :  1
+	   vch(1)  :  2     | ich(17) :  3
+	   .       :  .     | .          .
+	   .       :  .     | .          .
+	   .       :  .     | .          .
+	   vch(15) : 30     | ich(31) : 31 */
+
+	for (i = 0; i < insn->data_size / sizeof(unsigned int); i++) {
+
+		tmp = dtmp[i];
+
+		/* First write in channel register which channel to use */
+		writel(((chan & 15) << 1) | ((chan & 16) >> 4),
+		       private(subd->dev)->mite->daq_io_addr + AO_CHAN_OFFSET);
+
+		/* write channel value */
+		writel(dtmp[i],
+		       private(subd->dev)->mite->daq_io_addr + AO_VALUE_OFFSET);
+		private(subd->dev)->ao_readback[chan] = tmp;
+	}
+
+	return 0;
+}
+
+static int ni_670x_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	int i;
+	unsigned int *dtmp;
+	int chan = CR_CHAN(insn->chan_desc);
+
+	dtmp = (unsigned int *)insn->data;
+
+	for (i = 0; i < insn->data_size / sizeof(unsigned int); i++)
+		dtmp[i] = private(subd->dev)->ao_readback[chan];
+
+	return 0;
+}
+
+static int ni_670x_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	return -ENOSYS;
+}
+
+MODULE_DESCRIPTION("Analogy driver for NI670x series cards");
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h
new file mode 100644
index 0000000..7fee167
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h
@@ -0,0 +1,122 @@
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __ANALOGY_NI_MIO_H__
+#define __ANALOGY_NI_MIO_H__
+
+/* Debug stuff */
+
+#ifdef CONFIG_DEBUG_MIO
+#define MDPRINTK(fmt, args...) rtdm_printk(fmt, ##args)
+#else /* !CONFIG_DEBUG_MIO */
+#define MDPRINTK(fmt, args...)
+#endif /* CONFIG_DEBUG_MIO */
+
+/* Subdevice related defines */
+
+#define AIMODE_NONE		0
+#define AIMODE_HALF_FULL	1
+#define AIMODE_SCAN		2
+#define AIMODE_SAMPLE		3
+
+#define NI_AI_SUBDEV		0
+#define NI_AO_SUBDEV		1
+#define NI_DIO_SUBDEV		2
+#define NI_8255_DIO_SUBDEV	3
+#define NI_UNUSED_SUBDEV	4
+#define NI_CALIBRATION_SUBDEV	5
+#define NI_EEPROM_SUBDEV	6
+#define NI_PFI_DIO_SUBDEV	7
+#define NI_CS5529_CALIBRATION_SUBDEV	8
+#define NI_SERIAL_SUBDEV	9
+#define NI_RTSI_SUBDEV		10
+#define NI_GPCT0_SUBDEV		11
+#define NI_GPCT1_SUBDEV		12
+#define NI_FREQ_OUT_SUBDEV	13
+#define NI_NUM_SUBDEVICES	14
+
+#define NI_GPCT_SUBDEV(x) ((x == 1) ? NI_GPCT1_SUBDEV : NI_GPCT0_SUBDEV)
+
+#define TIMEBASE_1_NS	50
+#define TIMEBASE_2_NS	10000
+
+#define SERIAL_DISABLED	0
+#define SERIAL_600NS	600
+#define SERIAL_1_2US	1200
+#define SERIAL_10US	10000
+
+/* PFI digital filtering options for ni m-series for use with
+   INSN_CONFIG_FILTER.
*/ +#define NI_PFI_FILTER_OFF 0x0 +#define NI_PFI_FILTER_125ns 0x1 +#define NI_PFI_FILTER_6425ns 0x2 +#define NI_PFI_FILTER_2550us 0x3 + +/* Signals which can be routed to an NI PFI pin on an m-series board + with INSN_CONFIG_SET_ROUTING. These numbers are also returned by + INSN_CONFIG_GET_ROUTING on pre-m-series boards, even though their + routing cannot be changed. The numbers assigned are not arbitrary, + they correspond to the bits required to program the board. */ +#define NI_PFI_OUTPUT_PFI_DEFAULT 0 +#define NI_PFI_OUTPUT_AI_START1 1 +#define NI_PFI_OUTPUT_AI_START2 2 +#define NI_PFI_OUTPUT_AI_CONVERT 3 +#define NI_PFI_OUTPUT_G_SRC1 4 +#define NI_PFI_OUTPUT_G_GATE1 5 +#define NI_PFI_OUTPUT_AO_UPDATE_N 6 +#define NI_PFI_OUTPUT_AO_START1 7 +#define NI_PFI_OUTPUT_AI_START_PULSE 8 +#define NI_PFI_OUTPUT_G_SRC0 9 +#define NI_PFI_OUTPUT_G_GATE0 10 +#define NI_PFI_OUTPUT_EXT_STROBE 11 +#define NI_PFI_OUTPUT_AI_EXT_MUX_CLK 12 +#define NI_PFI_OUTPUT_GOUT0 13 +#define NI_PFI_OUTPUT_GOUT1 14 +#define NI_PFI_OUTPUT_FREQ_OUT 15 +#define NI_PFI_OUTPUT_PFI_DO 16 +#define NI_PFI_OUTPUT_I_ATRIG 17 +#define NI_PFI_OUTPUT_RTSI0 18 +#define NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN 26 +#define NI_PFI_OUTPUT_SCXI_TRIG1 27 +#define NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI 28 +#define NI_PFI_OUTPUT_CDI_SAMPLE 29 +#define NI_PFI_OUTPUT_CDO_UPDATE 30 + +static inline unsigned int NI_PFI_OUTPUT_RTSI(unsigned rtsi_channel) { + return NI_PFI_OUTPUT_RTSI0 + rtsi_channel; +} + +/* Ranges declarations */ + +extern struct a4l_rngdesc a4l_range_ni_E_ai; +extern struct a4l_rngdesc a4l_range_ni_E_ai_limited; +extern struct a4l_rngdesc a4l_range_ni_E_ai_limited14; +extern struct a4l_rngdesc a4l_range_ni_E_ai_bipolar4; +extern struct a4l_rngdesc a4l_range_ni_E_ai_611x; +extern struct a4l_rngdesc range_ni_E_ai_622x; +extern struct a4l_rngdesc range_ni_E_ai_628x; +extern struct a4l_rngdesc a4l_range_ni_S_ai_6143; +extern struct a4l_rngdesc a4l_range_ni_E_ao_ext; + +/* Misc functions declarations */ + +int a4l_ni_E_interrupt(unsigned int irq, void *d); +int a4l_ni_E_init(struct a4l_device *dev); + + +#endif /* !__ANALOGY_NI_MIO_H__ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h new file mode 100644 index 0000000..d600a32 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h @@ -0,0 +1,1417 @@ +/* + * Register descriptions for NI DAQ-STC chip + * + * Copyright (C) 1998-9 David A. Schleef <ds@schleef.org> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This code is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this code; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * References: + * 340934b.pdf DAQ-STC reference manual + * + */ +#ifndef __ANALOGY_NI_STC_H__ +#define __ANALOGY_NI_STC_H__ + +#include "ni_tio.h" + +#define _bit15 0x8000 +#define _bit14 0x4000 +#define _bit13 0x2000 +#define _bit12 0x1000 +#define _bit11 0x0800 +#define _bit10 0x0400 +#define _bit9 0x0200 +#define _bit8 0x0100 +#define _bit7 0x0080 +#define _bit6 0x0040 +#define _bit5 0x0020 +#define _bit4 0x0010 +#define _bit3 0x0008 +#define _bit2 0x0004 +#define _bit1 0x0002 +#define _bit0 0x0001 + +#define NUM_PFI_OUTPUT_SELECT_REGS 6 + +/* Registers in the National Instruments DAQ-STC chip */ + +#define Interrupt_A_Ack_Register 2 +#define G0_Gate_Interrupt_Ack _bit15 +#define G0_TC_Interrupt_Ack _bit14 +#define AI_Error_Interrupt_Ack _bit13 +#define AI_STOP_Interrupt_Ack _bit12 +#define AI_START_Interrupt_Ack _bit11 +#define AI_START2_Interrupt_Ack _bit10 +#define AI_START1_Interrupt_Ack _bit9 +#define AI_SC_TC_Interrupt_Ack _bit8 +#define AI_SC_TC_Error_Confirm _bit7 +#define G0_TC_Error_Confirm _bit6 +#define G0_Gate_Error_Confirm _bit5 + +#define AI_Status_1_Register 2 +#define Interrupt_A_St _bit15 +#define AI_FIFO_Full_St _bit14 +#define AI_FIFO_Half_Full_St _bit13 +#define AI_FIFO_Empty_St _bit12 +#define AI_Overrun_St _bit11 +#define AI_Overflow_St _bit10 +#define AI_SC_TC_Error_St _bit9 +#define AI_START2_St _bit8 +#define AI_START1_St _bit7 +#define AI_SC_TC_St _bit6 +#define AI_START_St _bit5 +#define AI_STOP_St _bit4 +#define G0_TC_St _bit3 +#define G0_Gate_Interrupt_St _bit2 +#define AI_FIFO_Request_St _bit1 +#define Pass_Thru_0_Interrupt_St _bit0 + +#define AI_Status_2_Register 5 + +#define Interrupt_B_Ack_Register 3 +#define G1_Gate_Error_Confirm _bit1 +#define G1_TC_Error_Confirm _bit2 +#define AO_BC_TC_Trigger_Error_Confirm _bit3 +#define AO_BC_TC_Error_Confirm _bit4 +#define AO_UI2_TC_Error_Confrim _bit5 +#define AO_UI2_TC_Interrupt_Ack _bit6 +#define AO_UC_TC_Interrupt_Ack _bit7 +#define AO_BC_TC_Interrupt_Ack _bit8 +#define AO_START1_Interrupt_Ack _bit9 +#define AO_UPDATE_Interrupt_Ack _bit10 +#define AO_START_Interrupt_Ack _bit11 +#define AO_STOP_Interrupt_Ack _bit12 +#define AO_Error_Interrupt_Ack _bit13 +#define G1_TC_Interrupt_Ack _bit14 +#define G1_Gate_Interrupt_Ack _bit15 + +#define AO_Status_1_Register 3 +#define Interrupt_B_St _bit15 +#define AO_FIFO_Full_St _bit14 +#define AO_FIFO_Half_Full_St _bit13 +#define AO_FIFO_Empty_St _bit12 +#define AO_BC_TC_Error_St _bit11 +#define AO_START_St _bit10 +#define AO_Overrun_St _bit9 +#define AO_START1_St _bit8 +#define AO_BC_TC_St _bit7 +#define AO_UC_TC_St _bit6 +#define AO_UPDATE_St _bit5 +#define AO_UI2_TC_St _bit4 +#define G1_TC_St _bit3 +#define G1_Gate_Interrupt_St _bit2 +#define AO_FIFO_Request_St _bit1 +#define Pass_Thru_1_Interrupt_St _bit0 + + +#define AI_Command_2_Register 4 +#define AI_End_On_SC_TC _bit15 +#define AI_End_On_End_Of_Scan _bit14 +#define AI_START1_Disable _bit11 +#define AI_SC_Save_Trace _bit10 +#define AI_SI_Switch_Load_On_SC_TC _bit9 +#define AI_SI_Switch_Load_On_STOP _bit8 +#define AI_SI_Switch_Load_On_TC _bit7 +#define AI_SC_Switch_Load_On_TC _bit4 +#define AI_STOP_Pulse _bit3 +#define AI_START_Pulse _bit2 +#define AI_START2_Pulse _bit1 +#define AI_START1_Pulse _bit0 + +#define AO_Command_2_Register 5 +#define AO_End_On_BC_TC(x) (((x) & 0x3) << 14) +#define AO_Start_Stop_Gate_Enable _bit13 +#define AO_UC_Save_Trace _bit12 +#define AO_BC_Gate_Enable _bit11 +#define AO_BC_Save_Trace _bit10 +#define AO_UI_Switch_Load_On_BC_TC _bit9 +#define AO_UI_Switch_Load_On_Stop _bit8 +#define 
AO_UI_Switch_Load_On_TC _bit7 +#define AO_UC_Switch_Load_On_BC_TC _bit6 +#define AO_UC_Switch_Load_On_TC _bit5 +#define AO_BC_Switch_Load_On_TC _bit4 +#define AO_Mute_B _bit3 +#define AO_Mute_A _bit2 +#define AO_UPDATE2_Pulse _bit1 +#define AO_START1_Pulse _bit0 + +#define AO_Status_2_Register 6 + +#define DIO_Parallel_Input_Register 7 + +#define AI_Command_1_Register 8 +#define AI_Analog_Trigger_Reset _bit14 +#define AI_Disarm _bit13 +#define AI_SI2_Arm _bit12 +#define AI_SI2_Load _bit11 +#define AI_SI_Arm _bit10 +#define AI_SI_Load _bit9 +#define AI_DIV_Arm _bit8 +#define AI_DIV_Load _bit7 +#define AI_SC_Arm _bit6 +#define AI_SC_Load _bit5 +#define AI_SCAN_IN_PROG_Pulse _bit4 +#define AI_EXTMUX_CLK_Pulse _bit3 +#define AI_LOCALMUX_CLK_Pulse _bit2 +#define AI_SC_TC_Pulse _bit1 +#define AI_CONVERT_Pulse _bit0 + +#define AO_Command_1_Register 9 +#define AO_Analog_Trigger_Reset _bit15 +#define AO_START_Pulse _bit14 +#define AO_Disarm _bit13 +#define AO_UI2_Arm_Disarm _bit12 +#define AO_UI2_Load _bit11 +#define AO_UI_Arm _bit10 +#define AO_UI_Load _bit9 +#define AO_UC_Arm _bit8 +#define AO_UC_Load _bit7 +#define AO_BC_Arm _bit6 +#define AO_BC_Load _bit5 +#define AO_DAC1_Update_Mode _bit4 +#define AO_LDAC1_Source_Select _bit3 +#define AO_DAC0_Update_Mode _bit2 +#define AO_LDAC0_Source_Select _bit1 +#define AO_UPDATE_Pulse _bit0 + + +#define DIO_Output_Register 10 +#define DIO_Parallel_Data_Out(a) ((a)&0xff) +#define DIO_Parallel_Data_Mask 0xff +#define DIO_SDOUT _bit0 +#define DIO_SDIN _bit4 +#define DIO_Serial_Data_Out(a) (((a)&0xff)<<8) +#define DIO_Serial_Data_Mask 0xff00 + +#define DIO_Control_Register 11 +#define DIO_Software_Serial_Control _bit11 +#define DIO_HW_Serial_Timebase _bit10 +#define DIO_HW_Serial_Enable _bit9 +#define DIO_HW_Serial_Start _bit8 +#define DIO_Pins_Dir(a) ((a)&0xff) +#define DIO_Pins_Dir_Mask 0xff + +#define AI_Mode_1_Register 12 +#define AI_CONVERT_Source_Select(a) (((a) & 0x1f) << 11) +#define AI_SI_Source_select(a) (((a) & 0x1f) << 6) +#define AI_CONVERT_Source_Polarity _bit5 +#define AI_SI_Source_Polarity _bit4 +#define AI_Start_Stop _bit3 +#define AI_Mode_1_Reserved _bit2 +#define AI_Continuous _bit1 +#define AI_Trigger_Once _bit0 + +#define AI_Mode_2_Register 13 +#define AI_SC_Gate_Enable _bit15 +#define AI_Start_Stop_Gate_Enable _bit14 +#define AI_Pre_Trigger _bit13 +#define AI_External_MUX_Present _bit12 +#define AI_SI2_Initial_Load_Source _bit9 +#define AI_SI2_Reload_Mode _bit8 +#define AI_SI_Initial_Load_Source _bit7 +#define AI_SI_Reload_Mode(a) (((a) & 0x7)<<4) +#define AI_SI_Write_Switch _bit3 +#define AI_SC_Initial_Load_Source _bit2 +#define AI_SC_Reload_Mode _bit1 +#define AI_SC_Write_Switch _bit0 + +#define AI_SI_Load_A_Registers 14 +#define AI_SI_Load_B_Registers 16 +#define AI_SC_Load_A_Registers 18 +#define AI_SC_Load_B_Registers 20 +#define AI_SI_Save_Registers 64 +#define AI_SC_Save_Registers 66 + +#define AI_SI2_Load_A_Register 23 +#define AI_SI2_Load_B_Register 25 + +#define Joint_Status_1_Register 27 +#define DIO_Serial_IO_In_Progress_St _bit12 + +#define DIO_Serial_Input_Register 28 +#define Joint_Status_2_Register 29 +#define AO_TMRDACWRs_In_Progress_St _bit5 + +#define AO_Mode_1_Register 38 +#define AO_UPDATE_Source_Select(x) (((x)&0x1f)<<11) +#define AO_UI_Source_Select(x) (((x)&0x1f)<<6) +#define AO_Multiple_Channels _bit5 +#define AO_UPDATE_Source_Polarity _bit4 +#define AO_UI_Source_Polarity _bit3 +#define AO_UC_Switch_Load_Every_TC _bit2 +#define AO_Continuous _bit1 +#define AO_Trigger_Once _bit0 + +#define AO_Mode_2_Register 39 
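+/*
+ * Illustrative sketch, not part of the original DAQ-STC register map:
+ * the AO mode registers above pack 5-bit timing source selectors
+ * (see AO_UPDATE_Source_Select()/AO_UI_Source_Select()), so updating
+ * one field of a driver's soft copy means clearing the field mask
+ * before OR-ing in the new selector.  The helper name and the use of
+ * a soft copy are hypothetical.
+ */
+static inline unsigned short
+ni_stc_ao_mode1_set_ui_source(unsigned short soft_copy, unsigned selector)
+{
+	soft_copy &= ~AO_UI_Source_Select(0x1f);	/* clear the 5-bit field */
+	soft_copy |= AO_UI_Source_Select(selector);	/* install the new source */
+	return soft_copy;
+}
+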
+#define AO_FIFO_Mode_Mask ( 0x3 << 14 ) +#define AO_FIFO_Mode_HF_to_F (3<<14) +#define AO_FIFO_Mode_F (2<<14) +#define AO_FIFO_Mode_HF (1<<14) +#define AO_FIFO_Mode_E (0<<14) +#define AO_FIFO_Retransmit_Enable _bit13 +#define AO_START1_Disable _bit12 +#define AO_UC_Initial_Load_Source _bit11 +#define AO_UC_Write_Switch _bit10 +#define AO_UI2_Initial_Load_Source _bit9 +#define AO_UI2_Reload_Mode _bit8 +#define AO_UI_Initial_Load_Source _bit7 +#define AO_UI_Reload_Mode(x) (((x) & 0x7) << 4) +#define AO_UI_Write_Switch _bit3 +#define AO_BC_Initial_Load_Source _bit2 +#define AO_BC_Reload_Mode _bit1 +#define AO_BC_Write_Switch _bit0 + +#define AO_UI_Load_A_Register 40 +#define AO_UI_Load_A_Register_High 40 +#define AO_UI_Load_A_Register_Low 41 +#define AO_UI_Load_B_Register 42 +#define AO_UI_Save_Registers 16 +#define AO_BC_Load_A_Register 44 +#define AO_BC_Load_A_Register_High 44 +#define AO_BC_Load_A_Register_Low 45 +#define AO_BC_Load_B_Register 46 +#define AO_BC_Load_B_Register_High 46 +#define AO_BC_Load_B_Register_Low 47 +#define AO_BC_Save_Registers 18 +#define AO_UC_Load_A_Register 48 +#define AO_UC_Load_A_Register_High 48 +#define AO_UC_Load_A_Register_Low 49 +#define AO_UC_Load_B_Register 50 +#define AO_UC_Save_Registers 20 + +#define Clock_and_FOUT_Register 56 +#define FOUT_Enable _bit15 +#define FOUT_Timebase_Select _bit14 +#define DIO_Serial_Out_Divide_By_2 _bit13 +#define Slow_Internal_Time_Divide_By_2 _bit12 +#define Slow_Internal_Timebase _bit11 +#define G_Source_Divide_By_2 _bit10 +#define Clock_To_Board_Divide_By_2 _bit9 +#define Clock_To_Board _bit8 +#define AI_Output_Divide_By_2 _bit7 +#define AI_Source_Divide_By_2 _bit6 +#define AO_Output_Divide_By_2 _bit5 +#define AO_Source_Divide_By_2 _bit4 +#define FOUT_Divider_mask 0xf +#define FOUT_Divider(x) (((x) & 0xf) << 0) + +#define IO_Bidirection_Pin_Register 57 +#define RTSI_Trig_Direction_Register 58 +#define Drive_RTSI_Clock_Bit 0x1 +#define Use_RTSI_Clock_Bit 0x2 + +static inline unsigned int RTSI_Output_Bit(unsigned channel, int is_mseries) +{ + unsigned max_channel; + unsigned base_bit_shift; + if(is_mseries) + { + base_bit_shift = 8; + max_channel = 7; + }else + { + base_bit_shift = 9; + max_channel = 6; + } + if(channel > max_channel) + { + rtdm_printk("%s: bug, invalid RTSI_channel=%i\n", + __FUNCTION__, channel); + return 0; + } + return 1 << (base_bit_shift + channel); +} + +#define Interrupt_Control_Register 59 +#define Interrupt_B_Enable _bit15 +#define Interrupt_B_Output_Select(x) ((x)<<12) +#define Interrupt_A_Enable _bit11 +#define Interrupt_A_Output_Select(x) ((x)<<8) +#define Pass_Thru_0_Interrupt_Polarity _bit3 +#define Pass_Thru_1_Interrupt_Polarity _bit2 +#define Interrupt_Output_On_3_Pins _bit1 +#define Interrupt_Output_Polarity _bit0 + +#define AI_Output_Control_Register 60 +#define AI_START_Output_Select _bit10 +#define AI_SCAN_IN_PROG_Output_Select(x) (((x) & 0x3) << 8) +#define AI_EXTMUX_CLK_Output_Select(x) (((x) & 0x3) << 6) +#define AI_LOCALMUX_CLK_Output_Select(x) ((x)<<4) +#define AI_SC_TC_Output_Select(x) ((x)<<2) +#define AI_CONVERT_Output_High_Z 0 +#define AI_CONVERT_Output_Ground 1 +#define AI_CONVERT_Output_Enable_Low 2 +#define AI_CONVERT_Output_Enable_High 3 +#define AI_CONVERT_Output_Select(x) ((x) & 0x3) + +#define AI_START_STOP_Select_Register 62 +#define AI_START_Polarity _bit15 +#define AI_STOP_Polarity _bit14 +#define AI_STOP_Sync _bit13 +#define AI_STOP_Edge _bit12 +#define AI_STOP_Select(a) (((a) & 0x1f)<<7) +#define AI_START_Sync _bit6 +#define AI_START_Edge _bit5 +#define 
AI_START_Select(a) ((a) & 0x1f) + +#define AI_Trigger_Select_Register 63 +#define AI_START1_Polarity _bit15 +#define AI_START2_Polarity _bit14 +#define AI_START2_Sync _bit13 +#define AI_START2_Edge _bit12 +#define AI_START2_Select(a) (((a) & 0x1f) << 7) +#define AI_START1_Sync _bit6 +#define AI_START1_Edge _bit5 +#define AI_START1_Select(a) ((a) & 0x1f) + +#define AI_DIV_Load_A_Register 64 + +#define AO_Start_Select_Register 66 +#define AO_UI2_Software_Gate _bit15 +#define AO_UI2_External_Gate_Polarity _bit14 +#define AO_START_Polarity _bit13 +#define AO_AOFREQ_Enable _bit12 +#define AO_UI2_External_Gate_Select(a) (((a) & 0x1f) << 7) +#define AO_START_Sync _bit6 +#define AO_START_Edge _bit5 +#define AO_START_Select(a) ((a) & 0x1f) + +#define AO_Trigger_Select_Register 67 +#define AO_UI2_External_Gate_Enable _bit15 +#define AO_Delayed_START1 _bit14 +#define AO_START1_Polarity _bit13 +#define AO_UI2_Source_Polarity _bit12 +#define AO_UI2_Source_Select(x) (((x)&0x1f)<<7) +#define AO_START1_Sync _bit6 +#define AO_START1_Edge _bit5 +#define AO_START1_Select(x) (((x)&0x1f)<<0) + +#define AO_Mode_3_Register 70 +#define AO_UI2_Switch_Load_Next_TC _bit13 +#define AO_UC_Switch_Load_Every_BC_TC _bit12 +#define AO_Trigger_Length _bit11 +#define AO_Stop_On_Overrun_Error _bit5 +#define AO_Stop_On_BC_TC_Trigger_Error _bit4 +#define AO_Stop_On_BC_TC_Error _bit3 +#define AO_Not_An_UPDATE _bit2 +#define AO_Software_Gate _bit1 +#define AO_Last_Gate_Disable _bit0 /* M Series only */ + +#define Joint_Reset_Register 72 +#define Software_Reset _bit11 +#define AO_Configuration_End _bit9 +#define AI_Configuration_End _bit8 +#define AO_Configuration_Start _bit5 +#define AI_Configuration_Start _bit4 +#define G1_Reset _bit3 +#define G0_Reset _bit2 +#define AO_Reset _bit1 +#define AI_Reset _bit0 + +#define Interrupt_A_Enable_Register 73 +#define Pass_Thru_0_Interrupt_Enable _bit9 +#define G0_Gate_Interrupt_Enable _bit8 +#define AI_FIFO_Interrupt_Enable _bit7 +#define G0_TC_Interrupt_Enable _bit6 +#define AI_Error_Interrupt_Enable _bit5 +#define AI_STOP_Interrupt_Enable _bit4 +#define AI_START_Interrupt_Enable _bit3 +#define AI_START2_Interrupt_Enable _bit2 +#define AI_START1_Interrupt_Enable _bit1 +#define AI_SC_TC_Interrupt_Enable _bit0 + +#define Interrupt_B_Enable_Register 75 +#define Pass_Thru_1_Interrupt_Enable _bit11 +#define G1_Gate_Interrupt_Enable _bit10 +#define G1_TC_Interrupt_Enable _bit9 +#define AO_FIFO_Interrupt_Enable _bit8 +#define AO_UI2_TC_Interrupt_Enable _bit7 +#define AO_UC_TC_Interrupt_Enable _bit6 +#define AO_Error_Interrupt_Enable _bit5 +#define AO_STOP_Interrupt_Enable _bit4 +#define AO_START_Interrupt_Enable _bit3 +#define AO_UPDATE_Interrupt_Enable _bit2 +#define AO_START1_Interrupt_Enable _bit1 +#define AO_BC_TC_Interrupt_Enable _bit0 + +#define Second_IRQ_A_Enable_Register 74 +#define AI_SC_TC_Second_Irq_Enable _bit0 +#define AI_START1_Second_Irq_Enable _bit1 +#define AI_START2_Second_Irq_Enable _bit2 +#define AI_START_Second_Irq_Enable _bit3 +#define AI_STOP_Second_Irq_Enable _bit4 +#define AI_Error_Second_Irq_Enable _bit5 +#define G0_TC_Second_Irq_Enable _bit6 +#define AI_FIFO_Second_Irq_Enable _bit7 +#define G0_Gate_Second_Irq_Enable _bit8 +#define Pass_Thru_0_Second_Irq_Enable _bit9 + +#define Second_IRQ_B_Enable_Register 76 +#define AO_BC_TC_Second_Irq_Enable _bit0 +#define AO_START1_Second_Irq_Enable _bit1 +#define AO_UPDATE_Second_Irq_Enable _bit2 +#define AO_START_Second_Irq_Enable _bit3 +#define AO_STOP_Second_Irq_Enable _bit4 +#define AO_Error_Second_Irq_Enable _bit5 +#define 
AO_UC_TC_Second_Irq_Enable _bit6 +#define AO_UI2_TC_Second_Irq_Enable _bit7 +#define AO_FIFO_Second_Irq_Enable _bit8 +#define G1_TC_Second_Irq_Enable _bit9 +#define G1_Gate_Second_Irq_Enable _bit10 +#define Pass_Thru_1_Second_Irq_Enable _bit11 + +#define AI_Personal_Register 77 +#define AI_SHIFTIN_Pulse_Width _bit15 +#define AI_EOC_Polarity _bit14 +#define AI_SOC_Polarity _bit13 +#define AI_SHIFTIN_Polarity _bit12 +#define AI_CONVERT_Pulse_Timebase _bit11 +#define AI_CONVERT_Pulse_Width _bit10 +#define AI_CONVERT_Original_Pulse _bit9 +#define AI_FIFO_Flags_Polarity _bit8 +#define AI_Overrun_Mode _bit7 +#define AI_EXTMUX_CLK_Pulse_Width _bit6 +#define AI_LOCALMUX_CLK_Pulse_Width _bit5 +#define AI_AIFREQ_Polarity _bit4 + +#define AO_Personal_Register 78 +#define AO_Interval_Buffer_Mode _bit3 +#define AO_BC_Source_Select _bit4 +#define AO_UPDATE_Pulse_Width _bit5 +#define AO_UPDATE_Pulse_Timebase _bit6 +#define AO_UPDATE_Original_Pulse _bit7 +#define AO_DMA_PIO_Control _bit8 /* M Series: reserved */ +#define AO_AOFREQ_Polarity _bit9 /* M Series: reserved */ +#define AO_FIFO_Enable _bit10 +#define AO_FIFO_Flags_Polarity _bit11 /* M Series: reserved */ +#define AO_TMRDACWR_Pulse_Width _bit12 +#define AO_Fast_CPU _bit13 /* M Series: reserved */ +#define AO_Number_Of_DAC_Packages _bit14 /* 1 for "single" mode, + 0 for "dual" */ +#define AO_Multiple_DACS_Per_Package _bit15 /* M Series only */ + +#define RTSI_Trig_A_Output_Register 79 + +#define RTSI_Trig_B_Output_Register 80 +#define RTSI_Sub_Selection_1_Bit _bit15 /* not for M Series */ +#define RTSI_Trig_Output_Bits(x, y) ((y & 0xf) << ((x % 4) * 4)) +#define RTSI_Trig_Output_Mask(x) (0xf << ((x % 4) * 4)) +#define RTSI_Trig_Output_Source(x, y) ((y >> ((x % 4) * 4)) & 0xf) + +#define RTSI_Board_Register 81 +#define Write_Strobe_0_Register 82 +#define Write_Strobe_1_Register 83 +#define Write_Strobe_2_Register 84 +#define Write_Strobe_3_Register 85 + +#define AO_Output_Control_Register 86 +#define AO_External_Gate_Enable _bit15 +#define AO_External_Gate_Select(x) (((x)&0x1f)<<10) +#define AO_Number_Of_Channels(x) (((x)&0xf)<<6) +#define AO_UPDATE2_Output_Select(x) (((x)&0x3)<<4) +#define AO_External_Gate_Polarity _bit3 +#define AO_UPDATE2_Output_Toggle _bit2 +#define AO_Update_Output_High_Z 0 +#define AO_Update_Output_Ground 1 +#define AO_Update_Output_Enable_Low 2 +#define AO_Update_Output_Enable_High 3 +#define AO_UPDATE_Output_Select(x) (x&0x3) + +#define AI_Mode_3_Register 87 +#define AI_Trigger_Length _bit15 +#define AI_Delay_START _bit14 +#define AI_Software_Gate _bit13 +#define AI_SI_Special_Trigger_Delay _bit12 +#define AI_SI2_Source_Select _bit11 +#define AI_Delayed_START2 _bit10 +#define AI_Delayed_START1 _bit9 +#define AI_External_Gate_Mode _bit8 +#define AI_FIFO_Mode_HF_to_E (3<<6) +#define AI_FIFO_Mode_F (2<<6) +#define AI_FIFO_Mode_HF (1<<6) +#define AI_FIFO_Mode_NE (0<<6) +#define AI_External_Gate_Polarity _bit5 +#define AI_External_Gate_Select(a) ((a) & 0x1f) + +#define G_Autoincrement_Register(a) (68+(a)) +#define G_Command_Register(a) (6+(a)) +#define G_HW_Save_Register(a) (8+(a)*2) +#define G_HW_Save_Register_High(a) (8+(a)*2) +#define G_HW_Save_Register_Low(a) (9+(a)*2) +#define G_Input_Select_Register(a) (36+(a)) +#define G_Load_A_Register(a) (28+(a)*4) +#define G_Load_A_Register_High(a) (28+(a)*4) +#define G_Load_A_Register_Low(a) (29+(a)*4) +#define G_Load_B_Register(a) (30+(a)*4) +#define G_Load_B_Register_High(a) (30+(a)*4) +#define G_Load_B_Register_Low(a) (31+(a)*4) +#define G_Mode_Register(a) (26+(a)) +#define 
G_Save_Register(a) (12+(a)*2) +#define G_Save_Register_High(a) (12+(a)*2) +#define G_Save_Register_Low(a) (13+(a)*2) +#define G_Status_Register 4 +#define Analog_Trigger_Etc_Register 61 + +/* command register */ +#define G_Disarm_Copy _bit15 /* strobe */ +#define G_Save_Trace_Copy _bit14 +#define G_Arm_Copy _bit13 /* strobe */ +#define G_Bank_Switch_Start _bit10 /* strobe */ +#define G_Little_Big_Endian _bit9 +#define G_Synchronized_Gate _bit8 +#define G_Write_Switch _bit7 +#define G_Up_Down(a) (((a)&0x03)<<5) +#define G_Disarm _bit4 /* strobe */ +#define G_Analog_Trigger_Reset _bit3 /* strobe */ +#define G_Save_Trace _bit1 +#define G_Arm _bit0 /* strobe */ + +/* channel agnostic names for the command register #defines */ +#define G_Bank_Switch_Enable _bit12 +#define G_Bank_Switch_Mode _bit11 +#define G_Load _bit2 /* strobe */ + +/* input select register */ +#define G_Gate_Select(a) (((a)&0x1f)<<7) +#define G_Source_Select(a) (((a)&0x1f)<<2) +#define G_Write_Acknowledges_Irq _bit1 +#define G_Read_Acknowledges_Irq _bit0 + +/* same input select register, but with channel agnostic names */ +#define G_Source_Polarity _bit15 +#define G_Output_Polarity _bit14 +#define G_OR_Gate _bit13 +#define G_Gate_Select_Load_Source _bit12 + +/* mode register */ +#define G_Loading_On_TC _bit12 +#define G_Output_Mode(a) (((a)&0x03)<<8) +#define G_Trigger_Mode_For_Edge_Gate(a) (((a)&0x03)<<3) +#define G_Gating_Mode(a) (((a)&0x03)<<0) + +/* same input mode register, but with channel agnostic names */ +#define G_Load_Source_Select _bit7 +#define G_Reload_Source_Switching _bit15 +#define G_Loading_On_Gate _bit14 +#define G_Gate_Polarity _bit13 + +#define G_Counting_Once(a) (((a)&0x03)<<10) +#define G_Stop_Mode(a) (((a)&0x03)<<5) +#define G_Gate_On_Both_Edges _bit2 + +/* G_Status_Register */ +#define G1_Gate_Error_St _bit15 +#define G0_Gate_Error_St _bit14 +#define G1_TC_Error_St _bit13 +#define G0_TC_Error_St _bit12 +#define G1_No_Load_Between_Gates_St _bit11 +#define G0_No_Load_Between_Gates_St _bit10 +#define G1_Armed_St _bit9 +#define G0_Armed_St _bit8 +#define G1_Stale_Data_St _bit7 +#define G0_Stale_Data_St _bit6 +#define G1_Next_Load_Source_St _bit5 +#define G0_Next_Load_Source_St _bit4 +#define G1_Counting_St _bit3 +#define G0_Counting_St _bit2 +#define G1_Save_St _bit1 +#define G0_Save_St _bit0 + +/* general purpose counter timer */ +#define G_Autoincrement(a) ((a)<<0) + +/*Analog_Trigger_Etc_Register*/ +#define Analog_Trigger_Mode(x) ((x) & 0x7) +#define Analog_Trigger_Enable _bit3 +#define Analog_Trigger_Drive _bit4 +#define GPFO_1_Output_Select _bit7 +#define GPFO_0_Output_Select(a) ((a)<<11) +#define GPFO_0_Output_Enable _bit14 +#define GPFO_1_Output_Enable _bit15 + +/* Additional windowed registers unique to E series */ + +/* 16 bit registers shadowed from DAQ-STC */ +#define Window_Address 0x00 +#define Window_Data 0x02 + +#define Configuration_Memory_Clear 82 +#define ADC_FIFO_Clear 83 +#define DAC_FIFO_Clear 84 + +/* i/o port offsets */ + +/* 8 bit registers */ +#define XXX_Status 0x01 +#define PROMOUT _bit0 +#define AI_FIFO_LOWER_NOT_EMPTY _bit3 + +#define Serial_Command 0x0d +#define Misc_Command 0x0f +#define Port_A 0x19 +#define Port_B 0x1b +#define Port_C 0x1d +#define Configuration 0x1f +#define Strobes 0x01 +#define Channel_A_Mode 0x03 +#define Channel_B_Mode 0x05 +#define Channel_C_Mode 0x07 +#define AI_AO_Select 0x09 +#define AI_DMA_Select_Shift 0 +#define AI_DMA_Select_Mask 0xf +#define AO_DMA_Select_Shift 4 +#define AO_DMA_Select_Mask (0xf << AO_DMA_Select_Shift) + +#define G0_G1_Select 
0x0b + +static inline unsigned ni_stc_dma_channel_select_bitfield(unsigned channel) +{ + if(channel < 4) return 1 << channel; + if(channel == 4) return 0x3; + if(channel == 5) return 0x5; + BUG(); + return 0; +} +static inline unsigned GPCT_DMA_Select_Bits(unsigned gpct_index, unsigned mite_channel) +{ + BUG_ON(gpct_index > 1); + return ni_stc_dma_channel_select_bitfield(mite_channel) << (4 * gpct_index); +} +static inline unsigned GPCT_DMA_Select_Mask(unsigned gpct_index) +{ + BUG_ON(gpct_index > 1); + return 0xf << (4 * gpct_index); +} + +/* 16 bit registers */ + +#define Configuration_Memory_Low 0x10 +#define AI_DITHER _bit9 +#define AI_LAST_CHANNEL _bit15 + +#define Configuration_Memory_High 0x12 +#define AI_AC_COUPLE _bit11 +#define AI_DIFFERENTIAL _bit12 +#define AI_COMMON _bit13 +#define AI_GROUND (_bit12|_bit13) +#define AI_CONFIG_CHANNEL(x) (x&0x3f) + +#define ADC_FIFO_Data_Register 0x1c + +#define AO_Configuration 0x16 +#define AO_Bipolar _bit0 +#define AO_Deglitch _bit1 +#define AO_Ext_Ref _bit2 +#define AO_Ground_Ref _bit3 +#define AO_Channel(x) ((x) << 8) + +#define DAC_FIFO_Data 0x1e +#define DAC0_Direct_Data 0x18 +#define DAC1_Direct_Data 0x1a + +/* 611x registers (these boards differ from the e-series) */ + +#define Magic_611x 0x19 /* w8 (new) */ +#define Calibration_Channel_Select_611x 0x1a /* w16 (new) */ +#define ADC_FIFO_Data_611x 0x1c /* r32 (incompatible) */ +#define AI_FIFO_Offset_Load_611x 0x05 /* r8 (new) */ +#define DAC_FIFO_Data_611x 0x14 /* w32 (incompatible) */ +#define Cal_Gain_Select_611x 0x05 /* w8 (new) */ + +#define AO_Window_Address_611x 0x18 +#define AO_Window_Data_611x 0x1e + +/* 6143 registers */ +#define Magic_6143 0x19 /* w8 */ +#define G0G1_DMA_Select_6143 0x0B /* w8 */ +#define PipelineDelay_6143 0x1f /* w8 */ +#define EOC_Set_6143 0x1D /* w8 */ +#define AIDMA_Select_6143 0x09 /* w8 */ +#define AIFIFO_Data_6143 0x8C /* w32 */ +#define AIFIFO_Flag_6143 0x84 /* w32 */ +#define AIFIFO_Control_6143 0x88 /* w32 */ +#define AIFIFO_Status_6143 0x88 /* w32 */ +#define AIFIFO_DMAThreshold_6143 0x90 /* w32 */ +#define AIFIFO_Words_Available_6143 0x94 /* w32 */ + +#define Calibration_Channel_6143 0x42 /* w16 */ +#define Calibration_LowTime_6143 0x20 /* w16 */ +#define Calibration_HighTime_6143 0x22 /* w16 */ +#define Relay_Counter_Load_Val__6143 0x4C /* w32 */ +#define Signature_6143 0x50 /* w32 */ +#define Release_Date_6143 0x54 /* w32 */ +#define Release_Oldest_Date_6143 0x58 /* w32 */ + +#define Calibration_Channel_6143_RelayOn 0x8000 /* Calibration relay switch On */ +#define Calibration_Channel_6143_RelayOff 0x4000 /* Calibration relay switch Off */ +#define Calibration_Channel_Gnd_Gnd 0x00 /* Offset Calibration */ +#define Calibration_Channel_2v5_Gnd 0x02 /* 2.5V Reference */ +#define Calibration_Channel_Pwm_Gnd 0x05 /* +/- 5V Self Cal */ +#define Calibration_Channel_2v5_Pwm 0x0a /* PWM Calibration */ +#define Calibration_Channel_Pwm_Pwm 0x0d /* CMRR */ +#define Calibration_Channel_Gnd_Pwm 0x0e /* PWM Calibration */ + +/* 671x, 611x registers */ + +/* 671xi 611x windowed ao registers */ +#define AO_Immediate_671x 0x11 /* W 16 */ +#define AO_Timed_611x 0x10 /* W 16 */ +#define AO_FIFO_Offset_Load_611x 0x13 /* W32 */ +#define AO_Later_Single_Point_Updates 0x14 /* W 16 */ +#define AO_Waveform_Generation_611x 0x15 /* W 16 */ +#define AO_Misc_611x 0x16 /* W 16 */ +#define AO_Calibration_Channel_Select_67xx 0x17 /* W 16 */ +#define AO_Configuration_2_67xx 0x18 /* W 16 */ +#define CAL_ADC_Command_67xx 0x19 /* W 8 */ +#define CAL_ADC_Status_67xx 0x1a /* R 8 */ 
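+/*
+ * Sketch only (not in the original header): composing an E-series
+ * AO_Configuration word from the bits defined above.  The bipolar,
+ * ground-referenced setting is an arbitrary example; a real driver
+ * would write the result through the windowed AO_Configuration
+ * register.
+ */
+static inline unsigned short ni_stc_ao_config_word(unsigned int chan)
+{
+	return AO_Channel(chan) | AO_Bipolar | AO_Ground_Ref;
+}
+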
+#define CAL_ADC_Data_67xx 0x1b /* R 16 */ +#define CAL_ADC_Config_Data_High_Word_67xx 0x1c /* RW 16 */ +#define CAL_ADC_Config_Data_Low_Word_67xx 0x1d /* RW 16 */ + +static inline unsigned int DACx_Direct_Data_671x(int channel) +{ + return channel; +} + +#define CLEAR_WG _bit0 + +#define CSCFG_CAL_CONTROL_MASK 0x7 +#define CSCFG_SELF_CAL_OFFSET 0x1 +#define CSCFG_SELF_CAL_GAIN 0x2 +#define CSCFG_SELF_CAL_OFFSET_GAIN 0x3 +#define CSCFG_SYSTEM_CAL_OFFSET 0x5 +#define CSCFG_SYSTEM_CAL_GAIN 0x6 +#define CSCFG_DONE (1 << 3) +#define CSCFG_POWER_SAVE_SELECT (1 << 4) +#define CSCFG_PORT_MODE (1 << 5) +#define CSCFG_RESET_VALID (1 << 6) +#define CSCFG_RESET (1 << 7) +#define CSCFG_UNIPOLAR (1 << 12) +#define CSCFG_WORD_RATE_2180_CYCLES (0x0 << 13) +#define CSCFG_WORD_RATE_1092_CYCLES (0x1 << 13) +#define CSCFG_WORD_RATE_532_CYCLES (0x2 << 13) +#define CSCFG_WORD_RATE_388_CYCLES (0x3 << 13) +#define CSCFG_WORD_RATE_324_CYCLES (0x4 << 13) +#define CSCFG_WORD_RATE_17444_CYCLES (0x5 << 13) +#define CSCFG_WORD_RATE_8724_CYCLES (0x6 << 13) +#define CSCFG_WORD_RATE_4364_CYCLES (0x7 << 13) +#define CSCFG_WORD_RATE_MASK (0x7 << 13) +#define CSCFG_LOW_POWER (1 << 16) + +#define CS5529_CONFIG_DOUT(x) (1 << (18 + x)) +#define CS5529_CONFIG_AOUT(x) (1 << (22 + x)) + +/* cs5529 command bits */ +#define CSCMD_POWER_SAVE _bit0 +#define CSCMD_REGISTER_SELECT_MASK 0xe +#define CSCMD_OFFSET_REGISTER 0x0 +#define CSCMD_GAIN_REGISTER _bit1 +#define CSCMD_CONFIG_REGISTER _bit2 +#define CSCMD_READ _bit4 +#define CSCMD_CONTINUOUS_CONVERSIONS _bit5 +#define CSCMD_SINGLE_CONVERSION _bit6 +#define CSCMD_COMMAND _bit7 + +/* cs5529 status bits */ +#define CSS_ADC_BUSY _bit0 +#define CSS_OSC_DETECT _bit1 /* indicates adc error */ +#define CSS_OVERRANGE _bit3 + +#define SerDacLd(x) (0x08<<(x)) + +/* + This is stuff unique to the NI E series drivers, + but I thought I'd put it here anyway. 
+*/ + +enum +{ + ai_gain_16 = 0, + ai_gain_8, + ai_gain_14, + ai_gain_4, + ai_gain_611x, + ai_gain_622x, + ai_gain_628x, + ai_gain_6143 +}; +enum caldac_enum +{ + caldac_none=0, + mb88341, + dac8800, + dac8043, + ad8522, + ad8804, + ad8842, + ad8804_debug +}; +enum ni_reg_type +{ + ni_reg_normal = 0x0, + ni_reg_611x = 0x1, + ni_reg_6711 = 0x2, + ni_reg_6713 = 0x4, + ni_reg_67xx_mask = 0x6, + ni_reg_6xxx_mask = 0x7, + ni_reg_622x = 0x8, + ni_reg_625x = 0x10, + ni_reg_628x = 0x18, + ni_reg_m_series_mask = 0x18, + ni_reg_6143 = 0x20 +}; + +/* M Series registers offsets */ +#define M_Offset_CDIO_DMA_Select 0x7 /* write */ +#define M_Offset_SCXI_Status 0x7 /* read */ +#define M_Offset_AI_AO_Select 0x9 /* write, same offset as e-series */ +#define M_Offset_SCXI_Serial_Data_In 0x9 /* read */ +#define M_Offset_G0_G1_Select 0xb /* write, same offset as e-series */ +#define M_Offset_Misc_Command 0xf +#define M_Offset_SCXI_Serial_Data_Out 0x11 +#define M_Offset_SCXI_Control 0x13 +#define M_Offset_SCXI_Output_Enable 0x15 +#define M_Offset_AI_FIFO_Data 0x1c +#define M_Offset_Static_Digital_Output 0x24 /* write */ +#define M_Offset_Static_Digital_Input 0x24 /* read */ +#define M_Offset_DIO_Direction 0x28 +#define M_Offset_Cal_PWM 0x40 +#define M_Offset_AI_Config_FIFO_Data 0x5e +#define M_Offset_Interrupt_C_Enable 0x88 /* write */ +#define M_Offset_Interrupt_C_Status 0x88 /* read */ +#define M_Offset_Analog_Trigger_Control 0x8c +#define M_Offset_AO_Serial_Interrupt_Enable 0xa0 +#define M_Offset_AO_Serial_Interrupt_Ack 0xa1 /* write */ +#define M_Offset_AO_Serial_Interrupt_Status 0xa1 /* read */ +#define M_Offset_AO_Calibration 0xa3 +#define M_Offset_AO_FIFO_Data 0xa4 +#define M_Offset_PFI_Filter 0xb0 +#define M_Offset_RTSI_Filter 0xb4 +#define M_Offset_SCXI_Legacy_Compatibility 0xbc +#define M_Offset_Interrupt_A_Ack 0x104 /* write */ +#define M_Offset_AI_Status_1 0x104 /* read */ +#define M_Offset_Interrupt_B_Ack 0x106 /* write */ +#define M_Offset_AO_Status_1 0x106 /* read */ +#define M_Offset_AI_Command_2 0x108 /* write */ +#define M_Offset_G01_Status 0x108 /* read */ +#define M_Offset_AO_Command_2 0x10a +#define M_Offset_AO_Status_2 0x10c /* read */ +#define M_Offset_G0_Command 0x10c /* write */ +#define M_Offset_G1_Command 0x10e /* write */ +#define M_Offset_G0_HW_Save 0x110 +#define M_Offset_G0_HW_Save_High 0x110 +#define M_Offset_AI_Command_1 0x110 +#define M_Offset_G0_HW_Save_Low 0x112 +#define M_Offset_AO_Command_1 0x112 +#define M_Offset_G1_HW_Save 0x114 +#define M_Offset_G1_HW_Save_High 0x114 +#define M_Offset_G1_HW_Save_Low 0x116 +#define M_Offset_AI_Mode_1 0x118 +#define M_Offset_G0_Save 0x118 +#define M_Offset_G0_Save_High 0x118 +#define M_Offset_AI_Mode_2 0x11a +#define M_Offset_G0_Save_Low 0x11a +#define M_Offset_AI_SI_Load_A 0x11c +#define M_Offset_G1_Save 0x11c +#define M_Offset_G1_Save_High 0x11c +#define M_Offset_G1_Save_Low 0x11e +#define M_Offset_AI_SI_Load_B 0x120 /* write */ +#define M_Offset_AO_UI_Save 0x120 /* read */ +#define M_Offset_AI_SC_Load_A 0x124 /* write */ +#define M_Offset_AO_BC_Save 0x124 /* read */ +#define M_Offset_AI_SC_Load_B 0x128 /* write */ +#define M_Offset_AO_UC_Save 0x128 /* read */ +#define M_Offset_AI_SI2_Load_A 0x12c +#define M_Offset_AI_SI2_Load_B 0x130 +#define M_Offset_G0_Mode 0x134 +#define M_Offset_G1_Mode 0x136 /* write */ +#define M_Offset_Joint_Status_1 0x136 /* read */ +#define M_Offset_G0_Load_A 0x138 +#define M_Offset_Joint_Status_2 0x13a +#define M_Offset_G0_Load_B 0x13c +#define M_Offset_G1_Load_A 0x140 +#define M_Offset_G1_Load_B 0x144 
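+/*
+ * Hypothetical predicate showing how the ni_reg_type values above are
+ * meant to be tested: they are overlapping bit groups rather than
+ * plain enumerations, so board-family checks AND against the *_mask
+ * constants instead of comparing for equality.
+ */
+static inline int ni_stc_is_m_series(enum ni_reg_type reg_type)
+{
+	return (reg_type & ni_reg_m_series_mask) != 0;
+}
+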
+#define M_Offset_G0_Input_Select 0x148 +#define M_Offset_G1_Input_Select 0x14a +#define M_Offset_AO_Mode_1 0x14c +#define M_Offset_AO_Mode_2 0x14e +#define M_Offset_AO_UI_Load_A 0x150 +#define M_Offset_AO_UI_Load_B 0x154 +#define M_Offset_AO_BC_Load_A 0x158 +#define M_Offset_AO_BC_Load_B 0x15c +#define M_Offset_AO_UC_Load_A 0x160 +#define M_Offset_AO_UC_Load_B 0x164 +#define M_Offset_Clock_and_FOUT 0x170 +#define M_Offset_IO_Bidirection_Pin 0x172 +#define M_Offset_RTSI_Trig_Direction 0x174 +#define M_Offset_Interrupt_Control 0x176 +#define M_Offset_AI_Output_Control 0x178 +#define M_Offset_Analog_Trigger_Etc 0x17a +#define M_Offset_AI_START_STOP_Select 0x17c +#define M_Offset_AI_Trigger_Select 0x17e +#define M_Offset_AI_SI_Save 0x180 /* read */ +#define M_Offset_AI_DIV_Load_A 0x180 /* write */ +#define M_Offset_AI_SC_Save 0x184 /* read */ +#define M_Offset_AO_Start_Select 0x184 /* write */ +#define M_Offset_AO_Trigger_Select 0x186 +#define M_Offset_AO_Mode_3 0x18c +#define M_Offset_G0_Autoincrement 0x188 +#define M_Offset_G1_Autoincrement 0x18a +#define M_Offset_Joint_Reset 0x190 +#define M_Offset_Interrupt_A_Enable 0x192 +#define M_Offset_Interrupt_B_Enable 0x196 +#define M_Offset_AI_Personal 0x19a +#define M_Offset_AO_Personal 0x19c +#define M_Offset_RTSI_Trig_A_Output 0x19e +#define M_Offset_RTSI_Trig_B_Output 0x1a0 +#define M_Offset_RTSI_Shared_MUX 0x1a2 +#define M_Offset_AO_Output_Control 0x1ac +#define M_Offset_AI_Mode_3 0x1ae +#define M_Offset_Configuration_Memory_Clear 0x1a4 +#define M_Offset_AI_FIFO_Clear 0x1a6 +#define M_Offset_AO_FIFO_Clear 0x1a8 +#define M_Offset_G0_Counting_Mode 0x1b0 +#define M_Offset_G1_Counting_Mode 0x1b2 +#define M_Offset_G0_Second_Gate 0x1b4 +#define M_Offset_G1_Second_Gate 0x1b6 +#define M_Offset_G0_DMA_Config 0x1b8 /* write */ +#define M_Offset_G0_DMA_Status 0x1b8 /* read */ +#define M_Offset_G1_DMA_Config 0x1ba /* write */ +#define M_Offset_G1_DMA_Status 0x1ba /* read */ +#define M_Offset_G0_MSeries_ABZ 0x1c0 +#define M_Offset_G1_MSeries_ABZ 0x1c2 +#define M_Offset_Clock_and_Fout2 0x1c4 +#define M_Offset_PLL_Control 0x1c6 +#define M_Offset_PLL_Status 0x1c8 +#define M_Offset_PFI_Output_Select_1 0x1d0 +#define M_Offset_PFI_Output_Select_2 0x1d2 +#define M_Offset_PFI_Output_Select_3 0x1d4 +#define M_Offset_PFI_Output_Select_4 0x1d6 +#define M_Offset_PFI_Output_Select_5 0x1d8 +#define M_Offset_PFI_Output_Select_6 0x1da +#define M_Offset_PFI_DI 0x1dc +#define M_Offset_PFI_DO 0x1de +#define M_Offset_AI_Config_FIFO_Bypass 0x218 +#define M_Offset_SCXI_DIO_Enable 0x21c +#define M_Offset_CDI_FIFO_Data 0x220 /* read */ +#define M_Offset_CDO_FIFO_Data 0x220 /* write */ +#define M_Offset_CDIO_Status 0x224 /* read */ +#define M_Offset_CDIO_Command 0x224 /* write */ +#define M_Offset_CDI_Mode 0x228 +#define M_Offset_CDO_Mode 0x22c +#define M_Offset_CDI_Mask_Enable 0x230 +#define M_Offset_CDO_Mask_Enable 0x234 +#define M_Offset_AO_Waveform_Order(x) (0xc2 + 0x4 * x) +#define M_Offset_AO_Config_Bank(x) (0xc3 + 0x4 * x) +#define M_Offset_DAC_Direct_Data(x) (0xc0 + 0x4 * x) +#define M_Offset_Gen_PWM(x) (0x44 + 0x2 * x) + +static inline int M_Offset_Static_AI_Control(int i) +{ + int offset[] = + { + 0x64, + 0x261, + 0x262, + 0x263, + }; + if(((unsigned)i) >= sizeof(offset) / sizeof(offset[0])) + { + rtdm_printk("%s: invalid channel=%i\n", __FUNCTION__, i); + return offset[0]; + } + return offset[i]; +}; +static inline int M_Offset_AO_Reference_Attenuation(int channel) +{ + int offset[] = + { + 0x264, + 0x265, + 0x266, + 0x267 + }; + if(((unsigned)channel) >= 
sizeof(offset) / sizeof(offset[0])) + { + rtdm_printk("%s: invalid channel=%i\n", __FUNCTION__, channel); + return offset[0]; + } + return offset[channel]; +}; +static inline unsigned M_Offset_PFI_Output_Select(unsigned n) +{ + if(n < 1 || n > NUM_PFI_OUTPUT_SELECT_REGS) + { + rtdm_printk("%s: invalid pfi output select register=%i\n", __FUNCTION__, n); + return M_Offset_PFI_Output_Select_1; + } + return M_Offset_PFI_Output_Select_1 + (n - 1) * 2; +} + +#define MSeries_AI_Config_Channel_Type_Mask (0x7 << 6) +#define MSeries_AI_Config_Channel_Type_Calibration_Bits 0x0 +#define MSeries_AI_Config_Channel_Type_Differential_Bits (0x1 << 6) +#define MSeries_AI_Config_Channel_Type_Common_Ref_Bits (0x2 << 6) +#define MSeries_AI_Config_Channel_Type_Ground_Ref_Bits (0x3 << 6) +#define MSeries_AI_Config_Channel_Type_Aux_Bits (0x5 << 6) +#define MSeries_AI_Config_Channel_Type_Ghost_Bits (0x7 << 6) +#define MSeries_AI_Config_Polarity_Bit 0x1000 /* 0 for 2's complement encoding */ +#define MSeries_AI_Config_Dither_Bit 0x2000 +#define MSeries_AI_Config_Last_Channel_Bit 0x4000 +#define MSeries_AI_Config_Channel_Bits(x) (x & 0xf) +#define MSeries_AI_Config_Gain_Bits(x) ((x & 0x7) << 9) + +static inline +unsigned int MSeries_AI_Config_Bank_Bits(unsigned int reg_type, + unsigned int channel) +{ + unsigned int bits = channel & 0x30; + if (reg_type == ni_reg_622x) { + if (channel & 0x40) + bits |= 0x400; + } + return bits; +} + +#define MSeries_PLL_In_Source_Select_RTSI0_Bits 0xb +#define MSeries_PLL_In_Source_Select_Star_Trigger_Bits 0x14 +#define MSeries_PLL_In_Source_Select_RTSI7_Bits 0x1b +#define MSeries_PLL_In_Source_Select_PXI_Clock10 0x1d +#define MSeries_PLL_In_Source_Select_Mask 0x1f +#define MSeries_Timebase1_Select_Bit 0x20 /* use PLL for timebase 1 */ +#define MSeries_Timebase3_Select_Bit 0x40 /* use PLL for timebase 3 */ +/* Use 10MHz instead of 20MHz for RTSI clock frequency. 
Appears + to have no effect, at least on pxi-6281, which always uses + 20MHz rtsi clock frequency */ +#define MSeries_RTSI_10MHz_Bit 0x80 + +static inline +unsigned int MSeries_PLL_In_Source_Select_RTSI_Bits(unsigned int RTSI_channel) +{ + if(RTSI_channel > 7) + { + rtdm_printk("%s: bug, invalid RTSI_channel=%i\n", __FUNCTION__, RTSI_channel); + return 0; + } + if(RTSI_channel == 7) return MSeries_PLL_In_Source_Select_RTSI7_Bits; + else return MSeries_PLL_In_Source_Select_RTSI0_Bits + RTSI_channel; +} + +#define MSeries_PLL_Enable_Bit 0x1000 +#define MSeries_PLL_VCO_Mode_200_325MHz_Bits 0x0 +#define MSeries_PLL_VCO_Mode_175_225MHz_Bits 0x2000 +#define MSeries_PLL_VCO_Mode_100_225MHz_Bits 0x4000 +#define MSeries_PLL_VCO_Mode_75_150MHz_Bits 0x6000 + +static inline +unsigned int MSeries_PLL_Divisor_Bits(unsigned int divisor) +{ + static const unsigned int max_divisor = 0x10; + if(divisor < 1 || divisor > max_divisor) + { + rtdm_printk("%s: bug, invalid divisor=%i\n", __FUNCTION__, divisor); + return 0; + } + return (divisor & 0xf) << 8; +} +static inline +unsigned int MSeries_PLL_Multiplier_Bits(unsigned int multiplier) +{ + static const unsigned int max_multiplier = 0x100; + if(multiplier < 1 || multiplier > max_multiplier) + { + rtdm_printk("%s: bug, invalid multiplier=%i\n", __FUNCTION__, multiplier); + return 0; + } + return multiplier & 0xff; +} + +#define MSeries_PLL_Locked_Bit 0x1 + +#define MSeries_AI_Bypass_Channel_Mask 0x7 +#define MSeries_AI_Bypass_Bank_Mask 0x78 +#define MSeries_AI_Bypass_Cal_Sel_Pos_Mask 0x380 +#define MSeries_AI_Bypass_Cal_Sel_Neg_Mask 0x1c00 +#define MSeries_AI_Bypass_Mode_Mux_Mask 0x6000 +#define MSeries_AO_Bypass_AO_Cal_Sel_Mask 0x38000 +#define MSeries_AI_Bypass_Gain_Mask 0x1c0000 +#define MSeries_AI_Bypass_Dither_Bit 0x200000 +#define MSeries_AI_Bypass_Polarity_Bit 0x400000 /* 0 for 2's complement encoding */ +#define MSeries_AI_Bypass_Config_FIFO_Bit 0x80000000 +#define MSeries_AI_Bypass_Cal_Sel_Pos_Bits(x) ((x << 7) & \ + MSeries_AI_Bypass_Cal_Sel_Pos_Mask) +#define MSeries_AI_Bypass_Cal_Sel_Neg_Bits(x) ((x << 10) & \ + MSeries_AI_Bypass_Cal_Sel_Pos_Mask) +#define MSeries_AI_Bypass_Gain_Bits(x) ((x << 18) & \ + MSeries_AI_Bypass_Gain_Mask) + +#define MSeries_AO_DAC_Offset_Select_Mask 0x7 +#define MSeries_AO_DAC_Offset_0V_Bits 0x0 +#define MSeries_AO_DAC_Offset_5V_Bits 0x1 +#define MSeries_AO_DAC_Reference_Mask 0x38 +#define MSeries_AO_DAC_Reference_10V_Internal_Bits 0x0 +#define MSeries_AO_DAC_Reference_5V_Internal_Bits 0x8 +#define MSeries_AO_Update_Timed_Bit 0x40 +#define MSeries_AO_Bipolar_Bit 0x80 /* turns on 2's complement encoding */ + +#define MSeries_Attenuate_x5_Bit 0x1 + +#define MSeries_Cal_PWM_High_Time_Bits(x) ((x << 16) & 0xffff0000) +#define MSeries_Cal_PWM_Low_Time_Bits(x) (x & 0xffff) + +#define MSeries_PFI_Output_Select_Mask(x) (0x1f << (x % 3) * 5) +#define MSeries_PFI_Output_Select_Bits(x, y) ((y & 0x1f) << ((x % 3) * 5)) +// inverse to MSeries_PFI_Output_Select_Bits +#define MSeries_PFI_Output_Select_Source(x, y) ((y >> ((x % 3) * 5)) & 0x1f) + +#define Gi_DMA_BankSW_Error_Bit 0x10 +#define Gi_DMA_Reset_Bit 0x8 +#define Gi_DMA_Int_Enable_Bit 0x4 +#define Gi_DMA_Write_Bit 0x2 +#define Gi_DMA_Enable_Bit 0x1 + +#define MSeries_PFI_Filter_Select_Mask(x) (0x3 << (x * 2)) +#define MSeries_PFI_Filter_Select_Bits(x, y) ((y << (x * 2)) & \ + MSeries_PFI_Filter_Select_Mask(x)) + +/* CDIO DMA select bits */ +#define CDI_DMA_Select_Shift 0 +#define CDI_DMA_Select_Mask 0xf +#define CDO_DMA_Select_Shift 4 +#define CDO_DMA_Select_Mask 0xf << 
CDO_DMA_Select_Shift + +/* CDIO status bits */ +#define CDO_FIFO_Empty_Bit 0x1 +#define CDO_FIFO_Full_Bit 0x2 +#define CDO_FIFO_Request_Bit 0x4 +#define CDO_Overrun_Bit 0x8 +#define CDO_Underflow_Bit 0x10 +#define CDI_FIFO_Empty_Bit 0x10000 +#define CDI_FIFO_Full_Bit 0x20000 +#define CDI_FIFO_Request_Bit 0x40000 +#define CDI_Overrun_Bit 0x80000 +#define CDI_Overflow_Bit 0x100000 + +/* CDIO command bits */ +#define CDO_Disarm_Bit 0x1 +#define CDO_Arm_Bit 0x2 +#define CDI_Disarm_Bit 0x4 +#define CDI_Arm_Bit 0x8 +#define CDO_Reset_Bit 0x10 +#define CDI_Reset_Bit 0x20 +#define CDO_Error_Interrupt_Enable_Set_Bit 0x40 +#define CDO_Error_Interrupt_Enable_Clear_Bit 0x80 +#define CDI_Error_Interrupt_Enable_Set_Bit 0x100 +#define CDI_Error_Interrupt_Enable_Clear_Bit 0x200 +#define CDO_FIFO_Request_Interrupt_Enable_Set_Bit 0x400 +#define CDO_FIFO_Request_Interrupt_Enable_Clear_Bit 0x800 +#define CDI_FIFO_Request_Interrupt_Enable_Set_Bit 0x1000 +#define CDI_FIFO_Request_Interrupt_Enable_Clear_Bit 0x2000 +#define CDO_Error_Interrupt_Confirm_Bit 0x4000 +#define CDI_Error_Interrupt_Confirm_Bit 0x8000 +#define CDO_Empty_FIFO_Interrupt_Enable_Set_Bit 0x10000 +#define CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit 0x20000 +#define CDO_SW_Update_Bit 0x80000 +#define CDI_SW_Update_Bit 0x100000 + +/* CDIO mode bits */ +#define CDI_Sample_Source_Select_Mask 0x3f +#define CDI_Halt_On_Error_Bit 0x200 +/* sample clock on falling edge */ +#define CDI_Polarity_Bit 0x400 +/* set for half full mode, clear for not empty mode */ +#define CDI_FIFO_Mode_Bit 0x800 +/* data lanes specify which dio channels map to byte or word accesses + to the dio fifos */ +#define CDI_Data_Lane_Mask 0x3000 +#define CDI_Data_Lane_0_15_Bits 0x0 +#define CDI_Data_Lane_16_31_Bits 0x1000 +#define CDI_Data_Lane_0_7_Bits 0x0 +#define CDI_Data_Lane_8_15_Bits 0x1000 +#define CDI_Data_Lane_16_23_Bits 0x2000 +#define CDI_Data_Lane_24_31_Bits 0x3000 + +/* CDO mode bits */ +#define CDO_Sample_Source_Select_Mask 0x3f +#define CDO_Retransmit_Bit 0x100 +#define CDO_Halt_On_Error_Bit 0x200 +/* sample clock on falling edge */ +#define CDO_Polarity_Bit 0x400 +/* set for half full mode, clear for not full mode */ +#define CDO_FIFO_Mode_Bit 0x800 +/* data lanes specify which dio channels map to byte or word accesses + to the dio fifos */ +#define CDO_Data_Lane_Mask 0x3000 +#define CDO_Data_Lane_0_15_Bits 0x0 +#define CDO_Data_Lane_16_31_Bits 0x1000 +#define CDO_Data_Lane_0_7_Bits 0x0 +#define CDO_Data_Lane_8_15_Bits 0x1000 +#define CDO_Data_Lane_16_23_Bits 0x2000 +#define CDO_Data_Lane_24_31_Bits 0x3000 + +/* Interrupt C bits */ +#define Interrupt_Group_C_Enable_Bit 0x1 +#define Interrupt_Group_C_Status_Bit 0x1 + +#define M_SERIES_EEPROM_SIZE 1024 + +typedef struct ni_board_struct{ + unsigned short device_id; + int isapnp_id; + char *name; + + int n_adchan; + int adbits; + + int ai_fifo_depth; + unsigned int alwaysdither : 1; + int gainlkup; + int ai_speed; + + int n_aochan; + int aobits; + struct a4l_rngdesc *ao_range_table; + int ao_fifo_depth; + + unsigned ao_speed; + + unsigned num_p0_dio_channels; + + int reg_type; + unsigned int ao_unipolar : 1; + unsigned int has_8255 : 1; + unsigned int has_analog_trig : 1; + + enum caldac_enum caldac[3]; +} ni_board; + +#define n_ni_boards (sizeof(ni_boards)/sizeof(ni_board)) + +#define MAX_N_CALDACS 34 +#define MAX_N_AO_CHAN 8 +#define NUM_GPCT 2 + +#define NI_PRIVATE_COMMON \ + uint16_t (*stc_readw)(struct a4l_device *dev, int register); \ + uint32_t (*stc_readl)(struct a4l_device *dev, int register); \ + void 
(*stc_writew)(struct a4l_device *dev, uint16_t value, int register); \ + void (*stc_writel)(struct a4l_device *dev, uint32_t value, int register); \ + \ + int dio_state; \ + int pfi_state; \ + int io_bits; \ + unsigned short dio_output; \ + unsigned short dio_control; \ + int ao0p,ao1p; \ + int lastchan; \ + int last_do; \ + int rt_irq; \ + int irq_polarity; \ + int irq_pin; \ + int aimode; \ + int ai_continuous; \ + int blocksize; \ + int n_left; \ + unsigned int ai_calib_source; \ + unsigned int ai_calib_source_enabled; \ + rtdm_lock_t window_lock; \ + rtdm_lock_t soft_reg_copy_lock; \ + rtdm_lock_t mite_channel_lock; \ + \ + int changain_state; \ + unsigned int changain_spec; \ + \ + unsigned int caldac_maxdata_list[MAX_N_CALDACS]; \ + unsigned short ao[MAX_N_AO_CHAN]; \ + unsigned short caldacs[MAX_N_CALDACS]; \ + \ + unsigned short ai_cmd2; \ + \ + unsigned short ao_conf[MAX_N_AO_CHAN]; \ + unsigned short ao_mode1; \ + unsigned short ao_mode2; \ + unsigned short ao_mode3; \ + unsigned short ao_cmd1; \ + unsigned short ao_cmd2; \ + unsigned short ao_cmd3; \ + unsigned short ao_trigger_select; \ + \ + struct ni_gpct_device *counter_dev; \ + unsigned short an_trig_etc_reg; \ + \ + unsigned ai_offset[512]; \ + \ + unsigned long serial_interval_ns; \ + unsigned char serial_hw_mode; \ + unsigned short clock_and_fout; \ + unsigned short clock_and_fout2; \ + \ + unsigned short int_a_enable_reg; \ + unsigned short int_b_enable_reg; \ + unsigned short io_bidirection_pin_reg; \ + unsigned short rtsi_trig_direction_reg; \ + unsigned short rtsi_trig_a_output_reg; \ + unsigned short rtsi_trig_b_output_reg; \ + unsigned short pfi_output_select_reg[NUM_PFI_OUTPUT_SELECT_REGS]; \ + unsigned short ai_ao_select_reg; \ + unsigned short g0_g1_select_reg; \ + unsigned short cdio_dma_select_reg; \ + \ + unsigned clock_ns; \ + unsigned clock_source; \ + \ + unsigned short atrig_mode; \ + unsigned short atrig_high; \ + unsigned short atrig_low; \ + \ + unsigned short pwm_up_count; \ + unsigned short pwm_down_count; \ + \ + sampl_t ai_fifo_buffer[0x2000]; \ + uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE]; \ + \ + struct mite_struct *mite; \ + struct mite_channel *ai_mite_chan; \ + struct mite_channel *ao_mite_chan;\ + struct mite_channel *cdo_mite_chan;\ + struct mite_dma_descriptor_ring *ai_mite_ring; \ + struct mite_dma_descriptor_ring *ao_mite_ring; \ + struct mite_dma_descriptor_ring *cdo_mite_ring; \ + struct mite_dma_descriptor_ring *gpct_mite_ring[NUM_GPCT]; \ + subd_8255_t subd_8255 + + +typedef struct { + ni_board *board_ptr; + NI_PRIVATE_COMMON; +} ni_private; + +#define devpriv ((ni_private *)dev->priv) +#define boardtype (*(ni_board *)devpriv->board_ptr) + +/* How we access registers */ + +#define ni_writel(a,b) (writel((a), devpriv->mite->daq_io_addr + (b))) +#define ni_readl(a) (readl(devpriv->mite->daq_io_addr + (a))) +#define ni_writew(a,b) (writew((a), devpriv->mite->daq_io_addr + (b))) +#define ni_readw(a) (readw(devpriv->mite->daq_io_addr + (a))) +#define ni_writeb(a,b) (writeb((a), devpriv->mite->daq_io_addr + (b))) +#define ni_readb(a) (readb(devpriv->mite->daq_io_addr + (a))) + +/* INSN_CONFIG_SET_CLOCK_SRC argument for NI cards */ +#define NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC 0 /* 10 MHz */ +#define NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC 1 /* 100 KHz */ + +#endif /* _ANALOGY_NI_STC_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h new file mode 100644 index 0000000..deef652 
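/*
 * Illustrative sketch: the devpriv and boardtype macros above assume a
 * struct a4l_device pointer named dev in scope, a convention every
 * function in these drivers follows. All register traffic then funnels
 * through the MITE mapping; e.g. ni_writew(value, reg) expands to
 * writew(value, devpriv->mite->daq_io_addr + reg).
 */
static void window_write_sketch(struct a4l_device *dev,
				uint16_t value, int reg)
{
	ni_writew(value, reg); /* devpriv hides the (ni_private *)dev->priv cast */
}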
--- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h @@ -0,0 +1,1192 @@ +/* + * Hardware driver for NI general purpose counter + * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This code is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef __ANALOGY_NI_TIO_H__ +#define __ANALOGY_NI_TIO_H__ + +#include <rtdm/analogy/device.h> + +#ifdef CONFIG_PCI +#include "mite.h" +#endif + +enum ni_gpct_register { + NITIO_G0_Autoincrement_Reg, + NITIO_G1_Autoincrement_Reg, + NITIO_G2_Autoincrement_Reg, + NITIO_G3_Autoincrement_Reg, + NITIO_G0_Command_Reg, + NITIO_G1_Command_Reg, + NITIO_G2_Command_Reg, + NITIO_G3_Command_Reg, + NITIO_G0_HW_Save_Reg, + NITIO_G1_HW_Save_Reg, + NITIO_G2_HW_Save_Reg, + NITIO_G3_HW_Save_Reg, + NITIO_G0_SW_Save_Reg, + NITIO_G1_SW_Save_Reg, + NITIO_G2_SW_Save_Reg, + NITIO_G3_SW_Save_Reg, + NITIO_G0_Mode_Reg, + NITIO_G1_Mode_Reg, + NITIO_G2_Mode_Reg, + NITIO_G3_Mode_Reg, + NITIO_G0_LoadA_Reg, + NITIO_G1_LoadA_Reg, + NITIO_G2_LoadA_Reg, + NITIO_G3_LoadA_Reg, + NITIO_G0_LoadB_Reg, + NITIO_G1_LoadB_Reg, + NITIO_G2_LoadB_Reg, + NITIO_G3_LoadB_Reg, + NITIO_G0_Input_Select_Reg, + NITIO_G1_Input_Select_Reg, + NITIO_G2_Input_Select_Reg, + NITIO_G3_Input_Select_Reg, + NITIO_G0_Counting_Mode_Reg, + NITIO_G1_Counting_Mode_Reg, + NITIO_G2_Counting_Mode_Reg, + NITIO_G3_Counting_Mode_Reg, + NITIO_G0_Second_Gate_Reg, + NITIO_G1_Second_Gate_Reg, + NITIO_G2_Second_Gate_Reg, + NITIO_G3_Second_Gate_Reg, + NITIO_G01_Status_Reg, + NITIO_G23_Status_Reg, + NITIO_G01_Joint_Reset_Reg, + NITIO_G23_Joint_Reset_Reg, + NITIO_G01_Joint_Status1_Reg, + NITIO_G23_Joint_Status1_Reg, + NITIO_G01_Joint_Status2_Reg, + NITIO_G23_Joint_Status2_Reg, + NITIO_G0_DMA_Config_Reg, + NITIO_G1_DMA_Config_Reg, + NITIO_G2_DMA_Config_Reg, + NITIO_G3_DMA_Config_Reg, + NITIO_G0_DMA_Status_Reg, + NITIO_G1_DMA_Status_Reg, + NITIO_G2_DMA_Status_Reg, + NITIO_G3_DMA_Status_Reg, + NITIO_G0_ABZ_Reg, + NITIO_G1_ABZ_Reg, + NITIO_G0_Interrupt_Acknowledge_Reg, + NITIO_G1_Interrupt_Acknowledge_Reg, + NITIO_G2_Interrupt_Acknowledge_Reg, + NITIO_G3_Interrupt_Acknowledge_Reg, + NITIO_G0_Status_Reg, + NITIO_G1_Status_Reg, + NITIO_G2_Status_Reg, + NITIO_G3_Status_Reg, + NITIO_G0_Interrupt_Enable_Reg, + NITIO_G1_Interrupt_Enable_Reg, + NITIO_G2_Interrupt_Enable_Reg, + NITIO_G3_Interrupt_Enable_Reg, + NITIO_Num_Registers, +}; + +static inline enum ni_gpct_register NITIO_Gi_Autoincrement_Reg(unsigned + counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_Autoincrement_Reg; + break; + case 1: + return NITIO_G1_Autoincrement_Reg; + break; + case 2: + return NITIO_G2_Autoincrement_Reg; + break; + case 3: + return NITIO_G3_Autoincrement_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_Command_Reg(unsigned counter_index) +{ + switch (counter_index) { + case 
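/*
 * Illustrative note: the enum above lays out the G0..G3 instances of
 * each per-counter register consecutively, so the switch-based
 * accessors in this header are bounds-checked offset adds in disguise.
 * An equivalent sketch (not the header's own spelling), shown only to
 * make the layout invariant explicit:
 */
static inline enum ni_gpct_register
per_counter_reg_sketch(enum ni_gpct_register g0_reg,
		       unsigned int counter_index)
{
	BUG_ON(counter_index > 3);
	return g0_reg + counter_index; /* relies on the consecutive G0..G3 layout */
}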
0: + return NITIO_G0_Command_Reg; + break; + case 1: + return NITIO_G1_Command_Reg; + break; + case 2: + return NITIO_G2_Command_Reg; + break; + case 3: + return NITIO_G3_Command_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_Counting_Mode_Reg(unsigned + counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_Counting_Mode_Reg; + break; + case 1: + return NITIO_G1_Counting_Mode_Reg; + break; + case 2: + return NITIO_G2_Counting_Mode_Reg; + break; + case 3: + return NITIO_G3_Counting_Mode_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_Input_Select_Reg(unsigned + counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_Input_Select_Reg; + break; + case 1: + return NITIO_G1_Input_Select_Reg; + break; + case 2: + return NITIO_G2_Input_Select_Reg; + break; + case 3: + return NITIO_G3_Input_Select_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gxx_Joint_Reset_Reg(unsigned + counter_index) +{ + switch (counter_index) { + case 0: + case 1: + return NITIO_G01_Joint_Reset_Reg; + break; + case 2: + case 3: + return NITIO_G23_Joint_Reset_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gxx_Joint_Status1_Reg(unsigned + counter_index) +{ + switch (counter_index) { + case 0: + case 1: + return NITIO_G01_Joint_Status1_Reg; + break; + case 2: + case 3: + return NITIO_G23_Joint_Status1_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gxx_Joint_Status2_Reg(unsigned + counter_index) +{ + switch (counter_index) { + case 0: + case 1: + return NITIO_G01_Joint_Status2_Reg; + break; + case 2: + case 3: + return NITIO_G23_Joint_Status2_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gxx_Status_Reg(unsigned counter_index) +{ + switch (counter_index) { + case 0: + case 1: + return NITIO_G01_Status_Reg; + break; + case 2: + case 3: + return NITIO_G23_Status_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_LoadA_Reg(unsigned counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_LoadA_Reg; + break; + case 1: + return NITIO_G1_LoadA_Reg; + break; + case 2: + return NITIO_G2_LoadA_Reg; + break; + case 3: + return NITIO_G3_LoadA_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_LoadB_Reg(unsigned counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_LoadB_Reg; + break; + case 1: + return NITIO_G1_LoadB_Reg; + break; + case 2: + return NITIO_G2_LoadB_Reg; + break; + case 3: + return NITIO_G3_LoadB_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_Mode_Reg(unsigned counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_Mode_Reg; + break; + case 1: + return NITIO_G1_Mode_Reg; + break; + case 2: + return NITIO_G2_Mode_Reg; + break; + case 3: + return NITIO_G3_Mode_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_SW_Save_Reg(int counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_SW_Save_Reg; + break; + case 1: + return NITIO_G1_SW_Save_Reg; + break; + case 2: + return NITIO_G2_SW_Save_Reg; + break; + case 3: + return 
NITIO_G3_SW_Save_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_Second_Gate_Reg(int counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_Second_Gate_Reg; + break; + case 1: + return NITIO_G1_Second_Gate_Reg; + break; + case 2: + return NITIO_G2_Second_Gate_Reg; + break; + case 3: + return NITIO_G3_Second_Gate_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_DMA_Config_Reg(int counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_DMA_Config_Reg; + break; + case 1: + return NITIO_G1_DMA_Config_Reg; + break; + case 2: + return NITIO_G2_DMA_Config_Reg; + break; + case 3: + return NITIO_G3_DMA_Config_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_DMA_Status_Reg(int counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_DMA_Status_Reg; + break; + case 1: + return NITIO_G1_DMA_Status_Reg; + break; + case 2: + return NITIO_G2_DMA_Status_Reg; + break; + case 3: + return NITIO_G3_DMA_Status_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_ABZ_Reg(int counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_ABZ_Reg; + break; + case 1: + return NITIO_G1_ABZ_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_Interrupt_Acknowledge_Reg(int + counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_Interrupt_Acknowledge_Reg; + break; + case 1: + return NITIO_G1_Interrupt_Acknowledge_Reg; + break; + case 2: + return NITIO_G2_Interrupt_Acknowledge_Reg; + break; + case 3: + return NITIO_G3_Interrupt_Acknowledge_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_Status_Reg(int counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_Status_Reg; + break; + case 1: + return NITIO_G1_Status_Reg; + break; + case 2: + return NITIO_G2_Status_Reg; + break; + case 3: + return NITIO_G3_Status_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline enum ni_gpct_register NITIO_Gi_Interrupt_Enable_Reg(int + counter_index) +{ + switch (counter_index) { + case 0: + return NITIO_G0_Interrupt_Enable_Reg; + break; + case 1: + return NITIO_G1_Interrupt_Enable_Reg; + break; + case 2: + return NITIO_G2_Interrupt_Enable_Reg; + break; + case 3: + return NITIO_G3_Interrupt_Enable_Reg; + break; + default: + BUG(); + break; + } + return 0; +} + +enum ni_gpct_variant { + ni_gpct_variant_e_series, + ni_gpct_variant_m_series, + ni_gpct_variant_660x +}; + +struct ni_gpct { + struct ni_gpct_device *counter_dev; + unsigned counter_index; + unsigned chip_index; + uint64_t clock_period_ps; /* clock period in picoseconds */ + struct mite_channel *mite_chan; + rtdm_lock_t lock; +}; + +struct ni_gpct_device { + struct a4l_device *dev; + void (*write_register)(struct ni_gpct * counter, + unsigned int bits, enum ni_gpct_register reg); + unsigned (*read_register)(struct ni_gpct * counter, + enum ni_gpct_register reg); + enum ni_gpct_variant variant; + struct ni_gpct **counters; + unsigned num_counters; + unsigned regs[NITIO_Num_Registers]; + rtdm_lock_t regs_lock; +}; + +#define Gi_Auto_Increment_Mask 0xff +#define Gi_Up_Down_Shift 5 + +#define Gi_Arm_Bit 0x1 +#define Gi_Save_Trace_Bit 0x2 +#define Gi_Load_Bit 0x4 +#define Gi_Disarm_Bit 0x10 +#define 
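/*
 * Illustrative sketch: struct ni_gpct_device's write_register and
 * read_register hooks are the indirection that lets one TIO core serve
 * e-series, m-series and 660x boards, whose counters sit behind
 * different bus glue. A typical call site, arming a counter with
 * Gi_Arm_Bit from just above:
 */
static void arm_counter_sketch(struct ni_gpct *counter)
{
	counter->counter_dev->write_register(counter, Gi_Arm_Bit,
		NITIO_Gi_Command_Reg(counter->counter_index));
}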
Gi_Up_Down_Mask (0x3 << Gi_Up_Down_Shift) +#define Gi_Always_Down_Bits (0x0 << Gi_Up_Down_Shift) +#define Gi_Always_Up_Bits (0x1 << Gi_Up_Down_Shift) +#define Gi_Up_Down_Hardware_IO_Bits (0x2 << Gi_Up_Down_Shift) +#define Gi_Up_Down_Hardware_Gate_Bits (0x3 << Gi_Up_Down_Shift) +#define Gi_Write_Switch_Bit 0x80 +#define Gi_Synchronize_Gate_Bit 0x100 +#define Gi_Little_Big_Endian_Bit 0x200 +#define Gi_Bank_Switch_Start_Bit 0x400 +#define Gi_Bank_Switch_Mode_Bit 0x800 +#define Gi_Bank_Switch_Enable_Bit 0x1000 +#define Gi_Arm_Copy_Bit 0x2000 +#define Gi_Save_Trace_Copy_Bit 0x4000 +#define Gi_Disarm_Copy_Bit 0x8000 + +#define Gi_Index_Phase_Bitshift 5 +#define Gi_HW_Arm_Select_Shift 8 + +#define Gi_Counting_Mode_Mask 0x7 +#define Gi_Counting_Mode_Normal_Bits 0x0 +#define Gi_Counting_Mode_QuadratureX1_Bits 0x1 +#define Gi_Counting_Mode_QuadratureX2_Bits 0x2 +#define Gi_Counting_Mode_QuadratureX4_Bits 0x3 +#define Gi_Counting_Mode_Two_Pulse_Bits 0x4 +#define Gi_Counting_Mode_Sync_Source_Bits 0x6 +#define Gi_Index_Mode_Bit 0x10 +#define Gi_Index_Phase_Mask (0x3 << Gi_Index_Phase_Bitshift) +#define Gi_Index_Phase_LowA_LowB (0x0 << Gi_Index_Phase_Bitshift) +#define Gi_Index_Phase_LowA_HighB (0x1 << Gi_Index_Phase_Bitshift) +#define Gi_Index_Phase_HighA_LowB (0x2 << Gi_Index_Phase_Bitshift) +#define Gi_Index_Phase_HighA_HighB (0x3 << Gi_Index_Phase_Bitshift) + +/* From m-series example code, + not documented in 660x register level manual */ +#define Gi_HW_Arm_Enable_Bit 0x80 +/* From m-series example code, + not documented in 660x register level manual */ +#define Gi_660x_HW_Arm_Select_Mask (0x7 << Gi_HW_Arm_Select_Shift) +#define Gi_660x_Prescale_X8_Bit 0x1000 +#define Gi_M_Series_Prescale_X8_Bit 0x2000 +#define Gi_M_Series_HW_Arm_Select_Mask (0x1f << Gi_HW_Arm_Select_Shift) +/* Must be set for clocks over 40MHz, + which includes synchronous counting and quadrature modes */ +#define Gi_660x_Alternate_Sync_Bit 0x2000 +#define Gi_M_Series_Alternate_Sync_Bit 0x4000 +/* From m-series example code, + not documented in 660x register level manual */ +#define Gi_660x_Prescale_X2_Bit 0x4000 +#define Gi_M_Series_Prescale_X2_Bit 0x8000 + +static inline unsigned int Gi_Alternate_Sync_Bit(enum ni_gpct_variant variant) +{ + switch (variant) { + case ni_gpct_variant_e_series: + return 0; + break; + case ni_gpct_variant_m_series: + return Gi_M_Series_Alternate_Sync_Bit; + break; + case ni_gpct_variant_660x: + return Gi_660x_Alternate_Sync_Bit; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline unsigned int Gi_Prescale_X2_Bit(enum ni_gpct_variant variant) +{ + switch (variant) { + case ni_gpct_variant_e_series: + return 0; + break; + case ni_gpct_variant_m_series: + return Gi_M_Series_Prescale_X2_Bit; + break; + case ni_gpct_variant_660x: + return Gi_660x_Prescale_X2_Bit; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline unsigned int Gi_Prescale_X8_Bit(enum ni_gpct_variant variant) +{ + switch (variant) { + case ni_gpct_variant_e_series: + return 0; + break; + case ni_gpct_variant_m_series: + return Gi_M_Series_Prescale_X8_Bit; + break; + case ni_gpct_variant_660x: + return Gi_660x_Prescale_X8_Bit; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline unsigned int Gi_HW_Arm_Select_Mask(enum ni_gpct_variant variant) +{ + switch (variant) { + case ni_gpct_variant_e_series: + return 0; + break; + case ni_gpct_variant_m_series: + return Gi_M_Series_HW_Arm_Select_Mask; + break; + case ni_gpct_variant_660x: + return Gi_660x_HW_Arm_Select_Mask; + break; + 
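/*
 * Illustrative sketch: the variant helpers above fold the e/m/660x
 * differences into one call site; on e-series they return 0, so OR-ing
 * their result is a harmless no-op. E.g. requesting the x8 source
 * prescaler and the over-40MHz alternate-sync behaviour variant-blind:
 */
static unsigned int counting_mode_sketch(struct ni_gpct *counter,
					 unsigned int mode_bits)
{
	enum ni_gpct_variant v = counter->counter_dev->variant;

	return mode_bits | Gi_Prescale_X8_Bit(v) | Gi_Alternate_Sync_Bit(v);
}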
default: + BUG(); + break; + } + return 0; +} + +#define NI_660x_Timebase_1_Clock 0x0 /* 20MHz */ +#define NI_660x_Source_Pin_i_Clock 0x1 +#define NI_660x_Next_Gate_Clock 0xa +#define NI_660x_Timebase_2_Clock 0x12 /* 100KHz */ +#define NI_660x_Next_TC_Clock 0x13 +#define NI_660x_Timebase_3_Clock 0x1e /* 80MHz */ +#define NI_660x_Logic_Low_Clock 0x1f + +#define ni_660x_max_rtsi_channel 6 +#define ni_660x_max_source_pin 7 + +static inline unsigned int NI_660x_RTSI_Clock(unsigned int n) +{ + BUG_ON(n > ni_660x_max_rtsi_channel); + return (0xb + n); +} + +static inline unsigned int NI_660x_Source_Pin_Clock(unsigned int n) +{ + BUG_ON(n > ni_660x_max_source_pin); + return (0x2 + n); +} + +/* Clock sources for ni e and m series boards, + get bits with Gi_Source_Select_Bits() */ +#define NI_M_Series_Timebase_1_Clock 0x0 /* 20MHz */ +#define NI_M_Series_Timebase_2_Clock 0x12 /* 100KHz */ +#define NI_M_Series_Next_TC_Clock 0x13 +#define NI_M_Series_Next_Gate_Clock 0x14 /* when Gi_Src_SubSelect = 0 */ +#define NI_M_Series_PXI_Star_Trigger_Clock 0x14 /* when Gi_Src_SubSelect = 1 */ +#define NI_M_Series_PXI10_Clock 0x1d +#define NI_M_Series_Timebase_3_Clock 0x1e /* 80MHz, when Gi_Src_SubSelect = 0 */ +#define NI_M_Series_Analog_Trigger_Out_Clock 0x1e /* when Gi_Src_SubSelect = 1 */ +#define NI_M_Series_Logic_Low_Clock 0x1f + +#define ni_m_series_max_pfi_channel 15 +#define ni_m_series_max_rtsi_channel 7 + +static inline unsigned int NI_M_Series_PFI_Clock(unsigned int n) +{ + BUG_ON(n > ni_m_series_max_pfi_channel); + if (n < 10) + return 1 + n; + else + return 0xb + n; +} + +static inline unsigned int NI_M_Series_RTSI_Clock(unsigned int n) +{ + BUG_ON(n > ni_m_series_max_rtsi_channel); + if (n == 7) + return 0x1b; + else + return 0xb + n; +} + +#define NI_660x_Source_Pin_i_Gate_Select 0x0 +#define NI_660x_Gate_Pin_i_Gate_Select 0x1 +#define NI_660x_Next_SRC_Gate_Select 0xa +#define NI_660x_Next_Out_Gate_Select 0x14 +#define NI_660x_Logic_Low_Gate_Select 0x1f +#define ni_660x_max_gate_pin 7 + +static inline unsigned int NI_660x_Gate_Pin_Gate_Select(unsigned int n) +{ + BUG_ON(n > ni_660x_max_gate_pin); + return 0x2 + n; +} + +static inline unsigned int NI_660x_RTSI_Gate_Select(unsigned int n) +{ + BUG_ON(n > ni_660x_max_rtsi_channel); + return 0xb + n; +} + + +#define NI_M_Series_Timestamp_Mux_Gate_Select 0x0 +#define NI_M_Series_AI_START2_Gate_Select 0x12 +#define NI_M_Series_PXI_Star_Trigger_Gate_Select 0x13 +#define NI_M_Series_Next_Out_Gate_Select 0x14 +#define NI_M_Series_AI_START1_Gate_Select 0x1c +#define NI_M_Series_Next_SRC_Gate_Select 0x1d +#define NI_M_Series_Analog_Trigger_Out_Gate_Select 0x1e +#define NI_M_Series_Logic_Low_Gate_Select 0x1f + +static inline unsigned int NI_M_Series_RTSI_Gate_Select(unsigned int n) +{ + BUG_ON(n > ni_m_series_max_rtsi_channel); + if (n == 7) + return 0x1b; + return 0xb + n; +} + +static inline unsigned int NI_M_Series_PFI_Gate_Select(unsigned int n) +{ + BUG_ON(n > ni_m_series_max_pfi_channel); + if (n < 10) + return 1 + n; + return 0xb + n; +} + + +#define Gi_Source_Select_Shift 2 +#define Gi_Gate_Select_Shift 7 + +#define Gi_Read_Acknowledges_Irq 0x1 /* not present on 660x */ +#define Gi_Write_Acknowledges_Irq 0x2 /* not present on 660x */ +#define Gi_Source_Select_Mask 0x7c +#define Gi_Gate_Select_Mask (0x1f << Gi_Gate_Select_Shift) +#define Gi_Gate_Select_Load_Source_Bit 0x1000 +#define Gi_Or_Gate_Bit 0x2000 +#define Gi_Output_Polarity_Bit 0x4000 /* set to invert */ +#define Gi_Source_Polarity_Bit 0x8000 /* set to invert */ + +#define 
Gi_Source_Select_Bits(x) ((x << Gi_Source_Select_Shift) & \ + Gi_Source_Select_Mask) +#define Gi_Gate_Select_Bits(x) ((x << Gi_Gate_Select_Shift) & \ + Gi_Gate_Select_Mask) + +#define Gi_Gating_Mode_Mask 0x3 +#define Gi_Gating_Disabled_Bits 0x0 +#define Gi_Level_Gating_Bits 0x1 +#define Gi_Rising_Edge_Gating_Bits 0x2 +#define Gi_Falling_Edge_Gating_Bits 0x3 +#define Gi_Gate_On_Both_Edges_Bit 0x4 /* used in conjunction with + rising edge gating mode */ +#define Gi_Trigger_Mode_for_Edge_Gate_Mask 0x18 +#define Gi_Edge_Gate_Starts_Stops_Bits 0x0 +#define Gi_Edge_Gate_Stops_Starts_Bits 0x8 +#define Gi_Edge_Gate_Starts_Bits 0x10 +#define Gi_Edge_Gate_No_Starts_or_Stops_Bits 0x18 +#define Gi_Stop_Mode_Mask 0x60 +#define Gi_Stop_on_Gate_Bits 0x00 +#define Gi_Stop_on_Gate_or_TC_Bits 0x20 +#define Gi_Stop_on_Gate_or_Second_TC_Bits 0x40 +#define Gi_Load_Source_Select_Bit 0x80 +#define Gi_Output_Mode_Mask 0x300 +#define Gi_Output_TC_Pulse_Bits 0x100 +#define Gi_Output_TC_Toggle_Bits 0x200 +#define Gi_Output_TC_or_Gate_Toggle_Bits 0x300 +#define Gi_Counting_Once_Mask 0xc00 +#define Gi_No_Hardware_Disarm_Bits 0x000 +#define Gi_Disarm_at_TC_Bits 0x400 +#define Gi_Disarm_at_Gate_Bits 0x800 +#define Gi_Disarm_at_TC_or_Gate_Bits 0xc00 +#define Gi_Loading_On_TC_Bit 0x1000 +#define Gi_Gate_Polarity_Bit 0x2000 +#define Gi_Loading_On_Gate_Bit 0x4000 +#define Gi_Reload_Source_Switching_Bit 0x8000 + +#define NI_660x_Source_Pin_i_Second_Gate_Select 0x0 +#define NI_660x_Up_Down_Pin_i_Second_Gate_Select 0x1 +#define NI_660x_Next_SRC_Second_Gate_Select 0xa +#define NI_660x_Next_Out_Second_Gate_Select 0x14 +#define NI_660x_Selected_Gate_Second_Gate_Select 0x1e +#define NI_660x_Logic_Low_Second_Gate_Select 0x1f + +#define ni_660x_max_up_down_pin 7 + +static inline +unsigned int NI_660x_Up_Down_Pin_Second_Gate_Select(unsigned int n) +{ + BUG_ON(n > ni_660x_max_up_down_pin); + return 0x2 + n; +} +static inline +unsigned int NI_660x_RTSI_Second_Gate_Select(unsigned int n) +{ + BUG_ON(n > ni_660x_max_rtsi_channel); + return 0xb + n; +} + +#define Gi_Second_Gate_Select_Shift 7 + +/*FIXME: m-series has a second gate subselect bit */ +/*FIXME: m-series second gate sources are undocumented (by NI)*/ +#define Gi_Second_Gate_Mode_Bit 0x1 +#define Gi_Second_Gate_Select_Mask (0x1f << Gi_Second_Gate_Select_Shift) +#define Gi_Second_Gate_Polarity_Bit 0x2000 +#define Gi_Second_Gate_Subselect_Bit 0x4000 /* m-series only */ +#define Gi_Source_Subselect_Bit 0x8000 /* m-series only */ + +static inline +unsigned int Gi_Second_Gate_Select_Bits(unsigned int second_gate_select) +{ + return (second_gate_select << Gi_Second_Gate_Select_Shift) & + Gi_Second_Gate_Select_Mask; +} + +#define G0_Save_Bit 0x1 +#define G1_Save_Bit 0x2 +#define G0_Counting_Bit 0x4 +#define G1_Counting_Bit 0x8 +#define G0_Next_Load_Source_Bit 0x10 +#define G1_Next_Load_Source_Bit 0x20 +#define G0_Stale_Data_Bit 0x40 +#define G1_Stale_Data_Bit 0x80 +#define G0_Armed_Bit 0x100 +#define G1_Armed_Bit 0x200 +#define G0_No_Load_Between_Gates_Bit 0x400 +#define G1_No_Load_Between_Gates_Bit 0x800 +#define G0_TC_Error_Bit 0x1000 +#define G1_TC_Error_Bit 0x2000 +#define G0_Gate_Error_Bit 0x4000 +#define G1_Gate_Error_Bit 0x8000 + +static inline unsigned int Gi_Counting_Bit(unsigned int counter_index) +{ + if (counter_index % 2) + return G1_Counting_Bit; + return G0_Counting_Bit; +} + +static inline unsigned int Gi_Armed_Bit(unsigned int counter_index) +{ + if (counter_index % 2) + return G1_Armed_Bit; + return G0_Armed_Bit; +} + +static inline unsigned int 
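/*
 * Illustrative sketch: the source and gate encodings above are shifted
 * into the Input Select register with Gi_Source_Select_Bits() and
 * Gi_Gate_Select_Bits(); the gating behaviour itself (level, edge,
 * both-edges) is programmed separately in the Mode register via
 * Gi_Level_Gating_Bits and friends. E.g. an m-series counter clocked
 * from PFI3 and gated by RTSI0:
 */
static unsigned int input_select_sketch(void)
{
	return Gi_Source_Select_Bits(NI_M_Series_PFI_Clock(3)) |
	       Gi_Gate_Select_Bits(NI_M_Series_RTSI_Gate_Select(0));
}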
Gi_Next_Load_Source_Bit(unsigned counter_index) +{ + if (counter_index % 2) + return G1_Next_Load_Source_Bit; + return G0_Next_Load_Source_Bit; +} + +static inline unsigned int Gi_Stale_Data_Bit(unsigned int counter_index) +{ + if (counter_index % 2) + return G1_Stale_Data_Bit; + return G0_Stale_Data_Bit; +} + +static inline unsigned int Gi_TC_Error_Bit(unsigned int counter_index) +{ + if (counter_index % 2) + return G1_TC_Error_Bit; + return G0_TC_Error_Bit; +} + +static inline unsigned int Gi_Gate_Error_Bit(unsigned int counter_index) +{ + if (counter_index % 2) + return G1_Gate_Error_Bit; + return G0_Gate_Error_Bit; +} + +/* Joint reset register bits */ +static inline unsigned Gi_Reset_Bit(unsigned int counter_index) +{ + return 0x1 << (2 + (counter_index % 2)); +} + +#define G0_Output_Bit 0x1 +#define G1_Output_Bit 0x2 +#define G0_HW_Save_Bit 0x1000 +#define G1_HW_Save_Bit 0x2000 +#define G0_Permanent_Stale_Bit 0x4000 +#define G1_Permanent_Stale_Bit 0x8000 + +static inline unsigned int Gi_Permanent_Stale_Bit(unsigned + counter_index) +{ + if (counter_index % 2) + return G1_Permanent_Stale_Bit; + return G0_Permanent_Stale_Bit; +} + +#define Gi_DMA_Enable_Bit 0x1 +#define Gi_DMA_Write_Bit 0x2 +#define Gi_DMA_Int_Bit 0x4 + +#define Gi_DMA_Readbank_Bit 0x2000 +#define Gi_DRQ_Error_Bit 0x4000 +#define Gi_DRQ_Status_Bit 0x8000 + +#define G0_Gate_Error_Confirm_Bit 0x20 +#define G0_TC_Error_Confirm_Bit 0x40 + +#define G1_Gate_Error_Confirm_Bit 0x2 +#define G1_TC_Error_Confirm_Bit 0x4 + +static inline unsigned int Gi_Gate_Error_Confirm_Bit(unsigned int counter_index) +{ + if (counter_index % 2) + return G1_Gate_Error_Confirm_Bit; + return G0_Gate_Error_Confirm_Bit; +} + +static inline unsigned int Gi_TC_Error_Confirm_Bit(unsigned int counter_index) +{ + if (counter_index % 2) + return G1_TC_Error_Confirm_Bit; + return G0_TC_Error_Confirm_Bit; +} + +/* Bits that are the same in G0/G2 and G1/G3 interrupt acknowledge registers */ +#define Gi_TC_Interrupt_Ack_Bit 0x4000 +#define Gi_Gate_Interrupt_Ack_Bit 0x8000 + +#define Gi_Gate_Interrupt_Bit 0x4 +#define Gi_TC_Bit 0x8 +#define Gi_Interrupt_Bit 0x8000 + +#define G0_TC_Interrupt_Enable_Bit 0x40 +#define G0_Gate_Interrupt_Enable_Bit 0x100 + +#define G1_TC_Interrupt_Enable_Bit 0x200 +#define G1_Gate_Interrupt_Enable_Bit 0x400 + +static inline unsigned int Gi_Gate_Interrupt_Enable_Bit(unsigned int counter_index) +{ + unsigned int bit; + + if (counter_index % 2) { + bit = G1_Gate_Interrupt_Enable_Bit; + } else { + bit = G0_Gate_Interrupt_Enable_Bit; + } + return bit; +} + +#define counter_status_mask (A4L_COUNTER_ARMED | A4L_COUNTER_COUNTING) + +#define NI_USUAL_PFI_SELECT(x) ((x < 10) ? (0x1 + x) : (0xb + x)) +#define NI_USUAL_RTSI_SELECT(x) ((x < 7 ) ? 
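/*
 * Illustrative sketch of clearing a latched gate error: read the joint
 * status register and, if this counter's error bit is set, write the
 * matching confirm bit. That the confirm bits are written through the
 * joint-reset register is an assumption suggested by their G0/G1
 * encodings; the driver's real logic lives in
 * a4l_ni_tio_acknowledge_and_confirm().
 */
static void confirm_gate_error_sketch(struct ni_gpct *counter)
{
	struct ni_gpct_device *cd = counter->counter_dev;
	unsigned int status = cd->read_register(counter,
			NITIO_Gxx_Status_Reg(counter->counter_index));

	if (status & Gi_Gate_Error_Bit(counter->counter_index))
		cd->write_register(counter,
			Gi_Gate_Error_Confirm_Bit(counter->counter_index),
			NITIO_Gxx_Joint_Reset_Reg(counter->counter_index));
}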
(0xb + x) : 0x1b) + +/* Mode bits for NI general-purpose counters, set with + INSN_CONFIG_SET_COUNTER_MODE */ +#define NI_GPCT_COUNTING_MODE_SHIFT 16 +#define NI_GPCT_INDEX_PHASE_BITSHIFT 20 +#define NI_GPCT_COUNTING_DIRECTION_SHIFT 24 + +#define NI_GPCT_GATE_ON_BOTH_EDGES_BIT 0x4 +#define NI_GPCT_EDGE_GATE_MODE_MASK 0x18 +#define NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS 0x0 +#define NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS 0x8 +#define NI_GPCT_EDGE_GATE_STARTS_BITS 0x10 +#define NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS 0x18 +#define NI_GPCT_STOP_MODE_MASK 0x60 +#define NI_GPCT_STOP_ON_GATE_BITS 0x00 +#define NI_GPCT_STOP_ON_GATE_OR_TC_BITS 0x20 +#define NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS 0x40 +#define NI_GPCT_LOAD_B_SELECT_BIT 0x80 +#define NI_GPCT_OUTPUT_MODE_MASK 0x300 +#define NI_GPCT_OUTPUT_TC_PULSE_BITS 0x100 +#define NI_GPCT_OUTPUT_TC_TOGGLE_BITS 0x200 +#define NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS 0x300 +#define NI_GPCT_HARDWARE_DISARM_MASK 0xc00 +#define NI_GPCT_NO_HARDWARE_DISARM_BITS 0x000 +#define NI_GPCT_DISARM_AT_TC_BITS 0x400 +#define NI_GPCT_DISARM_AT_GATE_BITS 0x800 +#define NI_GPCT_DISARM_AT_TC_OR_GATE_BITS 0xc00 +#define NI_GPCT_LOADING_ON_TC_BIT 0x1000 +#define NI_GPCT_LOADING_ON_GATE_BIT 0x4000 +#define NI_GPCT_COUNTING_MODE_MASK 0x7 << NI_GPCT_COUNTING_MODE_SHIFT +#define NI_GPCT_COUNTING_MODE_NORMAL_BITS 0x0 << NI_GPCT_COUNTING_MODE_SHIFT +#define NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS 0x1 << NI_GPCT_COUNTING_MODE_SHIFT +#define NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS 0x2 << NI_GPCT_COUNTING_MODE_SHIFT +#define NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS 0x3 << NI_GPCT_COUNTING_MODE_SHIFT +#define NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS 0x4 << NI_GPCT_COUNTING_MODE_SHIFT +#define NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS 0x6 << NI_GPCT_COUNTING_MODE_SHIFT +#define NI_GPCT_INDEX_PHASE_MASK 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT +#define NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS 0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT +#define NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS 0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT +#define NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS 0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT +#define NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS 0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT +#define NI_GPCT_INDEX_ENABLE_BIT 0x400000 +#define NI_GPCT_COUNTING_DIRECTION_MASK 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT +#define NI_GPCT_COUNTING_DIRECTION_DOWN_BITS 0x00 << NI_GPCT_COUNTING_DIRECTION_SHIFT +#define NI_GPCT_COUNTING_DIRECTION_UP_BITS 0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT +#define NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS 0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT +#define NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS 0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT +#define NI_GPCT_RELOAD_SOURCE_MASK 0xc000000 +#define NI_GPCT_RELOAD_SOURCE_FIXED_BITS 0x0 +#define NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS 0x4000000 +#define NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS 0x8000000 +#define NI_GPCT_OR_GATE_BIT 0x10000000 +#define NI_GPCT_INVERT_OUTPUT_BIT 0x20000000 + +/* Bits for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC when + using NI general-purpose counters. 
*/ +#define NI_GPCT_CLOCK_SRC_SELECT_MASK 0x3f +#define NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS 0x0 +#define NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS 0x1 +#define NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS 0x2 +#define NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS 0x3 +#define NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS 0x4 +#define NI_GPCT_NEXT_TC_CLOCK_SRC_BITS 0x5 +#define NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS 0x6 /* NI 660x-specific */ +#define NI_GPCT_PXI10_CLOCK_SRC_BITS 0x7 +#define NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS 0x8 +#define NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS 0x9 +#define NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK 0x30000000 +#define NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS 0x0 +#define NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS 0x10000000 /* divide source by 2 */ +#define NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS 0x20000000 /* divide source by 8 */ +#define NI_GPCT_INVERT_CLOCK_SRC_BIT 0x80000000 +#define NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(x) (0x10 + x) +#define NI_GPCT_RTSI_CLOCK_SRC_BITS(x) (0x18 + x) +#define NI_GPCT_PFI_CLOCK_SRC_BITS(x) (0x20 + x) + +/* Possibilities for setting a gate source with + INSN_CONFIG_SET_GATE_SRC when using NI general-purpose counters. + May be bitwise-or'd with CR_EDGE or CR_INVERT. */ +/* M-series gates */ +#define NI_GPCT_TIMESTAMP_MUX_GATE_SELECT 0x0 +#define NI_GPCT_AI_START2_GATE_SELECT 0x12 +#define NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT 0x13 +#define NI_GPCT_NEXT_OUT_GATE_SELECT 0x14 +#define NI_GPCT_AI_START1_GATE_SELECT 0x1c +#define NI_GPCT_NEXT_SOURCE_GATE_SELECT 0x1d +#define NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT 0x1e +#define NI_GPCT_LOGIC_LOW_GATE_SELECT 0x1f +/* More gates for 660x */ +#define NI_GPCT_SOURCE_PIN_i_GATE_SELECT 0x100 +#define NI_GPCT_GATE_PIN_i_GATE_SELECT 0x101 +/* More gates for 660x "second gate" */ +#define NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT 0x201 +#define NI_GPCT_SELECTED_GATE_GATE_SELECT 0x21e +/* M-series "second gate" sources are unknown, we should add them here + with an offset of 0x300 when known. */ +#define NI_GPCT_DISABLED_GATE_SELECT 0x8000 +#define NI_GPCT_GATE_PIN_GATE_SELECT(x) (0x102 + x) +#define NI_GPCT_RTSI_GATE_SELECT(x) NI_USUAL_RTSI_SELECT(x) +#define NI_GPCT_PFI_GATE_SELECT(x) NI_USUAL_PFI_SELECT(x) +#define NI_GPCT_UP_DOWN_PIN_GATE_SELECT(x) (0x202 + x) + +/* Possibilities for setting a source with INSN_CONFIG_SET_OTHER_SRC + when using NI general-purpose counters. */ +#define NI_GPCT_SOURCE_ENCODER_A 0 +#define NI_GPCT_SOURCE_ENCODER_B 1 +#define NI_GPCT_SOURCE_ENCODER_Z 2 +/* M-series gates */ +/* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */ +#define NI_GPCT_DISABLED_OTHER_SELECT 0x8000 +#define NI_GPCT_PFI_OTHER_SELECT(x) NI_USUAL_PFI_SELECT(x) + +/* Start sources for ni general-purpose counters for use with + INSN_CONFIG_ARM */ +#define NI_GPCT_ARM_IMMEDIATE 0x0 +/* Start both the counter and the adjacent paired counter + simultaneously */ +#define NI_GPCT_ARM_PAIRED_IMMEDIATE 0x1 +/* NI doesn't document bits for selecting hardware arm triggers. If + the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least significant + bits (3 bits for 660x or 5 bits for m-series) through to the + hardware. This will at least allow someone to figure out what the bits + do later. */ +#define NI_GPCT_ARM_UNKNOWN 0x1000 + +/* Digital filtering options for ni 660x for use with + INSN_CONFIG_FILTER. 
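/*
 * Illustrative sketch of configuration words built from the NI_GPCT_*
 * vocabulary above: a counter mode for X4 quadrature decoding with
 * index (Z) reload in the low-A/low-B phase, and a clock selection of
 * the 20MHz timebase 1 prescaled by 8. These would be handed to
 * INSN_CONFIG_SET_COUNTER_MODE and INSN_CONFIG_SET_CLOCK_SRC
 * respectively.
 */
static void counter_config_words_sketch(unsigned int *mode,
					unsigned int *clock_src)
{
	*mode = NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS |
		NI_GPCT_INDEX_ENABLE_BIT |
		NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS;

	*clock_src = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS |
		     NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS; /* 2.5MHz effective */
}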
*/ +#define NI_GPCT_FILTER_OFF 0x0 +#define NI_GPCT_FILTER_TIMEBASE_3_SYNC 0x1 +#define NI_GPCT_FILTER_100x_TIMEBASE_1 0x2 +#define NI_GPCT_FILTER_20x_TIMEBASE_1 0x3 +#define NI_GPCT_FILTER_10x_TIMEBASE_1 0x4 +#define NI_GPCT_FILTER_2x_TIMEBASE_1 0x5 +#define NI_GPCT_FILTER_2x_TIMEBASE_3 0x6 + +/* Master clock sources for ni mio boards and + INSN_CONFIG_SET_CLOCK_SRC */ +#define NI_MIO_INTERNAL_CLOCK 0 +#define NI_MIO_RTSI_CLOCK 1 +/* Doesn't work for m-series, use NI_MIO_PLL_RTSI_CLOCK() the + NI_MIO_PLL_* sources are m-series only */ +#define NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK 2 +#define NI_MIO_PLL_PXI10_CLOCK 3 +#define NI_MIO_PLL_RTSI0_CLOCK 4 + +#define NI_MIO_PLL_RTSI_CLOCK(x) (NI_MIO_PLL_RTSI0_CLOCK + (x)) + +/* Signals which can be routed to an NI RTSI pin with + INSN_CONFIG_SET_ROUTING. The numbers assigned are not arbitrary, they + correspond to the bits required to program the board. */ +#define NI_RTSI_OUTPUT_ADR_START1 0 +#define NI_RTSI_OUTPUT_ADR_START2 1 +#define NI_RTSI_OUTPUT_SCLKG 2 +#define NI_RTSI_OUTPUT_DACUPDN 3 +#define NI_RTSI_OUTPUT_DA_START1 4 +#define NI_RTSI_OUTPUT_G_SRC0 5 +#define NI_RTSI_OUTPUT_G_GATE0 6 +#define NI_RTSI_OUTPUT_RGOUT0 7 +#define NI_RTSI_OUTPUT_RTSI_BRD_0 8 +/* Pre-m-series always have RTSI clock on line 7 */ +#define NI_RTSI_OUTPUT_RTSI_OSC 12 + +#define NI_RTSI_OUTPUT_RTSI_BRD(x) (NI_RTSI_OUTPUT_RTSI_BRD_0 + (x)) + + +int a4l_ni_tio_rinsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn); +int a4l_ni_tio_winsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn); +int a4l_ni_tio_insn_config(struct ni_gpct *counter, struct a4l_kernel_instruction *insn); +void a4l_ni_tio_init_counter(struct ni_gpct *counter); + +struct ni_gpct_device *a4l_ni_gpct_device_construct(struct a4l_device * dev, + void (*write_register) (struct ni_gpct * counter, unsigned int bits, + enum ni_gpct_register reg), + unsigned int (*read_register) (struct ni_gpct * counter, + enum ni_gpct_register reg), enum ni_gpct_variant variant, + unsigned int num_counters); +void a4l_ni_gpct_device_destroy(struct ni_gpct_device *counter_dev); + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + +extern struct a4l_cmd_desc a4l_ni_tio_cmd_mask; + +int a4l_ni_tio_input_inttrig(struct ni_gpct *counter, lsampl_t trignum); +int a4l_ni_tio_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd); +int a4l_ni_tio_cmdtest(struct ni_gpct *counter, struct a4l_cmd_desc *cmd); +int a4l_ni_tio_cancel(struct ni_gpct *counter); + +void a4l_ni_tio_handle_interrupt(struct ni_gpct *counter, struct a4l_device *dev); +void a4l_ni_tio_set_mite_channel(struct ni_gpct *counter, + struct mite_channel *mite_chan); +void a4l_ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, + int *gate_error, + int *tc_error, + int *perm_stale_data, int *stale_data); + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +#endif /* !__ANALOGY_NI_TIO_H__ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c new file mode 100644 index 0000000..8a3cccc --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c @@ -0,0 +1,1603 @@ +/* + * Hardware driver for NI PCI-MIO E series cards + * + * Copyright (C) 1997-8 David A. 
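/*
 * Illustrative sketch of instantiating the counter core from a board
 * driver's attach path; the two callbacks (hypothetical names) are
 * where the board-specific register windowing plugs in. NUM_GPCT and
 * devpriv come from ni_stc.h.
 */
static void gpct_write_sketch(struct ni_gpct *counter, unsigned int bits,
			      enum ni_gpct_register reg)
{
	/* board-specific: translate reg to a window offset and write bits */
}

static unsigned int gpct_read_sketch(struct ni_gpct *counter,
				     enum ni_gpct_register reg)
{
	return 0; /* board-specific read */
}

static int attach_counters_sketch(struct a4l_device *dev)
{
	devpriv->counter_dev = a4l_ni_gpct_device_construct(dev,
			gpct_write_sketch, gpct_read_sketch,
			ni_gpct_variant_m_series, NUM_GPCT);
	return devpriv->counter_dev ? 0 : -ENOMEM;
}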
Schleef <ds@schleef.org> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This code is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Description: National Instruments PCI-MIO-E series and M series + * (all boards) + * + * Author: ds, John Hallen, Frank Mori Hess, Rolf Mueller, Herbert Peremans, + * Herman Bruyninckx, Terry Barnaby + * Status: works + * Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio), + * PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014, + * PCI-6040E, PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, + * PCI-6071E, PCI-6023E, PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, + * PCI-6035E, PCI-6052E, PCI-6110, PCI-6111, PCI-6220, PCI-6221, + * PCI-6224, PCI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, + * PCI-6254, PCI-6259, PCIe-6259, PCI-6280, PCI-6281, PXI-6281, + * PCI-6284, PCI-6289, PCI-6711, PXI-6711, PCI-6713, PXI-6713, + * PXI-6071E, PCI-6070E, PXI-6070E, PXI-6052E, PCI-6036E, PCI-6731, + * PCI-6733, PXI-6733, PCI-6143, PXI-6143 + * + * These boards are almost identical to the AT-MIO E series, except that + * they use the PCI bus instead of ISA (i.e., AT). See the notes for + * the ni_atmio.o driver for additional information about these boards. + * + * By default, the driver uses DMA to transfer analog input data to + * memory. When DMA is enabled, not all triggering features are + * supported. + * + * Note that the PCI-6143 is a simultaneous sampling device with 8 + * convertors. With this board all of the convertors perform one + * simultaneous sample during a scan interval. The period for a scan + * is used for the convert time in an Analogy cmd. The convert trigger + * source is normally set to TRIG_NOW by default. + * + * The RTSI trigger bus is supported on these cards on subdevice + * 10. See the Analogy library documentation for details. + * + * References: + * 341079b.pdf PCI E Series Register-Level Programmer Manual + * 340934b.pdf DAQ-STC reference manual + * 322080b.pdf 6711/6713/6715 User Manual + * 320945c.pdf PCI E Series User Manual + * 322138a.pdf PCI-6052E and DAQPad-6052E User Manual + * + * ISSUES: + * - When DMA is enabled, XXX_EV_CONVERT does not work correctly. + * - Calibration is not fully implemented + * - SCXI is probably broken for m-series boards + * - Digital I/O may not work on 673x. + * - Information (number of channels, bits, etc.) for some devices may + * be incorrect. Please check this and submit a bug if there are + * problems for your device. + * - Need to deal with external reference for DAC, and other DAC + * properties in board properties + * - Deal with at-mio-16de-10 revision D to N changes, etc. + * - Need to add other CALDAC type + * - Need to slow down DAC loading. I don't trust NI's claim that two + * writes to the PCI bus slow IO enough. I would prefer to use + * a4l_udelay(). Timing specs: (clock) + * AD8522 30ns + * DAC8043 120ns + * DAC8800 60ns + * MB88341 ? 
+ * + */ + +#include <linux/module.h> +#include <rtdm/analogy/device.h> + +#include "../intel/8255.h" +#include "ni_stc.h" +#include "ni_mio.h" +#include "mite.h" + +#define PCIMIO_IRQ_POLARITY 1 + +/* The following two tables must be in the same order */ +static struct pci_device_id ni_pci_table[] __maybe_unused = { + { PCI_VENDOR_ID_NATINST, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1190, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x11b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x11c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x11d0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x14e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x14f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x15b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x1870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x18b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x18c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2890, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x28c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2a60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2a70, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2a80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2ab0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2b80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2b90, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x2ca0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70aa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70ab, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70ac, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70af, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70b4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70b6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70b7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70bd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x70f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 
PCI_VENDOR_ID_NATINST, 0x710d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x716c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x717f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x71bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_NATINST, 0x717d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, ni_pci_table); + +/* These are not all the possible ao ranges for 628x boards. + They can do OFFSET +- REFERENCE where OFFSET can be + 0V, 5V, APFI<0,1>, or AO<0...3> and RANGE can + be 10V, 5V, 2V, 1V, APFI<0,1>, AO<0...3>. That's + 63 different possibilities. An AO channel + cannot act as its own OFFSET or REFERENCE. +*/ + +#if 0 +static struct a4l_rngtab rng_ni_M_628x_ao = { 8, { + RANGE(-10, 10), + RANGE(-5, 5), + RANGE(-2, 2), + RANGE(-1, 1), + RANGE(-5, 15), + RANGE(0, 10), + RANGE(3, 7), + RANGE(4, 6), + RANGE_ext(-1, 1) +}}; +static struct a4l_rngdesc range_ni_M_628x_ao = + RNG_GLOBAL(rng_ni_M_628x_ao); +#endif + +static struct a4l_rngtab rng_ni_M_625x_ao = { 3, { + RANGE(-10, 10), + RANGE(-5, 5), + RANGE_ext(-1, 1) +}}; +static struct a4l_rngdesc range_ni_M_625x_ao = + RNG_GLOBAL(rng_ni_M_625x_ao); + +static struct a4l_rngtab rng_ni_M_622x_ao = { 1, { + RANGE(-10, 10), +}}; +static struct a4l_rngdesc range_ni_M_622x_ao = + RNG_GLOBAL(rng_ni_M_622x_ao); + +static ni_board ni_boards[]={ + { device_id: 0x0162, // NI also says 0x1620. typo? + name: "pci-mio-16xe-50", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 2048, + alwaysdither: 1, + gainlkup: ai_gain_8, + ai_speed: 50000, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 0, + .ao_range_table = &a4l_range_bipolar10, + ao_unipolar: 0, + ao_speed: 50000, + .num_p0_dio_channels = 8, + caldac: {dac8800,dac8043}, + has_8255: 0, + }, + { device_id: 0x1170, + name: "pci-mio-16xe-10", // aka pci-6030E + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_14, + ai_speed: 10000, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 10000, + .num_p0_dio_channels = 8, + caldac: {dac8800,dac8043,ad8522}, + has_8255: 0, + }, + { device_id: 0x28c0, + name: "pci-6014", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_4, + ai_speed: 5000, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 0, + .ao_range_table = &a4l_range_bipolar10, + ao_unipolar: 0, + ao_speed: 100000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, + has_8255: 0, + }, + { device_id: 0x11d0, + name: "pxi-6030e", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_14, + ai_speed: 10000, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 10000, + .num_p0_dio_channels = 8, + caldac: {dac8800,dac8043,ad8522}, + has_8255: 0, + }, + + { device_id: 0x1180, + name: "pci-mio-16e-1", /* aka pci-6070e */ + n_adchan: 16, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 0, + gainlkup: ai_gain_16, + ai_speed: 800, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 1000, + .num_p0_dio_channels = 8, + caldac: {mb88341}, + has_8255: 0, + }, + { device_id: 0x1190, + name: "pci-mio-16e-4", /* aka pci-6040e */ + n_adchan: 16, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 0, + gainlkup: ai_gain_16, + /* Note: there have been reported problems with full speed + * on this board */ + 
ai_speed: 2000, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 512, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 1000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, // doc says mb88341 + has_8255: 0, + }, + { device_id: 0x11c0, + name: "pxi-6040e", + n_adchan: 16, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 0, + gainlkup: ai_gain_16, + ai_speed: 2000, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 512, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 1000, + .num_p0_dio_channels = 8, + caldac: {mb88341}, + has_8255: 0, + }, + + { device_id: 0x1330, + name: "pci-6031e", + n_adchan: 64, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_14, + ai_speed: 10000, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 10000, + .num_p0_dio_channels = 8, + caldac: {dac8800,dac8043,ad8522}, + has_8255: 0, + }, + { device_id: 0x1270, + name: "pci-6032e", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_14, + ai_speed: 10000, + n_aochan: 0, + aobits: 0, + ao_fifo_depth: 0, + ao_unipolar: 0, + .num_p0_dio_channels = 8, + caldac: {dac8800,dac8043,ad8522}, + has_8255: 0, + }, + { device_id: 0x1340, + name: "pci-6033e", + n_adchan: 64, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_14, + ai_speed: 10000, + n_aochan: 0, + aobits: 0, + ao_fifo_depth: 0, + ao_unipolar: 0, + .num_p0_dio_channels = 8, + caldac: {dac8800,dac8043,ad8522}, + has_8255: 0, + }, + { device_id: 0x1350, + name: "pci-6071e", + n_adchan: 64, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_16, + ai_speed: 800, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 1000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, + has_8255: 0, + }, + { device_id: 0x2a60, + name: "pci-6023e", + n_adchan: 16, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 0, + gainlkup: ai_gain_4, + ai_speed: 5000, + n_aochan: 0, + aobits: 0, + ao_unipolar: 0, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, /* manual is wrong */ + has_8255: 0, + }, + { device_id: 0x2a70, + name: "pci-6024e", + n_adchan: 16, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 0, + gainlkup: ai_gain_4, + ai_speed: 5000, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 0, + .ao_range_table = &a4l_range_bipolar10, + ao_unipolar: 0, + ao_speed: 100000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, /* manual is wrong */ + has_8255: 0, + }, + { device_id: 0x2a80, + name: "pci-6025e", + n_adchan: 16, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 0, + gainlkup: ai_gain_4, + ai_speed: 5000, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 0, + .ao_range_table = &a4l_range_bipolar10, + ao_unipolar: 0, + ao_speed: 100000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, /* manual is wrong */ + has_8255: 1, + }, + { device_id: 0x2ab0, + name: "pxi-6025e", + n_adchan: 16, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 0, + gainlkup: ai_gain_4, + ai_speed: 5000, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 0, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 100000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, /* manual is wrong */ + has_8255: 1, + }, + + { device_id: 0x2ca0, + name: "pci-6034e", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_4, + ai_speed: 5000, 
+ n_aochan: 0, + aobits: 0, + ao_fifo_depth: 0, + ao_unipolar: 0, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, + has_8255: 0, + }, + { device_id: 0x2c80, + name: "pci-6035e", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_4, + ai_speed: 5000, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 0, + .ao_range_table = &a4l_range_bipolar10, + ao_unipolar: 0, + ao_speed: 100000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, + has_8255: 0, + }, + { device_id: 0x18b0, + name: "pci-6052e", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_16, + ai_speed: 3000, + n_aochan: 2, + aobits: 16, + ao_unipolar: 1, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_speed: 3000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug,ad8804_debug,ad8522}, /* manual is wrong */ + }, + { device_id: 0x14e0, + name: "pci-6110", + n_adchan: 4, + adbits: 12, + ai_fifo_depth: 8192, + alwaysdither: 0, + gainlkup: ai_gain_611x, + ai_speed: 200, + n_aochan: 2, + aobits: 16, + reg_type: ni_reg_611x, + .ao_range_table = &a4l_range_bipolar10, + ao_unipolar: 0, + ao_fifo_depth: 2048, + ao_speed: 250, + .num_p0_dio_channels = 8, + caldac: {ad8804,ad8804}, + }, + { device_id: 0x14f0, + name: "pci-6111", + n_adchan: 2, + adbits: 12, + ai_fifo_depth: 8192, + alwaysdither: 0, + gainlkup: ai_gain_611x, + ai_speed: 200, + n_aochan: 2, + aobits: 16, + reg_type: ni_reg_611x, + .ao_range_table = &a4l_range_bipolar10, + ao_unipolar: 0, + ao_fifo_depth: 2048, + ao_speed: 250, + .num_p0_dio_channels = 8, + caldac: {ad8804,ad8804}, + }, +#if 0 /* Need device IDs */ + /* The 6115 boards probably need their own driver */ + { device_id: 0x2ed0, + name: "pci-6115", + n_adchan: 4, + adbits: 12, + ai_fifo_depth: 8192, + alwaysdither: 0, + gainlkup: ai_gain_611x, + ai_speed: 100, + n_aochan: 2, + aobits: 16, + ao_671x: 1, + ao_unipolar: 0, + ao_fifo_depth: 2048, + ao_speed: 250, + .num_p0_dio_channels = 8, + reg_611x: 1, + caldac: {ad8804_debug,ad8804_debug,ad8804_debug},/* XXX */ + }, +#endif +#if 0 /* Need device IDs */ + { device_id: 0x0000, + name: "pxi-6115", + n_adchan: 4, + adbits: 12, + ai_fifo_depth: 8192, + alwaysdither: 0, + gainlkup: ai_gain_611x, + ai_speed: 100, + n_aochan: 2, + aobits: 16, + ao_671x: 1, + ao_unipolar: 0, + ao_fifo_depth: 2048, + ao_speed: 250, + reg_611x: 1, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug,ad8804_debug,ad8804_debug},/* XXX */ + }, +#endif + { device_id: 0x1880, + name: "pci-6711", + n_adchan: 0, /* no analog input */ + n_aochan: 4, + aobits: 12, + ao_unipolar: 0, + ao_fifo_depth: 16384, /* data sheet says 8192, but fifo really holds 16384 samples */ + .ao_range_table = &a4l_range_bipolar10, + ao_speed: 1000, + .num_p0_dio_channels = 8, + reg_type: ni_reg_6711, + caldac: {ad8804_debug}, + }, + { device_id: 0x2b90, + name: "pxi-6711", + n_adchan: 0, /* no analog input */ + n_aochan: 4, + aobits: 12, + ao_unipolar: 0, + ao_fifo_depth: 16384, + .ao_range_table = &a4l_range_bipolar10, + ao_speed: 1000, + .num_p0_dio_channels = 8, + reg_type: ni_reg_6711, + caldac: {ad8804_debug}, + }, + { device_id: 0x1870, + name: "pci-6713", + n_adchan: 0, /* no analog input */ + n_aochan: 8, + aobits: 12, + ao_unipolar: 0, + ao_fifo_depth: 16384, + .ao_range_table = &a4l_range_bipolar10, + ao_speed: 1000, + .num_p0_dio_channels = 8, + reg_type: ni_reg_6713, + caldac: {ad8804_debug,ad8804_debug}, + }, + { device_id: 0x2b80, + name: "pxi-6713", + n_adchan: 0, /* no analog input */ + n_aochan: 8, + 
aobits: 12, + ao_unipolar: 0, + ao_fifo_depth: 16384, + .ao_range_table = &a4l_range_bipolar10, + ao_speed: 1000, + .num_p0_dio_channels = 8, + reg_type: ni_reg_6713, + caldac: {ad8804_debug,ad8804_debug}, + }, + { device_id: 0x2430, + name: "pci-6731", + n_adchan: 0, /* no analog input */ + n_aochan: 4, + aobits: 16, + ao_unipolar: 0, + ao_fifo_depth: 8192, + .ao_range_table = &a4l_range_bipolar10, + ao_speed: 1000, + .num_p0_dio_channels = 8, + reg_type: ni_reg_6711, + caldac: {ad8804_debug}, + }, +#if 0 /* Need device IDs */ + { device_id: 0x0, + name: "pxi-6731", + n_adchan: 0, /* no analog input */ + n_aochan: 4, + aobits: 16, + ao_unipolar: 0, + ao_fifo_depth: 8192, + .ao_range_table = &a4l_range_bipolar10, + .num_p0_dio_channels = 8, + reg_type: ni_reg_6711, + caldac: {ad8804_debug}, + }, +#endif + { device_id: 0x2410, + name: "pci-6733", + n_adchan: 0, /* no analog input */ + n_aochan: 8, + aobits: 16, + ao_unipolar: 0, + ao_fifo_depth: 16384, + .ao_range_table = &a4l_range_bipolar10, + ao_speed: 1000, + .num_p0_dio_channels = 8, + reg_type: ni_reg_6713, + caldac: {ad8804_debug,ad8804_debug}, + }, + { device_id: 0x2420, + name: "pxi-6733", + n_adchan: 0, /* no analog input */ + n_aochan: 8, + aobits: 16, + ao_unipolar: 0, + ao_fifo_depth: 16384, + .ao_range_table = &a4l_range_bipolar10, + ao_speed: 1000, + .num_p0_dio_channels = 8, + reg_type: ni_reg_6713, + caldac: {ad8804_debug,ad8804_debug}, + }, + { device_id: 0x15b0, + name: "pxi-6071e", + n_adchan: 64, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_16, + ai_speed: 800, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 1000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, + has_8255: 0, + }, + { device_id: 0x11b0, + name: "pxi-6070e", + n_adchan: 16, + adbits: 12, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_16, + ai_speed: 800, + n_aochan: 2, + aobits: 12, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 1000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, + has_8255: 0, + }, + { device_id: 0x18c0, + name: "pxi-6052e", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_16, + ai_speed: 3000, + n_aochan: 2, + aobits: 16, + ao_unipolar: 1, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_speed: 3000, + .num_p0_dio_channels = 8, + caldac: {mb88341,mb88341,ad8522}, + }, + { device_id: 0x1580, + name: "pxi-6031e", + n_adchan: 64, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_14, + ai_speed: 10000, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 2048, + .ao_range_table = &a4l_range_ni_E_ao_ext, + ao_unipolar: 1, + ao_speed: 10000, + .num_p0_dio_channels = 8, + caldac: {dac8800,dac8043,ad8522}, + }, + { device_id: 0x2890, + name: "pci-6036e", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, + alwaysdither: 1, + gainlkup: ai_gain_4, + ai_speed: 5000, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 0, + .ao_range_table = &a4l_range_bipolar10, + ao_unipolar: 0, + ao_speed: 100000, + .num_p0_dio_channels = 8, + caldac: {ad8804_debug}, + has_8255: 0, + }, + { device_id: 0x70b0, + name: "pci-6220", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 512, //FIXME: guess + gainlkup: ai_gain_622x, + ai_speed: 4000, + n_aochan: 0, + aobits: 0, + ao_fifo_depth: 0, + .num_p0_dio_channels = 8, + reg_type: ni_reg_622x, + ao_unipolar: 0, + .caldac = {caldac_none}, + has_8255: 0, + }, + 
{ device_id: 0x70af, + name: "pci-6221", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 4095, + gainlkup: ai_gain_622x, + ai_speed: 4000, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &a4l_range_bipolar10, + reg_type: ni_reg_622x, + ao_unipolar: 0, + ao_speed: 1200, + .num_p0_dio_channels = 8, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x71bc, + name: "pci-6221_37pin", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 4095, + gainlkup: ai_gain_622x, + ai_speed: 4000, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &a4l_range_bipolar10, + reg_type: ni_reg_622x, + ao_unipolar: 0, + ao_speed: 1200, + .num_p0_dio_channels = 8, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70f2, + name: "pci-6224", + n_adchan: 32, + adbits: 16, + ai_fifo_depth: 4095, + gainlkup: ai_gain_622x, + ai_speed: 4000, + n_aochan: 0, + aobits: 0, + ao_fifo_depth: 0, + reg_type: ni_reg_622x, + ao_unipolar: 0, + .num_p0_dio_channels = 32, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x716c, + name: "pci-6225", + n_adchan: 80, + adbits: 16, + ai_fifo_depth: 4095, + gainlkup: ai_gain_622x, + ai_speed: 4000, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &range_ni_M_622x_ao, + reg_type: ni_reg_622x, + ao_unipolar: 0, + ao_speed: 1200, + .num_p0_dio_channels = 32, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70aa, + name: "pci-6229", + n_adchan: 32, + adbits: 16, + ai_fifo_depth: 4095, + gainlkup: ai_gain_622x, + ai_speed: 4000, + n_aochan: 4, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &range_ni_M_622x_ao, + reg_type: ni_reg_622x, + ao_unipolar: 0, + ao_speed: 1200, + .num_p0_dio_channels = 32, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70b4, + name: "pci-6250", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 4095, + .gainlkup = ai_gain_628x, + ai_speed: 800, + n_aochan: 0, + aobits: 0, + ao_fifo_depth: 0, + reg_type: ni_reg_625x, + ao_unipolar: 0, + .num_p0_dio_channels = 8, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70b8, + name: "pci-6251", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 4095, + .gainlkup = ai_gain_628x, + ai_speed: 800, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &range_ni_M_625x_ao, + reg_type: ni_reg_625x, + ao_unipolar: 0, + ao_speed: 357, + .num_p0_dio_channels = 8, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x717d, + name: "pcie-6251", + n_adchan: 16, + adbits: 16, + ai_fifo_depth: 4095, + .gainlkup = ai_gain_628x, + ai_speed: 800, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &range_ni_M_625x_ao, + reg_type: ni_reg_625x, + ao_unipolar: 0, + ao_speed: 357, + .num_p0_dio_channels = 8, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70b7, + name: "pci-6254", + n_adchan: 32, + adbits: 16, + ai_fifo_depth: 4095, + .gainlkup = ai_gain_628x, + ai_speed: 800, + n_aochan: 0, + aobits: 0, + ao_fifo_depth: 0, + reg_type: ni_reg_625x, + ao_unipolar: 0, + .num_p0_dio_channels = 32, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70ab, + name: "pci-6259", + n_adchan: 32, + adbits: 16, + ai_fifo_depth: 4095, + .gainlkup = ai_gain_628x, + ai_speed: 800, + n_aochan: 4, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &range_ni_M_625x_ao, + reg_type: ni_reg_625x, + ao_unipolar: 0, + ao_speed: 357, + .num_p0_dio_channels = 32, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x717f, + name: 
"pcie-6259", + n_adchan: 32, + adbits: 16, + ai_fifo_depth: 4095, + .gainlkup = ai_gain_628x, + ai_speed: 800, + n_aochan: 4, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &range_ni_M_625x_ao, + reg_type: ni_reg_625x, + ao_unipolar: 0, + ao_speed: 357, + .num_p0_dio_channels = 32, + .caldac = {caldac_none}, + has_8255: 0, + }, +#if 0 /* TODO: fix data size */ + { device_id: 0x70b6, + name: "pci-6280", + n_adchan: 16, + adbits: 18, + ai_fifo_depth: 2047, + .gainlkup = ai_gain_628x, + ai_speed: 1600, + n_aochan: 0, + aobits: 0, + ao_fifo_depth: 8191, + reg_type: ni_reg_628x, + ao_unipolar: 0, + .num_p0_dio_channels = 8, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70bd, + name: "pci-6281", + n_adchan: 16, + adbits: 18, + ai_fifo_depth: 2047, + .gainlkup = ai_gain_628x, + ai_speed: 1600, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &range_ni_M_628x_ao, + reg_type: ni_reg_628x, + ao_unipolar: 1, + ao_speed: 357, + .num_p0_dio_channels = 8, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70bf, + name: "pxi-6281", + n_adchan: 16, + adbits: 18, + ai_fifo_depth: 2047, + .gainlkup = ai_gain_628x, + ai_speed: 1600, + n_aochan: 2, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &range_ni_M_628x_ao, + reg_type: ni_reg_628x, + ao_unipolar: 1, + ao_speed: 357, + .num_p0_dio_channels = 8, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70bc, + name: "pci-6284", + n_adchan: 32, + adbits: 18, + ai_fifo_depth: 2047, + .gainlkup = ai_gain_628x, + ai_speed: 1600, + n_aochan: 0, + aobits: 0, + ao_fifo_depth: 0, + reg_type: ni_reg_628x, + ao_unipolar: 0, + .num_p0_dio_channels = 32, + .caldac = {caldac_none}, + has_8255: 0, + }, + { device_id: 0x70ac, + name: "pci-6289", + n_adchan: 32, + adbits: 18, + ai_fifo_depth: 2047, + .gainlkup = ai_gain_628x, + ai_speed: 1600, + n_aochan: 4, + aobits: 16, + ao_fifo_depth: 8191, + .ao_range_table = &range_ni_M_628x_ao, + reg_type: ni_reg_628x, + ao_unipolar: 1, + ao_speed: 357, + .num_p0_dio_channels = 32, + .caldac = {caldac_none}, + has_8255: 0, + }, +#endif /* TODO: fix data size */ + { device_id: 0x70C0, + name: "pci-6143", + n_adchan: 8, + adbits: 16, + ai_fifo_depth: 1024, + alwaysdither: 0, + gainlkup: ai_gain_6143, + ai_speed: 4000, + n_aochan: 0, + aobits: 0, + reg_type: ni_reg_6143, + ao_unipolar: 0, + ao_fifo_depth: 0, + .num_p0_dio_channels = 8, + .caldac = {ad8804_debug,ad8804_debug}, + }, + { device_id: 0x710D, + name: "pxi-6143", + n_adchan: 8, + adbits: 16, + ai_fifo_depth: 1024, + alwaysdither: 0, + gainlkup: ai_gain_6143, + ai_speed: 4000, + n_aochan: 0, + aobits: 0, + reg_type: ni_reg_6143, + ao_unipolar: 0, + ao_fifo_depth: 0, + .num_p0_dio_channels = 8, + .caldac = {ad8804_debug,ad8804_debug}, + }, +}; +#define n_pcimio_boards ((sizeof(ni_boards)/sizeof(ni_boards[0]))) + +/* How we access STC registers */ + +/* We automatically take advantage of STC registers that can be + * read/written directly in the I/O space of the board. Most + * PCIMIO devices map the low 8 STC registers to iobase+addr*2. + * The 611x devices map the write registers to iobase+addr*2, and + * the read registers to iobase+(addr-1)*2. 
*/ +/* However, the 611x boards still aren't working, so I'm disabling + * non-windowed STC access temporarily */ + +static void e_series_win_out(struct a4l_device *dev, uint16_t data, int reg) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&devpriv->window_lock, flags); + ni_writew(reg, Window_Address); + ni_writew(data, Window_Data); + rtdm_lock_put_irqrestore(&devpriv->window_lock, flags); +} + +static uint16_t e_series_win_in(struct a4l_device *dev, int reg) +{ + unsigned long flags; + uint16_t ret; + + rtdm_lock_get_irqsave(&devpriv->window_lock, flags); + ni_writew(reg, Window_Address); + ret = ni_readw(Window_Data); + rtdm_lock_put_irqrestore(&devpriv->window_lock,flags); + + return ret; +} + +static void m_series_stc_writew(struct a4l_device *dev, uint16_t data, int reg) +{ + unsigned offset; + switch(reg) + { + case ADC_FIFO_Clear: + offset = M_Offset_AI_FIFO_Clear; + break; + case AI_Command_1_Register: + offset = M_Offset_AI_Command_1; + break; + case AI_Command_2_Register: + offset = M_Offset_AI_Command_2; + break; + case AI_Mode_1_Register: + offset = M_Offset_AI_Mode_1; + break; + case AI_Mode_2_Register: + offset = M_Offset_AI_Mode_2; + break; + case AI_Mode_3_Register: + offset = M_Offset_AI_Mode_3; + break; + case AI_Output_Control_Register: + offset = M_Offset_AI_Output_Control; + break; + case AI_Personal_Register: + offset = M_Offset_AI_Personal; + break; + case AI_SI2_Load_A_Register: + /* This is actually a 32 bit register on m series boards */ + ni_writel(data, M_Offset_AI_SI2_Load_A); + return; + break; + case AI_SI2_Load_B_Register: + /* This is actually a 32 bit register on m series boards */ + ni_writel(data, M_Offset_AI_SI2_Load_B); + return; + break; + case AI_START_STOP_Select_Register: + offset = M_Offset_AI_START_STOP_Select; + break; + case AI_Trigger_Select_Register: + offset = M_Offset_AI_Trigger_Select; + break; + case Analog_Trigger_Etc_Register: + offset = M_Offset_Analog_Trigger_Etc; + break; + case AO_Command_1_Register: + offset = M_Offset_AO_Command_1; + break; + case AO_Command_2_Register: + offset = M_Offset_AO_Command_2; + break; + case AO_Mode_1_Register: + offset = M_Offset_AO_Mode_1; + break; + case AO_Mode_2_Register: + offset = M_Offset_AO_Mode_2; + break; + case AO_Mode_3_Register: + offset = M_Offset_AO_Mode_3; + break; + case AO_Output_Control_Register: + offset = M_Offset_AO_Output_Control; + break; + case AO_Personal_Register: + offset = M_Offset_AO_Personal; + break; + case AO_Start_Select_Register: + offset = M_Offset_AO_Start_Select; + break; + case AO_Trigger_Select_Register: + offset = M_Offset_AO_Trigger_Select; + break; + case Clock_and_FOUT_Register: + offset = M_Offset_Clock_and_FOUT; + break; + case Configuration_Memory_Clear: + offset = M_Offset_Configuration_Memory_Clear; + break; + case DAC_FIFO_Clear: + offset = M_Offset_AO_FIFO_Clear; + break; + case DIO_Control_Register: + rtdm_printk("%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n", __FUNCTION__, reg); + return; + break; + case G_Autoincrement_Register(0): + offset = M_Offset_G0_Autoincrement; + break; + case G_Autoincrement_Register(1): + offset = M_Offset_G1_Autoincrement; + break; + case G_Command_Register(0): + offset = M_Offset_G0_Command; + break; + case G_Command_Register(1): + offset = M_Offset_G1_Command; + break; + case G_Input_Select_Register(0): + offset = M_Offset_G0_Input_Select; + break; + case G_Input_Select_Register(1): + offset = M_Offset_G1_Input_Select; + break; + case G_Mode_Register(0): + offset = M_Offset_G0_Mode; + 
break; + case G_Mode_Register(1): + offset = M_Offset_G1_Mode; + break; + case Interrupt_A_Ack_Register: + offset = M_Offset_Interrupt_A_Ack; + break; + case Interrupt_A_Enable_Register: + offset = M_Offset_Interrupt_A_Enable; + break; + case Interrupt_B_Ack_Register: + offset = M_Offset_Interrupt_B_Ack; + break; + case Interrupt_B_Enable_Register: + offset = M_Offset_Interrupt_B_Enable; + break; + case Interrupt_Control_Register: + offset = M_Offset_Interrupt_Control; + break; + case IO_Bidirection_Pin_Register: + offset = M_Offset_IO_Bidirection_Pin; + break; + case Joint_Reset_Register: + offset = M_Offset_Joint_Reset; + break; + case RTSI_Trig_A_Output_Register: + offset = M_Offset_RTSI_Trig_A_Output; + break; + case RTSI_Trig_B_Output_Register: + offset = M_Offset_RTSI_Trig_B_Output; + break; + case RTSI_Trig_Direction_Register: + offset = M_Offset_RTSI_Trig_Direction; + break; + /* FIXME: DIO_Output_Register (16 bit reg) is replaced + by M_Offset_Static_Digital_Output (32 bit) and + M_Offset_SCXI_Serial_Data_Out (8 bit) */ + default: + rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n", + __FUNCTION__, reg); + BUG(); + return; + } + ni_writew(data, offset); +} + +static uint16_t m_series_stc_readw(struct a4l_device *dev, int reg) +{ + unsigned offset; + switch(reg) + { + case AI_Status_1_Register: + offset = M_Offset_AI_Status_1; + break; + case AO_Status_1_Register: + offset = M_Offset_AO_Status_1; + break; + case AO_Status_2_Register: + offset = M_Offset_AO_Status_2; + break; + case DIO_Serial_Input_Register: + return ni_readb(M_Offset_SCXI_Serial_Data_In); + break; + case Joint_Status_1_Register: + offset = M_Offset_Joint_Status_1; + break; + case Joint_Status_2_Register: + offset = M_Offset_Joint_Status_2; + break; + case G_Status_Register: + offset = M_Offset_G01_Status; + break; + default: + rtdm_printk("%s: bug! " + "unhandled register=0x%x in switch.\n", + __FUNCTION__, reg); + BUG(); + return 0; + break; + } + return ni_readw(offset); +} + +static void m_series_stc_writel(struct a4l_device *dev, uint32_t data, int reg) +{ + unsigned offset; + + switch(reg) + { + case AI_SC_Load_A_Registers: + offset = M_Offset_AI_SC_Load_A; + break; + case AI_SI_Load_A_Registers: + offset = M_Offset_AI_SI_Load_A; + break; + case AO_BC_Load_A_Register: + offset = M_Offset_AO_BC_Load_A; + break; + case AO_UC_Load_A_Register: + offset = M_Offset_AO_UC_Load_A; + break; + case AO_UI_Load_A_Register: + offset = M_Offset_AO_UI_Load_A; + break; + case G_Load_A_Register(0): + offset = M_Offset_G0_Load_A; + break; + case G_Load_A_Register(1): + offset = M_Offset_G1_Load_A; + break; + case G_Load_B_Register(0): + offset = M_Offset_G0_Load_B; + break; + case G_Load_B_Register(1): + offset = M_Offset_G1_Load_B; + break; + default: + rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n", + __FUNCTION__, reg); + BUG(); + return; + } + ni_writel(data, offset); +} + +static uint32_t m_series_stc_readl(struct a4l_device *dev, int reg) +{ + unsigned offset; + switch(reg) + { + case G_HW_Save_Register(0): + offset = M_Offset_G0_HW_Save; + break; + case G_HW_Save_Register(1): + offset = M_Offset_G1_HW_Save; + break; + case G_Save_Register(0): + offset = M_Offset_G0_Save; + break; + case G_Save_Register(1): + offset = M_Offset_G1_Save; + break; + default: + rtdm_printk("%s: bug! 
unhandled register=0x%x in switch.\n", + __FUNCTION__, reg); + BUG(); + return 0; + } + return ni_readl(offset); +} + +static void win_out2(struct a4l_device *dev, uint32_t data, int reg) +{ + devpriv->stc_writew(dev, data >> 16, reg); + devpriv->stc_writew(dev, data & 0xffff, reg + 1); +} + +static uint32_t win_in2(struct a4l_device *dev, int reg) +{ + uint32_t bits; + bits = devpriv->stc_readw(dev, reg) << 16; + bits |= devpriv->stc_readw(dev, reg + 1); + return bits; +} + +static void m_series_init_eeprom_buffer(struct a4l_device *dev) +{ + static const int Start_Cal_EEPROM = 0x400; + static const unsigned window_size = 10; + unsigned old_iodwbsr_bits; + unsigned old_iodwbsr1_bits; + unsigned old_iodwcr1_bits; + int i; + + old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR); + old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1); + old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1); + writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR); + writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr), + devpriv->mite->mite_io_addr + MITE_IODWBSR_1); + writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWCR_1); + writel(0xf, devpriv->mite->mite_io_addr + 0x30); + + for(i = 0; i < M_SERIES_EEPROM_SIZE; ++i) + { + devpriv->eeprom_buffer[i] = ni_readb(Start_Cal_EEPROM + i); + } + + writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1); + writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR); + writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1); + writel(0x0, devpriv->mite->mite_io_addr + 0x30); +} + +static void init_6143(struct a4l_device *dev) +{ + /* Disable interrupts */ + devpriv->stc_writew(dev, 0, Interrupt_Control_Register); + + /* Initialise 6143 AI specific bits */ + + /* Set G0,G1 DMA mode to E series version */ + ni_writeb(0x00, Magic_6143); + /* Set EOCMode, ADCMode and pipelinedelay */ + ni_writeb(0x80, PipelineDelay_6143); + /* Set EOC Delay */ + ni_writeb(0x00, EOC_Set_6143); + + /* Set the FIFO half full level */ + ni_writel(boardtype.ai_fifo_depth / 2, AIFIFO_Flag_6143); + + /* Strobe Relay disable bit */ + devpriv->ai_calib_source_enabled = 0; + ni_writew(devpriv->ai_calib_source | Calibration_Channel_6143_RelayOff, + Calibration_Channel_6143); + ni_writew(devpriv->ai_calib_source, Calibration_Channel_6143); +} + +static int pcimio_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg) +{ + int ret, bus, slot, i, irq; + struct mite_struct *mite = NULL; + struct ni_board_struct *board = NULL; + + if(arg->opts == NULL || arg->opts_size == 0) + bus = slot = 0; + else { + bus = arg->opts_size >= sizeof(unsigned long) ? + ((unsigned long *)arg->opts)[0] : 0; + slot = arg->opts_size >= sizeof(unsigned long) * 2 ? 
+ ((unsigned long *)arg->opts)[1] : 0;
+ }
+
+ for(i = 0; i < n_pcimio_boards && mite == NULL; i++) {
+ mite = a4l_mite_find_device(bus, slot, ni_boards[i].device_id);
+ board = &ni_boards[i];
+ }
+
+ if(mite == NULL)
+ return -ENOENT;
+
+ devpriv->irq_polarity = PCIMIO_IRQ_POLARITY;
+ devpriv->irq_pin = 0;
+
+ devpriv->mite = mite;
+ devpriv->board_ptr = board;
+
+ devpriv->ai_mite_ring = mite_alloc_ring(mite);
+ devpriv->ao_mite_ring = mite_alloc_ring(mite);
+ devpriv->cdo_mite_ring = mite_alloc_ring(mite);
+ devpriv->gpct_mite_ring[0] = mite_alloc_ring(mite);
+ devpriv->gpct_mite_ring[1] = mite_alloc_ring(mite);
+
+ if(devpriv->ai_mite_ring == NULL ||
+ devpriv->ao_mite_ring == NULL ||
+ devpriv->cdo_mite_ring == NULL ||
+ devpriv->gpct_mite_ring[0] == NULL ||
+ devpriv->gpct_mite_ring[1] == NULL)
+ return -ENOMEM;
+
+ a4l_info(dev, "found %s board\n", boardtype.name);
+
+ if(boardtype.reg_type & ni_reg_m_series_mask)
+ {
+ devpriv->stc_writew = &m_series_stc_writew;
+ devpriv->stc_readw = &m_series_stc_readw;
+ devpriv->stc_writel = &m_series_stc_writel;
+ devpriv->stc_readl = &m_series_stc_readl;
+ }else
+ {
+ devpriv->stc_writew = &e_series_win_out;
+ devpriv->stc_readw = &e_series_win_in;
+ devpriv->stc_writel = &win_out2;
+ devpriv->stc_readl = &win_in2;
+ }
+
+ ret = a4l_mite_setup(devpriv->mite, 0);
+ if(ret < 0)
+ {
+ a4l_err(dev, "pcimio_attach: error setting up mite\n");
+ return ret;
+ }
+
+ if(boardtype.reg_type & ni_reg_m_series_mask)
+ m_series_init_eeprom_buffer(dev);
+ if(boardtype.reg_type == ni_reg_6143)
+ init_6143(dev);
+
+ irq = mite_irq(devpriv->mite);
+
+ if(irq == 0){
+ a4l_warn(dev, "pcimio_attach: unknown irq (bad)\n\n");
+ }else{
+ a4l_info(dev, "found irq %u\n", irq);
+ ret = a4l_request_irq(dev,
+ irq,
+ a4l_ni_E_interrupt, RTDM_IRQTYPE_SHARED, dev);
+ if(ret < 0)
+ a4l_err(dev, "pcimio_attach: irq not available\n");
+ }
+
+ ret = a4l_ni_E_init(dev);
+ if(ret < 0)
+ return ret;
+
+ dev->driver->driver_name = devpriv->board_ptr->name;
+
+ return ret;
+}
+
+static int pcimio_detach(struct a4l_device *dev)
+{
+ if(a4l_get_irq(dev)!=A4L_IRQ_UNUSED){
+ a4l_free_irq(dev,a4l_get_irq(dev));
+ }
+
+ if(dev->priv != NULL && devpriv->mite != NULL)
+ {
+ mite_free_ring(devpriv->ai_mite_ring);
+ mite_free_ring(devpriv->ao_mite_ring);
+ /* the CDO ring is allocated at attach time too; freeing it here plugs a leak */
+ mite_free_ring(devpriv->cdo_mite_ring);
+ mite_free_ring(devpriv->gpct_mite_ring[0]);
+ mite_free_ring(devpriv->gpct_mite_ring[1]);
+ a4l_mite_unsetup(devpriv->mite);
+ }
+
+ dev->driver->driver_name = NULL;
+
+ return 0;
+}
+
+static struct a4l_driver pcimio_drv = {
+ .owner = THIS_MODULE,
+ .board_name = "analogy_ni_pcimio",
+ .driver_name = NULL,
+ .attach = pcimio_attach,
+ .detach = pcimio_detach,
+ .privdata_size = sizeof(ni_private),
+};
+
+static int __init pcimio_init(void)
+{
+ return a4l_register_drv(&pcimio_drv);
+}
+
+static void __exit pcimio_cleanup(void)
+{
+ a4l_unregister_drv(&pcimio_drv);
+}
+
+MODULE_DESCRIPTION("Analogy driver for NI PCI-MIO series cards");
+MODULE_LICENSE("GPL");
+
+module_init(pcimio_init);
+module_exit(pcimio_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c
new file mode 100644
index 0000000..bcce728
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c
@@ -0,0 +1,1999 @@
+/*
+ * Hardware driver for NI general purpose counter
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This code is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Description: National Instruments general purpose counters + * This module is not used directly by end-users. Rather, it is used + * by other drivers (for example ni_660x and ni_pcimio) to provide + * support for NI's general purpose counters. It was originally based + * on the counter code from ni_660x.c and ni_mio_common.c. + * + * Author: + * J.P. Mellor <jpmellor@rose-hulman.edu> + * Herman.Bruyninckx@mech.kuleuven.ac.be + * Wim.Meeussen@mech.kuleuven.ac.be, + * Klaas.Gadeyne@mech.kuleuven.ac.be, + * Frank Mori Hess <fmhess@users.sourceforge.net> + * + * References: + * DAQ 660x Register-Level Programmer Manual (NI 370505A-01) + * DAQ 6601/6602 User Manual (NI 322137B-01) + * 340934b.pdf DAQ-STC reference manual + * + * TODO: + * - Support use of both banks X and Y + * + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <rtdm/analogy/device.h> + +#include "ni_tio.h" +#include "ni_mio.h" + +static inline void write_register(struct ni_gpct *counter, + unsigned int bits, enum ni_gpct_register reg) +{ + BUG_ON(reg >= NITIO_Num_Registers); + counter->counter_dev->write_register(counter, bits, reg); +} + +static inline unsigned int read_register(struct ni_gpct *counter, + enum ni_gpct_register reg) +{ + BUG_ON(reg >= NITIO_Num_Registers); + return counter->counter_dev->read_register(counter, reg); +} + +struct ni_gpct_device *a4l_ni_gpct_device_construct(struct a4l_device * dev, + void (*write_register) (struct ni_gpct * counter, unsigned int bits, + enum ni_gpct_register reg), + unsigned int (*read_register) (struct ni_gpct * counter, + enum ni_gpct_register reg), enum ni_gpct_variant variant, + unsigned int num_counters) +{ + struct ni_gpct_device *counter_dev = + kmalloc(sizeof(struct ni_gpct_device), GFP_KERNEL); + if (counter_dev == NULL) + return NULL; + + memset(counter_dev, 0, sizeof(struct ni_gpct_device)); + + counter_dev->dev = dev; + counter_dev->write_register = write_register; + counter_dev->read_register = read_register; + counter_dev->variant = variant; + rtdm_lock_init(&counter_dev->regs_lock); + BUG_ON(num_counters == 0); + + counter_dev->counters = + kmalloc(sizeof(struct ni_gpct *) * num_counters, GFP_KERNEL); + + if (counter_dev->counters == NULL) { + kfree(counter_dev); + return NULL; + } + + memset(counter_dev->counters, 0, sizeof(struct ni_gpct *) * num_counters); + + counter_dev->num_counters = num_counters; + return counter_dev; +} + +void a4l_ni_gpct_device_destroy(struct ni_gpct_device *counter_dev) +{ + if (counter_dev->counters == NULL) + return; + kfree(counter_dev->counters); + kfree(counter_dev); +} + +static +int ni_tio_counting_mode_registers_present(const struct ni_gpct_device *counter_dev) +{ + switch (counter_dev->variant) { + case ni_gpct_variant_e_series: + return 0; + break; + case ni_gpct_variant_m_series: + case ni_gpct_variant_660x: + return 1; + break; + default: + 
BUG(); + break; + } + return 0; +} + +static +int ni_tio_second_gate_registers_present(const struct ni_gpct_device *counter_dev) +{ + switch (counter_dev->variant) { + case ni_gpct_variant_e_series: + return 0; + break; + case ni_gpct_variant_m_series: + case ni_gpct_variant_660x: + return 1; + break; + default: + BUG(); + break; + } + return 0; +} + +static inline +void ni_tio_set_bits_transient(struct ni_gpct *counter, + enum ni_gpct_register register_index, + unsigned int bit_mask, + unsigned int bit_values, + unsigned transient_bit_values) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned long flags; + + BUG_ON(register_index >= NITIO_Num_Registers); + rtdm_lock_get_irqsave(&counter_dev->regs_lock, flags); + counter_dev->regs[register_index] &= ~bit_mask; + counter_dev->regs[register_index] |= (bit_values & bit_mask); + write_register(counter, + counter_dev->regs[register_index] | transient_bit_values, + register_index); + mmiowb(); + rtdm_lock_put_irqrestore(&counter_dev->regs_lock, flags); +} + +/* ni_tio_set_bits( ) is for safely writing to registers whose bits + may be twiddled in interrupt context, or whose software copy may be + read in interrupt context. */ +static inline void ni_tio_set_bits(struct ni_gpct *counter, + enum ni_gpct_register register_index, + unsigned int bit_mask, + unsigned int bit_values) +{ + ni_tio_set_bits_transient(counter, + register_index, + bit_mask, bit_values, 0x0); +} + +/* ni_tio_get_soft_copy( ) is for safely reading the software copy of + a register whose bits might be modified in interrupt context, or whose + software copy might need to be read in interrupt context. */ +static inline +unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter, + enum ni_gpct_register register_index) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned long flags; + unsigned value; + + BUG_ON(register_index >= NITIO_Num_Registers); + rtdm_lock_get_irqsave(&counter_dev->regs_lock, flags); + value = counter_dev->regs[register_index]; + rtdm_lock_put_irqrestore(&counter_dev->regs_lock, flags); + return value; +} + +static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter) +{ + write_register(counter, Gi_Reset_Bit(counter->counter_index), + NITIO_Gxx_Joint_Reset_Reg(counter->counter_index)); +} + +void a4l_ni_tio_init_counter(struct ni_gpct *counter) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + + ni_tio_reset_count_and_disarm(counter); + /* Initialize counter registers */ + counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)] = + 0x0; + write_register(counter, + counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter-> + counter_index)], + NITIO_Gi_Autoincrement_Reg(counter->counter_index)); + ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), + ~0, Gi_Synchronize_Gate_Bit); + ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), ~0, + 0); + counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = 0x0; + write_register(counter, + counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)], + NITIO_Gi_LoadA_Reg(counter->counter_index)); + counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = 0x0; + write_register(counter, + counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)], + NITIO_Gi_LoadB_Reg(counter->counter_index)); + ni_tio_set_bits(counter, + NITIO_Gi_Input_Select_Reg(counter->counter_index), ~0, 0); + if (ni_tio_counting_mode_registers_present(counter_dev)) { + ni_tio_set_bits(counter, + 
NITIO_Gi_Counting_Mode_Reg(counter->counter_index), ~0, + 0); + } + if (ni_tio_second_gate_registers_present(counter_dev)) { + counter_dev->regs[NITIO_Gi_Second_Gate_Reg(counter-> + counter_index)] = 0x0; + write_register(counter, + counter_dev->regs[NITIO_Gi_Second_Gate_Reg(counter-> + counter_index)], + NITIO_Gi_Second_Gate_Reg(counter->counter_index)); + } + ni_tio_set_bits(counter, + NITIO_Gi_DMA_Config_Reg(counter->counter_index), ~0, 0x0); + ni_tio_set_bits(counter, + NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index), ~0, 0x0); +} + +static lsampl_t ni_tio_counter_status(struct ni_gpct *counter) +{ + lsampl_t status = 0; + unsigned int bits; + + bits = read_register(counter,NITIO_Gxx_Status_Reg(counter->counter_index)); + if (bits & Gi_Armed_Bit(counter->counter_index)) { + status |= A4L_COUNTER_ARMED; + if (bits & Gi_Counting_Bit(counter->counter_index)) + status |= A4L_COUNTER_COUNTING; + } + return status; +} + +static +uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter, + unsigned int generic_clock_source); +static +unsigned int ni_tio_generic_clock_src_select(const struct ni_gpct *counter); + +static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned counting_mode_reg = + NITIO_Gi_Counting_Mode_Reg(counter->counter_index); + static const uint64_t min_normal_sync_period_ps = 25000; + const uint64_t clock_period_ps = ni_tio_clock_period_ps(counter, + ni_tio_generic_clock_src_select(counter)); + + if (ni_tio_counting_mode_registers_present(counter_dev) == 0) + return; + + switch (ni_tio_get_soft_copy(counter, + counting_mode_reg) & Gi_Counting_Mode_Mask) { + case Gi_Counting_Mode_QuadratureX1_Bits: + case Gi_Counting_Mode_QuadratureX2_Bits: + case Gi_Counting_Mode_QuadratureX4_Bits: + case Gi_Counting_Mode_Sync_Source_Bits: + force_alt_sync = 1; + break; + default: + break; + } + + /* It's not clear what we should do if clock_period is + unknown, so we are not using the alt sync bit in that case, + but allow the caller to decide by using the force_alt_sync + parameter. 
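Spelled out, the rule applied just below is: set the alternate-sync bit when the caller forces it, or when the source period is known and shorter than 25000 ps (i.e. the source runs faster than 40 MHz). A condensed restatement (the helper name is illustrative, not part of the driver):

static inline int ni_tio_wants_alt_sync(int force_alt_sync,
					uint64_t clock_period_ps)
{
	const uint64_t min_normal_sync_period_ps = 25000;	/* 25 ns */

	/* an unknown period (0) never triggers alt sync on its own */
	return force_alt_sync ||
		(clock_period_ps != 0 &&
		 clock_period_ps < min_normal_sync_period_ps);
}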
*/ + if (force_alt_sync || + (clock_period_ps + && clock_period_ps < min_normal_sync_period_ps)) { + ni_tio_set_bits(counter, counting_mode_reg, + Gi_Alternate_Sync_Bit(counter_dev->variant), + Gi_Alternate_Sync_Bit(counter_dev->variant)); + } else { + ni_tio_set_bits(counter, counting_mode_reg, + Gi_Alternate_Sync_Bit(counter_dev->variant), 0x0); + } +} + +static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned int mode) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned mode_reg_mask; + unsigned mode_reg_values; + unsigned input_select_bits = 0; + + /* these bits map directly on to the mode register */ + static const unsigned mode_reg_direct_mask = + NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK | + NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK | + NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT | + NI_GPCT_LOADING_ON_GATE_BIT | NI_GPCT_LOAD_B_SELECT_BIT; + + mode_reg_mask = mode_reg_direct_mask | Gi_Reload_Source_Switching_Bit; + mode_reg_values = mode & mode_reg_direct_mask; + switch (mode & NI_GPCT_RELOAD_SOURCE_MASK) { + case NI_GPCT_RELOAD_SOURCE_FIXED_BITS: + break; + case NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS: + mode_reg_values |= Gi_Reload_Source_Switching_Bit; + break; + case NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS: + input_select_bits |= Gi_Gate_Select_Load_Source_Bit; + mode_reg_mask |= Gi_Gating_Mode_Mask; + mode_reg_values |= Gi_Level_Gating_Bits; + break; + default: + break; + } + ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), + mode_reg_mask, mode_reg_values); + + if (ni_tio_counting_mode_registers_present(counter_dev)) { + unsigned counting_mode_bits = 0; + counting_mode_bits |= + (mode >> NI_GPCT_COUNTING_MODE_SHIFT) & + Gi_Counting_Mode_Mask; + counting_mode_bits |= + ((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT) << + Gi_Index_Phase_Bitshift) & Gi_Index_Phase_Mask; + if (mode & NI_GPCT_INDEX_ENABLE_BIT) { + counting_mode_bits |= Gi_Index_Mode_Bit; + } + ni_tio_set_bits(counter, + NITIO_Gi_Counting_Mode_Reg(counter->counter_index), + Gi_Counting_Mode_Mask | Gi_Index_Phase_Mask | + Gi_Index_Mode_Bit, counting_mode_bits); + ni_tio_set_sync_mode(counter, 0); + } + + ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), + Gi_Up_Down_Mask, + (mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT) << Gi_Up_Down_Shift); + + if (mode & NI_GPCT_OR_GATE_BIT) { + input_select_bits |= Gi_Or_Gate_Bit; + } + if (mode & NI_GPCT_INVERT_OUTPUT_BIT) { + input_select_bits |= Gi_Output_Polarity_Bit; + } + ni_tio_set_bits(counter, + NITIO_Gi_Input_Select_Reg(counter->counter_index), + Gi_Gate_Select_Load_Source_Bit | Gi_Or_Gate_Bit | + Gi_Output_Polarity_Bit, input_select_bits); + + return 0; +} + +static int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned int start_trigger) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + + unsigned int command_transient_bits = 0; + + if (arm) { + switch (start_trigger) { + case NI_GPCT_ARM_IMMEDIATE: + command_transient_bits |= Gi_Arm_Bit; + break; + case NI_GPCT_ARM_PAIRED_IMMEDIATE: + command_transient_bits |= Gi_Arm_Bit | Gi_Arm_Copy_Bit; + break; + default: + break; + } + if (ni_tio_counting_mode_registers_present(counter_dev)) { + unsigned counting_mode_bits = 0; + + switch (start_trigger) { + case NI_GPCT_ARM_IMMEDIATE: + case NI_GPCT_ARM_PAIRED_IMMEDIATE: + break; + default: + if (start_trigger & NI_GPCT_ARM_UNKNOWN) { + /* Pass-through the least + significant bits so we can + figure out what select later + */ + unsigned hw_arm_select_bits = + 
(start_trigger << + Gi_HW_Arm_Select_Shift) & + Gi_HW_Arm_Select_Mask + (counter_dev->variant); + + counting_mode_bits |= + Gi_HW_Arm_Enable_Bit | + hw_arm_select_bits; + } else { + return -EINVAL; + } + break; + } + ni_tio_set_bits(counter, + NITIO_Gi_Counting_Mode_Reg(counter-> + counter_index), + Gi_HW_Arm_Select_Mask(counter_dev-> + variant) | Gi_HW_Arm_Enable_Bit, + counting_mode_bits); + } + } else { + command_transient_bits |= Gi_Disarm_Bit; + } + ni_tio_set_bits_transient(counter, + NITIO_Gi_Command_Reg(counter->counter_index), 0, 0, + command_transient_bits); + return 0; +} + +static unsigned int ni_660x_source_select_bits(lsampl_t clock_source) +{ + unsigned int ni_660x_clock; + unsigned int i; + const unsigned int clock_select_bits = + clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK; + + switch (clock_select_bits) { + case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS: + ni_660x_clock = NI_660x_Timebase_1_Clock; + break; + case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS: + ni_660x_clock = NI_660x_Timebase_2_Clock; + break; + case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS: + ni_660x_clock = NI_660x_Timebase_3_Clock; + break; + case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS: + ni_660x_clock = NI_660x_Logic_Low_Clock; + break; + case NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS: + ni_660x_clock = NI_660x_Source_Pin_i_Clock; + break; + case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS: + ni_660x_clock = NI_660x_Next_Gate_Clock; + break; + case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS: + ni_660x_clock = NI_660x_Next_TC_Clock; + break; + default: + for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { + if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) { + ni_660x_clock = NI_660x_RTSI_Clock(i); + break; + } + } + if (i <= ni_660x_max_rtsi_channel) + break; + for (i = 0; i <= ni_660x_max_source_pin; ++i) { + if (clock_select_bits == + NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i)) { + ni_660x_clock = NI_660x_Source_Pin_Clock(i); + break; + } + } + if (i <= ni_660x_max_source_pin) + break; + ni_660x_clock = 0; + BUG(); + break; + } + return Gi_Source_Select_Bits(ni_660x_clock); +} + +static unsigned int ni_m_series_source_select_bits(lsampl_t clock_source) +{ + unsigned int ni_m_series_clock; + unsigned int i; + const unsigned int clock_select_bits = + clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK; + switch (clock_select_bits) { + case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS: + ni_m_series_clock = NI_M_Series_Timebase_1_Clock; + break; + case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS: + ni_m_series_clock = NI_M_Series_Timebase_2_Clock; + break; + case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS: + ni_m_series_clock = NI_M_Series_Timebase_3_Clock; + break; + case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS: + ni_m_series_clock = NI_M_Series_Logic_Low_Clock; + break; + case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS: + ni_m_series_clock = NI_M_Series_Next_Gate_Clock; + break; + case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS: + ni_m_series_clock = NI_M_Series_Next_TC_Clock; + break; + case NI_GPCT_PXI10_CLOCK_SRC_BITS: + ni_m_series_clock = NI_M_Series_PXI10_Clock; + break; + case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS: + ni_m_series_clock = NI_M_Series_PXI_Star_Trigger_Clock; + break; + case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS: + ni_m_series_clock = NI_M_Series_Analog_Trigger_Out_Clock; + break; + default: + for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) { + if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) { + ni_m_series_clock = NI_M_Series_RTSI_Clock(i); + break; + } + } + if (i <= ni_m_series_max_rtsi_channel) + break; + for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) { + if (clock_select_bits 
== NI_GPCT_PFI_CLOCK_SRC_BITS(i)) { + ni_m_series_clock = NI_M_Series_PFI_Clock(i); + break; + } + } + if (i <= ni_m_series_max_pfi_channel) + break; + __a4l_err("invalid clock source 0x%lx\n", + (unsigned long)clock_source); + BUG(); + ni_m_series_clock = 0; + break; + } + return Gi_Source_Select_Bits(ni_m_series_clock); +} + +static void ni_tio_set_source_subselect(struct ni_gpct *counter, + lsampl_t clock_source) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned second_gate_reg = + NITIO_Gi_Second_Gate_Reg(counter->counter_index); + + if (counter_dev->variant != ni_gpct_variant_m_series) + return; + switch (clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) { + /* Gi_Source_Subselect is zero */ + case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS: + case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS: + counter_dev->regs[second_gate_reg] &= ~Gi_Source_Subselect_Bit; + break; + /* Gi_Source_Subselect is one */ + case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS: + case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS: + counter_dev->regs[second_gate_reg] |= Gi_Source_Subselect_Bit; + break; + /* Gi_Source_Subselect doesn't matter */ + default: + return; + break; + } + write_register(counter, counter_dev->regs[second_gate_reg], + second_gate_reg); +} + +static int ni_tio_set_clock_src(struct ni_gpct *counter, + lsampl_t clock_source, lsampl_t period_ns) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned input_select_bits = 0; + static const uint64_t pico_per_nano = 1000; + + /* FIXME: validate clock source */ + switch (counter_dev->variant) { + case ni_gpct_variant_660x: + input_select_bits |= ni_660x_source_select_bits(clock_source); + break; + case ni_gpct_variant_e_series: + case ni_gpct_variant_m_series: + input_select_bits |= + ni_m_series_source_select_bits(clock_source); + break; + default: + BUG(); + break; + } + if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT) + input_select_bits |= Gi_Source_Polarity_Bit; + ni_tio_set_bits(counter, + NITIO_Gi_Input_Select_Reg(counter->counter_index), + Gi_Source_Select_Mask | Gi_Source_Polarity_Bit, + input_select_bits); + ni_tio_set_source_subselect(counter, clock_source); + if (ni_tio_counting_mode_registers_present(counter_dev)) { + const unsigned prescaling_mode = + clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK; + unsigned counting_mode_bits = 0; + + switch (prescaling_mode) { + case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS: + break; + case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS: + counting_mode_bits |= + Gi_Prescale_X2_Bit(counter_dev->variant); + break; + case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS: + counting_mode_bits |= + Gi_Prescale_X8_Bit(counter_dev->variant); + break; + default: + return -EINVAL; + break; + } + ni_tio_set_bits(counter, + NITIO_Gi_Counting_Mode_Reg(counter->counter_index), + Gi_Prescale_X2_Bit(counter_dev-> + variant) | Gi_Prescale_X8_Bit(counter_dev-> + variant), counting_mode_bits); + } + counter->clock_period_ps = pico_per_nano * period_ns; + ni_tio_set_sync_mode(counter, 0); + return 0; +} + +static unsigned int ni_tio_clock_src_modifiers(const struct ni_gpct *counter) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned counting_mode_bits = ni_tio_get_soft_copy(counter, + NITIO_Gi_Counting_Mode_Reg(counter->counter_index)); + unsigned int bits = 0; + + if (ni_tio_get_soft_copy(counter, + NITIO_Gi_Input_Select_Reg(counter-> + counter_index)) & Gi_Source_Polarity_Bit) + bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT; + if (counting_mode_bits & Gi_Prescale_X2_Bit(counter_dev->variant)) + bits 
|= NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS; + if (counting_mode_bits & Gi_Prescale_X8_Bit(counter_dev->variant)) + bits |= NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS; + return bits; +} + +static unsigned int ni_m_series_clock_src_select(const struct ni_gpct *counter) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned int second_gate_reg = + NITIO_Gi_Second_Gate_Reg(counter->counter_index); + unsigned int i, clock_source = 0; + + const unsigned int input_select = (ni_tio_get_soft_copy(counter, + NITIO_Gi_Input_Select_Reg(counter-> + counter_index)) & Gi_Source_Select_Mask) >> + Gi_Source_Select_Shift; + + switch (input_select) { + case NI_M_Series_Timebase_1_Clock: + clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS; + break; + case NI_M_Series_Timebase_2_Clock: + clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS; + break; + case NI_M_Series_Timebase_3_Clock: + if (counter_dev-> + regs[second_gate_reg] & Gi_Source_Subselect_Bit) + clock_source = + NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS; + else + clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS; + break; + case NI_M_Series_Logic_Low_Clock: + clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS; + break; + case NI_M_Series_Next_Gate_Clock: + if (counter_dev-> + regs[second_gate_reg] & Gi_Source_Subselect_Bit) + clock_source = NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS; + else + clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS; + break; + case NI_M_Series_PXI10_Clock: + clock_source = NI_GPCT_PXI10_CLOCK_SRC_BITS; + break; + case NI_M_Series_Next_TC_Clock: + clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS; + break; + default: + for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) { + if (input_select == NI_M_Series_RTSI_Clock(i)) { + clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i); + break; + } + } + if (i <= ni_m_series_max_rtsi_channel) + break; + for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) { + if (input_select == NI_M_Series_PFI_Clock(i)) { + clock_source = NI_GPCT_PFI_CLOCK_SRC_BITS(i); + break; + } + } + if (i <= ni_m_series_max_pfi_channel) + break; + BUG(); + break; + } + clock_source |= ni_tio_clock_src_modifiers(counter); + return clock_source; +} + +static unsigned int ni_660x_clock_src_select(const struct ni_gpct *counter) +{ + unsigned int i, clock_source = 0; + const unsigned input_select = (ni_tio_get_soft_copy(counter, + NITIO_Gi_Input_Select_Reg(counter-> + counter_index)) & Gi_Source_Select_Mask) >> + Gi_Source_Select_Shift; + + switch (input_select) { + case NI_660x_Timebase_1_Clock: + clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS; + break; + case NI_660x_Timebase_2_Clock: + clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS; + break; + case NI_660x_Timebase_3_Clock: + clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS; + break; + case NI_660x_Logic_Low_Clock: + clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS; + break; + case NI_660x_Source_Pin_i_Clock: + clock_source = NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS; + break; + case NI_660x_Next_Gate_Clock: + clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS; + break; + case NI_660x_Next_TC_Clock: + clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS; + break; + default: + for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { + if (input_select == NI_660x_RTSI_Clock(i)) { + clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i); + break; + } + } + if (i <= ni_660x_max_rtsi_channel) + break; + for (i = 0; i <= ni_660x_max_source_pin; ++i) { + if (input_select == NI_660x_Source_Pin_Clock(i)) { + clock_source = + NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i); + break; + } + } + if (i <= 
ni_660x_max_source_pin) + break; + BUG(); + break; + } + clock_source |= ni_tio_clock_src_modifiers(counter); + return clock_source; +} + +static unsigned int ni_tio_generic_clock_src_select(const struct ni_gpct *counter) +{ + switch (counter->counter_dev->variant) { + case ni_gpct_variant_e_series: + case ni_gpct_variant_m_series: + return ni_m_series_clock_src_select(counter); + break; + case ni_gpct_variant_660x: + return ni_660x_clock_src_select(counter); + break; + default: + BUG(); + break; + } + return 0; +} + +static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter, + unsigned int generic_clock_source) +{ + uint64_t clock_period_ps; + + switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) { + case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS: + clock_period_ps = 50000; + break; + case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS: + clock_period_ps = 10000000; + break; + case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS: + clock_period_ps = 12500; + break; + case NI_GPCT_PXI10_CLOCK_SRC_BITS: + clock_period_ps = 100000; + break; + default: + /* Clock period is specified by user with prescaling + already taken into account. */ + return counter->clock_period_ps; + break; + } + + switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) { + case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS: + break; + case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS: + clock_period_ps *= 2; + break; + case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS: + clock_period_ps *= 8; + break; + default: + BUG(); + break; + } + return clock_period_ps; +} + +static void ni_tio_get_clock_src(struct ni_gpct *counter, + unsigned int * clock_source, + unsigned int * period_ns) +{ + static const unsigned int pico_per_nano = 1000; + uint64_t temp64; + + *clock_source = ni_tio_generic_clock_src_select(counter); + temp64 = ni_tio_clock_period_ps(counter, *clock_source); + do_div(temp64, pico_per_nano); + *period_ns = temp64; +} + +static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter, + lsampl_t gate_source) +{ + const unsigned int mode_mask = Gi_Gate_Polarity_Bit | Gi_Gating_Mode_Mask; + unsigned int mode_values = 0; + + if (gate_source & CR_INVERT) { + mode_values |= Gi_Gate_Polarity_Bit; + } + if (gate_source & CR_EDGE) { + mode_values |= Gi_Rising_Edge_Gating_Bits; + } else { + mode_values |= Gi_Level_Gating_Bits; + } + ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), + mode_mask, mode_values); +} + +static int ni_660x_set_first_gate(struct ni_gpct *counter, lsampl_t gate_source) +{ + const unsigned int selected_gate = CR_CHAN(gate_source); + /* Bits of selected_gate that may be meaningful to + input select register */ + const unsigned int selected_gate_mask = 0x1f; + unsigned ni_660x_gate_select; + unsigned i; + + switch (selected_gate) { + case NI_GPCT_NEXT_SOURCE_GATE_SELECT: + ni_660x_gate_select = NI_660x_Next_SRC_Gate_Select; + break; + case NI_GPCT_NEXT_OUT_GATE_SELECT: + case NI_GPCT_LOGIC_LOW_GATE_SELECT: + case NI_GPCT_SOURCE_PIN_i_GATE_SELECT: + case NI_GPCT_GATE_PIN_i_GATE_SELECT: + ni_660x_gate_select = selected_gate & selected_gate_mask; + break; + default: + for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { + if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) { + ni_660x_gate_select = + selected_gate & selected_gate_mask; + break; + } + } + if (i <= ni_660x_max_rtsi_channel) + break; + for (i = 0; i <= ni_660x_max_gate_pin; ++i) { + if (selected_gate == NI_GPCT_GATE_PIN_GATE_SELECT(i)) { + ni_660x_gate_select = + selected_gate & selected_gate_mask; + break; + } + } + if (i <= ni_660x_max_gate_pin) + 
break; + return -EINVAL; + break; + } + ni_tio_set_bits(counter, + NITIO_Gi_Input_Select_Reg(counter->counter_index), + Gi_Gate_Select_Mask, Gi_Gate_Select_Bits(ni_660x_gate_select)); + return 0; +} + +static int ni_m_series_set_first_gate(struct ni_gpct *counter, + lsampl_t gate_source) +{ + const unsigned int selected_gate = CR_CHAN(gate_source); + /* bits of selected_gate that may be meaningful to input select register */ + const unsigned int selected_gate_mask = 0x1f; + unsigned int i, ni_m_series_gate_select; + + switch (selected_gate) { + case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT: + case NI_GPCT_AI_START2_GATE_SELECT: + case NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT: + case NI_GPCT_NEXT_OUT_GATE_SELECT: + case NI_GPCT_AI_START1_GATE_SELECT: + case NI_GPCT_NEXT_SOURCE_GATE_SELECT: + case NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT: + case NI_GPCT_LOGIC_LOW_GATE_SELECT: + ni_m_series_gate_select = selected_gate & selected_gate_mask; + break; + default: + for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) { + if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) { + ni_m_series_gate_select = + selected_gate & selected_gate_mask; + break; + } + } + if (i <= ni_m_series_max_rtsi_channel) + break; + for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) { + if (selected_gate == NI_GPCT_PFI_GATE_SELECT(i)) { + ni_m_series_gate_select = + selected_gate & selected_gate_mask; + break; + } + } + if (i <= ni_m_series_max_pfi_channel) + break; + return -EINVAL; + break; + } + ni_tio_set_bits(counter, + NITIO_Gi_Input_Select_Reg(counter->counter_index), + Gi_Gate_Select_Mask, + Gi_Gate_Select_Bits(ni_m_series_gate_select)); + return 0; +} + +static int ni_660x_set_second_gate(struct ni_gpct *counter, + lsampl_t gate_source) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned int second_gate_reg = + NITIO_Gi_Second_Gate_Reg(counter->counter_index); + const unsigned int selected_second_gate = CR_CHAN(gate_source); + /* bits of second_gate that may be meaningful to second gate register */ + static const unsigned int selected_second_gate_mask = 0x1f; + unsigned int i, ni_660x_second_gate_select; + + switch (selected_second_gate) { + case NI_GPCT_SOURCE_PIN_i_GATE_SELECT: + case NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT: + case NI_GPCT_SELECTED_GATE_GATE_SELECT: + case NI_GPCT_NEXT_OUT_GATE_SELECT: + case NI_GPCT_LOGIC_LOW_GATE_SELECT: + ni_660x_second_gate_select = + selected_second_gate & selected_second_gate_mask; + break; + case NI_GPCT_NEXT_SOURCE_GATE_SELECT: + ni_660x_second_gate_select = + NI_660x_Next_SRC_Second_Gate_Select; + break; + default: + for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { + if (selected_second_gate == NI_GPCT_RTSI_GATE_SELECT(i)) { + ni_660x_second_gate_select = + selected_second_gate & + selected_second_gate_mask; + break; + } + } + if (i <= ni_660x_max_rtsi_channel) + break; + for (i = 0; i <= ni_660x_max_up_down_pin; ++i) { + if (selected_second_gate == + NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i)) { + ni_660x_second_gate_select = + selected_second_gate & + selected_second_gate_mask; + break; + } + } + if (i <= ni_660x_max_up_down_pin) + break; + return -EINVAL; + break; + }; + counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit; + counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask; + counter_dev->regs[second_gate_reg] |= + Gi_Second_Gate_Select_Bits(ni_660x_second_gate_select); + write_register(counter, counter_dev->regs[second_gate_reg], + second_gate_reg); + return 0; +} + +static int ni_m_series_set_second_gate(struct ni_gpct *counter, + 
lsampl_t gate_source) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned int second_gate_reg = + NITIO_Gi_Second_Gate_Reg(counter->counter_index); + const unsigned int selected_second_gate = CR_CHAN(gate_source); + /* Bits of second_gate that may be meaningful to second gate register */ + static const unsigned int selected_second_gate_mask = 0x1f; + unsigned int ni_m_series_second_gate_select; + + /* FIXME: We don't know what the m-series second gate codes + are, so we'll just pass the bits through for now. */ + switch (selected_second_gate) { + default: + ni_m_series_second_gate_select = + selected_second_gate & selected_second_gate_mask; + break; + }; + counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit; + counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask; + counter_dev->regs[second_gate_reg] |= + Gi_Second_Gate_Select_Bits(ni_m_series_second_gate_select); + write_register(counter, counter_dev->regs[second_gate_reg], + second_gate_reg); + return 0; +} + +static int ni_tio_set_gate_src(struct ni_gpct *counter, + unsigned int gate_index, lsampl_t gate_source) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned int second_gate_reg = + NITIO_Gi_Second_Gate_Reg(counter->counter_index); + + switch (gate_index) { + case 0: + if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) { + ni_tio_set_bits(counter, + NITIO_Gi_Mode_Reg(counter->counter_index), + Gi_Gating_Mode_Mask, Gi_Gating_Disabled_Bits); + return 0; + } + ni_tio_set_first_gate_modifiers(counter, gate_source); + switch (counter_dev->variant) { + case ni_gpct_variant_e_series: + case ni_gpct_variant_m_series: + return ni_m_series_set_first_gate(counter, gate_source); + break; + case ni_gpct_variant_660x: + return ni_660x_set_first_gate(counter, gate_source); + break; + default: + BUG(); + break; + } + break; + case 1: + if (ni_tio_second_gate_registers_present(counter_dev) == 0) + return -EINVAL; + if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) { + counter_dev->regs[second_gate_reg] &= + ~Gi_Second_Gate_Mode_Bit; + write_register(counter, + counter_dev->regs[second_gate_reg], + second_gate_reg); + return 0; + } + if (gate_source & CR_INVERT) { + counter_dev->regs[second_gate_reg] |= + Gi_Second_Gate_Polarity_Bit; + } else { + counter_dev->regs[second_gate_reg] &= + ~Gi_Second_Gate_Polarity_Bit; + } + switch (counter_dev->variant) { + case ni_gpct_variant_m_series: + return ni_m_series_set_second_gate(counter, + gate_source); + break; + case ni_gpct_variant_660x: + return ni_660x_set_second_gate(counter, gate_source); + break; + default: + BUG(); + break; + } + break; + default: + return -EINVAL; + break; + } + return 0; +} + +static int ni_tio_set_other_src(struct ni_gpct *counter, + unsigned int index, unsigned int source) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + + if (counter_dev->variant == ni_gpct_variant_m_series) { + unsigned int abz_reg, shift, mask; + + abz_reg = NITIO_Gi_ABZ_Reg(counter->counter_index); + switch (index) { + case NI_GPCT_SOURCE_ENCODER_A: + shift = 10; + break; + case NI_GPCT_SOURCE_ENCODER_B: + shift = 5; + break; + case NI_GPCT_SOURCE_ENCODER_Z: + shift = 0; + break; + default: + return -EINVAL; + break; + } + mask = 0x1f << shift; + if (source > 0x1f) { + /* Disable gate */ + source = 0x1f; + } + counter_dev->regs[abz_reg] &= ~mask; + counter_dev->regs[abz_reg] |= (source << shift) & mask; + write_register(counter, counter_dev->regs[abz_reg], abz_reg); + return 0; + } + return 
-EINVAL; +} + +static unsigned int ni_660x_first_gate_to_generic_gate_source(unsigned int ni_660x_gate_select) +{ + unsigned int i; + + switch (ni_660x_gate_select) { + case NI_660x_Source_Pin_i_Gate_Select: + return NI_GPCT_SOURCE_PIN_i_GATE_SELECT; + break; + case NI_660x_Gate_Pin_i_Gate_Select: + return NI_GPCT_GATE_PIN_i_GATE_SELECT; + break; + case NI_660x_Next_SRC_Gate_Select: + return NI_GPCT_NEXT_SOURCE_GATE_SELECT; + break; + case NI_660x_Next_Out_Gate_Select: + return NI_GPCT_NEXT_OUT_GATE_SELECT; + break; + case NI_660x_Logic_Low_Gate_Select: + return NI_GPCT_LOGIC_LOW_GATE_SELECT; + break; + default: + for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { + if (ni_660x_gate_select == NI_660x_RTSI_Gate_Select(i)) { + return NI_GPCT_RTSI_GATE_SELECT(i); + break; + } + } + if (i <= ni_660x_max_rtsi_channel) + break; + for (i = 0; i <= ni_660x_max_gate_pin; ++i) { + if (ni_660x_gate_select == + NI_660x_Gate_Pin_Gate_Select(i)) { + return NI_GPCT_GATE_PIN_GATE_SELECT(i); + break; + } + } + if (i <= ni_660x_max_gate_pin) + break; + BUG(); + break; + } + return 0; +} + +static unsigned int ni_m_series_first_gate_to_generic_gate_source(unsigned int + ni_m_series_gate_select) +{ + unsigned int i; + + switch (ni_m_series_gate_select) { + case NI_M_Series_Timestamp_Mux_Gate_Select: + return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT; + break; + case NI_M_Series_AI_START2_Gate_Select: + return NI_GPCT_AI_START2_GATE_SELECT; + break; + case NI_M_Series_PXI_Star_Trigger_Gate_Select: + return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT; + break; + case NI_M_Series_Next_Out_Gate_Select: + return NI_GPCT_NEXT_OUT_GATE_SELECT; + break; + case NI_M_Series_AI_START1_Gate_Select: + return NI_GPCT_AI_START1_GATE_SELECT; + break; + case NI_M_Series_Next_SRC_Gate_Select: + return NI_GPCT_NEXT_SOURCE_GATE_SELECT; + break; + case NI_M_Series_Analog_Trigger_Out_Gate_Select: + return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT; + break; + case NI_M_Series_Logic_Low_Gate_Select: + return NI_GPCT_LOGIC_LOW_GATE_SELECT; + break; + default: + for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) { + if (ni_m_series_gate_select == + NI_M_Series_RTSI_Gate_Select(i)) { + return NI_GPCT_RTSI_GATE_SELECT(i); + break; + } + } + if (i <= ni_m_series_max_rtsi_channel) + break; + for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) { + if (ni_m_series_gate_select == + NI_M_Series_PFI_Gate_Select(i)) { + return NI_GPCT_PFI_GATE_SELECT(i); + break; + } + } + if (i <= ni_m_series_max_pfi_channel) + break; + BUG(); + break; + } + return 0; +} + +static unsigned int ni_660x_second_gate_to_generic_gate_source(unsigned int + ni_660x_gate_select) +{ + unsigned int i; + + switch (ni_660x_gate_select) { + case NI_660x_Source_Pin_i_Second_Gate_Select: + return NI_GPCT_SOURCE_PIN_i_GATE_SELECT; + break; + case NI_660x_Up_Down_Pin_i_Second_Gate_Select: + return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT; + break; + case NI_660x_Next_SRC_Second_Gate_Select: + return NI_GPCT_NEXT_SOURCE_GATE_SELECT; + break; + case NI_660x_Next_Out_Second_Gate_Select: + return NI_GPCT_NEXT_OUT_GATE_SELECT; + break; + case NI_660x_Selected_Gate_Second_Gate_Select: + return NI_GPCT_SELECTED_GATE_GATE_SELECT; + break; + case NI_660x_Logic_Low_Second_Gate_Select: + return NI_GPCT_LOGIC_LOW_GATE_SELECT; + break; + default: + for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) { + if (ni_660x_gate_select == + NI_660x_RTSI_Second_Gate_Select(i)) { + return NI_GPCT_RTSI_GATE_SELECT(i); + break; + } + } + if (i <= ni_660x_max_rtsi_channel) + break; + for (i = 0; i <= ni_660x_max_up_down_pin; ++i) { 
+ if (ni_660x_gate_select == + NI_660x_Up_Down_Pin_Second_Gate_Select(i)) { + return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i); + break; + } + } + if (i <= ni_660x_max_up_down_pin) + break; + BUG(); + break; + } + return 0; +} + +static unsigned int ni_m_series_second_gate_to_generic_gate_source(unsigned int + ni_m_series_gate_select) +{ + /* FIXME: the second gate sources for the m series are + undocumented, so we just return the raw bits for now. */ + switch (ni_m_series_gate_select) { + default: + return ni_m_series_gate_select; + break; + } + return 0; +}; + +static int ni_tio_get_gate_src(struct ni_gpct *counter, + unsigned int gate_index, + unsigned int * gate_source) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned int mode_bits = ni_tio_get_soft_copy(counter, + NITIO_Gi_Mode_Reg(counter->counter_index)); + const unsigned int second_gate_reg = + NITIO_Gi_Second_Gate_Reg(counter->counter_index); + unsigned int gate_select_bits; + + switch (gate_index) { + case 0: + if ((mode_bits & Gi_Gating_Mode_Mask) == + Gi_Gating_Disabled_Bits) { + *gate_source = NI_GPCT_DISABLED_GATE_SELECT; + return 0; + } else { + gate_select_bits = + (ni_tio_get_soft_copy(counter, + NITIO_Gi_Input_Select_Reg(counter-> + counter_index)) & + Gi_Gate_Select_Mask) >> Gi_Gate_Select_Shift; + } + switch (counter_dev->variant) { + case ni_gpct_variant_e_series: + case ni_gpct_variant_m_series: + *gate_source = + ni_m_series_first_gate_to_generic_gate_source + (gate_select_bits); + break; + case ni_gpct_variant_660x: + *gate_source = + ni_660x_first_gate_to_generic_gate_source + (gate_select_bits); + break; + default: + BUG(); + break; + } + if (mode_bits & Gi_Gate_Polarity_Bit) { + *gate_source |= CR_INVERT; + } + if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) { + *gate_source |= CR_EDGE; + } + break; + case 1: + if ((mode_bits & Gi_Gating_Mode_Mask) == Gi_Gating_Disabled_Bits + || (counter_dev-> + regs[second_gate_reg] & Gi_Second_Gate_Mode_Bit) + == 0) { + *gate_source = NI_GPCT_DISABLED_GATE_SELECT; + return 0; + } else { + gate_select_bits = + (counter_dev-> + regs[second_gate_reg] & + Gi_Second_Gate_Select_Mask) >> + Gi_Second_Gate_Select_Shift; + } + switch (counter_dev->variant) { + case ni_gpct_variant_e_series: + case ni_gpct_variant_m_series: + *gate_source = + ni_m_series_second_gate_to_generic_gate_source + (gate_select_bits); + break; + case ni_gpct_variant_660x: + *gate_source = + ni_660x_second_gate_to_generic_gate_source + (gate_select_bits); + break; + default: + BUG(); + break; + } + if (counter_dev-> + regs[second_gate_reg] & Gi_Second_Gate_Polarity_Bit) { + *gate_source |= CR_INVERT; + } + /* Second gate can't have edge/level mode set independently */ + if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) { + *gate_source |= CR_EDGE; + } + break; + default: + return -EINVAL; + break; + } + return 0; +} + +int a4l_ni_tio_insn_config(struct ni_gpct *counter, struct a4l_kernel_instruction *insn) +{ + unsigned int *data = (unsigned int *)insn->data; + + switch (data[0]) { + case A4L_INSN_CONFIG_SET_COUNTER_MODE: + return ni_tio_set_counter_mode(counter, data[1]); + break; + case A4L_INSN_CONFIG_ARM: + return ni_tio_arm(counter, 1, data[1]); + break; + case A4L_INSN_CONFIG_DISARM: + ni_tio_arm(counter, 0, 0); + return 0; + break; + case A4L_INSN_CONFIG_GET_COUNTER_STATUS: + data[1] = ni_tio_counter_status(counter); + data[2] = counter_status_mask; + return 0; + break; + case A4L_INSN_CONFIG_SET_CLOCK_SRC: + return ni_tio_set_clock_src(counter, 
data[1], data[2]); + break; + case A4L_INSN_CONFIG_GET_CLOCK_SRC: + ni_tio_get_clock_src(counter, &data[1], &data[2]); + return 0; + break; + case A4L_INSN_CONFIG_SET_GATE_SRC: + return ni_tio_set_gate_src(counter, data[1], data[2]); + break; + case A4L_INSN_CONFIG_GET_GATE_SRC: + return ni_tio_get_gate_src(counter, data[1], &data[2]); + break; + case A4L_INSN_CONFIG_SET_OTHER_SRC: + return ni_tio_set_other_src(counter, data[1], data[2]); + break; + case A4L_INSN_CONFIG_RESET: + ni_tio_reset_count_and_disarm(counter); + return 0; + break; + default: + break; + } + return -EINVAL; +} + +int a4l_ni_tio_rinsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned int channel = CR_CHAN(insn->chan_desc); + unsigned int first_read; + unsigned int second_read; + unsigned int correct_read; + + uint32_t *data = (uint32_t *)insn->data; + + if (insn->data_size != sizeof(uint32_t)) + return -EINVAL; + + switch (channel) { + case 0: + ni_tio_set_bits(counter, + NITIO_Gi_Command_Reg(counter->counter_index), + Gi_Save_Trace_Bit, 0); + ni_tio_set_bits(counter, + NITIO_Gi_Command_Reg(counter->counter_index), + Gi_Save_Trace_Bit, Gi_Save_Trace_Bit); + /* The count doesn't get latched until the next clock + edge, so it is possible the count may change (once) + while we are reading. Since the read of the + SW_Save_Reg isn't atomic (apparently even when it's a + 32 bit register according to 660x docs), we need to + read twice and make sure the reading hasn't changed. + If it has, a third read will be correct since the + count value will definitely have latched by then. */ + first_read = + read_register(counter, + NITIO_Gi_SW_Save_Reg(counter->counter_index)); + second_read = + read_register(counter, + NITIO_Gi_SW_Save_Reg(counter->counter_index)); + if (first_read != second_read) + correct_read = + read_register(counter, + NITIO_Gi_SW_Save_Reg(counter->counter_index)); + else + correct_read = first_read; + data[0] = correct_read; + return 0; + break; + case 1: + data[0] = counter_dev->regs + [NITIO_Gi_LoadA_Reg(counter->counter_index)]; + break; + case 2: + data[0] = counter_dev->regs + [NITIO_Gi_LoadB_Reg(counter->counter_index)]; + break; + }; + + return 0; +} + +static unsigned int ni_tio_next_load_register(struct ni_gpct *counter) +{ + const unsigned int bits = read_register(counter, + NITIO_Gxx_Status_Reg(counter->counter_index)); + + if (bits & Gi_Next_Load_Source_Bit(counter->counter_index)) { + return NITIO_Gi_LoadB_Reg(counter->counter_index); + } else { + return NITIO_Gi_LoadA_Reg(counter->counter_index); + } +} + +int a4l_ni_tio_winsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + const unsigned int channel = CR_CHAN(insn->chan_desc); + unsigned int load_reg; + + uint32_t *data = (uint32_t *)insn->data; + + if (insn->data_size != sizeof(uint32_t)) + return -EINVAL; + + switch (channel) { + case 0: + /* Unsafe if counter is armed. Should probably check + status and return -EBUSY if armed. */ + /* Don't disturb load source select, just use + whichever load register is already selected. 
*/ + load_reg = ni_tio_next_load_register(counter); + write_register(counter, data[0], load_reg); + ni_tio_set_bits_transient(counter, + NITIO_Gi_Command_Reg(counter->counter_index), 0, 0, + Gi_Load_Bit); + /* Restore state of load reg to whatever the user set + last set it to */ + write_register(counter, counter_dev->regs[load_reg], load_reg); + break; + case 1: + counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = + data[0]; + write_register(counter, data[0], + NITIO_Gi_LoadA_Reg(counter->counter_index)); + break; + case 2: + counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = + data[0]; + write_register(counter, data[0], + NITIO_Gi_LoadB_Reg(counter->counter_index)); + break; + default: + return -EINVAL; + break; + } + + return 0; +} + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + +static void ni_tio_configure_dma(struct ni_gpct *counter, + short enable, short read_not_write) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned int input_select_bits = 0; + + if (enable) { + if (read_not_write) { + input_select_bits |= Gi_Read_Acknowledges_Irq; + } else { + input_select_bits |= Gi_Write_Acknowledges_Irq; + } + } + ni_tio_set_bits(counter, + NITIO_Gi_Input_Select_Reg(counter->counter_index), + Gi_Read_Acknowledges_Irq | Gi_Write_Acknowledges_Irq, + input_select_bits); + switch (counter_dev->variant) { + case ni_gpct_variant_e_series: + break; + case ni_gpct_variant_m_series: + case ni_gpct_variant_660x: + { + unsigned gi_dma_config_bits = 0; + + if (enable) { + gi_dma_config_bits |= Gi_DMA_Enable_Bit; + gi_dma_config_bits |= Gi_DMA_Int_Bit; + } + if (read_not_write == 0) { + gi_dma_config_bits |= Gi_DMA_Write_Bit; + } + ni_tio_set_bits(counter, + NITIO_Gi_DMA_Config_Reg(counter->counter_index), + Gi_DMA_Enable_Bit | Gi_DMA_Int_Bit | + Gi_DMA_Write_Bit, gi_dma_config_bits); + } + break; + } +} + +/* TODO: a4l_ni_tio_input_inttrig is left unused because the trigger + callback cannot be changed at run time */ +int a4l_ni_tio_input_inttrig(struct ni_gpct *counter, lsampl_t trignum) +{ + unsigned long flags; + int retval = 0; + + BUG_ON(counter == NULL); + if (trignum != 0) + return -EINVAL; + + rtdm_lock_get_irqsave(&counter->lock, flags); + if (counter->mite_chan) + a4l_mite_dma_arm(counter->mite_chan); + else + retval = -EIO; + rtdm_lock_put_irqrestore(&counter->lock, flags); + if (retval < 0) + return retval; + retval = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE); + + /* TODO: disable trigger until a command is recorded. 
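+	   (comedi drivers typically get this effect by clearing the
+	   subdevice's inttrig hook once it has fired; nothing equivalent
+	   is possible here while the trigger callback cannot be changed
+	   at run time.)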
+ Null trig at beginning prevent ao start trigger from executing + more than once per command (and doing things like trying to + allocate the ao dma channel multiple times) */ + + return retval; +} + +static int ni_tio_input_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd) +{ + struct ni_gpct_device *counter_dev = counter->counter_dev; + int retval = 0; + + counter->mite_chan->dir = A4L_INPUT; + switch (counter_dev->variant) { + case ni_gpct_variant_m_series: + case ni_gpct_variant_660x: + a4l_mite_prep_dma(counter->mite_chan, 32, 32); + break; + case ni_gpct_variant_e_series: + a4l_mite_prep_dma(counter->mite_chan, 16, 32); + break; + default: + BUG(); + break; + } + ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index), + Gi_Save_Trace_Bit, 0); + ni_tio_configure_dma(counter, 1, 1); + switch (cmd->start_src) { + case TRIG_NOW: + a4l_mite_dma_arm(counter->mite_chan); + retval = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE); + break; + case TRIG_INT: + break; + case TRIG_EXT: + a4l_mite_dma_arm(counter->mite_chan); + retval = ni_tio_arm(counter, 1, cmd->start_arg); + break; + case TRIG_OTHER: + a4l_mite_dma_arm(counter->mite_chan); + break; + default: + BUG(); + break; + } + return retval; +} + +static int ni_tio_output_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd) +{ + __a4l_err("ni_tio: output commands not yet implemented.\n"); + return -ENOTSUPP; +} + +static int ni_tio_cmd_setup(struct ni_gpct *counter, struct a4l_cmd_desc *cmd) +{ + int retval = 0, set_gate_source = 0; + unsigned int gate_source; + + if (cmd->scan_begin_src == TRIG_EXT) { + set_gate_source = 1; + gate_source = cmd->scan_begin_arg; + } else if (cmd->convert_src == TRIG_EXT) { + set_gate_source = 1; + gate_source = cmd->convert_arg; + } + if (set_gate_source) { + retval = ni_tio_set_gate_src(counter, 0, gate_source); + } + if (cmd->flags & TRIG_WAKE_EOS) { + ni_tio_set_bits(counter, + NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index), + Gi_Gate_Interrupt_Enable_Bit(counter->counter_index), + Gi_Gate_Interrupt_Enable_Bit(counter->counter_index)); + } + return retval; +} + +int a4l_ni_tio_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd) +{ + int retval = 0; + unsigned long flags; + + rtdm_lock_get_irqsave(&counter->lock, flags); + if (counter->mite_chan == NULL) { + __a4l_err("a4l_ni_tio_cmd: commands only supported with DMA." 
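+			  /* i.e. no mite DMA channel is bound to this
+			     counter, see a4l_ni_tio_set_mite_channel() */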
+ " Interrupt-driven commands not yet implemented.\n"); + retval = -EIO; + } else { + retval = ni_tio_cmd_setup(counter, cmd); + if (retval == 0) { + if (cmd->flags & A4L_CMD_WRITE) { + retval = ni_tio_output_cmd(counter, cmd); + } else { + retval = ni_tio_input_cmd(counter, cmd); + } + } + } + rtdm_lock_put_irqrestore(&counter->lock, flags); + return retval; +} + +struct a4l_cmd_desc a4l_ni_tio_cmd_mask = { + .idx_subd = 0, + .start_src = TRIG_NOW | TRIG_INT | TRIG_OTHER | TRIG_EXT, + .scan_begin_src = TRIG_FOLLOW | TRIG_EXT | TRIG_OTHER, + .convert_src = TRIG_NOW | TRIG_EXT | TRIG_OTHER, + .scan_end_src = TRIG_COUNT, + .stop_src = TRIG_NONE, +}; + +int a4l_ni_tio_cmdtest(struct ni_gpct *counter, struct a4l_cmd_desc *cmd) +{ + /* Make sure trigger sources are trivially valid */ + + if ((cmd->start_src & TRIG_EXT) != 0 && + ni_tio_counting_mode_registers_present(counter->counter_dev) == 0) + return -EINVAL; + + /* Make sure trigger sources are mutually compatible */ + + if (cmd->convert_src != TRIG_NOW && cmd->scan_begin_src != TRIG_FOLLOW) + return -EINVAL; + + /* Make sure arguments are trivially compatible */ + + if (cmd->start_src != TRIG_EXT) { + if (cmd->start_arg != 0) { + return -EINVAL; + } + } + + if (cmd->scan_begin_src != TRIG_EXT) { + if (cmd->scan_begin_arg) { + return -EINVAL; + } + } + + if (cmd->convert_src != TRIG_EXT) { + if (cmd->convert_arg) { + return -EINVAL; + } + } + + if (cmd->scan_end_arg != cmd->nb_chan) { + return -EINVAL; + } + + if (cmd->stop_src == TRIG_NONE) { + if (cmd->stop_arg != 0) { + return -EINVAL; + } + } + + return 0; +} + +int a4l_ni_tio_cancel(struct ni_gpct *counter) +{ + unsigned long flags; + + ni_tio_arm(counter, 0, 0); + rtdm_lock_get_irqsave(&counter->lock, flags); + if (counter->mite_chan) { + a4l_mite_dma_disarm(counter->mite_chan); + } + rtdm_lock_put_irqrestore(&counter->lock, flags); + ni_tio_configure_dma(counter, 0, 0); + + ni_tio_set_bits(counter, + NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index), + Gi_Gate_Interrupt_Enable_Bit(counter->counter_index), 0x0); + return 0; +} + +/* During buffered input counter operation for e-series, the gate + interrupt is acked automatically by the dma controller, due to the + Gi_Read/Write_Acknowledges_IRQ bits in the input select + register. 
*/ +static int should_ack_gate(struct ni_gpct *counter) +{ + unsigned long flags; + int retval = 0; + + switch (counter->counter_dev->variant) { + case ni_gpct_variant_m_series: + case ni_gpct_variant_660x: + /* Not sure if 660x really supports gate interrupts + (the bits are not listed in register-level manual) */ + return 1; + break; + case ni_gpct_variant_e_series: + rtdm_lock_get_irqsave(&counter->lock, flags); + { + if (counter->mite_chan == NULL || + counter->mite_chan->dir != A4L_INPUT || + (a4l_mite_done(counter->mite_chan))) { + retval = 1; + } + } + rtdm_lock_put_irqrestore(&counter->lock, flags); + break; + } + return retval; +} + +void a4l_ni_tio_acknowledge_and_confirm(struct ni_gpct *counter, + int *gate_error, + int *tc_error, + int *perm_stale_data, int *stale_data) +{ + const unsigned short gxx_status = read_register(counter, + NITIO_Gxx_Status_Reg(counter->counter_index)); + const unsigned short gi_status = read_register(counter, + NITIO_Gi_Status_Reg(counter->counter_index)); + unsigned ack = 0; + + if (gate_error) + *gate_error = 0; + if (tc_error) + *tc_error = 0; + if (perm_stale_data) + *perm_stale_data = 0; + if (stale_data) + *stale_data = 0; + + if (gxx_status & Gi_Gate_Error_Bit(counter->counter_index)) { + ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index); + if (gate_error) { + /* 660x don't support automatic + acknowledgement of gate interrupt via dma + read/write and report bogus gate errors */ + if (counter->counter_dev->variant != + ni_gpct_variant_660x) { + *gate_error = 1; + } + } + } + if (gxx_status & Gi_TC_Error_Bit(counter->counter_index)) { + ack |= Gi_TC_Error_Confirm_Bit(counter->counter_index); + if (tc_error) + *tc_error = 1; + } + if (gi_status & Gi_TC_Bit) { + ack |= Gi_TC_Interrupt_Ack_Bit; + } + if (gi_status & Gi_Gate_Interrupt_Bit) { + if (should_ack_gate(counter)) + ack |= Gi_Gate_Interrupt_Ack_Bit; + } + if (ack) + write_register(counter, ack, + NITIO_Gi_Interrupt_Acknowledge_Reg(counter-> + counter_index)); + if (ni_tio_get_soft_copy(counter, + NITIO_Gi_Mode_Reg(counter-> + counter_index)) & Gi_Loading_On_Gate_Bit) { + if (gxx_status & Gi_Stale_Data_Bit(counter->counter_index)) { + if (stale_data) + *stale_data = 1; + } + if (read_register(counter, + NITIO_Gxx_Joint_Status2_Reg(counter-> + counter_index)) & + Gi_Permanent_Stale_Bit(counter->counter_index)) { + __a4l_err("%s: Gi_Permanent_Stale_Data detected.\n", + __FUNCTION__); + if (perm_stale_data) + *perm_stale_data = 1; + } + } +} + +/* TODO: to be adapted after a4l_buf_evt review */ +void a4l_ni_tio_handle_interrupt(struct ni_gpct *counter, struct a4l_device *dev) +{ + unsigned gpct_mite_status; + unsigned long flags; + int gate_error; + int tc_error; + int perm_stale_data; + struct a4l_subdevice *subd = + a4l_get_subd(dev, NI_GPCT_SUBDEV(counter->counter_index)); + + a4l_ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error, + &perm_stale_data, NULL); + if (gate_error) { + __a4l_err("%s: Gi_Gate_Error detected.\n", __FUNCTION__); + a4l_buf_evt(subd, A4L_BUF_ERROR); + } + if (perm_stale_data) { + a4l_buf_evt(subd, A4L_BUF_ERROR); + } + switch (counter->counter_dev->variant) { + case ni_gpct_variant_m_series: + case ni_gpct_variant_660x: + if (read_register(counter, + NITIO_Gi_DMA_Status_Reg(counter->counter_index)) + & Gi_DRQ_Error_Bit) { + __a4l_err("%s: Gi_DRQ_Error detected.\n", __FUNCTION__); + a4l_buf_evt(subd, A4L_BUF_ERROR); + } + break; + case ni_gpct_variant_e_series: + break; + } + rtdm_lock_get_irqsave(&counter->lock, flags); + if (counter->mite_chan == 
NULL) { + rtdm_lock_put_irqrestore(&counter->lock, flags); + return; + } + gpct_mite_status = a4l_mite_get_status(counter->mite_chan); + if (gpct_mite_status & CHSR_LINKC) { + writel(CHOR_CLRLC, + counter->mite_chan->mite->mite_io_addr + + MITE_CHOR(counter->mite_chan->channel)); + } + a4l_mite_sync_input_dma(counter->mite_chan, subd); + rtdm_lock_put_irqrestore(&counter->lock, flags); +} + +void a4l_ni_tio_set_mite_channel(struct ni_gpct *counter, + struct mite_channel *mite_chan) +{ + unsigned long flags; + + rtdm_lock_get_irqsave(&counter->lock, flags); + counter->mite_chan = mite_chan; + rtdm_lock_put_irqrestore(&counter->lock, flags); +} + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ + +static int __init ni_tio_init_module(void) +{ + return 0; +} + +static void __exit ni_tio_cleanup_module(void) +{ +} + +MODULE_DESCRIPTION("Analogy support for NI general-purpose counters"); +MODULE_LICENSE("GPL"); + +module_init(ni_tio_init_module); +module_exit(ni_tio_cleanup_module); + +EXPORT_SYMBOL_GPL(a4l_ni_tio_rinsn); +EXPORT_SYMBOL_GPL(a4l_ni_tio_winsn); +EXPORT_SYMBOL_GPL(a4l_ni_tio_insn_config); +EXPORT_SYMBOL_GPL(a4l_ni_tio_init_counter); +EXPORT_SYMBOL_GPL(a4l_ni_gpct_device_construct); +EXPORT_SYMBOL_GPL(a4l_ni_gpct_device_destroy); + +#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \ + defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE)) + +EXPORT_SYMBOL_GPL(a4l_ni_tio_input_inttrig); +EXPORT_SYMBOL_GPL(a4l_ni_tio_cmd); +EXPORT_SYMBOL_GPL(a4l_ni_tio_cmd_mask); +EXPORT_SYMBOL_GPL(a4l_ni_tio_cmdtest); +EXPORT_SYMBOL_GPL(a4l_ni_tio_cancel); +EXPORT_SYMBOL_GPL(a4l_ni_tio_handle_interrupt); +EXPORT_SYMBOL_GPL(a4l_ni_tio_set_mite_channel); +EXPORT_SYMBOL_GPL(a4l_ni_tio_acknowledge_and_confirm); + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h new file mode 100644 index 0000000..a10e07a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h @@ -0,0 +1,33 @@ +/* + * Analogy for Linux, procfs related features + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __ANALOGY_PROC_H__ +#define __ANALOGY_PROC_H__ + +#ifdef __KERNEL__ + +#ifdef CONFIG_PROC_FS +extern struct proc_dir_entry *a4l_proc_root; +#endif /* CONFIG_PROC_FS */ + +#endif /* __KERNEL__ */ + +#endif /* __ANALOGY_PROC_H__ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c new file mode 100644 index 0000000..6755941 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c @@ -0,0 +1,214 @@ +/* + * Analogy for Linux, RTDM helpers + * + * Copyright (C) 1997-2000 David A. 
Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <linux/version.h> +#include <linux/fs.h> +#include <asm/atomic.h> + +#include <rtdm/analogy/rtdm_helpers.h> + +/* --- Time section --- */ + +static nanosecs_abs_t a4l_clkofs; + +void a4l_init_time(void) +{ + nanosecs_abs_t t1, t2; + t1 = rtdm_clock_read(); + t2 = ktime_to_ns(ktime_get_real()); + a4l_clkofs = t2 - t1; +} + +nanosecs_abs_t a4l_get_time(void) +{ + return a4l_clkofs + rtdm_clock_read(); +} + +/* --- IRQ section --- */ + +static int a4l_handle_irq(rtdm_irq_t *irq_handle) +{ + struct a4l_irq_descriptor *dsc = + rtdm_irq_get_arg(irq_handle, struct a4l_irq_descriptor); + + if (dsc->handler((unsigned int)irq_handle->irq, dsc->cookie) == 0) + return RTDM_IRQ_HANDLED; + else + return RTDM_IRQ_NONE; +} + +int __a4l_request_irq(struct a4l_irq_descriptor *dsc, + unsigned int irq, + a4l_irq_hdlr_t handler, + unsigned long flags, void *cookie) +{ + /* Fills the IRQ descriptor */ + dsc->handler = handler; + dsc->cookie = cookie; + dsc->irq = irq; + + /* Registers the RT IRQ handler */ + return rtdm_irq_request(&dsc->rtdm_desc, + (int)irq, + a4l_handle_irq, flags, "Analogy device", dsc); +} + +int __a4l_free_irq(struct a4l_irq_descriptor * dsc) +{ + return rtdm_irq_free(&dsc->rtdm_desc); +} + +/* --- Synchronization section --- */ + +static void a4l_nrt_sync_handler(rtdm_nrtsig_t *nrt_sig, void *arg) +{ + struct a4l_sync *snc = (struct a4l_sync *) arg; + wake_up_interruptible(&snc->wq); +} + +int a4l_init_sync(struct a4l_sync *snc) +{ + int ret = 0; + + /* Initializes the flags field */ + snc->status = 0; + + /* If the process is NRT, we need a wait queue structure */ + init_waitqueue_head(&snc->wq); + + /* Initializes the RTDM event */ + rtdm_event_init(&snc->rtdm_evt, 0); + + /* Initializes the gateway to NRT context */ + rtdm_nrtsig_init(&snc->nrt_sig, a4l_nrt_sync_handler, snc); + + return ret; +} + +void a4l_cleanup_sync(struct a4l_sync *snc) +{ + rtdm_nrtsig_destroy(&snc->nrt_sig); + rtdm_event_destroy(&snc->rtdm_evt); +} + +int a4l_wait_sync(struct a4l_sync *snc, int rt) +{ + int ret = 0; + + if (test_bit(__EVT_PDING, &snc->status)) + goto out_wait; + + if (rt != 0) { + /* If the calling process is in primary mode, + we can use RTDM API ... */ + set_bit(__RT_WAITER, &snc->status); + ret = rtdm_event_wait(&snc->rtdm_evt); + } else { + /* ... 
else if the process is NRT,
+		   the Linux wait queue system is used */
+		set_bit(__NRT_WAITER, &snc->status);
+		ret = wait_event_interruptible(snc->wq,
+					       test_bit(__EVT_PDING,
+							&snc->status));
+	}
+
+out_wait:
+
+	clear_bit(__EVT_PDING, &snc->status);
+
+	return ret;
+}
+
+int a4l_timedwait_sync(struct a4l_sync * snc,
+		       int rt, unsigned long long ns_timeout)
+{
+	int ret = 0;
+	unsigned long timeout;
+
+	if (test_bit(__EVT_PDING, &snc->status))
+		goto out_wait;
+
+	if (rt != 0) {
+		/* If the calling process is in primary mode,
+		   we can use RTDM API ... */
+		set_bit(__RT_WAITER, &snc->status);
+		ret = rtdm_event_timedwait(&snc->rtdm_evt, ns_timeout, NULL);
+	} else {
+		/* ... else if the process is NRT,
+		   the Linux wait queue system is used */
+
+		do_div(ns_timeout, 1000);
+
+		/* do_div() divides in place: ns_timeout now holds the
+		   delay in us. The Linux kernel cannot tick at more than
+		   1 MHz, so a timeout below 1 us is rounded up to 1 us */
+		timeout = (ns_timeout == 0) ? 1 : usecs_to_jiffies(ns_timeout);
+
+		set_bit(__NRT_WAITER, &snc->status);
+
+		ret = wait_event_interruptible_timeout(snc->wq,
+						       test_bit(__EVT_PDING,
+								&snc->status),
+						       timeout);
+	}
+
+out_wait:
+
+	clear_bit(__EVT_PDING, &snc->status);
+
+	return ret;
+}
+
+void a4l_flush_sync(struct a4l_sync * snc)
+{
+	/* Clear the status bitfield */
+	snc->status = 0;
+
+	/* Flush the RTDM event */
+	rtdm_event_clear(&snc->rtdm_evt);
+}
+
+void a4l_signal_sync(struct a4l_sync * snc)
+{
+	int hit = 0;
+
+	set_bit(__EVT_PDING, &snc->status);
+
+	/* a4l_signal_sync() may be called from a context other than
+	   the waiter's; the status flags record the waiter's mode, so
+	   that the proper event signaling function can be picked */
+	if (test_and_clear_bit(__RT_WAITER, &snc->status)) {
+		rtdm_event_signal(&snc->rtdm_evt);
+		hit++;
+	}
+
+	if (test_and_clear_bit(__NRT_WAITER, &snc->status)) {
+		rtdm_nrtsig_pend(&snc->nrt_sig);
+		hit++;
+	}
+
+	if (hit == 0) {
+		/* At first signaling, we may not know the proper way
+		   to send the event */
+		rtdm_event_signal(&snc->rtdm_evt);
+		rtdm_nrtsig_pend(&snc->nrt_sig);
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c
new file mode 100644
index 0000000..765c176
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c
@@ -0,0 +1,308 @@
+/*
+ * Analogy for Linux, user interface (open, read, write, ioctl, proc)
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/uaccess.h> +#include <rtdm/driver.h> +#include <rtdm/analogy/device.h> + +MODULE_AUTHOR("Alexis Berlemont"); +MODULE_DESCRIPTION("Analogy core driver"); +MODULE_LICENSE("GPL"); + +int (* const a4l_ioctl_functions[]) (struct a4l_device_context *, void *) = { + [_IOC_NR(A4L_DEVCFG)] = a4l_ioctl_devcfg, + [_IOC_NR(A4L_DEVINFO)] = a4l_ioctl_devinfo, + [_IOC_NR(A4L_SUBDINFO)] = a4l_ioctl_subdinfo, + [_IOC_NR(A4L_CHANINFO)] = a4l_ioctl_chaninfo, + [_IOC_NR(A4L_RNGINFO)] = a4l_ioctl_rnginfo, + [_IOC_NR(A4L_CMD)] = a4l_ioctl_cmd, + [_IOC_NR(A4L_CANCEL)] = a4l_ioctl_cancel, + [_IOC_NR(A4L_INSNLIST)] = a4l_ioctl_insnlist, + [_IOC_NR(A4L_INSN)] = a4l_ioctl_insn, + [_IOC_NR(A4L_BUFCFG)] = a4l_ioctl_bufcfg, + [_IOC_NR(A4L_BUFINFO)] = a4l_ioctl_bufinfo, + [_IOC_NR(A4L_POLL)] = a4l_ioctl_poll, + [_IOC_NR(A4L_MMAP)] = a4l_ioctl_mmap, + [_IOC_NR(A4L_NBCHANINFO)] = a4l_ioctl_nbchaninfo, + [_IOC_NR(A4L_NBRNGINFO)] = a4l_ioctl_nbrnginfo, + [_IOC_NR(A4L_BUFCFG2)] = a4l_ioctl_bufcfg2, + [_IOC_NR(A4L_BUFINFO2)] = a4l_ioctl_bufinfo2 +}; + +#ifdef CONFIG_PROC_FS +struct proc_dir_entry *a4l_proc_root; + +static int a4l_proc_devs_open(struct inode *inode, struct file *file) +{ + return single_open(file, a4l_rdproc_devs, NULL); +} + +static const DEFINE_PROC_OPS(a4l_proc_devs_ops, + a4l_proc_devs_open, + single_release, + seq_read, + NULL); + +static int a4l_proc_drvs_open(struct inode *inode, struct file *file) +{ + return single_open(file, a4l_rdproc_drvs, NULL); +} + +static const DEFINE_PROC_OPS(a4l_proc_drvs_ops, + a4l_proc_drvs_open, + single_release, + seq_read, + NULL); + +int a4l_init_proc(void) +{ + int ret = 0; + struct proc_dir_entry *entry; + + /* Creates the global directory */ + a4l_proc_root = proc_mkdir("analogy", NULL); + if (a4l_proc_root == NULL) { + __a4l_err("a4l_proc_init: " + "failed to create /proc/analogy\n"); + return -ENOMEM; + } + + /* Creates the devices related file */ + entry = proc_create("devices", 0444, a4l_proc_root, + &a4l_proc_devs_ops); + if (entry == NULL) { + __a4l_err("a4l_proc_init: " + "failed to create /proc/analogy/devices\n"); + ret = -ENOMEM; + goto err_proc_init; + } + + /* Creates the drivers related file */ + entry = proc_create("drivers", 0444, a4l_proc_root, + &a4l_proc_drvs_ops); + if (entry == NULL) { + __a4l_err("a4l_proc_init: " + "failed to create /proc/analogy/drivers\n"); + ret = -ENOMEM; + goto err_proc_init; + } + + return 0; + +err_proc_init: + remove_proc_entry("devices", a4l_proc_root); + remove_proc_entry("analogy", NULL); + return ret; +} + +void a4l_cleanup_proc(void) +{ + remove_proc_entry("drivers", a4l_proc_root); + remove_proc_entry("devices", a4l_proc_root); + remove_proc_entry("analogy", NULL); +} + +#else /* !CONFIG_PROC_FS */ + +#define a4l_init_proc() 0 +#define a4l_cleanup_proc() + +#endif /* CONFIG_PROC_FS */ + +int a4l_open(struct rtdm_fd *fd, int flags) +{ + struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd); + + /* Get a pointer on the selected device (thanks to minor index) */ + a4l_set_dev(cxt); + + /* Initialize the buffer structure */ + cxt->buffer = rtdm_malloc(sizeof(struct a4l_buffer)); + + a4l_init_buffer(cxt->buffer); + /* Allocate the asynchronous buffer + NOTE: it should be interesting to allocate the buffer only + on demand especially if the system is short of memory */ + if (cxt->dev->transfer.default_bufsize) + a4l_alloc_buffer(cxt->buffer, + 
cxt->dev->transfer.default_bufsize); + + __a4l_dbg(1, core_dbg, "cxt=%p cxt->buf=%p, cxt->buf->buf=%p\n", + cxt, cxt->buffer, cxt->buffer->buf); + + return 0; +} + +void a4l_close(struct rtdm_fd *fd) +{ + struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd); + + /* Cancel the maybe occuring asynchronous transfer */ + a4l_cancel_buffer(cxt); + + /* Free the buffer which was linked with this context and... */ + a4l_free_buffer(cxt->buffer); + + /* ...free the other buffer resources (sync) and... */ + a4l_cleanup_buffer(cxt->buffer); + + /* ...free the structure */ + rtdm_free(cxt->buffer); +} + +ssize_t a4l_read(struct rtdm_fd *fd, void *buf, size_t nbytes) +{ + struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd); + + /* Jump into the RT domain if possible */ + if (!rtdm_in_rt_context() && rtdm_rt_capable(fd)) + return -ENOSYS; + + if (nbytes == 0) + return 0; + + return a4l_read_buffer(cxt, buf, nbytes); +} + +ssize_t a4l_write(struct rtdm_fd *fd, const void *buf, size_t nbytes) +{ + struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd); + + /* Jump into the RT domain if possible */ + if (!rtdm_in_rt_context() && rtdm_rt_capable(fd)) + return -ENOSYS; + + if (nbytes == 0) + return 0; + + return a4l_write_buffer(cxt, buf, nbytes); +} + +int a4l_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg) +{ + struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd); + + return a4l_ioctl_functions[_IOC_NR(request)] (cxt, arg); +} + +int a4l_rt_select(struct rtdm_fd *fd, + rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned fd_index) +{ + struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd); + + return a4l_select(cxt, selector, type, fd_index); +} + +static struct rtdm_driver analogy_driver = { + .profile_info = RTDM_PROFILE_INFO(analogy, + RTDM_CLASS_EXPERIMENTAL, + RTDM_SUBCLASS_ANALOGY, + 0), + .device_flags = RTDM_NAMED_DEVICE, + .device_count = A4L_NB_DEVICES, + .context_size = sizeof(struct a4l_device_context), + .ops = { + .open = a4l_open, + .close = a4l_close, + .ioctl_rt = a4l_ioctl, + .read_rt = a4l_read, + .write_rt = a4l_write, + .ioctl_nrt = a4l_ioctl, + .read_nrt = a4l_read, + .write_nrt = a4l_write, + .select = a4l_rt_select, + }, +}; + +static struct rtdm_device rtdm_devs[A4L_NB_DEVICES] = { + [0 ... 
A4L_NB_DEVICES - 1] = { + .driver = &analogy_driver, + .label = "analogy%d", + } +}; + +int a4l_register(void) +{ + int i, ret; + + for (i = 0; i < A4L_NB_DEVICES; i++) { + ret = rtdm_dev_register(rtdm_devs + i); + if (ret) + goto fail; + } + + return 0; +fail: + while (i-- > 0) + rtdm_dev_unregister(rtdm_devs + i); + + return ret; +} + +void a4l_unregister(void) +{ + int i; + for (i = 0; i < A4L_NB_DEVICES; i++) + rtdm_dev_unregister(&(rtdm_devs[i])); +} + +static int __init a4l_init(void) +{ + int ret; + + if (!rtdm_available()) + return -ENOSYS; + + /* Initializes the devices */ + a4l_init_devs(); + + /* Initializes Analogy time management */ + a4l_init_time(); + + /* Registers RTDM / fops interface */ + ret = a4l_register(); + if (ret != 0) { + a4l_unregister(); + goto out_a4l_init; + } + + /* Initializes Analogy proc layer */ + ret = a4l_init_proc(); + +out_a4l_init: + return ret; +} + +static void __exit a4l_cleanup(void) +{ + /* Removes Analogy proc files */ + a4l_cleanup_proc(); + + /* Unregisters RTDM / fops interface */ + a4l_unregister(); +} + +module_init(a4l_init); +module_exit(a4l_cleanup); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig new file mode 100644 index 0000000..ce5aa51 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig @@ -0,0 +1,5 @@ + +config XENO_DRIVERS_ANALOGY_S526 + depends on XENO_DRIVERS_ANALOGY + tristate "Sensoray Model 526 driver" + default n diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile new file mode 100644 index 0000000..51bad4d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile @@ -0,0 +1,6 @@ + +ccflags-y += -I$(srctree)/drivers/xenomai/analogy + +obj-$(CONFIG_XENO_DRIVERS_ANALOGY_S526) += analogy_s526.o + +analogy_s526-y := s526.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c new file mode 100644 index 0000000..8ecda7e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c @@ -0,0 +1,756 @@ +/* + * Analogy driver for Sensoray Model 526 board + * + * Copyright (C) 2009 Simon Boulay <simon.boulay@gmail.com> + * + * Derived from comedi: + * Copyright (C) 2000 David A. Schleef <ds@schleef.org> + * 2006 Everett Wang <everett.wang@everteq.com> + * 2009 Ian Abbott <abbotti@mev.co.uk> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This code is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +/* + * Original code comes from comedi linux-next staging driver (2009.12.20) + * Board documentation: http://www.sensoray.com/products/526data.htm + * Everything should work as in comedi: + * - Encoder works + * - Analog input works + * - Analog output works + * - PWM output works + * - Commands are not supported yet. + */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <asm/byteorder.h> +#include <rtdm/analogy/device.h> + +/* Board description */ +#define S526_GPCT_CHANS 4 +#define S526_GPCT_BITS 24 +#define S526_AI_CHANS 10 /* 8 regular differential inputs + * channel 8 is "reference 0" (+10V) + * channel 9 is "reference 1" (0V) */ +#define S526_AI_BITS 16 +#define S526_AI_TIMEOUT 100 +#define S526_AO_CHANS 4 +#define S526_AO_BITS 16 +#define S526_DIO_CHANS 8 +#define S526_DIO_BITS 1 + +/* Ports */ +#define S526_IOSIZE 0x40 /* 64 bytes */ +#define S526_DEFAULT_ADDRESS 0x2C0 /* Manufacturing default */ + +/* Registers */ +#define REG_TCR 0x00 +#define REG_WDC 0x02 +#define REG_DAC 0x04 +#define REG_ADC 0x06 +#define REG_ADD 0x08 +#define REG_DIO 0x0A +#define REG_IER 0x0C +#define REG_ISR 0x0E +#define REG_MSC 0x10 +#define REG_C0L 0x12 +#define REG_C0H 0x14 +#define REG_C0M 0x16 +#define REG_C0C 0x18 +#define REG_C1L 0x1A +#define REG_C1H 0x1C +#define REG_C1M 0x1E +#define REG_C1C 0x20 +#define REG_C2L 0x22 +#define REG_C2H 0x24 +#define REG_C2M 0x26 +#define REG_C2C 0x28 +#define REG_C3L 0x2A +#define REG_C3H 0x2C +#define REG_C3M 0x2E +#define REG_C3C 0x30 +#define REG_EED 0x32 +#define REG_EEC 0x34 + +#define ISR_ADC_DONE 0x4 + +struct counter_mode_register_t { +#if defined (__LITTLE_ENDIAN_BITFIELD) + unsigned short coutSource:1; + unsigned short coutPolarity:1; + unsigned short autoLoadResetRcap:3; + unsigned short hwCtEnableSource:2; + unsigned short ctEnableCtrl:2; + unsigned short clockSource:2; + unsigned short countDir:1; + unsigned short countDirCtrl:1; + unsigned short outputRegLatchCtrl:1; + unsigned short preloadRegSel:1; + unsigned short reserved:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned short reserved:1; + unsigned short preloadRegSel:1; + unsigned short outputRegLatchCtrl:1; + unsigned short countDirCtrl:1; + unsigned short countDir:1; + unsigned short clockSource:2; + unsigned short ctEnableCtrl:2; + unsigned short hwCtEnableSource:2; + unsigned short autoLoadResetRcap:3; + unsigned short coutPolarity:1; + unsigned short coutSource:1; +#else +#error Unknown bit field order +#endif +}; + +union cmReg { + struct counter_mode_register_t reg; + unsigned short value; +}; + +/* Application Classes for GPCT Subdevices */ +enum S526_GPCT_APP_CLASS { + CountingAndTimeMeasurement, + SinglePulseGeneration, + PulseTrainGeneration, + PositionMeasurement, + Miscellaneous +}; + +/* GPCT subdevices configuration */ +#define MAX_GPCT_CONFIG_DATA 6 +struct s526GPCTConfig { + enum S526_GPCT_APP_CLASS app; + int data[MAX_GPCT_CONFIG_DATA]; +}; + +typedef struct s526_priv { + unsigned long io_base; +} s526_priv_t; + +struct s526_subd_gpct_priv { + struct s526GPCTConfig config[4]; +}; + +struct s526_subd_ai_priv { + uint16_t config; +}; + +struct s526_subd_ao_priv { + uint16_t readback[2]; +}; + +struct s526_subd_dio_priv { + int io_bits; + unsigned int state; +}; + +#define devpriv ((s526_priv_t*)(dev->priv)) + +#define ADDR_REG(reg) (devpriv->io_base + (reg)) +#define ADDR_CHAN_REG(reg, chan) (devpriv->io_base + (reg) + (chan) * 8) + + +static int s526_gpct_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction 
*insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_gpct_priv *subdpriv =
+		(struct s526_subd_gpct_priv *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+	int subdev_channel = CR_CHAN(insn->chan_desc);
+	int i;
+	short value;
+	union cmReg cmReg;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"s526_gpct_insn_config: Configuring Channel %d\n",
+		subdev_channel);
+
+	for (i = 0; i < MAX_GPCT_CONFIG_DATA; i++) {
+		subdpriv->config[subdev_channel].data[i] = data[i];
+		a4l_dbg(1, drv_dbg, dev, "data[%d]=%x\n", i, data[i]);
+	}
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
+		/*
+		 * data[0]: Application Type
+		 * data[1]: Counter Mode Register Value
+		 * data[2]: Pre-load Register Value
+		 * data[3]: Counter Control Register
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_insn_config: Configuring Encoder\n");
+		subdpriv->config[subdev_channel].app = PositionMeasurement;
+
+		/* Set Counter Mode Register */
+		cmReg.value = data[1] & 0xFFFF;
+
+		a4l_dbg(1, drv_dbg, dev, "Counter Mode register=%x\n", cmReg.value);
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Reset the counter if it is software preload */
+		if (cmReg.reg.autoLoadResetRcap == 0) {
+			outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Reset the counter */
+			/* outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /\* Load the counter from PR0 *\/ */
+		}
+		break;
+
+	case A4L_INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
+		/*
+		 * data[0]: Application Type
+		 * data[1]: Counter Mode Register Value
+		 * data[2]: Pre-load Register 0 Value
+		 * data[3]: Pre-load Register 1 Value
+		 * data[4]: Counter Control Register
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_insn_config: Configuring SPG\n");
+		subdpriv->config[subdev_channel].app = SinglePulseGeneration;
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 0; /* PR0 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 0 high word */
+		value = (short)((data[2] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 0 low word */
+		value = (short)(data[2] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 1; /* PR1 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 1 high word */
+		value = (short)((data[3] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 1 low word */
+		value = (short)(data[3] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Write the Counter Control Register */
+		if (data[4] != 0) {
+			value = (short)(data[4] & 0xFFFF);
+			outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+		}
+		break;
+
+	case A4L_INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR:
+		/*
+		 * data[0]: Application Type
+		 * data[1]: Counter Mode Register Value
+		 * data[2]: Pre-load Register 0 Value
+		 * data[3]: Pre-load Register 1 Value
+		 * data[4]: Counter Control Register
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_insn_config: Configuring PTG\n");
+		subdpriv->config[subdev_channel].app = PulseTrainGeneration;
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 0; /* PR0 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 0 high word */
+		value = (short)((data[2] >> 16) & 0xFFFF); +
outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel)); + + /* Load the pre-load register 0 low word */ + value = (short)(data[2] & 0xFFFF); + outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel)); + + /* Set Counter Mode Register */ + cmReg.value = (short)(data[1] & 0xFFFF); + cmReg.reg.preloadRegSel = 1; /* PR1 */ + outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel)); + + /* Load the pre-load register 1 high word */ + value = (short)((data[3] >> 16) & 0xFFFF); + outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel)); + + /* Load the pre-load register 1 low word */ + value = (short)(data[3] & 0xFFFF); + outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel)); + + /* Write the Counter Control Register */ + if (data[4] != 0) { + value = (short)(data[4] & 0xFFFF); + outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel)); + } + break; + + default: + a4l_err(dev, "s526_gpct_insn_config: unsupported GPCT_insn_config\n"); + return -EINVAL; + break; + } + + return 0; +} + +static int s526_gpct_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + uint32_t *data = (uint32_t *)insn->data; + int counter_channel = CR_CHAN(insn->chan_desc); + unsigned short datalow; + unsigned short datahigh; + int i; + + if (insn->data_size <= 0) { + a4l_err(dev, "s526_gpct_rinsn: data size should be > 0\n"); + return -EINVAL; + } + + for (i = 0; i < insn->data_size / sizeof(uint32_t); i++) { + datalow = inw(ADDR_CHAN_REG(REG_C0L, counter_channel)); + datahigh = inw(ADDR_CHAN_REG(REG_C0H, counter_channel)); + data[i] = (int)(datahigh & 0x00FF); + data[i] = (data[i] << 16) | (datalow & 0xFFFF); + a4l_dbg(1, drv_dbg, dev, + "s526_gpct_rinsn GPCT[%d]: %x(0x%04x, 0x%04x)\n", + counter_channel, data[i], datahigh, datalow); + } + + return 0; +} + +static int s526_gpct_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + struct s526_subd_gpct_priv *subdpriv = + (struct s526_subd_gpct_priv *)subd->priv; + uint32_t *data = (uint32_t *)insn->data; + int subdev_channel = CR_CHAN(insn->chan_desc); + short value; + union cmReg cmReg; + + a4l_dbg(1, drv_dbg, dev, + "s526_gpct_winsn: GPCT_INSN_WRITE on channel %d\n", + subdev_channel); + + cmReg.value = inw(ADDR_CHAN_REG(REG_C0M, subdev_channel)); + a4l_dbg(1, drv_dbg, dev, + "s526_gpct_winsn: Counter Mode Register: %x\n", cmReg.value); + + /* Check what Application of Counter this channel is configured for */ + switch (subdpriv->config[subdev_channel].app) { + case PositionMeasurement: + a4l_dbg(1, drv_dbg, dev, "s526_gpct_winsn: INSN_WRITE: PM\n"); + outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H, + subdev_channel)); + outw(0xFFFF & (*data), + ADDR_CHAN_REG(REG_C0L, subdev_channel)); + break; + + case SinglePulseGeneration: + a4l_dbg(1, drv_dbg, dev, "s526_gpct_winsn: INSN_WRITE: SPG\n"); + outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H, + subdev_channel)); + outw(0xFFFF & (*data), + ADDR_CHAN_REG(REG_C0L, subdev_channel)); + break; + + case PulseTrainGeneration: + /* + * data[0] contains the PULSE_WIDTH + * data[1] contains the PULSE_PERIOD + * @pre PULSE_PERIOD > PULSE_WIDTH > 0 + * The above periods must be expressed as a multiple of the + * pulse frequency on the selected source + */ + a4l_dbg(1, drv_dbg, dev, "s526_gpct_winsn: INSN_WRITE: PTG\n"); + if ((data[1] > data[0]) && (data[0] > 0)) { + (subdpriv->config[subdev_channel]).data[0] = data[0]; + (subdpriv->config[subdev_channel]).data[1] = data[1]; + } else { + a4l_err(dev, + "s526_gpct_winsn: 
INSN_WRITE: PTG: Problem with Pulse params -> %du %du\n", + data[0], data[1]); + return -EINVAL; + } + + value = (short)((*data >> 16) & 0xFFFF); + outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel)); + value = (short)(*data & 0xFFFF); + outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel)); + break; + default: /* Impossible */ + a4l_err(dev, + "s526_gpct_winsn: INSN_WRITE: Functionality %d not implemented yet\n", + subdpriv->config[subdev_channel].app); + return -EINVAL; + } + + return 0; +} + +static int s526_ai_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + struct s526_subd_ai_priv *subdpriv = + (struct s526_subd_ai_priv *)subd->priv; + unsigned int *data = (unsigned int *)insn->data; + + if (insn->data_size < sizeof(unsigned int)) + return -EINVAL; + + /* data[0] : channels was set in relevant bits. + * data[1] : delay + */ + /* COMMENT: abbotti 2008-07-24: I don't know why you'd want to + * enable channels here. The channel should be enabled in the + * INSN_READ handler. */ + + /* Enable ADC interrupt */ + outw(ISR_ADC_DONE, ADDR_REG(REG_IER)); + a4l_dbg(1, drv_dbg, dev, + "s526_ai_insn_config: ADC current value: 0x%04x\n", + inw(ADDR_REG(REG_ADC))); + + subdpriv->config = (data[0] & 0x3FF) << 5; + if (data[1] > 0) + subdpriv->config |= 0x8000; /* set the delay */ + + subdpriv->config |= 0x0001; /* ADC start bit. */ + + return 0; +} + +static int s526_ai_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + struct s526_subd_ai_priv *subdpriv = + (struct s526_subd_ai_priv *)subd->priv; + uint16_t *data = (uint16_t *)insn->data; + int n, i; + int chan = CR_CHAN(insn->chan_desc); + uint16_t value; + uint16_t d; + uint16_t status; + + /* Set configured delay, enable channel for this channel only, + * select "ADC read" channel, set "ADC start" bit. */ + value = (subdpriv->config & 0x8000) | + ((1 << 5) << chan) | (chan << 1) | 0x0001; + + /* convert n samples */ + for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) { + /* trigger conversion */ + outw(value, ADDR_REG(REG_ADC)); + a4l_dbg(1, drv_dbg, dev, "s526_ai_rinsn: Wrote 0x%04x to ADC\n", + value); + + /* wait for conversion to end */ + for (i = 0; i < S526_AI_TIMEOUT; i++) { + status = inw(ADDR_REG(REG_ISR)); + if (status & ISR_ADC_DONE) { + outw(ISR_ADC_DONE, ADDR_REG(REG_ISR)); + break; + } + } + if (i == S526_AI_TIMEOUT) { + a4l_warn(dev, "s526_ai_rinsn: ADC(0x%04x) timeout\n", + inw(ADDR_REG(REG_ISR))); + return -ETIMEDOUT; + } + + /* read data */ + d = inw(ADDR_REG(REG_ADD)); + a4l_dbg(1, drv_dbg, dev, "s526_ai_rinsn: AI[%d]=0x%04x\n", + n, (uint16_t)(d & 0xFFFF)); + + /* munge data */ + data[n] = d ^ 0x8000; + } + + return 0; +} + +static int s526_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + struct s526_subd_ao_priv *subdpriv = + (struct s526_subd_ao_priv *)subd->priv; + uint16_t *data = (uint16_t *)insn->data; + int i; + int chan = CR_CHAN(insn->chan_desc); + uint16_t val; + + val = chan << 1; + outw(val, ADDR_REG(REG_DAC)); + + for (i = 0; i < insn->data_size / sizeof(uint16_t); i++) { + outw(data[i], ADDR_REG(REG_ADD)); /* write the data to preload register */ + subdpriv->readback[chan] = data[i]; + outw(val + 1, ADDR_REG(REG_DAC)); /* starts the D/A conversion. 
*/ + } + + return 0; +} + +static int s526_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct s526_subd_ao_priv *subdpriv = + (struct s526_subd_ao_priv *)subd->priv; + uint16_t *data = (uint16_t *)insn->data; + int i; + int chan = CR_CHAN(insn->chan_desc); + + for (i = 0; i < insn->data_size / sizeof(uint16_t); i++) + data[i] = subdpriv->readback[chan]; + + return 0; +} + +static int s526_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + struct s526_subd_dio_priv *subdpriv = + (struct s526_subd_dio_priv *)subd->priv; + unsigned int *data = (unsigned int *)insn->data; + int chan = CR_CHAN(insn->chan_desc); + int group, mask; + + group = chan >> 2; + mask = 0xF << (group << 2); + + switch (data[0]) { + case A4L_INSN_CONFIG_DIO_OUTPUT: + subdpriv->state |= 1 << (group + 10); /* bit 10/11 set the + * group 1/2's mode */ + subdpriv->io_bits |= mask; + break; + case A4L_INSN_CONFIG_DIO_INPUT: + subdpriv->state &= ~(1 << (group + 10)); /* 1 is output, 0 is + * input. */ + subdpriv->io_bits &= ~mask; + break; + case A4L_INSN_CONFIG_DIO_QUERY: + data[1] = + (subdpriv->io_bits & mask) ? A4L_OUTPUT : A4L_INPUT; + return 0; + default: + return -EINVAL; + } + + outw(subdpriv->state, ADDR_REG(REG_DIO)); + + return 0; +} + +static int s526_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct a4l_device *dev = subd->dev; + struct s526_subd_dio_priv *subdpriv = + (struct s526_subd_dio_priv *)subd->priv; + uint8_t *data = (uint8_t *)insn->data; + + if (insn->data_size != 2 * sizeof(uint8_t)) + return -EINVAL; + + if (data[0]) { + subdpriv->state &= ~(data[0]); + subdpriv->state |= data[0] & data[1]; + + outw(subdpriv->state, ADDR_REG(REG_DIO)); + } + + data[1] = inw(ADDR_REG(REG_DIO)) & 0xFF; /* low 8 bits are the data */ + + return 0; +} + +/* --- Channels descriptor --- */ + +static struct a4l_channels_desc s526_chan_desc_gpct = { + .mode = A4L_CHAN_GLOBAL_CHANDESC, + .length = S526_GPCT_CHANS, + .chans = { + {A4L_CHAN_AREF_GROUND, S526_GPCT_BITS}, + }, +}; + +static struct a4l_channels_desc s526_chan_desc_ai = { + .mode = A4L_CHAN_GLOBAL_CHANDESC, + .length = S526_AI_CHANS, + .chans = { + {A4L_CHAN_AREF_GROUND, S526_AI_BITS}, + }, +}; + +static struct a4l_channels_desc s526_chan_desc_ao = { + .mode = A4L_CHAN_GLOBAL_CHANDESC, + .length = S526_AO_CHANS, + .chans = { + {A4L_CHAN_AREF_GROUND, S526_AO_BITS}, + }, +}; + +static struct a4l_channels_desc s526_chan_desc_dio = { + .mode = A4L_CHAN_GLOBAL_CHANDESC, + .length = S526_DIO_CHANS, + .chans = { + {A4L_CHAN_AREF_GROUND, S526_DIO_BITS}, + }, +}; + +/* --- Subdevice initialization functions --- */ + +/* General purpose counter/timer (gpct) */ +static void setup_subd_gpct(struct a4l_subdevice *subd) +{ + subd->flags = A4L_SUBD_COUNTER; + subd->chan_desc = &s526_chan_desc_gpct; + subd->insn_read = s526_gpct_rinsn; + subd->insn_config = s526_gpct_insn_config; + subd->insn_write = s526_gpct_winsn; +} + +/* Analog input subdevice */ +static void setup_subd_ai(struct a4l_subdevice *subd) +{ + subd->flags = A4L_SUBD_AI; + subd->chan_desc = &s526_chan_desc_ai; + subd->rng_desc = &a4l_range_bipolar10; + subd->insn_read = s526_ai_rinsn; + subd->insn_config = s526_ai_insn_config; +} + +/* Analog output subdevice */ +static void setup_subd_ao(struct a4l_subdevice *subd) +{ + subd->flags = A4L_SUBD_AO; + subd->chan_desc = &s526_chan_desc_ao; + subd->rng_desc = &a4l_range_bipolar10; + subd->insn_write = s526_ao_winsn; + 
subd->insn_read = s526_ao_rinsn; +} + +/* Digital i/o subdevice */ +static void setup_subd_dio(struct a4l_subdevice *subd) +{ + subd->flags = A4L_SUBD_DIO; + subd->chan_desc = &s526_chan_desc_dio; + subd->rng_desc = &range_digital; + subd->insn_bits = s526_dio_insn_bits; + subd->insn_config = s526_dio_insn_config; +} + +struct setup_subd { + void (*setup_func) (struct a4l_subdevice *); + int sizeof_priv; +}; + +static struct setup_subd setup_subds[4] = { + { + .setup_func = setup_subd_gpct, + .sizeof_priv = sizeof(struct s526_subd_gpct_priv), + }, + { + .setup_func = setup_subd_ai, + .sizeof_priv = sizeof(struct s526_subd_ai_priv), + }, + { + .setup_func = setup_subd_ao, + .sizeof_priv = sizeof(struct s526_subd_ao_priv), + }, + { + .setup_func = setup_subd_dio, + .sizeof_priv = sizeof(struct s526_subd_dio_priv), + }, +}; + +static int dev_s526_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg) +{ + int io_base; + int i; + int err = 0; + + if (arg->opts == NULL || arg->opts_size < sizeof(unsigned long)) { + a4l_warn(dev, + "dev_s526_attach: no attach options specified; " + "using defaults: addr=0x%x\n", + S526_DEFAULT_ADDRESS); + io_base = S526_DEFAULT_ADDRESS; + } else { + io_base = ((unsigned long *)arg->opts)[0]; + } + + if (!request_region(io_base, S526_IOSIZE, "s526")) { + a4l_err(dev, "dev_s526_attach: I/O port conflict\n"); + return -EIO; + } + + /* Allocate the subdevice structures. */ + for (i = 0; i < 4; i++) { + struct a4l_subdevice *subd = a4l_alloc_subd(setup_subds[i].sizeof_priv, + setup_subds[i].setup_func); + + if (subd == NULL) + return -ENOMEM; + + err = a4l_add_subd(dev, subd); + if (err != i) + return err; + } + + devpriv->io_base = io_base; + + a4l_info(dev, " attached (address = 0x%x)\n", io_base); + + return 0; +} + +static int dev_s526_detach(struct a4l_device *dev) +{ + int err = 0; + + if (devpriv->io_base != 0) + release_region(devpriv->io_base, S526_IOSIZE); + + return err; +} + +static struct a4l_driver drv_s526 = { + .owner = THIS_MODULE, + .board_name = "analogy_s526", + .driver_name = "s526", + .attach = dev_s526_attach, + .detach = dev_s526_detach, + .privdata_size = sizeof(s526_priv_t), +}; + +static int __init drv_s526_init(void) +{ + return a4l_register_drv(&drv_s526); +} + +static void __exit drv_s526_cleanup(void) +{ + a4l_unregister_drv(&drv_s526); +} + +MODULE_DESCRIPTION("Analogy driver for Sensoray Model 526 board."); +MODULE_LICENSE("GPL"); + +module_init(drv_s526_init); +module_exit(drv_s526_cleanup); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c new file mode 100644 index 0000000..a6c9780 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c @@ -0,0 +1,449 @@ +/* + * Analogy for Linux, subdevice, channel and range related features + * + * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org> + * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr> + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Common ranges declarations --- */
+
+struct a4l_rngtab rng_bipolar10 = { 1, {
+	RANGE_V(-10, 10),
+}};
+struct a4l_rngdesc a4l_range_bipolar10 = RNG_GLOBAL(rng_bipolar10);
+
+struct a4l_rngtab rng_bipolar5 = { 1, {
+	RANGE_V(-5, 5),
+}};
+struct a4l_rngdesc a4l_range_bipolar5 = RNG_GLOBAL(rng_bipolar5);
+
+struct a4l_rngtab rng_unipolar10 = { 1, {
+	RANGE_V(0, 10),
+}};
+struct a4l_rngdesc a4l_range_unipolar10 = RNG_GLOBAL(rng_unipolar10);
+
+struct a4l_rngtab rng_unipolar5 = { 1, {
+	RANGE_V(0, 5),
+}};
+struct a4l_rngdesc a4l_range_unipolar5 = RNG_GLOBAL(rng_unipolar5);
+
+struct a4l_rngtab rng_unknown = { 1, {
+	RANGE(0, 1),
+}};
+struct a4l_rngdesc a4l_range_unknown = RNG_GLOBAL(rng_unknown);
+
+struct a4l_rngtab rng_fake = { 0, {
+	RANGE(0, 0),
+}};
+struct a4l_rngdesc a4l_range_fake = RNG_GLOBAL(rng_fake);
+
+/* --- Basic channel / range management functions --- */
+
+struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice *sb, int idx)
+{
+	int i = (sb->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? idx : 0;
+	return &(sb->chan_desc->chans[i]);
+}
+
+struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice *sb, int chidx, int rngidx)
+{
+	int i = (sb->rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? chidx : 0;
+	return &(sb->rng_desc->rngtabs[i]->rngs[rngidx]);
+}
+
+int a4l_check_chanlist(struct a4l_subdevice *subd,
+		       unsigned char nb_chan, unsigned int *chans)
+{
+	int i, j;
+
+	if (nb_chan > subd->chan_desc->length)
+		return -EINVAL;
+
+	for (i = 0; i < nb_chan; i++) {
+		j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? i : 0;
+
+		if (CR_CHAN(chans[i]) >= subd->chan_desc->length) {
+			__a4l_err("a4l_check_chanlist: "
+				  "chan idx out of range (%u>=%lu)\n",
+				  CR_CHAN(chans[i]), subd->chan_desc->length);
+			return -EINVAL;
+		}
+		if (CR_AREF(chans[i]) != 0 &&
+		    (CR_AREF(chans[i]) & subd->chan_desc->chans[j].flags) == 0)
+		{
+			__a4l_err("a4l_check_chanlist: "
+				  "bad channel type\n");
+			return -EINVAL;
+		}
+	}
+
+	if (subd->rng_desc == NULL)
+		return 0;
+
+	for (i = 0; i < nb_chan; i++) {
+		j = (subd->rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+			i : 0;
+
+		if (CR_RNG(chans[i]) >= subd->rng_desc->rngtabs[j]->length) {
+			__a4l_err("a4l_check_chanlist: "
+				  "rng idx out of range (%u>=%u)\n",
+				  CR_RNG(chans[i]),
+				  subd->rng_desc->rngtabs[j]->length);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* --- Upper layer functions --- */
+
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+				      void (*setup)(struct a4l_subdevice *))
+{
+	struct a4l_subdevice *subd;
+
+	subd = rtdm_malloc(sizeof(struct a4l_subdevice) + sizeof_priv);
+
+	if (subd != NULL) {
+		memset(subd, 0, sizeof(struct a4l_subdevice) + sizeof_priv);
+		if (setup != NULL)
+			setup(subd);
+	}
+
+	return subd;
+}
+
+int a4l_add_subd(struct a4l_device * dev, struct a4l_subdevice * subd)
+{
+	struct list_head *this;
+	int i = 0;
+
+	/* Basic checking */
+	if (dev == NULL || subd == NULL)
+		return -EINVAL;
+
+	list_add_tail(&subd->list, &dev->subdvsq);
+
+	subd->dev = dev;
+
+	list_for_each(this, &dev->subdvsq) {
+		i++;
+	}
+
+	subd->idx = --i;
+
+	return i;
+}
+
+struct a4l_subdevice *a4l_get_subd(struct a4l_device *dev, int idx)
+{
+	int i = 0;
+	struct a4l_subdevice *subd = NULL;
+	struct list_head *this;
+
+	/* This function is not optimized as we do not go through the
+	   transfer structure */
+
+	list_for_each(this, &dev->subdvsq) {
+		if (idx == i++)
+			subd = list_entry(this, struct a4l_subdevice, list);
+	}
+
+	return subd;
+}
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	int i, ret = 0;
+	a4l_sbinfo_t *subd_info;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_subdinfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	subd_info = rtdm_malloc(dev->transfer.nb_subd *
+				sizeof(a4l_sbinfo_t));
+	if (subd_info == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->transfer.nb_subd; i++) {
+		subd_info[i].flags = dev->transfer.subds[i]->flags;
+		subd_info[i].status = dev->transfer.subds[i]->status;
+		subd_info[i].nb_chan =
+			(dev->transfer.subds[i]->chan_desc != NULL) ?
+			dev->transfer.subds[i]->chan_desc->length : 0;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   subd_info, dev->transfer.nb_subd *
+				   sizeof(a4l_sbinfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(subd_info);
+
+	return ret;
+
+}
+
+int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_chinfo_arg_t inarg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_nbchaninfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg, arg,
+				     sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_nbchaninfo: subdevice index "
+			  "out of range\n");
+		return -EINVAL;
+	}
+
+	if (dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL)
+		inarg.info = (void *)0;
+	else
+		inarg.info = (void *)(unsigned long)
+			dev->transfer.subds[inarg.idx_subd]->chan_desc->length;
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   &inarg, sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_chinfo_t *chan_info;
+	a4l_chinfo_arg_t inarg;
+	struct a4l_channels_desc *chan_desc;
+	struct a4l_rngdesc *rng_desc;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_chaninfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg, arg,
+				     sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_chaninfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	chan_desc = dev->transfer.subds[inarg.idx_subd]->chan_desc;
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+
+	if (chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_chaninfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if (rng_desc == NULL)
+		rng_desc = &a4l_range_fake;
+
+	chan_info = rtdm_malloc(chan_desc->length * sizeof(a4l_chinfo_t));
+	if (chan_info == NULL)
+		return -ENOMEM;
+
+	/* If the channel descriptor is global, the fields are filled
+	   with the same instance of channel descriptor */
+	for (i = 0; i < chan_desc->length; i++) {
+		int j =
+			(chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? i : 0;
+		int k = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+			i : 0;
+
+		chan_info[i].chan_flags = chan_desc->chans[j].flags;
+		chan_info[i].nb_bits = chan_desc->chans[j].nb_bits;
+		chan_info[i].nb_rng = rng_desc->rngtabs[k]->length;
+
+		if (chan_desc->mode == A4L_CHAN_GLOBAL_CHANDESC)
+			chan_info[i].chan_flags |= A4L_CHAN_GLOBAL;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   inarg.info,
+				   chan_info,
+				   chan_desc->length *
+				   sizeof(a4l_chinfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(chan_info);
+
+	return ret;
+}
+
+int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg)
+{
+	int i;
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_rnginfo_arg_t inarg;
+	struct a4l_rngdesc *rng_desc;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_nbrnginfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg,
+				     arg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_nbrnginfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	if (dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_nbrnginfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if (inarg.idx_chan >=
+	    dev->transfer.subds[inarg.idx_subd]->chan_desc->length) {
+		__a4l_err("a4l_ioctl_nbrnginfo: bad channel index\n");
+		return -EINVAL;
+	}
+
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+	if (rng_desc != NULL) {
+		i = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+			inarg.idx_chan : 0;
+		inarg.info = (void *)(unsigned long)
+			rng_desc->rngtabs[i]->length;
+	} else
+		inarg.info = (void *)0;
+
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   &inarg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	unsigned int tmp;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_rngdesc *rng_desc;
+	a4l_rnginfo_t *rng_info;
+	a4l_rnginfo_arg_t inarg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_rnginfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg,
+				     arg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_rnginfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	if (dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_rnginfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if (inarg.idx_chan >=
+	    dev->transfer.subds[inarg.idx_subd]->chan_desc->length) {
+		__a4l_err("a4l_ioctl_rnginfo: bad channel index\n");
+		return -EINVAL;
+	}
+
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+	if (rng_desc == NULL) {
+		__a4l_err("a4l_ioctl_rnginfo: no range descriptor "
+			  "for channel %d\n", inarg.idx_chan);
+		return -EINVAL;
+	}
+
+	/* If the range descriptor is global,
+	   we take the first instance */
+	tmp = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+		inarg.idx_chan : 0;
+
+	rng_info = rtdm_malloc(rng_desc->rngtabs[tmp]->length *
+			       sizeof(a4l_rnginfo_t));
+	if (rng_info == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < rng_desc->rngtabs[tmp]->length; i++) {
+		rng_info[i].min = rng_desc->rngtabs[tmp]->rngs[i].min;
+		rng_info[i].max = rng_desc->rngtabs[tmp]->rngs[i].max;
+		rng_info[i].flags = rng_desc->rngtabs[tmp]->rngs[i].flags;
+
+		if (rng_desc->mode == A4L_RNG_GLOBAL_RNGDESC)
+			rng_info[i].flags |= A4L_RNG_GLOBAL;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   inarg.info,
+				   rng_info,
+				   rng_desc->rngtabs[tmp]->length *
+				   sizeof(a4l_rnginfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(rng_info);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig
new file mode 100644
index 0000000..15db782
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig
@@ -0,0 +1,13 @@
+
+config XENO_DRIVERS_ANALOGY_FAKE
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "Fake driver"
+	default n
+	help
+
+	The fake driver provides several subdevices:
+	- 0: analog input;
+	- 1: digital input / output;
+	- 2: analog output;
+	- 3: analog input; data written to subdevice 2 can be
+	read back here.
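As an illustration of the loopback pair described in this help text: a synchronous write to subdevice 2 is stored by ao_insn_write() and handed back by subdevice 3's ai2_insn_read() (both defined in fake.c below), so a single write/read round-trip exercises it. The following is a minimal user-space sketch, not part of this patch; it assumes the board was attached as "analogy0" and that the a4l_open()/a4l_sync_write()/a4l_sync_read() helpers and the CHAN() macro from the standard Analogy user library are available, with descriptor filling and most error handling elided:

	#include <stdio.h>
	#include <stdint.h>
	#include <rtdm/analogy.h>

	int main(void)
	{
		a4l_desc_t dsc;
		uint16_t out = 0x1234, in = 0;

		/* Attach to the fake board (assumed device name) */
		if (a4l_open(&dsc, "analogy0") < 0)
			return 1;

		/* One sample into the AO subdevice (2)... */
		a4l_sync_write(&dsc, 2, CHAN(0), 0, &out, sizeof(out));

		/* ...comes back from the mirror AI subdevice (3) */
		a4l_sync_read(&dsc, 3, CHAN(0), 0, &in, sizeof(in));

		printf("wrote 0x%04x, read back 0x%04x\n", out, in);
		a4l_close(&dsc);
		return 0;
	}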
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile
new file mode 100644
index 0000000..e92e5bc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile
@@ -0,0 +1,8 @@
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_FAKE) += analogy_fake.o
+
+analogy_fake-y := fake.o
+
+analogy_loop-y := loop.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c
new file mode 100644
index 0000000..c80c1cc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c
@@ -0,0 +1,686 @@
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#define TASK_PERIOD 1000000
+
+#define AI_SUBD 0
+#define DIO_SUBD 1
+#define AO_SUBD 2
+#define AI2_SUBD 3
+
+#define TRANSFER_SIZE 0x1000
+
+/* --- Driver related structures --- */
+struct fake_priv {
+	/* Attach configuration parameters
+	   (they should be relocated in ai_priv) */
+	unsigned long amplitude_div;
+	unsigned long quanta_cnt;
+
+	/* Task descriptor */
+	rtdm_task_t task;
+
+	/* Statuses of the asynchronous subdevices */
+	int ai_running;
+	int ao_running;
+	int ai2_running;
+};
+
+struct ai_priv {
+
+	/* Specific timing fields */
+	unsigned long scan_period_ns;
+	unsigned long convert_period_ns;
+	unsigned long current_ns;
+	unsigned long reminder_ns;
+	unsigned long long last_ns;
+
+	/* Misc fields */
+	unsigned long amplitude_div;
+	unsigned long quanta_cnt;
+};
+
+struct ao_ai2_priv {
+	/* Asynchronous loop stuff */
+	uint8_t buffer[TRANSFER_SIZE];
+	int count;
+	/* Synchronous loop stuff */
+	uint16_t insn_value;
+};
+
+struct dio_priv {
+	/* Bits status */
+	uint16_t bits_values;
+};
+
+/* --- Channels / ranges part --- */
+
+/* Channels descriptors */
+
+static struct a4l_channels_desc analog_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 16},
+	},
+};
+
+static struct a4l_channels_desc dio_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 16,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+/* Ranges tab */
+static struct a4l_rngtab analog_rngtab = {
+	.length = 2,
+	.rngs = {
+		RANGE_V(-5,5),
+		RANGE_V(-10,10),
+	},
+};
+/* Ranges descriptor */
+static struct a4l_rngdesc analog_rngdesc = RNG_GLOBAL(analog_rngtab);
+
+/* Command options masks */
+
+static struct a4l_cmd_desc ai_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+static struct a4l_cmd_desc ao_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+/* --- Analog input simulation --- */
+
+/* --- Values generation for 1st AI --- */
+
+static inline uint16_t ai_value_output(struct ai_priv *priv)
+{
+	static uint16_t output_tab[8] = {
+		0x0001, 0x2000, 0x4000, 0x6000,
+		0x8000, 0xa000, 0xc000, 0xffff
+	};
+	static unsigned int output_idx;
+	static DEFINE_RTDM_LOCK(output_lock);
+
+	unsigned long flags;
+	unsigned int idx;
+
+	rtdm_lock_get_irqsave(&output_lock, flags);
+
+	output_idx += priv->quanta_cnt;
+	if (output_idx >= 8)
+		output_idx -= 8;
+	idx = output_idx;
+
+	rtdm_lock_put_irqrestore(&output_lock, flags);
+
+	return output_tab[idx] / priv->amplitude_div;
+}
+
+int ai_push_values(struct a4l_subdevice *subd)
+{
+	uint64_t now_ns, elapsed_ns = 0;
+	struct a4l_cmd_desc *cmd;
+	struct ai_priv *priv;
+	int i = 0;
+
+	if (!subd)
+		return -EINVAL;
+
+	priv = (struct ai_priv *)subd->priv;
+
+	cmd = a4l_get_cmd(subd);
+	if (!cmd)
+		return -EPIPE;
+
+	now_ns = a4l_get_time();
+	elapsed_ns += now_ns - priv->last_ns + priv->reminder_ns;
+	priv->last_ns = now_ns;
+
+	while (elapsed_ns >= priv->scan_period_ns) {
+		int j;
+
+		for (j = 0; j < cmd->nb_chan; j++) {
+			uint16_t value = ai_value_output(priv);
+			a4l_buf_put(subd, &value, sizeof(uint16_t));
+		}
+
+		elapsed_ns -= priv->scan_period_ns;
+		i++;
+	}
+
+	priv->current_ns += i * priv->scan_period_ns;
+	priv->reminder_ns = elapsed_ns;
+
+	if (i != 0)
+		a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+/* --- Data retrieval for AO --- */
+
+int ao_pull_values(struct a4l_subdevice *subd)
+{
+	struct ao_ai2_priv *priv = (struct ao_ai2_priv *)subd->priv;
+	int err;
+
+	/* Let's have a look at how many samples are available */
+	priv->count = a4l_buf_count(subd) < TRANSFER_SIZE ?
+		      a4l_buf_count(subd) : TRANSFER_SIZE;
+
+	if (!priv->count)
+		return 0;
+
+	err = a4l_buf_get(subd, priv->buffer, priv->count);
+	if (err < 0) {
+		a4l_err(subd->dev, "ao_pull_values: a4l_buf_get failed (err=%d)\n", err);
+		priv->count = 0;
+		return err;
+
+	}
+
+	a4l_info(subd->dev, " %d bytes added to private buffer from async p=%p\n",
+		 priv->count, subd->buf->buf);
+
+	a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+/* --- Data redirection for 2nd AI (from AO) --- */
+
+int ai2_push_values(struct a4l_subdevice *subd)
+{
+	struct ao_ai2_priv *priv = *((struct ao_ai2_priv **)subd->priv);
+	int err = 0;
+
+	if (priv->count) {
+		err = a4l_buf_put(subd, priv->buffer, priv->count);
+
+		/* If there is no more room in the asynchronous
+		   buffer, data are likely to be dropped; this is just
+		   a test driver, so no trickier recovery is needed */
+		err = (err == -EAGAIN) ?
+			0 : err;
+
+		a4l_info(subd->dev, "%d bytes added to async buffer p=%p\n",
+			 priv->count, subd->buf->buf);
+
+		priv->count = 0;
+		if (err < 0)
+			a4l_err(subd->dev,
+				"ai2_push_values: "
+				"a4l_buf_put failed (err=%d)\n", err);
+		else
+			a4l_buf_evt(subd, 0);
+	}
+
+	return err;
+}
+
+/* --- Asynchronous AI functions --- */
+
+static int ai_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ai_priv *ai_priv = (struct ai_priv *)subd->priv;
+
+	ai_priv->scan_period_ns = cmd->scan_begin_arg;
+	ai_priv->convert_period_ns = (cmd->convert_src == TRIG_TIMER) ?
+		cmd->convert_arg : 0;
+
+	a4l_dbg(1, drv_dbg, subd->dev, "scan_period=%luns convert_period=%luns\n",
+		ai_priv->scan_period_ns, ai_priv->convert_period_ns);
+
+	ai_priv->last_ns = a4l_get_time();
+
+	ai_priv->current_ns = ((unsigned long)ai_priv->last_ns);
+	ai_priv->reminder_ns = 0;
+
+	priv->ai_running = 1;
+
+	return 0;
+
+}
+
+static int ai_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	if (cmd->scan_begin_src == TRIG_TIMER)
+	{
+		if (cmd->scan_begin_arg < 1000)
+			return -EINVAL;
+
+		if (cmd->convert_src == TRIG_TIMER &&
+		    cmd->scan_begin_arg < (cmd->convert_arg * cmd->nb_chan))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ai_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	priv->ai_running = 0;
+}
+
+static void ai_munge(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < size / sizeof(uint16_t); i++)
+		((uint16_t *)buf)[i] += 1;
+}
+
+/* --- Asynchronous AO functions --- */
+
+int ao_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	return 0;
+}
+
+int ao_trigger(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ao_running = 1;
+	return 0;
+}
+
+void ao_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ao_ai2_priv *ao_priv = (struct ao_ai2_priv *)subd->priv;
+	int running;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ao_running = 0;
+
+	running = priv->ai2_running;
+	if (running) {
+		struct a4l_subdevice *ai2_subd =
+			(struct a4l_subdevice *)a4l_get_subd(subd->dev, AI2_SUBD);
+		/* Here, we have not saved the required amount of
+		   data; so we cannot know whether or not it is the
+		   end of the acquisition; that is why we force it */
+		priv->ai2_running = 0;
+		ao_priv->count = 0;
+
+		a4l_info(subd->dev, "subd %d cancelling subd %d too \n",
+			 subd->idx, AI2_SUBD);
+
+		a4l_buf_evt(ai2_subd, A4L_BUF_EOA);
+	}
+}
+
+/* --- Asynchronous 2nd AI functions --- */
+
+int ai2_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ai2_running = 1;
+	return 0;
+}
+
+void ai2_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ao_ai2_priv *ai2_priv = *((struct ao_ai2_priv **)subd->priv);
+
+	int running;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ai2_running = 0;
+
+	running = priv->ao_running;
+	if (running) {
+		struct a4l_subdevice *ao_subd =
+			(struct a4l_subdevice *)a4l_get_subd(subd->dev, AO_SUBD);
+		/* Here, we have not saved the required amount of
+		   data; so we cannot know whether or not it 
is the + end of the acquisition; that is why we force it */ + priv->ao_running = 0; + ai2_priv->count = 0; + + a4l_info(subd->dev, "subd %d cancelling subd %d too \n", + subd->idx, AO_SUBD); + + a4l_buf_evt(ao_subd, A4L_BUF_EOA); + } + +} + + +/* --- Synchronous AI functions --- */ + +static int ai_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct ai_priv *priv = (struct ai_priv *)subd->priv; + uint16_t *data = (uint16_t *)insn->data; + int i; + + for(i = 0; i < insn->data_size / sizeof(uint16_t); i++) + data[i] = ai_value_output(priv); + + return 0; +} + +/* --- Synchronous DIO function --- */ + +static int dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct dio_priv *priv = (struct dio_priv *)subd->priv; + uint16_t *data = (uint16_t *)insn->data; + + if (insn->data_size != 2 * sizeof(uint16_t)) + return -EINVAL; + + if (data[0] != 0) { + priv->bits_values &= ~(data[0]); + priv->bits_values |= (data[0] & data[1]); + } + + data[1] = priv->bits_values; + + return 0; +} + +/* --- Synchronous AO + AI2 functions --- */ + +int ao_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct ao_ai2_priv *priv = (struct ao_ai2_priv *)subd->priv; + uint16_t *data = (uint16_t *)insn->data; + + /* Checks the buffer size */ + if (insn->data_size != sizeof(uint16_t)) + return -EINVAL; + + /* Retrieves the value to memorize */ + priv->insn_value = data[0]; + + return 0; +} + +int ai2_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + struct ao_ai2_priv *priv = *((struct ao_ai2_priv **)subd->priv); + uint16_t *data = (uint16_t *)insn->data; + + /* Checks the buffer size */ + if (insn->data_size != sizeof(uint16_t)) + return -EINVAL; + + /* Sets the memorized value */ + data[0] = priv->insn_value; + + return 0; +} + +/* --- Global task part --- */ + +/* One task is enough for all the asynchronous subdevices, it is just a fake + * driver after all + */ +static void task_proc(void *arg) +{ + struct a4l_subdevice *ai_subd, *ao_subd, *ai2_subd; + struct a4l_device *dev; + struct fake_priv *priv; + int running; + + dev = arg; + ai_subd = a4l_get_subd(dev, AI_SUBD); + ao_subd = a4l_get_subd(dev, AO_SUBD); + ai2_subd = a4l_get_subd(dev, AI2_SUBD); + + priv = dev->priv; + + while(!rtdm_task_should_stop()) { + + /* copy sample static data from the subd private buffer to the + * asynchronous buffer + */ + running = priv->ai_running; + if (running && ai_push_values(ai_subd) < 0) { + /* on error, wait for detach to destroy the task */ + rtdm_task_sleep(RTDM_TIMEOUT_INFINITE); + continue; + } + + /* + * pull the data from the output subdevice (asynchronous buffer) + * into its private buffer + */ + running = priv->ao_running; + if (running && ao_pull_values(ao_subd) < 0) { + rtdm_task_sleep(RTDM_TIMEOUT_INFINITE); + continue; + } + + running = priv->ai2_running; + /* + * then loop it to the ai2 subd since their private data is shared: so + * pull the data from the private buffer back into the device's + * asynchronous buffer + */ + if (running && ai2_push_values(ai2_subd) < 0) { + rtdm_task_sleep(RTDM_TIMEOUT_INFINITE); + continue; + } + + rtdm_task_sleep(TASK_PERIOD); + } +} + +/* --- Initialization functions --- */ + +void setup_ai_subd(struct a4l_subdevice *subd) +{ + /* Fill the subdevice structure */ + subd->flags |= A4L_SUBD_AI; + subd->flags |= A4L_SUBD_CMD; + subd->flags |= A4L_SUBD_MMAP; + subd->rng_desc = &analog_rngdesc; + subd->chan_desc = &analog_chandesc; + subd->do_cmd = 
ai_cmd; + subd->do_cmdtest = ai_cmdtest; + subd->cancel = ai_cancel; + subd->munge = ai_munge; + subd->cmd_mask = &ai_cmd_mask; + subd->insn_read = ai_insn_read; +} + +void setup_dio_subd(struct a4l_subdevice *subd) +{ + /* Fill the subdevice structure */ + subd->flags |= A4L_SUBD_DIO; + subd->chan_desc = &dio_chandesc; + subd->rng_desc = &range_digital; + subd->insn_bits = dio_insn_bits; +} + +void setup_ao_subd(struct a4l_subdevice *subd) +{ + /* Fill the subdevice structure */ + subd->flags |= A4L_SUBD_AO; + subd->flags |= A4L_SUBD_CMD; + subd->flags |= A4L_SUBD_MMAP; + subd->rng_desc = &analog_rngdesc; + subd->chan_desc = &analog_chandesc; + subd->do_cmd = ao_cmd; + subd->cancel = ao_cancel; + subd->trigger = ao_trigger; + subd->cmd_mask = &ao_cmd_mask; + subd->insn_write = ao_insn_write; +} + +void setup_ai2_subd(struct a4l_subdevice *subd) +{ + /* Fill the subdevice structure */ + subd->flags |= A4L_SUBD_AI; + subd->flags |= A4L_SUBD_CMD; + subd->flags |= A4L_SUBD_MMAP; + subd->rng_desc = &analog_rngdesc; + subd->chan_desc = &analog_chandesc; + subd->do_cmd = ai2_cmd; + subd->cancel = ai2_cancel; + subd->cmd_mask = &ai_cmd_mask; + subd->insn_read = ai2_insn_read; +} + +/* --- Attach / detach functions --- */ + +int test_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg) +{ + typedef void (*setup_subd_function) (struct a4l_subdevice *subd); + struct fake_priv *priv = (struct fake_priv *) dev->priv; + struct a4l_subdevice *subd; + unsigned long tmp; + struct ai_priv *r; + int i, ret = 0; + + struct initializers { + struct a4l_subdevice *subd; + setup_subd_function init; + int private_len; + char *name; + int index; + } sds[] = { + [AI_SUBD] = { + .name = "AI", + .private_len = sizeof(struct ai_priv), + .init = setup_ai_subd, + .index = AI_SUBD, + .subd = NULL, + }, + [DIO_SUBD] = { + .name = "DIO", + .private_len = sizeof(struct dio_priv), + .init = setup_dio_subd, + .index = DIO_SUBD, + .subd = NULL, + }, + [AO_SUBD] = { + .name = "AO", + .private_len = sizeof(struct ao_ai2_priv), + .init = setup_ao_subd, + .index = AO_SUBD, + .subd = NULL, + }, + [AI2_SUBD] = { + .name = "AI2", + .private_len = sizeof(struct ao_ai2_priv *), + .init = setup_ai2_subd, + .index = AI2_SUBD, + .subd = NULL, + }, + }; + + a4l_dbg(1, drv_dbg, dev, "starting attach procedure...\n"); + + /* Set default values for attach parameters */ + priv->amplitude_div = 1; + priv->quanta_cnt = 1; + if (arg->opts_size) { + unsigned long *args = (unsigned long *)arg->opts; + priv->amplitude_div = args[0]; + if (arg->opts_size == 2 * sizeof(unsigned long)) + priv->quanta_cnt = (args[1] > 7 || args[1] == 0) ? + 1 : args[1]; + } + + /* create and register the subdevices */ + for (i = 0; i < ARRAY_SIZE(sds) ; i++) { + + subd = a4l_alloc_subd(sds[i].private_len, sds[i].init); + if (subd == NULL) + return -ENOMEM; + + ret = a4l_add_subd(dev, subd); + if (ret != sds[i].index) + return (ret < 0) ? 
+				ret : -EINVAL;
+
+		sds[i].subd = subd;
+
+		a4l_dbg(1, drv_dbg, dev, " %s subdev registered \n", sds[i].name);
+	}
+
+	/* initialize specifics */
+	r = (void *) sds[AI_SUBD].subd->priv;
+	r->amplitude_div = priv->amplitude_div;
+	r->quanta_cnt = priv->quanta_cnt;
+
+	/* AO and AI2 share the same private buffer */
+	tmp = (unsigned long) sds[AO_SUBD].subd->priv;
+	memcpy(sds[AI2_SUBD].subd->priv, &tmp, sds[AI2_SUBD].private_len);
+
+	/* create the task */
+	ret = rtdm_task_init(&priv->task, "Fake AI task", task_proc, dev,
+			     RTDM_TASK_HIGHEST_PRIORITY, 0);
+	if (ret)
+		a4l_dbg(1, drv_dbg, dev, "Error creating A4L task \n");
+
+	a4l_dbg(1, drv_dbg, dev, "attach procedure completed: "
+		"adiv = %lu, qcount = %lu \n",
+		priv->amplitude_div, priv->quanta_cnt);
+
+	return ret;
+}
+
+int test_detach(struct a4l_device *dev)
+{
+	struct fake_priv *priv = (struct fake_priv *)dev->priv;
+
+	rtdm_task_destroy(&priv->task);
+	a4l_dbg(1, drv_dbg, dev, "detach procedure complete\n");
+
+	return 0;
+}
+
+/* --- Module stuff --- */
+
+static struct a4l_driver test_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_fake",
+	.driver_name = "fake",
+	.attach = test_attach,
+	.detach = test_detach,
+	.privdata_size = sizeof(struct fake_priv),
+};
+
+static int __init a4l_fake_init(void)
+{
+	return a4l_register_drv(&test_drv);
+}
+
+static void __exit a4l_fake_cleanup(void)
+{
+	a4l_unregister_drv(&test_drv);
+}
+
+MODULE_DESCRIPTION("Analogy fake driver");
+MODULE_LICENSE("GPL");
+
+module_init(a4l_fake_init);
+module_exit(a4l_fake_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c
new file mode 100644
index 0000000..aaef81d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c
@@ -0,0 +1,285 @@
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#define LOOP_TASK_PERIOD 1000000
+#define LOOP_NB_BITS 16
+
+#define LOOP_INPUT_SUBD 0
+#define LOOP_OUTPUT_SUBD 1
+
+/* Channels descriptor */
+static struct a4l_channels_desc loop_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, LOOP_NB_BITS},
+	},
+};
+
+/* Ranges tab */
+static struct a4l_rngtab loop_rngtab = {
+	.length = 2,
+	.rngs = {
+		RANGE_V(-5,5),
+ while (1) { + + int running; + + running = priv->loop_running; + + if (running) { + uint16_t value; + int ret=0; + + while (ret==0) { + + ret = a4l_buf_get(output_subd, + &value, sizeof(uint16_t)); + if (ret == 0) { + + a4l_info(dev, + "loop_task_proc: " + "data available\n"); + + a4l_buf_evt(output_subd, 0); + + ret = a4l_buf_put(input_subd, + &value, + sizeof(uint16_t)); + + if (ret==0) + a4l_buf_evt(input_subd, 0); + } + } + } + + rtdm_task_sleep(LOOP_TASK_PERIOD); + } +} + +/* --- Analogy Callbacks --- */ + +/* Command callback */ +int loop_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd) +{ + a4l_info(subd->dev, "loop_cmd: (subd=%d)\n", subd->idx); + + return 0; + +} + +/* Trigger callback */ +int loop_trigger(struct a4l_subdevice *subd, lsampl_t trignum) +{ + lpprv_t *priv = (lpprv_t *)subd->dev->priv; + + a4l_info(subd->dev, "loop_trigger: (subd=%d)\n", subd->idx); + + priv->loop_running = 1; + + return 0; +} + +/* Cancel callback */ +void loop_cancel(struct a4l_subdevice *subd) +{ + lpprv_t *priv = (lpprv_t *)subd->dev->priv; + + a4l_info(subd->dev, "loop_cancel: (subd=%d)\n", subd->idx); + + priv->loop_running = 0; +} + +/* Read instruction callback */ +int loop_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + lpprv_t *priv = (lpprv_t*)subd->dev->priv; + uint16_t *data = (uint16_t *)insn->data; + + /* Checks the buffer size */ + if (insn->data_size != sizeof(uint16_t)) + return -EINVAL; + + /* Sets the memorized value */ + data[0] = priv->loop_insn_value; + + return 0; +} + +/* Write instruction callback */ +int loop_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn) +{ + lpprv_t *priv = (lpprv_t*)subd->dev->priv; + uint16_t *data = (uint16_t *)insn->data; + + /* Checks the buffer size */ + if (insn->data_size != sizeof(uint16_t)) + return -EINVAL; + + /* Retrieves the value to memorize */ + priv->loop_insn_value = data[0]; + + return 0; +} + +void setup_input_subd(struct a4l_subdevice *subd) +{ + memset(subd, 0, sizeof(struct a4l_subdevice)); + + subd->flags |= A4L_SUBD_AI; + subd->flags |= A4L_SUBD_CMD; + subd->flags |= A4L_SUBD_MMAP; + subd->rng_desc = &loop_rngdesc; + subd->chan_desc = &loop_chandesc; + subd->do_cmd = loop_cmd; + subd->cancel = loop_cancel; + subd->cmd_mask = &loop_cmd_mask; + subd->insn_read = loop_insn_read; + subd->insn_write = loop_insn_write; +} + +void setup_output_subd(struct a4l_subdevice *subd) +{ + memset(subd, 0, sizeof(struct a4l_subdevice)); + + subd->flags = A4L_SUBD_AO; + subd->flags |= A4L_SUBD_CMD; + subd->flags |= A4L_SUBD_MMAP; + subd->rng_desc = &loop_rngdesc; + subd->chan_desc = &loop_chandesc; + subd->do_cmd = loop_cmd; + subd->cancel = loop_cancel; + subd->trigger = loop_trigger; + subd->cmd_mask = &loop_cmd_mask; + subd->insn_read = loop_insn_read; + subd->insn_write = loop_insn_write; +} + +/* Attach callback */ +int loop_attach(struct a4l_device *dev, + a4l_lnkdesc_t *arg) +{ + int ret = 0; + struct a4l_subdevice *subd; + lpprv_t *priv = (lpprv_t *)dev->priv; + + /* Add the fake input subdevice */ + subd = a4l_alloc_subd(0, setup_input_subd); + if (subd == NULL) + return -ENOMEM; + + ret = a4l_add_subd(dev, subd); + if (ret != LOOP_INPUT_SUBD) + /* Let Analogy free the lately allocated subdevice */ + return (ret < 0) ? 
+			ret : -EINVAL;
+
+	/* Add the fake output subdevice */
+	subd = a4l_alloc_subd(0, setup_output_subd);
+	if (subd == NULL)
+		/* Let Analogy free the lately allocated subdevice */
+		return -ENOMEM;
+
+	ret = a4l_add_subd(dev, subd);
+	if (ret != LOOP_OUTPUT_SUBD)
+		/* Let Analogy free the lately allocated subdevices */
+		return (ret < 0) ? ret : -EINVAL;
+
+	priv->loop_running = 0;
+	priv->loop_insn_value = 0;
+
+	ret = rtdm_task_init(&priv->loop_task,
+			     "a4l_loop task",
+			     loop_task_proc,
+			     dev, RTDM_TASK_HIGHEST_PRIORITY, 0);
+
+	return ret;
+}
+
+/* Detach callback */
+int loop_detach(struct a4l_device *dev)
+{
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	rtdm_task_destroy(&priv->loop_task);
+
+	return 0;
+}
+
+/* --- Module part --- */
+
+static struct a4l_driver loop_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_loop",
+	.attach = loop_attach,
+	.detach = loop_detach,
+	.privdata_size = sizeof(lpprv_t),
+};
+
+static int __init a4l_loop_init(void)
+{
+	return a4l_register_drv(&loop_drv);
+}
+
+static void __exit a4l_loop_cleanup(void)
+{
+	a4l_unregister_drv(&loop_drv);
+}
+
+MODULE_DESCRIPTION("Analogy loop driver");
+MODULE_LICENSE("GPL");
+
+module_init(a4l_loop_init);
+module_exit(a4l_loop_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c
new file mode 100644
index 0000000..bf19c8c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c
@@ -0,0 +1,259 @@
+/*
+ * Analogy for Linux, transfer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+/* --- Initialization / cleanup / cancel functions --- */
+
+int a4l_precleanup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev;
+	struct a4l_transfer *tsf;
+	int i, err = 0;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	if (tsf == NULL) {
+		__a4l_err("a4l_precleanup_transfer: "
+			  "incoherent status, transfer block not reachable\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < tsf->nb_subd; i++) {
+		unsigned long *status = &tsf->subds[i]->status;
+
+		__a4l_dbg(1, core_dbg, "subd[%d]->status=0x%08lx\n", i, *status);
+
+		if (test_and_set_bit(A4L_SUBD_BUSY, status)) {
+			__a4l_err("a4l_precleanup_transfer: "
+				  "device busy, acquisition occurring\n");
+			err = -EBUSY;
+			goto out_error;
+		} else
+			set_bit(A4L_SUBD_CLEAN, status);
+	}
+
+	return 0;
+
+out_error:
+	for (i = 0; i < tsf->nb_subd; i++) {
+		unsigned long *status = &tsf->subds[i]->status;
+
+		if (test_bit(A4L_SUBD_CLEAN, status)) {
+			clear_bit(A4L_SUBD_BUSY, status);
+			clear_bit(A4L_SUBD_CLEAN, status);
+		}
+	}
+
+	return err;
+}
+
+int a4l_cleanup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev;
+	struct a4l_transfer *tsf;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Releases the pointers table, if need be */
+	if (tsf->subds != NULL) {
+		rtdm_free(tsf->subds);
+	}
+
+	memset(tsf, 0, sizeof(struct a4l_transfer));
+
+	return 0;
+}
+
+void a4l_presetup_transfer(struct a4l_device_context *cxt)
+{
+	struct a4l_device *dev = NULL;
+	struct a4l_transfer *tsf;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Clear the structure */
+	memset(tsf, 0, sizeof(struct a4l_transfer));
+
+	tsf->default_bufsize = A4L_BUF_DEFSIZE;
+
+	/* 0 is also considered a valid IRQ, so
+	   the IRQ number must be initialized with another value */
+	tsf->irq_desc.irq = A4L_IRQ_UNUSED;
+}
+
+int a4l_setup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = NULL;
+	struct a4l_transfer *tsf;
+	struct list_head *this;
+	int i = 0, ret = 0;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Recovers the subdevices count
+	   (as they are registered in a linked list) */
+	list_for_each(this, &dev->subdvsq) {
+		tsf->nb_subd++;
+	}
+
+	__a4l_dbg(1, core_dbg, "nb_subd=%d\n", tsf->nb_subd);
+
+	/* Allocates a suitable table for the subdevices */
+	tsf->subds = rtdm_malloc(tsf->nb_subd * sizeof(struct a4l_subdevice *));
+	if (tsf->subds == NULL) {
+		__a4l_err("a4l_setup_transfer: call1(alloc) failed \n");
+		ret = -ENOMEM;
+		goto out_setup_tsf;
+	}
+
+	/* Recovers the subdevices pointers */
+	list_for_each(this, &dev->subdvsq) {
+		tsf->subds[i++] = list_entry(this, struct a4l_subdevice, list);
+	}
+
+out_setup_tsf:
+
+	if (ret != 0)
+		a4l_cleanup_transfer(cxt);
+
+	return ret;
+}
+
+/* --- IRQ handling section --- */
+
+int a4l_request_irq(struct a4l_device * dev,
+		    unsigned int irq,
+		    a4l_irq_hdlr_t handler,
+		    unsigned long flags, void *cookie)
+{
+	int ret;
+
+	if (dev->transfer.irq_desc.irq != A4L_IRQ_UNUSED)
+		return -EBUSY;
+
+	ret = __a4l_request_irq(&dev->transfer.irq_desc, irq, handler, flags,
+				cookie);
+	if (ret != 0) {
+		__a4l_err("a4l_request_irq: IRQ registration failed\n");
+		dev->transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+	}
+
+	return ret;
+}
+
+int a4l_free_irq(struct a4l_device * dev, unsigned int irq)
+{
+
+	int ret = 0;
+
+	if (dev->transfer.irq_desc.irq != irq)
+		return -EINVAL;
+
+	/* There is less need to use a spinlock
+	   than 
for a4l_request_irq() */ + ret = __a4l_free_irq(&dev->transfer.irq_desc); + + if (ret == 0) + dev->transfer.irq_desc.irq = A4L_IRQ_UNUSED; + + return ret; +} + +unsigned int a4l_get_irq(struct a4l_device * dev) +{ + return dev->transfer.irq_desc.irq; +} + +/* --- Proc section --- */ + +#ifdef CONFIG_PROC_FS + +int a4l_rdproc_transfer(struct seq_file *seq, void *v) +{ + struct a4l_transfer *transfer = (struct a4l_transfer *) seq->private; + int i; + + if (v != SEQ_START_TOKEN) + return -EINVAL; + + seq_printf(seq, "-- Subdevices --\n\n"); + seq_printf(seq, "| idx | type\n"); + + /* Gives the subdevice type's name */ + for (i = 0; i < transfer->nb_subd; i++) { + char *type; + switch (transfer->subds[i]->flags & A4L_SUBD_TYPES) { + case A4L_SUBD_UNUSED: + type = "Unused subdevice"; + break; + case A4L_SUBD_AI: + type = "Analog input subdevice"; + break; + case A4L_SUBD_AO: + type = "Analog output subdevice"; + break; + case A4L_SUBD_DI: + type = "Digital input subdevice"; + break; + case A4L_SUBD_DO: + type = "Digital output subdevice"; + break; + case A4L_SUBD_DIO: + type = "Digital input/output subdevice"; + break; + case A4L_SUBD_COUNTER: + type = "Counter subdevice"; + break; + case A4L_SUBD_TIMER: + type = "Timer subdevice"; + break; + case A4L_SUBD_MEMORY: + type = "Memory subdevice"; + break; + case A4L_SUBD_CALIB: + type = "Calibration subdevice"; + break; + case A4L_SUBD_PROC: + type = "Processor subdevice"; + break; + case A4L_SUBD_SERIAL: + type = "Serial subdevice"; + break; + default: + type = "Unknown subdevice"; + } + + seq_printf(seq, "| %02d | %s\n", i, type); + } + + return 0; +} + +#endif /* CONFIG_PROC_FS */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig new file mode 100644 index 0000000..3241597 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig @@ -0,0 +1,3 @@ + +config XENO_DRIVERS_AUTOTUNE + tristate diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile new file mode 100644 index 0000000..12ba6cf --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile @@ -0,0 +1,4 @@ + +obj-$(CONFIG_XENO_DRIVERS_AUTOTUNE) += xeno_autotune.o + +xeno_autotune-y := autotune.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c new file mode 100644 index 0000000..d9208f2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c @@ -0,0 +1,820 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/atomic.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/sort.h> +#include <cobalt/kernel/arith.h> +#include <rtdm/driver.h> +#include <rtdm/autotune.h> + +MODULE_DESCRIPTION("Xenomai/cobalt core clock autotuner"); +MODULE_AUTHOR("Philippe Gerum <rpm@xenomai.org>"); +MODULE_LICENSE("GPL"); + +/* Auto-tuning services for the Cobalt core clock. */ + +#define SAMPLING_TIME 500000000UL +#define ADJUSTMENT_STEP 500 +#define WARMUP_STEPS 10 +#define AUTOTUNE_STEPS 40 + +#define progress(__tuner, __fmt, __args...) \ + do { \ + if (!(__tuner)->quiet) \ + printk(XENO_INFO "autotune(%s) " __fmt "\n", \ + (__tuner)->name, ##__args); \ + } while (0) + +struct tuning_score { + int pmean; + int stddev; + int minlat; + unsigned int step; + unsigned int gravity; +}; + +struct tuner_state { + xnticks_t ideal; + xnticks_t step; + int min_lat; + int max_lat; + int prev_mean; + long long prev_sqs; + long long cur_sqs; + unsigned int sum; + unsigned int cur_samples; + unsigned int max_samples; +}; + +struct gravity_tuner { + const char *name; + unsigned int (*get_gravity)(struct gravity_tuner *tuner); + void (*set_gravity)(struct gravity_tuner *tuner, unsigned int gravity); + unsigned int (*adjust_gravity)(struct gravity_tuner *tuner, int adjust); + int (*init_tuner)(struct gravity_tuner *tuner); + int (*start_tuner)(struct gravity_tuner *tuner, xnticks_t start_time, + xnticks_t interval); + void (*destroy_tuner)(struct gravity_tuner *tuner); + struct tuner_state state; + rtdm_event_t done; + int status; + int quiet; + struct tuning_score scores[AUTOTUNE_STEPS]; + int nscores; + atomic_t refcount; +}; + +struct irq_gravity_tuner { + rtdm_timer_t timer; + struct gravity_tuner tuner; +}; + +struct kthread_gravity_tuner { + rtdm_task_t task; + rtdm_event_t barrier; + xnticks_t start_time; + xnticks_t interval; + struct gravity_tuner tuner; +}; + +struct uthread_gravity_tuner { + rtdm_timer_t timer; + rtdm_event_t pulse; + struct gravity_tuner tuner; +}; + +struct autotune_context { + struct gravity_tuner *tuner; + struct autotune_setup setup; + rtdm_lock_t tuner_lock; +}; + +static inline void init_tuner(struct gravity_tuner *tuner) +{ + rtdm_event_init(&tuner->done, 0); + tuner->status = 0; + atomic_set(&tuner->refcount, 0); +} + +static inline void destroy_tuner(struct gravity_tuner *tuner) +{ + rtdm_event_destroy(&tuner->done); +} + +static inline void done_sampling(struct gravity_tuner *tuner, + int status) +{ + tuner->status = status; + rtdm_event_signal(&tuner->done); +} + +static int add_sample(struct gravity_tuner *tuner, xnticks_t timestamp) +{ + struct tuner_state *state; + int n, delta, cur_mean; + + state = &tuner->state; + + delta = (int)(timestamp - state->ideal); + if (delta < state->min_lat) + state->min_lat = delta; + if (delta > state->max_lat) + state->max_lat = delta; + if (delta < 0) + delta = 0; + + state->sum += delta; + state->ideal += state->step; + n = ++state->cur_samples; + + /* + * Knuth citing Welford in TAOCP (Vol 2), single-pass + * computation of variance using a recurrence relation. + */ + if (n == 1) + state->prev_mean = delta; + else { + cur_mean = state->prev_mean + (delta - state->prev_mean) / n; + state->cur_sqs = state->prev_sqs + (delta - state->prev_mean) + * (delta - cur_mean); + state->prev_mean = cur_mean; + state->prev_sqs = state->cur_sqs; + } + + if (n >= state->max_samples) { + done_sampling(tuner, 0); + return 1; /* Finished. */ + } + + return 0; /* Keep going. 
*/ +} + +static void timer_handler(rtdm_timer_t *timer) +{ + struct irq_gravity_tuner *irq_tuner; + xnticks_t now; + + irq_tuner = container_of(timer, struct irq_gravity_tuner, timer); + now = xnclock_read_raw(&nkclock); + + if (add_sample(&irq_tuner->tuner, now)) + rtdm_timer_stop_in_handler(timer); +} + +static int init_irq_tuner(struct gravity_tuner *tuner) +{ + struct irq_gravity_tuner *irq_tuner; + int ret; + + irq_tuner = container_of(tuner, struct irq_gravity_tuner, tuner); + ret = rtdm_timer_init(&irq_tuner->timer, timer_handler, "autotune"); + if (ret) + return ret; + + init_tuner(tuner); + + return 0; +} + +static void destroy_irq_tuner(struct gravity_tuner *tuner) +{ + struct irq_gravity_tuner *irq_tuner; + + irq_tuner = container_of(tuner, struct irq_gravity_tuner, tuner); + rtdm_timer_destroy(&irq_tuner->timer); + destroy_tuner(tuner); +} + +static unsigned int get_irq_gravity(struct gravity_tuner *tuner) +{ + return nkclock.gravity.irq; +} + +static void set_irq_gravity(struct gravity_tuner *tuner, unsigned int gravity) +{ + nkclock.gravity.irq = gravity; +} + +static unsigned int adjust_irq_gravity(struct gravity_tuner *tuner, int adjust) +{ + return nkclock.gravity.irq += adjust; +} + +static int start_irq_tuner(struct gravity_tuner *tuner, + xnticks_t start_time, xnticks_t interval) +{ + struct irq_gravity_tuner *irq_tuner; + + irq_tuner = container_of(tuner, struct irq_gravity_tuner, tuner); + + return rtdm_timer_start(&irq_tuner->timer, start_time, + interval, RTDM_TIMERMODE_ABSOLUTE); +} + +struct irq_gravity_tuner irq_tuner = { + .tuner = { + .name = "irqhand", + .init_tuner = init_irq_tuner, + .destroy_tuner = destroy_irq_tuner, + .get_gravity = get_irq_gravity, + .set_gravity = set_irq_gravity, + .adjust_gravity = adjust_irq_gravity, + .start_tuner = start_irq_tuner, + }, +}; + +void task_handler(void *arg) +{ + struct kthread_gravity_tuner *k_tuner = arg; + xnticks_t now; + int ret = 0; + + for (;;) { + if (rtdm_task_should_stop()) + break; + + ret = rtdm_event_wait(&k_tuner->barrier); + if (ret) + break; + + ret = rtdm_task_set_period(&k_tuner->task, k_tuner->start_time, + k_tuner->interval); + if (ret) + break; + + for (;;) { + ret = rtdm_task_wait_period(NULL); + if (ret && ret != -ETIMEDOUT) + goto out; + + now = xnclock_read_raw(&nkclock); + if (add_sample(&k_tuner->tuner, now)) { + rtdm_task_set_period(&k_tuner->task, 0, 0); + break; + } + } + } +out: + done_sampling(&k_tuner->tuner, ret); + rtdm_task_destroy(&k_tuner->task); +} + +static int init_kthread_tuner(struct gravity_tuner *tuner) +{ + struct kthread_gravity_tuner *k_tuner; + + init_tuner(tuner); + k_tuner = container_of(tuner, struct kthread_gravity_tuner, tuner); + rtdm_event_init(&k_tuner->barrier, 0); + + return rtdm_task_init(&k_tuner->task, "autotune", + task_handler, k_tuner, + RTDM_TASK_HIGHEST_PRIORITY, 0); +} + +static void destroy_kthread_tuner(struct gravity_tuner *tuner) +{ + struct kthread_gravity_tuner *k_tuner; + + k_tuner = container_of(tuner, struct kthread_gravity_tuner, tuner); + rtdm_task_destroy(&k_tuner->task); + rtdm_event_destroy(&k_tuner->barrier); +} + +static unsigned int get_kthread_gravity(struct gravity_tuner *tuner) +{ + return nkclock.gravity.kernel; +} + +static void set_kthread_gravity(struct gravity_tuner *tuner, unsigned int gravity) +{ + nkclock.gravity.kernel = gravity; +} + +static unsigned int adjust_kthread_gravity(struct gravity_tuner *tuner, int adjust) +{ + return nkclock.gravity.kernel += adjust; +} + +static int start_kthread_tuner(struct gravity_tuner 
*tuner, + xnticks_t start_time, xnticks_t interval) +{ + struct kthread_gravity_tuner *k_tuner; + + k_tuner = container_of(tuner, struct kthread_gravity_tuner, tuner); + + k_tuner->start_time = start_time; + k_tuner->interval = interval; + rtdm_event_signal(&k_tuner->barrier); + + return 0; +} + +struct kthread_gravity_tuner kthread_tuner = { + .tuner = { + .name = "kthread", + .init_tuner = init_kthread_tuner, + .destroy_tuner = destroy_kthread_tuner, + .get_gravity = get_kthread_gravity, + .set_gravity = set_kthread_gravity, + .adjust_gravity = adjust_kthread_gravity, + .start_tuner = start_kthread_tuner, + }, +}; + +static void pulse_handler(rtdm_timer_t *timer) +{ + struct uthread_gravity_tuner *u_tuner; + + u_tuner = container_of(timer, struct uthread_gravity_tuner, timer); + rtdm_event_signal(&u_tuner->pulse); +} + +static int init_uthread_tuner(struct gravity_tuner *tuner) +{ + struct uthread_gravity_tuner *u_tuner; + int ret; + + u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner); + ret = rtdm_timer_init(&u_tuner->timer, pulse_handler, "autotune"); + if (ret) + return ret; + + xntimer_set_gravity(&u_tuner->timer, XNTIMER_UGRAVITY); /* gasp... */ + rtdm_event_init(&u_tuner->pulse, 0); + init_tuner(tuner); + + return 0; +} + +static void destroy_uthread_tuner(struct gravity_tuner *tuner) +{ + struct uthread_gravity_tuner *u_tuner; + + u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner); + rtdm_timer_destroy(&u_tuner->timer); + rtdm_event_destroy(&u_tuner->pulse); +} + +static unsigned int get_uthread_gravity(struct gravity_tuner *tuner) +{ + return nkclock.gravity.user; +} + +static void set_uthread_gravity(struct gravity_tuner *tuner, unsigned int gravity) +{ + nkclock.gravity.user = gravity; +} + +static unsigned int adjust_uthread_gravity(struct gravity_tuner *tuner, int adjust) +{ + return nkclock.gravity.user += adjust; +} + +static int start_uthread_tuner(struct gravity_tuner *tuner, + xnticks_t start_time, xnticks_t interval) +{ + struct uthread_gravity_tuner *u_tuner; + + u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner); + + return rtdm_timer_start(&u_tuner->timer, start_time, + interval, RTDM_TIMERMODE_ABSOLUTE); +} + +static int add_uthread_sample(struct gravity_tuner *tuner, + nanosecs_abs_t user_timestamp) +{ + struct uthread_gravity_tuner *u_tuner; + int ret; + + u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner); + + if (user_timestamp && + add_sample(tuner, xnclock_ns_to_ticks(&nkclock, user_timestamp))) { + rtdm_timer_stop(&u_tuner->timer); + /* Tell the caller to park until next round. */ + ret = -EPIPE; + } else + ret = rtdm_event_wait(&u_tuner->pulse); + + return ret; +} + +struct uthread_gravity_tuner uthread_tuner = { + .tuner = { + .name = "uthread", + .init_tuner = init_uthread_tuner, + .destroy_tuner = destroy_uthread_tuner, + .get_gravity = get_uthread_gravity, + .set_gravity = set_uthread_gravity, + .adjust_gravity = adjust_uthread_gravity, + .start_tuner = start_uthread_tuner, + }, +}; + +static inline void build_score(struct gravity_tuner *tuner, int step) +{ + struct tuner_state *state = &tuner->state; + unsigned int variance, n; + + n = state->cur_samples; + tuner->scores[step].pmean = state->sum / n; + variance = n > 1 ? 
xnarch_llimd(state->cur_sqs, 1, n - 1) : 0; + tuner->scores[step].stddev = int_sqrt(variance); + tuner->scores[step].minlat = state->min_lat; + tuner->scores[step].gravity = tuner->get_gravity(tuner); + tuner->scores[step].step = step; + tuner->nscores++; +} + +static int cmp_score_mean(const void *c, const void *r) +{ + const struct tuning_score *sc = c, *sr = r; + return sc->pmean - sr->pmean; +} + +static int cmp_score_stddev(const void *c, const void *r) +{ + const struct tuning_score *sc = c, *sr = r; + return sc->stddev - sr->stddev; +} + +static int cmp_score_minlat(const void *c, const void *r) +{ + const struct tuning_score *sc = c, *sr = r; + return sc->minlat - sr->minlat; +} + +static int cmp_score_gravity(const void *c, const void *r) +{ + const struct tuning_score *sc = c, *sr = r; + return sc->gravity - sr->gravity; +} + +static int filter_mean(struct gravity_tuner *tuner) +{ + sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score), + cmp_score_mean, NULL); + + /* Top half of the best pondered means. */ + + return (tuner->nscores + 1) / 2; +} + +static int filter_stddev(struct gravity_tuner *tuner) +{ + sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score), + cmp_score_stddev, NULL); + + /* Top half of the best standard deviations. */ + + return (tuner->nscores + 1) / 2; +} + +static int filter_minlat(struct gravity_tuner *tuner) +{ + sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score), + cmp_score_minlat, NULL); + + /* Top half of the minimum latencies. */ + + return (tuner->nscores + 1) / 2; +} + +static int filter_gravity(struct gravity_tuner *tuner) +{ + sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score), + cmp_score_gravity, NULL); + + /* Smallest gravity required among the shortest latencies. */ + + return tuner->nscores; +} + +static void dump_scores(struct gravity_tuner *tuner) +{ + int n; + + if (tuner->quiet) + return; + + for (n = 0; n < tuner->nscores; n++) + printk(KERN_INFO + ".. S%.2d pmean=%Ld stddev=%Lu minlat=%Lu gravity=%Lu\n", + tuner->scores[n].step, + xnclock_ticks_to_ns(&nkclock, tuner->scores[n].pmean), + xnclock_ticks_to_ns(&nkclock, tuner->scores[n].stddev), + xnclock_ticks_to_ns(&nkclock, tuner->scores[n].minlat), + xnclock_ticks_to_ns(&nkclock, tuner->scores[n].gravity)); +} + +static inline void filter_score(struct gravity_tuner *tuner, + int (*filter)(struct gravity_tuner *tuner)) +{ + tuner->nscores = filter(tuner); + dump_scores(tuner); +} + +static int tune_gravity(struct gravity_tuner *tuner, int period) +{ + struct tuner_state *state = &tuner->state; + int ret, step, gravity_limit, adjust; + unsigned int orig_gravity; + + state->step = xnclock_ns_to_ticks(&nkclock, period); + state->max_samples = SAMPLING_TIME / (period ?: 1); + orig_gravity = tuner->get_gravity(tuner); + tuner->set_gravity(tuner, 0); + tuner->nscores = 0; + /* Gravity adjustment step */ + adjust = xnclock_ns_to_ticks(&nkclock, ADJUSTMENT_STEP) ?: 1; + gravity_limit = 0; + progress(tuner, "warming up..."); + + for (step = 0; step < WARMUP_STEPS + AUTOTUNE_STEPS; step++) { + state->ideal = xnclock_read_raw(&nkclock) + state->step * WARMUP_STEPS; + state->min_lat = xnclock_ns_to_ticks(&nkclock, SAMPLING_TIME); + state->max_lat = 0; + state->prev_mean = 0; + state->prev_sqs = 0; + state->cur_sqs = 0; + state->sum = 0; + state->cur_samples = 0; + + ret = tuner->start_tuner(tuner, + xnclock_ticks_to_ns(&nkclock, state->ideal), + period); + if (ret) + goto fail; + + /* Tuner stops when posting. 
*/ + ret = rtdm_event_wait(&tuner->done); + if (ret) + goto fail; + + ret = tuner->status; + if (ret) + goto fail; + + if (step < WARMUP_STEPS) { + if (state->min_lat > gravity_limit) { + gravity_limit = state->min_lat; + progress(tuner, "gravity limit set to %Lu ns (%d)", + xnclock_ticks_to_ns(&nkclock, gravity_limit), state->min_lat); + } + continue; + } + + /* + * We should not be early by more than the gravity + * value minus one tick, to account for the rounding + * error involved when the timer frequency is lower + * than 1e9 / ADJUSTMENT_STEP. + */ + if (state->min_lat < 0) { + if (tuner->get_gravity(tuner) < -state->min_lat - 1) { + printk(XENO_WARNING + "autotune(%s) failed with early shot (%Ld ns)\n", + tuner->name, + xnclock_ticks_to_ns(&nkclock, + -(tuner->get_gravity(tuner) + + state->min_lat))); + ret = -EAGAIN; + goto fail; + } + break; + } + + if (((step - WARMUP_STEPS) % 5) == 0) + progress(tuner, "calibrating... (slice %d)", + (step - WARMUP_STEPS) / 5 + 1); + + build_score(tuner, step - WARMUP_STEPS); + + /* + * Anticipating by more than the minimum latency + * detected at warmup would make no sense: cap the + * gravity we may try. + */ + if (tuner->adjust_gravity(tuner, adjust) > gravity_limit) { + progress(tuner, "beyond gravity limit at %Lu ns", + xnclock_ticks_to_ns(&nkclock, + tuner->get_gravity(tuner))); + break; + } + } + + progress(tuner, "calibration scores"); + dump_scores(tuner); + progress(tuner, "pondered mean filter"); + filter_score(tuner, filter_mean); + progress(tuner, "standard deviation filter"); + filter_score(tuner, filter_stddev); + progress(tuner, "minimum latency filter"); + filter_score(tuner, filter_minlat); + progress(tuner, "gravity filter"); + filter_score(tuner, filter_gravity); + tuner->set_gravity(tuner, tuner->scores[0].gravity); + + return 0; +fail: + tuner->set_gravity(tuner, orig_gravity); + + return ret; +} + +static int autotune_ioctl_nrt(struct rtdm_fd *fd, unsigned int request, void *arg) +{ + struct autotune_context *context; + struct autotune_setup setup; + struct gravity_tuner *tuner, *old_tuner; + rtdm_lockctx_t lock_ctx; + int ret; + + switch (request) { + case AUTOTUNE_RTIOC_RESET: + xnclock_reset_gravity(&nkclock); + return 0; + case AUTOTUNE_RTIOC_IRQ: + tuner = &irq_tuner.tuner; + break; + case AUTOTUNE_RTIOC_KERN: + tuner = &kthread_tuner.tuner; + break; + case AUTOTUNE_RTIOC_USER: + tuner = &uthread_tuner.tuner; + break; + default: + return -ENOSYS; + } + + ret = rtdm_copy_from_user(fd, &setup, arg, sizeof(setup)); + if (ret) + return ret; + + ret = tuner->init_tuner(tuner); + if (ret) + return ret; + + context = rtdm_fd_to_private(fd); + + rtdm_lock_get_irqsave(&context->tuner_lock, lock_ctx); + + old_tuner = context->tuner; + if (old_tuner && atomic_read(&old_tuner->refcount) > 0) { + rtdm_lock_put_irqrestore(&context->tuner_lock, lock_ctx); + tuner->destroy_tuner(tuner); + return -EBUSY; + } + + context->tuner = tuner; + context->setup = setup; + + rtdm_lock_put_irqrestore(&context->tuner_lock, lock_ctx); + + if (old_tuner) + old_tuner->destroy_tuner(old_tuner); + + if (setup.quiet <= 1) + printk(XENO_INFO "autotune(%s) started\n", tuner->name); + + return ret; +} + +static int autotune_ioctl_rt(struct rtdm_fd *fd, unsigned int request, void *arg) +{ + struct autotune_context *context; + struct gravity_tuner *tuner; + rtdm_lockctx_t lock_ctx; + __u64 timestamp; + __u32 gravity; + int ret; + + context = rtdm_fd_to_private(fd); + + rtdm_lock_get_irqsave(&context->tuner_lock, lock_ctx); + + tuner = context->tuner; + if 
(tuner)
+		atomic_inc(&tuner->refcount);
+
+	rtdm_lock_put_irqrestore(&context->tuner_lock, lock_ctx);
+
+	if (tuner == NULL)
+		return -ENOSYS;
+
+	switch (request) {
+	case AUTOTUNE_RTIOC_RUN:
+		tuner->quiet = context->setup.quiet;
+		ret = tune_gravity(tuner, context->setup.period);
+		if (ret)
+			break;
+		gravity = xnclock_ticks_to_ns(&nkclock,
+					      tuner->get_gravity(tuner));
+		ret = rtdm_safe_copy_to_user(fd, arg, &gravity,
+					     sizeof(gravity));
+		break;
+	case AUTOTUNE_RTIOC_PULSE:
+		if (tuner != &uthread_tuner.tuner) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = rtdm_safe_copy_from_user(fd, &timestamp, arg,
+					       sizeof(timestamp));
+		if (ret)
+			break;
+		ret = add_uthread_sample(tuner, timestamp);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	atomic_dec(&tuner->refcount);
+
+	return ret;
+}
+
+static int autotune_open(struct rtdm_fd *fd, int oflags)
+{
+	struct autotune_context *context;
+
+	context = rtdm_fd_to_private(fd);
+	context->tuner = NULL;
+	rtdm_lock_init(&context->tuner_lock);
+
+	return 0;
+}
+
+static void autotune_close(struct rtdm_fd *fd)
+{
+	struct autotune_context *context;
+	struct gravity_tuner *tuner;
+
+	context = rtdm_fd_to_private(fd);
+	tuner = context->tuner;
+	if (tuner) {
+		if (context->setup.quiet <= 1)
+			printk(XENO_INFO "autotune finished [%Lui/%Luk/%Luu]\n",
+			       xnclock_ticks_to_ns(&nkclock,
+						   xnclock_get_gravity(&nkclock, irq)),
+			       xnclock_ticks_to_ns(&nkclock,
+						   xnclock_get_gravity(&nkclock, kernel)),
+			       xnclock_ticks_to_ns(&nkclock,
+						   xnclock_get_gravity(&nkclock, user)));
+		tuner->destroy_tuner(tuner);
+	}
+}
+
+static struct rtdm_driver autotune_driver = {
+	.profile_info = RTDM_PROFILE_INFO(autotune,
+					  RTDM_CLASS_AUTOTUNE,
+					  RTDM_SUBCLASS_AUTOTUNE,
+					  0),
+	.device_flags = RTDM_NAMED_DEVICE|RTDM_EXCLUSIVE,
+	.device_count = 1,
+	.context_size = sizeof(struct autotune_context),
+	.ops = {
+		.open = autotune_open,
+		.ioctl_rt = autotune_ioctl_rt,
+		.ioctl_nrt = autotune_ioctl_nrt,
+		.close = autotune_close,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &autotune_driver,
+	.label = "autotune",
+};
+
+static int __init autotune_init(void)
+{
+	return rtdm_dev_register(&device);
+}
+
+static void __exit autotune_exit(void)
+{
+	rtdm_dev_unregister(&device);
+}
+
+module_init(autotune_init);
+module_exit(autotune_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/CREDITS b/kernel/xenomai-v3.2.4/kernel/drivers/can/CREDITS
new file mode 100644
index 0000000..88c60ce
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/CREDITS
@@ -0,0 +1,37 @@
+The RT-Socket-CAN project is based on the SJA1000 socket-based CAN
+driver for RTDM by Sebastian Smolorz [1]. Other parts are from the RTnet
+project [2], especially the device interface, the RTDM serial device
+driver and profile of Xenomai [3] and from other Open Source CAN driver
+projects like PCAN [4], the linux-can.patch [5] and Socket-CAN [6].
+
+
+RT-Socket-CAN development team:
+
+Wolfgang Grandegger <wg@grandegger.com>
+Jan Kiszka <kiszka@rts.uni-hannover.de>
+Sebastian Smolorz <sebastian.smolorz@stud.uni-hannover.de>
+
+
+[1] http://www.rts.uni-hannover.de/rtaddon/RTDM_CAN_Device_Profile_Doc/index.html
+[2] http://www.rtnet.org
+[3] http://www.xenomai.org
+[4] http://www.peak-system.com/linux/
+[5] http://marc.theaimsgroup.com/?t=111088094000003&r=1&w=2
+[6] http://developer.berlios.de/projects/socketcan/
+
+This file is an attempt to give proper credit to the people who have
+contributed to this project so far. List entries are sorted by name
+and provide the usual tags for automated processing.
+
+N: Wolfgang Grandegger
+E: wg@grandegger.com
+D: Core development.
+
+N: Jan Kiszka
+E: kiszka@rts.uni-hannover.de
+D: Core development.
+
+N: Sebastian Smolorz
+E: sebastian.smolorz@stud.uni-hannover.de
+D: Core development.
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig
new file mode 100644
index 0000000..1c05549
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig
@@ -0,0 +1,92 @@
+menu "CAN drivers"
+
+config XENO_DRIVERS_CAN
+	tristate "RT-Socket-CAN, CAN raw socket interface"
+	help
+	RT-Socket-CAN is a real-time socket interface for CAN controllers.
+
+config XENO_DRIVERS_CAN_DEBUG
+	depends on XENO_DRIVERS_CAN && PROC_FS
+	bool "Enable debug output"
+	default y
+	help
+
+	This option activates debugging checks and enhanced output for the
+	RT-Socket-CAN driver. It also allows listing the hardware registers
+	of the registered CAN controllers. It is a recommended option for
+	getting started and analysing potential problems. For production
+	purposes, it should be switched off (for the sake of latency).
+
+config XENO_DRIVERS_CAN_LOOPBACK
+	depends on XENO_DRIVERS_CAN
+	bool "Enable TX loopback to local sockets"
+	default n
+	help
+
+	This option adds support for TX loopback to local sockets. Normally,
+	messages sent to the CAN bus are not visible to sockets listening on
+	the same local device. When this option is enabled, TX messages are
+	looped back locally by default once the transmission has completed.
+	This behaviour can be deactivated or reactivated with "setsockopt".
+	Enable this option if you want a "net-alike" behaviour.
+
+config XENO_DRIVERS_CAN_RXBUF_SIZE
+	depends on XENO_DRIVERS_CAN
+	int "Size of receive ring buffers (must be 2^N)"
+	default 1024
+
+config XENO_DRIVERS_CAN_MAX_DEVICES
+	depends on XENO_DRIVERS_CAN
+	int "Maximum number of devices"
+	default 4
+
+config XENO_DRIVERS_CAN_MAX_RECEIVERS
+	depends on XENO_DRIVERS_CAN
+	int "Maximum number of receive filters per device"
+	default 16
+	help
+
+	The driver maintains a receive filter list per device for fast access.
+
+config XENO_DRIVERS_CAN_BUS_ERR
+	depends on XENO_DRIVERS_CAN
+	bool
+	default n
+	help
+
+	To avoid unnecessary bus error interrupt flooding, this option enables
+	bus error interrupts only while an application is calling a receive
+	function on a socket listening for bus errors. After one bus error
+	has occurred, the interrupt will be disabled to give the application
+	time for error processing. This option is automatically selected for
+	CAN controllers supporting bus error interrupts, like the SJA1000.
+
+config XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	depends on XENO_DRIVERS_CAN
+	bool "Old bit-time calculation algorithm (deprecated)"
+	default n
+	help
+
+	This option enables the old algorithm for calculating the CAN
+	bit-timing parameters, for backward compatibility.
+
+config XENO_DRIVERS_CAN_VIRT
+	depends on XENO_DRIVERS_CAN
+	tristate "Virtual CAN bus driver"
+	help
+
+	This driver provides two CAN ports that are virtually interconnected.
+	More ports can be enabled with the module parameter "devices".
+
+config XENO_DRIVERS_CAN_FLEXCAN
+	depends on XENO_DRIVERS_CAN && OF && !XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	tristate "Freescale FLEXCAN based chips"
+	help
+
+	Say Y here if you want support for Freescale FlexCAN controllers.
+ +source "drivers/xenomai/can/mscan/Kconfig" +source "drivers/xenomai/can/peak_canfd/Kconfig" +source "drivers/xenomai/can/sja1000/Kconfig" + +endmenu diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile new file mode 100644 index 0000000..f78f6af --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile @@ -0,0 +1,10 @@ + +ccflags-y += -I$(srctree)/drivers/xenomai/can + +obj-$(CONFIG_XENO_DRIVERS_CAN) += xeno_can.o mscan/ sja1000/ peak_canfd/ +obj-$(CONFIG_XENO_DRIVERS_CAN_FLEXCAN) += xeno_can_flexcan.o +obj-$(CONFIG_XENO_DRIVERS_CAN_VIRT) += xeno_can_virt.o + +xeno_can-y := rtcan_dev.o rtcan_socket.o rtcan_module.o rtcan_raw.o rtcan_raw_dev.o rtcan_raw_filter.o +xeno_can_virt-y := rtcan_virt.o +xeno_can_flexcan-y := rtcan_flexcan.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/README b/kernel/xenomai-v3.2.4/kernel/drivers/can/README new file mode 100644 index 0000000..cb0ef37 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/README @@ -0,0 +1,143 @@ +RT-Socket-CAN - RTDM driver for CAN devices +=========================================== + +RT-Socket-CAN is an Open Source hard real-time protocol stack for CAN +devices based on BSD sockets. This implementation is for RTDM, the +Real-Time-Driver-Model. Note that there is a similar variant being +developed for standard Linux using the Linux networking stack. + + +Status: +------ + +Currently drivers are available for the following CAN controllers and +devices: + + SJA1000 ISA devices + SJA1000 Memory-mapped devices + SJA1000 esd EPPC405 embedded controller and CPCI405 boards + SJA1000 PEAK PCI card + SJA1000 PEAK parallel port Dongle + SJA1000 IXXAT PCI card + MSCAN for MPC5200 boards + +Utilities for RT-Socket-CAN are available in "src/utils/can". + + +Installation: +------------ + +This example installation is for the DENX "linuxppc_2_4_devel" tree +(Linux 2.4.25) using the ELDK (see http://www.denx.de). It works in a +similar way for other kernels and distributions including Linux 2.6. + + +o Kernel space part: + + - Please install the Xenomai kernel space part as described in the + README.INSTALL. + + - Configure RT-Socket-CAN as kernel modules as required by your + hardware (and make sure that loadable module support is enabled): + + $ cd <linux-kernel-root> + $ export CROSS_COMPILE=ppc_82xx- + $ make menuconfig + ... Select "Loadable module support --->" + [*] Enable loadable module support + ... Exit + ... Select "Real-time sub-system --->" + "Real-time drivers --->" + "CAN bus controller --->" + [M] RT-Socket-CAN, CAN raw socket interface (NEW) + (1024) Size of receive ring buffers (must be 2^N) (NEW) + (4) Maximum number of devices (NEW) + (16) Maximum number of receive filters per device (NEW) + [M] MSCAN driver for MPC5200 (NEW) + [*] Enable CAN 1 (NEW) + [*] Enable CAN 2 (NEW) + (66000000) Clock Frequency in Hz (NEW) + (I2C1/TMR01) Pin Configuration + <M> Philips SJA1000 CAN controller (NEW) + <M> Standard ISA devices + (4) Maximum number of ISA devices (NEW) + <M> PEAK PCI cards + ... Exit and save + + Note: you can also statically link the MSCAN drivers into + the kernel. 
+ + + - Make the Linux kernel and RT-Socket-CAN modules and copy them to + the root filesystem: + + $ make dep + $ make uImage + $ cp -p arch/ppc/boot/images/uImage /tftpboot/icecube/uImage-rtcan + $ make modules + $ export DESTDIR=/opt/eldk/ppc_82xx + $ make modules_install INSTALL_MOD_PATH=$DESTDIR + $ find $DESTDIR/lib/modules/2.4.25/kernel/drivers/xenomai/rtcan + .../rtcan + .../rtcan/xeno_can.o + .../rtcan/mscan + .../rtcan/mscan/xeno_can_mscan.o + .../rtcan/sja1000/xeno_can_sja1000.o + .../rtcan/sja1000/xeno_can_peak_pci.o + .../rtcan/sja1000/xeno_can_isa.o + + - Loading the RT-Socket-CAN modules + + Now boot the Xenomai enabled kernel on your target system. + + In case RT-Socket-CAN is built as kernel modules, you need to load + them using modprobe or insmod, e.g. for this example build: + + # export MODDIR=/lib/modules/2.4.25/kernel/drivers/xenomai/rtcan + # insmod $MODDIR/xeno_can.o + # insmod $MODDIR/mscan/xeno_can_mscan.o + # insmod $MODDIR/sja1000/xeno_can_sja1000.o + # insmod $MODDIR/sja1000/xeno_can_peak_pci.o + + Note that various kernel module parameters can be passed with + insmod. Please use "modinfo" to list them or check the + corresponding source code files for further information. + + +o User space part: + + - User space CAN utilities for RT-Socket-CAN are available in + "src/utils/can". Please check the README in there for further + information. + + +Documentation: +------------- + +The RTDM CAN profile is documented at +http://www.xenomai.org/documentation/xenomai-3/html/xeno3prm/group__rtdm__can.html + +Feedback: +-------- + +Please report Xenomai related bugs and comments to the Xenomai mailing +list (xenomai@xenomai.org). + +Please report CAN related bugs and comments to the "Socketcan" mailing +list (Socketcan-users@lists.berlios.de) or directly to the main authors +Wolfgang Grandegger (wg@grandegger.com) or Sebastian Smolorz +(Sebastian.Smolorz@stud.uni-hannover.de). + + +Credits: +------- + +See CREDITS file in this directory. + + +License: +------- + +RT-Socket-CAN is free software, and you are welcome to redistribute it +under the terms of the GNU General Public License. This program comes +with ABSOLUTELY NO WARRANTY. See "COPYING" for details. diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig new file mode 100644 index 0000000..dfbf5af --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig @@ -0,0 +1,8 @@ +config XENO_DRIVERS_CAN_MSCAN + depends on XENO_DRIVERS_CAN && (PPC_MPC52xx || PPC_MPC512x) + tristate "MSCAN driver for MPC52xx and MPC512x" + default n + help + + This driver is for the MSCAN on the MPC5200 and MPC512x processor + from Freescale. 
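(Aside on the user-space part referred to in the README above: applications
talk to RT-Socket-CAN through plain BSD socket calls on the RTDM device. The
sketch below is illustrative only; it assumes a registered "rtcan0" device and
a Xenomai build in which socket/ioctl/bind/recv are wrapped by the real-time
library, and it omits all error handling.)

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <rtdm/can.h>

    static int open_rtcan(const char *name)
    {
            struct sockaddr_can addr;
            struct ifreq ifr;
            int s;

            s = socket(PF_CAN, SOCK_RAW, CAN_RAW);  /* RTDM CAN raw socket */
            strncpy(ifr.ifr_name, name, IFNAMSIZ);  /* e.g. "rtcan0" */
            ioctl(s, SIOCGIFINDEX, &ifr);           /* resolve device index */
            memset(&addr, 0, sizeof(addr));
            addr.can_family = AF_CAN;
            addr.can_ifindex = ifr.ifr_ifindex;
            bind(s, (struct sockaddr *)&addr, sizeof(addr));
            return s;
    }

    /* later: struct can_frame frame; recv(s, &frame, sizeof(frame), 0); */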
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile
new file mode 100644
index 0000000..0f157e9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile
@@ -0,0 +1,6 @@
+
+ccflags-y += -I$(srctree)/drivers/xenomai/can -I$(srctree)/drivers/xenomai/can/mscan
+
+obj-$(CONFIG_XENO_DRIVERS_CAN_MSCAN) += xeno_can_mscan.o
+
+xeno_can_mscan-y := rtcan_mscan.o rtcan_mscan_proc.o rtcan_mscan_mpc5xxx.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c
new file mode 100644
index 0000000..da573ab
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (C) 2006-2010 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ * <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Derived from the PCAN project file driver/src/pcan_mpc5200.c:
+ *
+ * Copyright (c) 2003 Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * Copyright (c) 2005 Felix Daners, Plugit AG, felix.daners@plugit.ch
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_internal.h"
+#include "rtcan_mscan_regs.h"
+#include "rtcan_mscan.h"
+
+#define MSCAN_SET_MODE_RETRIES	255
+
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+static struct can_bittiming_const mscan_bittiming_const = {
+	.name = "mscan",
+	.tseg1_min = 4,
+	.tseg1_max = 16,
+	.tseg2_min = 2,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+#endif
+
+/**
+ * Reception Interrupt handler
+ *
+ * Inline function first called within @ref rtcan_mscan_interrupt when an RX
+ * interrupt was detected. Here the HW registers are read out and composed
+ * into a struct rtcan_skb.
+ *
+ * @param[out] skb Pointer to an instance of struct rtcan_skb which will be
+ *                 filled with the received CAN message
+ * @param[in] dev Device ID
+ */
+static inline void rtcan_mscan_rx_interrupt(struct rtcan_device *dev,
+					    struct rtcan_skb *skb)
+{
+	int i;
+	unsigned char size;
+	struct rtcan_rb_frame *frame = &skb->rb_frame;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE;
+
+	frame->can_dlc = in_8(&regs->canrxfg.dlr) & 0x0F;
+
+	/* If DLC exceeds 8 bytes adjust it to 8 (for the payload size) */
+	size = (frame->can_dlc > 8) ? 8 : frame->can_dlc;
+
+	if (in_8(&regs->canrxfg.idr[1]) & MSCAN_BUF_EXTENDED) {
+		frame->can_id = ((in_8(&regs->canrxfg.idr[0]) << 21) |
+				 ((in_8(&regs->canrxfg.idr[1]) & 0xE0) << 13) |
+				 ((in_8(&regs->canrxfg.idr[1]) & 0x07) << 15) |
+				 (in_8(&regs->canrxfg.idr[4]) << 7) |
+				 (in_8(&regs->canrxfg.idr[5]) >> 1));
+
+		frame->can_id |= CAN_EFF_FLAG;
+
+		if ((in_8(&regs->canrxfg.idr[5]) & MSCAN_BUF_EXT_RTR)) {
+			frame->can_id |= CAN_RTR_FLAG;
+		} else {
+			for (i = 0; i < size; i++)
+				frame->data[i] =
+					in_8(&regs->canrxfg.dsr[i + (i / 2) * 2]);
+			skb->rb_frame_size += size;
+		}
+
+	} else {
+		frame->can_id = ((in_8(&regs->canrxfg.idr[0]) << 3) |
+				 (in_8(&regs->canrxfg.idr[1]) >> 5));
+
+		if ((in_8(&regs->canrxfg.idr[1]) & MSCAN_BUF_STD_RTR)) {
+			frame->can_id |= CAN_RTR_FLAG;
+		} else {
+			for (i = 0; i < size; i++)
+				frame->data[i] =
+					in_8(&regs->canrxfg.dsr[i + (i / 2) * 2]);
+			skb->rb_frame_size += size;
+		}
+	}
+
+	/* Store the interface index */
+	frame->can_ifindex = dev->ifindex;
+}
+
+static can_state_t mscan_stat_map[4] = {
+	CAN_STATE_ACTIVE,
+	CAN_STATE_BUS_WARNING,
+	CAN_STATE_BUS_PASSIVE,
+	CAN_STATE_BUS_OFF
+};
+
+static inline void rtcan_mscan_err_interrupt(struct rtcan_device *dev,
+					     struct rtcan_skb *skb,
+					     int r_status)
+{
+	u8 rstat, tstat;
+	struct rtcan_rb_frame *frame = &skb->rb_frame;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + CAN_ERR_DLC;
+
+	frame->can_id = CAN_ERR_FLAG;
+	frame->can_dlc = CAN_ERR_DLC;
+
+	memset(&frame->data[0], 0, frame->can_dlc);
+
+	if ((r_status & MSCAN_OVRIF)) {
+		frame->can_id |= CAN_ERR_CRTL;
+		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+	} else if ((r_status & (MSCAN_CSCIF))) {
+
+		/* RSTAT lives in bits 5:4, TSTAT in bits 3:2 of CANRFLG */
+		rstat = (r_status & (MSCAN_RSTAT0 |
+				     MSCAN_RSTAT1)) >> 4 & 0x3;
+		tstat = (r_status & (MSCAN_TSTAT0 |
+				     MSCAN_TSTAT1)) >> 2 & 0x3;
+		dev->state = mscan_stat_map[max(rstat, tstat)];
+
+		switch (dev->state) {
+		case CAN_STATE_BUS_OFF:
+			/* Bus-off condition */
+			frame->can_id |= CAN_ERR_BUSOFF;
+			dev->state = CAN_STATE_BUS_OFF;
+			/* Disable receiver interrupts */
+			out_8(&regs->canrier, 0);
+			/* Wake up waiting senders */
+			rtdm_sem_destroy(&dev->tx_sem);
+			break;
+
+		case CAN_STATE_BUS_PASSIVE:
+			frame->can_id |= CAN_ERR_CRTL;
+			if (tstat > rstat)
+				frame->data[1] = CAN_ERR_CRTL_TX_PASSIVE;
+			else
+				frame->data[1] = CAN_ERR_CRTL_RX_PASSIVE;
+			break;
+
+		case CAN_STATE_BUS_WARNING:
+			frame->can_id |= CAN_ERR_CRTL;
+			if (tstat > rstat)
+				frame->data[1] = CAN_ERR_CRTL_TX_WARNING;
+			else
+				frame->data[1] = CAN_ERR_CRTL_RX_WARNING;
+			break;
+
+		default:
+			break;
+
+		}
+	}
+	/* Store the interface index */
+	frame->can_ifindex = dev->ifindex;
+}
+
+/** Interrupt handler */
+static int rtcan_mscan_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtcan_skb skb;
+	struct rtcan_device *dev;
+	struct mscan_regs *regs;
+	u8 canrflg;
+	int recv_lock_free = 1;
+	int ret = RTDM_IRQ_NONE;
+
+	dev = (struct rtcan_device *)rtdm_irq_get_arg(irq_handle, void);
+	regs = (struct mscan_regs *)dev->base_addr;
+
+	rtdm_lock_get(&dev->device_lock);
+
+	canrflg = in_8(&regs->canrflg);
+
+	ret = RTDM_IRQ_HANDLED;
+
+	/* Transmit Interrupt? */
+	if ((in_8(&regs->cantier) & MSCAN_TXIE0) &&
+	    (in_8(&regs->cantflg) & MSCAN_TXE0)) {
+		out_8(&regs->cantier, 0);
+		/* Wake up a sender */
+		rtdm_sem_up(&dev->tx_sem);
+		dev->tx_count++;
+
+		if (rtcan_loopback_pending(dev)) {
+
+			if (recv_lock_free) {
+				recv_lock_free = 0;
+				rtdm_lock_get(&rtcan_recv_list_lock);
+				rtdm_lock_get(&rtcan_socket_lock);
+			}
+
+			rtcan_loopback(dev);
+		}
+	}
+
+	/* Wakeup interrupt?
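+	 * MSCAN_WUPIF is raised when bus activity wakes the controller
+	 * up from sleep mode; we only log the event here.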
+	 */
+	if ((canrflg & MSCAN_WUPIF)) {
+		rtdm_printk("WUPIF interrupt\n");
+	}
+
+	/* Receive Interrupt? */
+	if ((canrflg & MSCAN_RXF)) {
+
+		/* Read out HW registers */
+		rtcan_mscan_rx_interrupt(dev, &skb);
+
+		/* Take more locks. Ensure that they are taken and
+		 * released only once in the IRQ handler. */
+		/* WARNING: Nested locks are dangerous! But they are
+		 * nested only in this routine so a deadlock should
+		 * not be possible. */
+		if (recv_lock_free) {
+			recv_lock_free = 0;
+			rtdm_lock_get(&rtcan_recv_list_lock);
+			rtdm_lock_get(&rtcan_socket_lock);
+		}
+
+		/* Pass received frame out to the sockets */
+		rtcan_rcv(dev, &skb);
+	}
+
+	/* Error Interrupt? */
+	if ((canrflg & (MSCAN_CSCIF | MSCAN_OVRIF))) {
+		/* Check error condition and fill error frame */
+		rtcan_mscan_err_interrupt(dev, &skb, canrflg);
+
+		if (recv_lock_free) {
+			recv_lock_free = 0;
+			rtdm_lock_get(&rtcan_recv_list_lock);
+			rtdm_lock_get(&rtcan_socket_lock);
+		}
+
+		/* Pass error frame out to the sockets */
+		rtcan_rcv(dev, &skb);
+	}
+
+	/* Acknowledge the handled interrupt within the controller.
+	 * Only do so for the receiver interrupts.
+	 */
+	if (canrflg)
+		out_8(&regs->canrflg, canrflg);
+
+	if (!recv_lock_free) {
+		rtdm_lock_put(&rtcan_socket_lock);
+		rtdm_lock_put(&rtcan_recv_list_lock);
+	}
+	rtdm_lock_put(&dev->device_lock);
+
+	return ret;
+}
+
+/**
+ * Set controller into reset mode. Called from @ref rtcan_mscan_ioctl
+ * (main usage), init_module and cleanup_module.
+ *
+ * @param dev Device ID
+ * @param lock_ctx Pointer to the saved IRQ context (if stored before
+ *                 calling this function); may be NULL if the caller
+ *                 does not hold the device lock.
+ *
+ * @return 0 on success, otherwise:
+ * - -EAGAIN: Reset mode bit could not be verified after setting it.
+ *            See also note.
+ *
+ * @note According to the MSCAN specification, it is necessary to check
+ * the reset mode bit in PeliCAN mode after having set it. So we do. But
+ * when using an ISA card like the PHYTEC eNET card this should not be
+ * necessary because the CAN controller clock of this card (16 MHz) is
+ * twice as high as the ISA bus clock.
+ */
+static int rtcan_mscan_mode_stop(struct rtcan_device *dev,
+				 rtdm_lockctx_t *lock_ctx)
+{
+	int ret = 0;
+	int rinit = 0;
+	can_state_t state;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+	u8 reg;
+
+	state = dev->state;
+	/* If controller is not operating anyway, go out */
+	if (!CAN_STATE_OPERATING(state))
+		goto out;
+
+	/* Switch to sleep mode */
+	setbits8(&regs->canctl0, MSCAN_SLPRQ);
+	reg = in_8(&regs->canctl1);
+	while (!(reg & MSCAN_SLPAK) &&
+	       (rinit < MSCAN_SET_MODE_RETRIES)) {
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+		/* Busy sleep 1 microsecond */
+		rtdm_task_busy_sleep(1000);
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+		rinit++;
+		reg = in_8(&regs->canctl1);
+	}
+	/*
+	 * The mscan controller will fail to enter sleep mode while
+	 * there are irregular activities on the bus, e.g. when
+	 * somebody keeps retransmitting. This behavior is
+	 * undocumented and seems to differ between the mscan built
+	 * into the mpc5200b and the mpc5200. We proceed in that case,
+	 * since otherwise the slprq would be kept set and the
+	 * controller would get stuck. NOTE: INITRQ or CSWAI
+	 * will abort any transmit actions still active, at once.
+	 */
+	if (rinit >= MSCAN_SET_MODE_RETRIES)
+		rtdm_printk("rtcan_mscan: device failed to enter sleep mode. "
+			    "We proceed anyhow.\n");
+	else
+		dev->state = CAN_STATE_SLEEPING;
+
+	rinit = 0;
+	setbits8(&regs->canctl0, MSCAN_INITRQ);
+
+	reg = in_8(&regs->canctl1);
+	while (!(reg & MSCAN_INITAK) &&
+	       (rinit < MSCAN_SET_MODE_RETRIES)) {
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+		/* Busy sleep 1 microsecond */
+		rtdm_task_busy_sleep(1000);
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+		rinit++;
+		reg = in_8(&regs->canctl1);
+	}
+	if (rinit >= MSCAN_SET_MODE_RETRIES)
+		ret = -ENODEV;
+
+	/* Volatile state could have changed while we slept busy. */
+	dev->state = CAN_STATE_STOPPED;
+	/* Wake up waiting senders */
+	rtdm_sem_destroy(&dev->tx_sem);
+
+out:
+	return ret;
+}
+
+/**
+ * Set controller into operating mode.
+ *
+ * Called from @ref rtcan_mscan_ioctl in a spin-locked and IRQ-disabled
+ * context.
+ *
+ * @param dev Device ID
+ * @param lock_ctx Pointer to the saved IRQ context (only used when coming
+ *                 from @ref CAN_STATE_SLEEPING, see also note)
+ *
+ * @return 0 on success, otherwise:
+ * - -EINVAL: No baud rate set before the request to enter start mode
+ *
+ * @note If coming from @c CAN_STATE_SLEEPING, the controller must wait
+ * some time to avoid bus errors. Measured on a PHYTEC eNET card,
+ * this time was 110 microseconds.
+ */
+static int rtcan_mscan_mode_start(struct rtcan_device *dev,
+				  rtdm_lockctx_t *lock_ctx)
+{
+	int ret = 0, retries = 0;
+	can_state_t state;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	/* We won't forget that state in the device structure is volatile and
+	 * access to it will not be optimized by the compiler. So ...
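+	 * we take one snapshot into the local variable below and work
+	 * on that.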
+	 */
+	state = dev->state;
+
+	switch (state) {
+	case CAN_STATE_ACTIVE:
+	case CAN_STATE_BUS_WARNING:
+	case CAN_STATE_BUS_PASSIVE:
+		break;
+
+	case CAN_STATE_SLEEPING:
+	case CAN_STATE_STOPPED:
+		/* Set error active state */
+		state = CAN_STATE_ACTIVE;
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+
+		if ((dev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)) {
+			setbits8(&regs->canctl1, MSCAN_LISTEN);
+		} else {
+			clrbits8(&regs->canctl1, MSCAN_LISTEN);
+		}
+		if ((dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)) {
+			setbits8(&regs->canctl1, MSCAN_LOOPB);
+		} else {
+			clrbits8(&regs->canctl1, MSCAN_LOOPB);
+		}
+
+		/* Switch to normal mode */
+		clrbits8(&regs->canctl0, MSCAN_INITRQ);
+		clrbits8(&regs->canctl0, MSCAN_SLPRQ);
+		while ((in_8(&regs->canctl1) & MSCAN_INITAK) ||
+		       (in_8(&regs->canctl1) & MSCAN_SLPAK)) {
+			if (likely(lock_ctx != NULL))
+				rtdm_lock_put_irqrestore(&dev->device_lock,
+							 *lock_ctx);
+			/* Busy sleep 1 microsecond */
+			rtdm_task_busy_sleep(1000);
+			if (likely(lock_ctx != NULL))
+				rtdm_lock_get_irqsave(&dev->device_lock,
+						      *lock_ctx);
+			retries++;
+		}
+		/* Enable interrupts */
+		setbits8(&regs->canrier, MSCAN_RIER);
+
+		break;
+
+	case CAN_STATE_BUS_OFF:
+		/* Trigger bus-off recovery */
+		out_8(&regs->canrier, MSCAN_RIER);
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+		/* Set error active state */
+		state = CAN_STATE_ACTIVE;
+
+		break;
+
+	default:
+		/* Never reached, but we don't want nasty compiler warnings */
+		break;
+	}
+	/* Store new state in device structure (or old state) */
+	dev->state = state;
+
+	return ret;
+}
+
+static int rtcan_mscan_set_bit_time(struct rtcan_device *dev,
+				    struct can_bittime *bit_time,
+				    rtdm_lockctx_t *lock_ctx)
+{
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+	u8 btr0, btr1;
+
+	switch (bit_time->type) {
+	case CAN_BITTIME_BTR:
+		btr0 = bit_time->btr.btr0;
+		btr1 = bit_time->btr.btr1;
+		break;
+
+	case CAN_BITTIME_STD:
+		btr0 = (BTR0_SET_BRP(bit_time->std.brp) |
+			BTR0_SET_SJW(bit_time->std.sjw));
+		btr1 = (BTR1_SET_TSEG1(bit_time->std.prop_seg +
+				       bit_time->std.phase_seg1) |
+			BTR1_SET_TSEG2(bit_time->std.phase_seg2) |
+			BTR1_SET_SAM(bit_time->std.sam));
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	out_8(&regs->canbtr0, btr0);
+	out_8(&regs->canbtr1, btr1);
+
+	rtdm_printk("%s: btr0=0x%02x btr1=0x%02x\n", dev->name, btr0, btr1);
+
+	return 0;
+}
+
+static int rtcan_mscan_set_mode(struct rtcan_device *dev,
+				can_mode_t mode,
+				rtdm_lockctx_t *lock_ctx)
+{
+	int ret = 0, retries = 0;
+	can_state_t state;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	switch (mode) {
+
+	case CAN_MODE_STOP:
+		ret = rtcan_mscan_mode_stop(dev, lock_ctx);
+		break;
+
+	case CAN_MODE_START:
+		ret = rtcan_mscan_mode_start(dev, lock_ctx);
+		break;
+
+	case CAN_MODE_SLEEP:
+
+		state = dev->state;
+
+		/* Controller must operate, otherwise go out */
+		if (!CAN_STATE_OPERATING(state)) {
+			ret = -ENETDOWN;
+			goto mode_sleep_out;
+		}
+
+		/* Is controller sleeping yet? If yes, go out */
+		if (state == CAN_STATE_SLEEPING)
+			goto mode_sleep_out;
+
+		/* Remember into which state to return when we
+		 * wake up */
+		dev->state_before_sleep = state;
+		state = CAN_STATE_SLEEPING;
+
+		/* Let's take a nap. (Now I REALLY understand
+		 * the meaning of interrupts ...)
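+		 * Both interrupt enable registers are cleared below
+		 * before sleep mode is requested, so no CAN interrupt
+		 * can fire while the controller settles.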
+		 */
+		out_8(&regs->canrier, 0);
+		out_8(&regs->cantier, 0);
+		setbits8(&regs->canctl0,
+			 MSCAN_SLPRQ /*| MSCAN_INITRQ*/ | MSCAN_WUPE);
+		while (!(in_8(&regs->canctl1) & MSCAN_SLPAK)) {
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+			/* Busy sleep 1 microsecond */
+			rtdm_task_busy_sleep(1000);
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+			if (retries++ >= 1000)
+				break;
+		}
+		rtdm_printk("Fallen asleep after %d tries.\n", retries);
+		clrbits8(&regs->canctl0, MSCAN_INITRQ);
+		while ((in_8(&regs->canctl1) & MSCAN_INITAK)) {
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+			/* Busy sleep 1 microsecond */
+			rtdm_task_busy_sleep(1000);
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+			if (retries++ >= 1000)
+				break;
+		}
+		rtdm_printk("Back to normal after %d tries.\n", retries);
+		out_8(&regs->canrier, MSCAN_WUPIE);
+
+	mode_sleep_out:
+		dev->state = state;
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+/**
+ * Start a transmission to a MSCAN
+ *
+ * Inline function called within @ref rtcan_mscan_sendmsg.
+ * This is the completion of a send call when hardware access is granted.
+ * The spinlock is taken before calling this function.
+ *
+ * @param[in] frame Pointer to the CAN frame which is about to be sent
+ * @param[in] dev Device ID
+ */
+static int rtcan_mscan_start_xmit(struct rtcan_device *dev, can_frame_t *frame)
+{
+	int i, id;
+	/* "Real" size of the payload */
+	unsigned char size;
+	/* Content of frame information register */
+	unsigned char dlc;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	/* Is the TX buffer empty? */
+	if (!(in_8(&regs->cantflg) & MSCAN_TXE0)) {
+		rtdm_printk("rtcan_mscan_start_xmit: TX buffer not empty\n");
+		return -EIO;
+	}
+	/* Select the buffer we've found. */
+	out_8(&regs->cantbsel, MSCAN_TXE0);
+
+	/* Get DLC and ID */
+	dlc = frame->can_dlc;
+
+	/* If DLC exceeds 8 bytes adjust it to 8 (for the payload) */
+	size = (dlc > 8) ? 8 : dlc;
+
+	id = frame->can_id;
+	if (frame->can_id & CAN_EFF_FLAG) {
+		out_8(&regs->cantxfg.idr[0], (id & 0x1fe00000) >> 21);
+		out_8(&regs->cantxfg.idr[1], ((id & 0x001c0000) >> 13) |
+					     ((id & 0x00038000) >> 15) |
+					     0x18); /* set SRR and IDE bits */
+
+		out_8(&regs->cantxfg.idr[4], (id & 0x00007f80) >> 7);
+		out_8(&regs->cantxfg.idr[5], (id & 0x0000007f) << 1);
+
+		/* RTR? */
+		if (frame->can_id & CAN_RTR_FLAG)
+			setbits8(&regs->cantxfg.idr[5], 0x1);
+		else {
+			clrbits8(&regs->cantxfg.idr[5], 0x1);
+			/* No RTR, write data bytes */
+			for (i = 0; i < size; i++)
+				out_8(&regs->cantxfg.dsr[i + (i / 2) * 2],
+				      frame->data[i]);
+		}
+
+	} else {
+		/* Send standard frame */
+
+		out_8(&regs->cantxfg.idr[0], (id & 0x000007f8) >> 3);
+		out_8(&regs->cantxfg.idr[1], (id & 0x00000007) << 5);
+
+		/* RTR? */
+		if (frame->can_id & CAN_RTR_FLAG)
+			setbits8(&regs->cantxfg.idr[1], 0x10);
+		else {
+			clrbits8(&regs->cantxfg.idr[1], 0x10);
+			/* No RTR, write data bytes */
+			for (i = 0; i < size; i++)
+				out_8(&regs->cantxfg.dsr[i + (i / 2) * 2],
+				      frame->data[i]);
+		}
+	}
+
+	out_8(&regs->cantxfg.dlr, frame->can_dlc);
+	out_8(&regs->cantxfg.tbpr, 0); /* all messages have the same prio */
+
+	/* Trigger transmission. */
+	out_8(&regs->cantflg, MSCAN_TXE0);
+
+	/* Enable interrupt. */
+	setbits8(&regs->cantier, MSCAN_TXIE0);
+
+	return 0;
+}
+
+/**
+ * MSCAN Chip configuration
+ *
+ * Called during @ref init_module. Here, the configuration registers which
+ * must be set only once are written with the right values. The controller
+ * is left in reset mode and does not enter operating mode until the IOCTL
+ * for starting it is triggered.
+ *
+ * @param[in] dev Device ID of the controller to be configured
+ */
+static inline void __init mscan_chip_config(struct mscan_regs *regs,
+					    int mscan_clksrc)
+{
+	/* Choose IP bus as clock source.
+	 */
+	if (mscan_clksrc)
+		setbits8(&regs->canctl1, MSCAN_CLKSRC);
+	clrbits8(&regs->canctl1, MSCAN_LISTEN);
+
+	/* Configure MSCAN to accept all incoming messages.
+	 */
+	out_8(&regs->canidar0, 0x00);
+	out_8(&regs->canidar1, 0x00);
+	out_8(&regs->canidar2, 0x00);
+	out_8(&regs->canidar3, 0x00);
+	out_8(&regs->canidmr0, 0xFF);
+	out_8(&regs->canidmr1, 0xFF);
+	out_8(&regs->canidmr2, 0xFF);
+	out_8(&regs->canidmr3, 0xFF);
+	out_8(&regs->canidar4, 0x00);
+	out_8(&regs->canidar5, 0x00);
+	out_8(&regs->canidar6, 0x00);
+	out_8(&regs->canidar7, 0x00);
+	out_8(&regs->canidmr4, 0xFF);
+	out_8(&regs->canidmr5, 0xFF);
+	out_8(&regs->canidmr6, 0xFF);
+	out_8(&regs->canidmr7, 0xFF);
+	clrbits8(&regs->canidac, MSCAN_IDAM0 | MSCAN_IDAM1);
+}
+
+/**
+ * MSCAN Chip registration
+ *
+ * Called during @ref init_module.
+ *
+ * @param[in] dev Device ID of the controller to be registered
+ * @param[in] mscan_clksrc clock source to be used
+ */
+int rtcan_mscan_register(struct rtcan_device *dev, int irq, int mscan_clksrc)
+{
+	int ret;
+	struct mscan_regs *regs;
+
+	regs = (struct mscan_regs *)dev->base_addr;
+
+	/* Enable MSCAN module. */
+	setbits8(&regs->canctl1, MSCAN_CANE);
+	udelay(100);
+
+	/* Set dummy state for following call */
+	dev->state = CAN_STATE_ACTIVE;
+
+	/* Enter reset mode */
+	rtcan_mscan_mode_stop(dev, NULL);
+
+	/* Give the device an interface name (so that programs using this
+	   driver don't need to know the device ID) */
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	dev->hard_start_xmit = rtcan_mscan_start_xmit;
+	dev->do_set_mode = rtcan_mscan_set_mode;
+	dev->do_set_bit_time = rtcan_mscan_set_bit_time;
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	dev->bittiming_const = &mscan_bittiming_const;
+#endif
+
+	/* Register IRQ handler and pass device structure as arg */
+	ret = rtdm_irq_request(&dev->irq_handle, irq, rtcan_mscan_interrupt,
+			       0, RTCAN_DRV_NAME, (void *)dev);
+	if (ret) {
+		printk("ERROR! rtdm_irq_request for IRQ %d failed\n", irq);
+		goto out_can_disable;
+	}
+
+	mscan_chip_config(regs, mscan_clksrc);
+
+	/* Register RTDM device */
+	ret = rtcan_dev_register(dev);
+	if (ret) {
+		printk(KERN_ERR
+		       "ERROR while trying to register RTCAN device!\n");
+		goto out_irq_free;
+	}
+
+	rtcan_mscan_create_proc(dev);
+
+	return 0;
+
+out_irq_free:
+	rtdm_irq_free(&dev->irq_handle);
+
+out_can_disable:
+	/* Disable MSCAN module. */
+	clrbits8(&regs->canctl1, MSCAN_CANE);
+
+	return ret;
+}
+
+/**
+ * MSCAN Chip deregistration
+ *
+ * Called during @ref cleanup_module
+ *
+ * @param[in] dev Device ID of the controller to be unregistered
+ */
+int rtcan_mscan_unregister(struct rtcan_device *dev)
+{
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	printk("Unregistering %s device %s\n", RTCAN_DRV_NAME, dev->name);
+
+	rtcan_mscan_mode_stop(dev, NULL);
+	rtdm_irq_free(&dev->irq_handle);
+	rtcan_mscan_remove_proc(dev);
+	rtcan_dev_unregister(dev);
+
+	/* Disable MSCAN module.
+	 */
+	clrbits8(&regs->canctl1, MSCAN_CANE);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h
new file mode 100644
index 0000000..654a0f5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2009 Wolfgang Grandegger <wg@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_MSCAN_H_
+#define __RTCAN_MSCAN_H_
+
+#define RTCAN_DEV_NAME		"rtcan%d"
+#define RTCAN_DRV_NAME		"rtcan_mscan"
+
+/* MSCAN type variants */
+enum {
+	MSCAN_TYPE_MPC5200,
+	MSCAN_TYPE_MPC5121
+};
+
+extern int rtcan_mscan_register(struct rtcan_device *dev, int irq,
+				int mscan_clksrc);
+extern int rtcan_mscan_unregister(struct rtcan_device *dev);
+
+extern int rtcan_mscan_create_proc(struct rtcan_device *dev);
+extern void rtcan_mscan_remove_proc(struct rtcan_device *dev);
+
+#endif	/* __RTCAN_MSCAN_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c
new file mode 100644
index 0000000..de08d94
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c
@@ -0,0 +1,392 @@
+/*
+ * CAN bus driver for the Freescale MPC5xxx embedded CPU.
+ *
+ * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
+ *                         Varma Electronics Oy
+ * Copyright (C) 2008-2010 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/mpc52xx.h>
+
+#include "rtcan_dev.h"
+#include "rtcan_mscan_regs.h"
+#include "rtcan_mscan.h"
+
+#define of_device platform_device
+#define of_platform_driver platform_driver
+#define of_register_platform_driver platform_driver_register
+#define of_unregister_platform_driver platform_driver_unregister
+
+static char mscan_ctrl_name_mpc5200[] = "MSCAN-MPC5200";
+static char mscan_ctrl_name_mpc512x[] = "MSCAN-MPC512x";
+static char mscan_board_name[] = "unknown";
+
+struct mpc5xxx_can_data {
+	unsigned int type;
+	u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+			 int *mscan_clksrc);
+};
+
+#ifdef CONFIG_PPC_MPC52xx
+static struct of_device_id mpc52xx_cdm_ids[] = {
+	{ .compatible = "fsl,mpc5200-cdm", },
+	{}
+};
+
+static u32 mpc52xx_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	unsigned int pvr;
+	struct mpc52xx_cdm __iomem *cdm;
+	struct device_node *np_cdm;
+	unsigned int freq;
+	u32 val;
+
+	pvr = mfspr(SPRN_PVR);
+
+	/*
+	 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
+	 * (IP_CLK) can be selected as MSCAN clock source. According to
+	 * the MPC5200 user's manual, the oscillator clock is the better
+	 * choice as it has less jitter. For this reason, it is selected
+	 * by default. Unfortunately, it can not be selected for the old
+	 * MPC5200 Rev. A chips due to a hardware bug (check errata).
+	 */
+	if (clock_name && strcmp(clock_name, "ip") == 0)
+		*mscan_clksrc = MSCAN_CLKSRC_BUS;
+	else
+		*mscan_clksrc = MSCAN_CLKSRC_XTAL;
+
+	freq = mpc5xxx_get_bus_frequency(mpc5xxx_get_of_node(ofdev));
+	if (!freq)
+		return 0;
+
+	if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
+		return freq;
+
+	/* Determine SYS_XTAL_IN frequency from the clock domain settings */
+	np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
+	if (!np_cdm) {
+		dev_err(&ofdev->dev, "can't get clock node!\n");
+		return 0;
+	}
+	cdm = of_iomap(np_cdm, 0);
+
+	if (in_8(&cdm->ipb_clk_sel) & 0x1)
+		freq *= 2;
+	val = in_be32(&cdm->rstcfg);
+
+	freq *= (val & (1 << 5)) ? 8 : 4;
+	freq /= (val & (1 << 6)) ? 12 : 16;
+
+	of_node_put(np_cdm);
+	iounmap(cdm);
+
+	return freq;
+}
+#else /* !CONFIG_PPC_MPC52xx */
+static u32 mpc52xx_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	return 0;
+}
+#endif /* CONFIG_PPC_MPC52xx */
+
+#ifdef CONFIG_PPC_MPC512x
+struct mpc512x_clockctl {
+	u32 spmr;	/* System PLL Mode Reg */
+	u32 sccr[2];	/* System Clk Ctrl Reg 1 & 2 */
+	u32 scfr1;	/* System Clk Freq Reg 1 */
+	u32 scfr2;	/* System Clk Freq Reg 2 */
+	u32 reserved;
+	u32 bcr;	/* Bread Crumb Reg */
+	u32 pccr[12];	/* PSC Clk Ctrl Reg 0-11 */
+	u32 spccr;	/* SPDIF Clk Ctrl Reg */
+	u32 cccr;	/* CFM Clk Ctrl Reg */
+	u32 dccr;	/* DIU Clk Cnfg Reg */
+	u32 mccr[4];	/* MSCAN Clk Ctrl Reg 1-3 */
+};
+
+static struct of_device_id mpc512x_clock_ids[] = {
+	{ .compatible = "fsl,mpc5121-clock", },
+	{}
+};
+
+static u32 mpc512x_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	struct mpc512x_clockctl __iomem *clockctl;
+	struct device_node *np_clock;
+	struct clk *sys_clk, *ref_clk;
+	int plen, clockidx, clocksrc = -1;
+	u32 sys_freq, val, clockdiv = 1, freq = 0;
+	const u32 *pval;
+
+	np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
+	if (!np_clock) {
+		dev_err(&ofdev->dev, "couldn't find clock node\n");
+		return -ENODEV;
+	}
+	clockctl = of_iomap(np_clock, 0);
+	if (!clockctl) {
+		dev_err(&ofdev->dev, "couldn't map clock registers\n");
+		return 0;
+	}
+
+	/* Determine the MSCAN device index from the physical address */
+	pval = of_get_property(mpc5xxx_get_of_node(ofdev), "reg", &plen);
+	BUG_ON(!pval || plen < sizeof(*pval));
+	clockidx = (*pval & 0x80) ? 1 : 0;
+	if (*pval & 0x2000)
+		clockidx += 2;
+
+	/*
+	 * Clock source and divider selection: 3 different clock sources
+	 * can be selected: "ip", "ref" or "sys". For the latter two, a
+	 * clock divider can be defined as well. If the clock source is
+	 * not specified by the device tree, we first try to find an
+	 * optimal CAN source clock based on the system clock. If that
+	 * is not possible, the reference clock will be used.
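+	 * For the "sys" and "ref" sources, the resulting CAN clock is
+	 * the source rate divided by clockdiv.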
+ */ + if (clock_name && !strcmp(clock_name, "ip")) { + *mscan_clksrc = MSCAN_CLKSRC_IPS; + freq = mpc5xxx_get_bus_frequency(mpc5xxx_get_of_node(ofdev)); + } else { + *mscan_clksrc = MSCAN_CLKSRC_BUS; + + pval = of_get_property(mpc5xxx_get_of_node(ofdev), + "fsl,mscan-clock-divider", &plen); + if (pval && plen == sizeof(*pval)) + clockdiv = *pval; + if (!clockdiv) + clockdiv = 1; + + if (!clock_name || !strcmp(clock_name, "sys")) { + sys_clk = clk_get(&ofdev->dev, "sys_clk"); + if (!sys_clk) { + dev_err(&ofdev->dev, "couldn't get sys_clk\n"); + goto exit_unmap; + } + /* Get and round up/down sys clock rate */ + sys_freq = 1000000 * + ((clk_get_rate(sys_clk) + 499999) / 1000000); + + if (!clock_name) { + /* A multiple of 16 MHz would be optimal */ + if ((sys_freq % 16000000) == 0) { + clocksrc = 0; + clockdiv = sys_freq / 16000000; + freq = sys_freq / clockdiv; + } + } else { + clocksrc = 0; + freq = sys_freq / clockdiv; + } + } + + if (clocksrc < 0) { + ref_clk = clk_get(&ofdev->dev, "ref_clk"); + if (!ref_clk) { + dev_err(&ofdev->dev, "couldn't get ref_clk\n"); + goto exit_unmap; + } + clocksrc = 1; + freq = clk_get_rate(ref_clk) / clockdiv; + } + } + + /* Disable clock */ + out_be32(&clockctl->mccr[clockidx], 0x0); + if (clocksrc >= 0) { + /* Set source and divider */ + val = (clocksrc << 14) | ((clockdiv - 1) << 17); + out_be32(&clockctl->mccr[clockidx], val); + /* Enable clock */ + out_be32(&clockctl->mccr[clockidx], val | 0x10000); + } + + /* Enable MSCAN clock domain */ + val = in_be32(&clockctl->sccr[1]); + if (!(val & (1 << 25))) + out_be32(&clockctl->sccr[1], val | (1 << 25)); + + dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n", + *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" : + clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv); + +exit_unmap: + of_node_put(np_clock); + iounmap(clockctl); + + return freq; +} +#else /* !CONFIG_PPC_MPC512x */ +static u32 mpc512x_can_get_clock(struct of_device *ofdev, + const char *clock_name, + int *mscan_clksrc) +{ + return 0; +} +#endif /* CONFIG_PPC_MPC512x */ + +static struct of_device_id mpc5xxx_can_table[]; +static int mpc5xxx_can_probe(struct of_device *ofdev) +{ + struct device_node *np = mpc5xxx_get_of_node(ofdev); + struct mpc5xxx_can_data *data; + struct rtcan_device *dev; + void __iomem *base; + const char *clock_name = NULL; + int irq, mscan_clksrc = 0; + int err = -ENOMEM; + + const struct of_device_id *id; + + id = of_match_device(mpc5xxx_can_table, &ofdev->dev); + if (!id) + return -EINVAL; + + data = (struct mpc5xxx_can_data *)id->data; + + base = of_iomap(np, 0); + if (!base) { + dev_err(&ofdev->dev, "couldn't ioremap\n"); + return err; + } + + irq = irq_of_parse_and_map(np, 0); + if (!irq) { + dev_err(&ofdev->dev, "no irq found\n"); + err = -ENODEV; + goto exit_unmap_mem; + } + + dev = rtcan_dev_alloc(0, 0); + if (!dev) + goto exit_dispose_irq; + + clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL); + + BUG_ON(!data); + dev->can_sys_clock = data->get_clock(ofdev, clock_name, + &mscan_clksrc); + if (!dev->can_sys_clock) { + dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n"); + goto exit_free_mscan; + } + + if (data->type == MSCAN_TYPE_MPC5121) + dev->ctrl_name = mscan_ctrl_name_mpc512x; + else + dev->ctrl_name = mscan_ctrl_name_mpc5200; + dev->board_name = mscan_board_name; + dev->base_addr = (unsigned long)base; + + err = rtcan_mscan_register(dev, irq, mscan_clksrc); + if (err) { + dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", + RTCAN_DRV_NAME, err); + goto exit_free_mscan; + } + + 
+	dev_set_drvdata(&ofdev->dev, dev);
+
+	dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
+		 base, irq, dev->can_sys_clock);
+
+	return 0;
+
+exit_free_mscan:
+	rtcan_dev_free(dev);
+exit_dispose_irq:
+	irq_dispose_mapping(irq);
+exit_unmap_mem:
+	iounmap(base);
+
+	return err;
+}
+
+static int mpc5xxx_can_remove(struct of_device *ofdev)
+{
+	struct rtcan_device *dev = dev_get_drvdata(&ofdev->dev);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+
+	rtcan_mscan_unregister(dev);
+	iounmap((void *)dev->base_addr);
+	rtcan_dev_free(dev);
+
+	return 0;
+}
+
+static struct mpc5xxx_can_data mpc5200_can_data = {
+	.type = MSCAN_TYPE_MPC5200,
+	.get_clock = mpc52xx_can_get_clock,
+};
+
+static struct mpc5xxx_can_data mpc5121_can_data = {
+	.type = MSCAN_TYPE_MPC5121,
+	.get_clock = mpc512x_can_get_clock,
+};
+
+static struct of_device_id mpc5xxx_can_table[] = {
+	{ .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
+	/* Note that only MPC5121 Rev. 2 (and later) is supported */
+	{ .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
+	{},
+};
+
+static struct of_platform_driver mpc5xxx_can_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = RTCAN_DRV_NAME,
+		.of_match_table = mpc5xxx_can_table,
+	},
+	.probe = mpc5xxx_can_probe,
+	.remove = mpc5xxx_can_remove,
+};
+
+static int __init mpc5xxx_can_init(void)
+{
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	return of_register_platform_driver(&mpc5xxx_can_driver);
+}
+module_init(mpc5xxx_can_init);
+
+static void __exit mpc5xxx_can_exit(void)
+{
+	of_unregister_platform_driver(&mpc5xxx_can_driver);
+}
+module_exit(mpc5xxx_can_exit);
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RT-Socket-CAN driver for MPC5200 and MPC512x");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c
new file mode 100644
index 0000000..91f8f84
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include "rtcan_dev.h"
+#include "rtcan_internal.h"
+#include "rtcan_mscan_regs.h"
+
+#define MSCAN_REG_ARGS(reg) \
+	"%-8s 0x%02x\n", #reg, (int)(in_8(&regs->reg)) & 0xff
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG
+
+static int rtcan_mscan_proc_regs(struct seq_file *p, void *data)
+{
+	struct rtcan_device *dev = (struct rtcan_device *)data;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+#ifdef MPC5xxx_GPIO
+	struct mpc5xxx_gpio *gpio = (struct mpc5xxx_gpio *)MPC5xxx_GPIO;
+	u32 port_config;
+#endif
+	u8 canctl0, canctl1;
+
+	seq_printf(p, "MSCAN registers at %p\n", regs);
+
+	canctl0 = in_8(&regs->canctl0);
+	seq_printf(p, "canctl0  0x%02x%s%s%s%s%s%s%s%s\n",
+		   canctl0,
+		   (canctl0 & MSCAN_RXFRM) ? " rxfrm" : "",
+		   (canctl0 & MSCAN_RXACT) ? " rxact" : "",
+		   (canctl0 & MSCAN_CSWAI) ? " cswai" : "",
+		   (canctl0 & MSCAN_SYNCH) ? " synch" : "",
+		   (canctl0 & MSCAN_TIME) ? " time" : "",
+		   (canctl0 & MSCAN_WUPE) ? " wupe" : "",
+		   (canctl0 & MSCAN_SLPRQ) ? " slprq" : "",
+		   (canctl0 & MSCAN_INITRQ) ? " initrq" : "");
+	canctl1 = in_8(&regs->canctl1);
+	seq_printf(p, "canctl1  0x%02x%s%s%s%s%s%s%s\n",
+		   canctl1,
+		   (canctl1 & MSCAN_CANE) ? " cane" : "",
+		   (canctl1 & MSCAN_CLKSRC) ? " clksrc" : "",
+		   (canctl1 & MSCAN_LOOPB) ? " loopb" : "",
+		   (canctl1 & MSCAN_LISTEN) ? " listen" : "",
+		   (canctl1 & MSCAN_WUPM) ? " wupm" : "",
+		   (canctl1 & MSCAN_SLPAK) ? " slpak" : "",
+		   (canctl1 & MSCAN_INITAK) ? " initak" : "");
+	seq_printf(p, MSCAN_REG_ARGS(canbtr0));
+	seq_printf(p, MSCAN_REG_ARGS(canbtr1));
+	seq_printf(p, MSCAN_REG_ARGS(canrflg));
+	seq_printf(p, MSCAN_REG_ARGS(canrier));
+	seq_printf(p, MSCAN_REG_ARGS(cantflg));
+	seq_printf(p, MSCAN_REG_ARGS(cantier));
+	seq_printf(p, MSCAN_REG_ARGS(cantarq));
+	seq_printf(p, MSCAN_REG_ARGS(cantaak));
+	seq_printf(p, MSCAN_REG_ARGS(cantbsel));
+	seq_printf(p, MSCAN_REG_ARGS(canidac));
+	seq_printf(p, MSCAN_REG_ARGS(canrxerr));
+	seq_printf(p, MSCAN_REG_ARGS(cantxerr));
+	seq_printf(p, MSCAN_REG_ARGS(canidar0));
+	seq_printf(p, MSCAN_REG_ARGS(canidar1));
+	seq_printf(p, MSCAN_REG_ARGS(canidar2));
+	seq_printf(p, MSCAN_REG_ARGS(canidar3));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr0));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr1));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr2));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr3));
+	seq_printf(p, MSCAN_REG_ARGS(canidar4));
+	seq_printf(p, MSCAN_REG_ARGS(canidar5));
+	seq_printf(p, MSCAN_REG_ARGS(canidar6));
+	seq_printf(p, MSCAN_REG_ARGS(canidar7));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr4));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr5));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr6));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr7));
+
+#ifdef MPC5xxx_GPIO
+	seq_printf(p, "GPIO registers\n");
+	port_config = in_be32(&gpio->port_config);
+	seq_printf(p, "port_config 0x%08x %s\n", port_config,
+		   (port_config & 0x10000000 ?
+		    "CAN1 on I2C1, CAN2 on TMR0/1 pins" :
+		    (port_config & 0x70) == 0x10 ?
+ "CAN1/2 on PSC2 pins" : + "MSCAN1/2 not routed")); +#endif + + return 0; +} + +static int rtcan_mscan_proc_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, rtcan_mscan_proc_regs, PDE_DATA(inode)); +} + +static const DEFINE_PROC_OPS(rtcan_mscan_proc_regs_ops, + rtcan_mscan_proc_regs_open, + single_elease, + seq_read, + NULL); + +int rtcan_mscan_create_proc(struct rtcan_device* dev) +{ + if (!dev->proc_root) + return -EINVAL; + + proc_create_data("registers", S_IFREG | S_IRUGO | S_IWUSR, + dev->proc_root, &rtcan_mscan_proc_regs_ops, dev); + return 0; +} + +void rtcan_mscan_remove_proc(struct rtcan_device* dev) +{ + if (!dev->proc_root) + return; + + remove_proc_entry("registers", dev->proc_root); +} + +#else /* !CONFIG_XENO_DRIVERS_CAN_DEBUG */ + +void rtcan_mscan_remove_proc(struct rtcan_device* dev) +{ +} + +int rtcan_mscan_create_proc(struct rtcan_device* dev) +{ + return 0; +} +#endif /* CONFIG_XENO_DRIVERS_CAN_DEBUG */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h new file mode 100644 index 0000000..11b85a9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h @@ -0,0 +1,226 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * Based on linux-2.4.25/include/asm-ppc/mpc5xxx.h + * Prototypes, etc. for the Motorola MPC5xxx embedded cpu chips + * + * Author: Dale Farnsworth <dfarnsworth@mvista.com> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __RTCAN_MSCAN_REGS_H_ +#define __RTCAN_MSCAN_REGS_H_ + +#include <linux/version.h> +#include <linux/of_platform.h> +#include <asm/mpc52xx.h> + +static inline void __iomem *mpc5xxx_gpio_find_and_map(void) +{ + struct device_node *ofn; + ofn = of_find_compatible_node(NULL, NULL, "mpc5200-gpio"); + if (!ofn) + ofn = of_find_compatible_node(NULL, NULL, "fsl,mpc5200-gpio"); + return ofn ? 
of_iomap(ofn, 0) : NULL; +} + +#define MPC5xxx_GPIO mpc5xxx_gpio_find_and_map() +#define mpc5xxx_gpio mpc52xx_gpio + +#define mpc5xxx_get_of_node(ofdev) (ofdev)->dev.of_node + +#define MSCAN_CAN1_ADDR (MSCAN_MBAR + 0x0900) /* MSCAN Module 1 */ +#define MSCAN_CAN2_ADDR (MSCAN_MBAR + 0x0980) /* MSCAN Module 2 */ +#define MSCAN_SIZE 0x80 + +/* MSCAN control register 0 (CANCTL0) bits */ +#define MSCAN_RXFRM 0x80 +#define MSCAN_RXACT 0x40 +#define MSCAN_CSWAI 0x20 +#define MSCAN_SYNCH 0x10 +#define MSCAN_TIME 0x08 +#define MSCAN_WUPE 0x04 +#define MSCAN_SLPRQ 0x02 +#define MSCAN_INITRQ 0x01 + +/* MSCAN control register 1 (CANCTL1) bits */ +#define MSCAN_CANE 0x80 +#define MSCAN_CLKSRC 0x40 +#define MSCAN_LOOPB 0x20 +#define MSCAN_LISTEN 0x10 +#define MSCAN_WUPM 0x04 +#define MSCAN_SLPAK 0x02 +#define MSCAN_INITAK 0x01 + +/* MSCAN receiver flag register (CANRFLG) bits */ +#define MSCAN_WUPIF 0x80 +#define MSCAN_CSCIF 0x40 +#define MSCAN_RSTAT1 0x20 +#define MSCAN_RSTAT0 0x10 +#define MSCAN_TSTAT1 0x08 +#define MSCAN_TSTAT0 0x04 +#define MSCAN_OVRIF 0x02 +#define MSCAN_RXF 0x01 + +/* MSCAN receiver interrupt enable register (CANRIER) bits */ +#define MSCAN_WUPIE 0x80 +#define MSCAN_CSCIE 0x40 +#define MSCAN_RSTATE1 0x20 +#define MSCAN_RSTATE0 0x10 +#define MSCAN_TSTATE1 0x08 +#define MSCAN_TSTATE0 0x04 +#define MSCAN_OVRIE 0x02 +#define MSCAN_RXFIE 0x01 + +/* MSCAN transmitter flag register (CANTFLG) bits */ +#define MSCAN_TXE2 0x04 +#define MSCAN_TXE1 0x02 +#define MSCAN_TXE0 0x01 +#define MSCAN_TXE (MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0) + +/* MSCAN transmitter interrupt enable register (CANTIER) bits */ +#define MSCAN_TXIE2 0x04 +#define MSCAN_TXIE1 0x02 +#define MSCAN_TXIE0 0x01 +#define MSCAN_TXIE (MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0) + +/* MSCAN transmitter message abort request (CANTARQ) bits */ +#define MSCAN_ABTRQ2 0x04 +#define MSCAN_ABTRQ1 0x02 +#define MSCAN_ABTRQ0 0x01 + +/* MSCAN transmitter message abort ack (CANTAAK) bits */ +#define MSCAN_ABTAK2 0x04 +#define MSCAN_ABTAK1 0x02 +#define MSCAN_ABTAK0 0x01 + +/* MSCAN transmit buffer selection (CANTBSEL) bits */ +#define MSCAN_TX2 0x04 +#define MSCAN_TX1 0x02 +#define MSCAN_TX0 0x01 + +/* MSCAN ID acceptance control register (CANIDAC) bits */ +#define MSCAN_IDAM1 0x20 +#define MSCAN_IDAM0 0x10 +#define MSCAN_IDHIT2 0x04 +#define MSCAN_IDHIT1 0x02 +#define MSCAN_IDHIT0 0x01 + +struct mscan_msgbuf { + volatile u8 idr[0x8]; /* 0x00 */ + volatile u8 dsr[0x10]; /* 0x08 */ + volatile u8 dlr; /* 0x18 */ + volatile u8 tbpr; /* 0x19 */ /* This register is not applicable for receive buffers */ + volatile u16 rsrv1; /* 0x1A */ + volatile u8 tsrh; /* 0x1C */ + volatile u8 tsrl; /* 0x1D */ + volatile u16 rsrv2; /* 0x1E */ +}; + +struct mscan_regs { + volatile u8 canctl0; /* MSCAN + 0x00 */ + volatile u8 canctl1; /* MSCAN + 0x01 */ + volatile u16 rsrv1; /* MSCAN + 0x02 */ + volatile u8 canbtr0; /* MSCAN + 0x04 */ + volatile u8 canbtr1; /* MSCAN + 0x05 */ + volatile u16 rsrv2; /* MSCAN + 0x06 */ + volatile u8 canrflg; /* MSCAN + 0x08 */ + volatile u8 canrier; /* MSCAN + 0x09 */ + volatile u16 rsrv3; /* MSCAN + 0x0A */ + volatile u8 cantflg; /* MSCAN + 0x0C */ + volatile u8 cantier; /* MSCAN + 0x0D */ + volatile u16 rsrv4; /* MSCAN + 0x0E */ + volatile u8 cantarq; /* MSCAN + 0x10 */ + volatile u8 cantaak; /* MSCAN + 0x11 */ + volatile u16 rsrv5; /* MSCAN + 0x12 */ + volatile u8 cantbsel; /* MSCAN + 0x14 */ + volatile u8 canidac; /* MSCAN + 0x15 */ + volatile u16 rsrv6[3]; /* MSCAN + 0x16 */ + volatile u8 canrxerr; /* MSCAN + 0x1C */ + volatile 
u8 cantxerr; /* MSCAN + 0x1D */ + volatile u16 rsrv7; /* MSCAN + 0x1E */ + volatile u8 canidar0; /* MSCAN + 0x20 */ + volatile u8 canidar1; /* MSCAN + 0x21 */ + volatile u16 rsrv8; /* MSCAN + 0x22 */ + volatile u8 canidar2; /* MSCAN + 0x24 */ + volatile u8 canidar3; /* MSCAN + 0x25 */ + volatile u16 rsrv9; /* MSCAN + 0x26 */ + volatile u8 canidmr0; /* MSCAN + 0x28 */ + volatile u8 canidmr1; /* MSCAN + 0x29 */ + volatile u16 rsrv10; /* MSCAN + 0x2A */ + volatile u8 canidmr2; /* MSCAN + 0x2C */ + volatile u8 canidmr3; /* MSCAN + 0x2D */ + volatile u16 rsrv11; /* MSCAN + 0x2E */ + volatile u8 canidar4; /* MSCAN + 0x30 */ + volatile u8 canidar5; /* MSCAN + 0x31 */ + volatile u16 rsrv12; /* MSCAN + 0x32 */ + volatile u8 canidar6; /* MSCAN + 0x34 */ + volatile u8 canidar7; /* MSCAN + 0x35 */ + volatile u16 rsrv13; /* MSCAN + 0x36 */ + volatile u8 canidmr4; /* MSCAN + 0x38 */ + volatile u8 canidmr5; /* MSCAN + 0x39 */ + volatile u16 rsrv14; /* MSCAN + 0x3A */ + volatile u8 canidmr6; /* MSCAN + 0x3C */ + volatile u8 canidmr7; /* MSCAN + 0x3D */ + volatile u16 rsrv15; /* MSCAN + 0x3E */ + + struct mscan_msgbuf canrxfg; /* MSCAN + 0x40 */ /* Foreground receive buffer */ + struct mscan_msgbuf cantxfg; /* MSCAN + 0x60 */ /* Foreground transmit buffer */ +}; + +/* Clock source selection + */ +#define MSCAN_CLKSRC_BUS 0 +#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC +#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC + +/* Message type access macros. + */ +#define MSCAN_BUF_STD_RTR 0x10 +#define MSCAN_BUF_EXT_RTR 0x01 +#define MSCAN_BUF_EXTENDED 0x08 + +#define MSCAN_IDAM1 0x20 +/* Value for the interrupt enable register */ +#define MSCAN_RIER (MSCAN_OVRIE | \ + MSCAN_RXFIE | \ + MSCAN_WUPIF | \ + MSCAN_CSCIE | \ + MSCAN_RSTATE0 | \ + MSCAN_RSTATE1 | \ + MSCAN_TSTATE0 | \ + MSCAN_TSTATE1) + +#define BTR0_BRP_MASK 0x3f +#define BTR0_SJW_SHIFT 6 +#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT) + +#define BTR1_TSEG1_MASK 0xf +#define BTR1_TSEG2_SHIFT 4 +#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT) +#define BTR1_SAM_SHIFT 7 + +#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK) +#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \ + BTR0_SJW_MASK) + +#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK) +#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \ + BTR1_TSEG2_MASK) +#define BTR1_SET_SAM(sam) (((sam) & 1) << BTR1_SAM_SHIFT) + +#endif /* __RTCAN_MSCAN_REGS_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig new file mode 100644 index 0000000..0fccce9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig @@ -0,0 +1,6 @@ +config XENO_DRIVERS_CAN_PEAK_CANFD + depends on XENO_DRIVERS_CAN && PCI && !XENO_DRIVERS_CAN_CALC_BITTIME_OLD + tristate "PEAK driver for PCAN-PCIe FD family" + help + + This driver supports the PCAN-PCIe FD boards family from PEAK-System. 
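
The BTR0_SET_xxx()/BTR1_SET_xxx() helpers in rtcan_mscan_regs.h above pack the
classic bit-timing parameters (prescaler, sync jump width, time segments,
sampling mode) into the two 8-bit MSCAN bus-timing registers, each value
stored as "n - 1" in its bit field. A minimal worked sketch follows; the
32 MHz MSCAN input clock and the parameter choices are illustrative
assumptions, not values taken from this patch. `regs` is an ioremapped
`struct mscan_regs *` as in rtcan_mscan_proc_regs() above:

	/* 250 kbit/s: brp = 8 -> tq = 0.25 us at an assumed 32 MHz clock;
	 * 1 (sync) + tseg1 (11) + tseg2 (4) = 16 tq = 4 us per bit.
	 */
	u8 btr0 = BTR0_SET_BRP(8) | BTR0_SET_SJW(2);	/* = 0x47 */
	u8 btr1 = BTR1_SET_TSEG1(11) | BTR1_SET_TSEG2(4) |
		  BTR1_SET_SAM(0);			/* = 0x3a, single sampling */

	/* programmed with the accessor matching the in_8() reads above */
	out_8(&regs->canbtr0, btr0);
	out_8(&regs->canbtr1, btr1);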
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile new file mode 100644 index 0000000..f56f451 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the PEAK-System CAN-FD IP module drivers +# +ccflags-y += -I$(srctree)/drivers/xenomai/can + +obj-$(CONFIG_XENO_DRIVERS_CAN_PEAK_CANFD) += xeno_can_peak_pciefd.o + +xeno_can_peak_pciefd-y := rtcan_peak_pciefd.o rtcan_peak_canfd.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c new file mode 100644 index 0000000..4ecc1e9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CANFD firmware interface. + * + * Copyright (C) 2001-2021 PEAK System-Technik GmbH + * Copyright (C) 2019-2021 Stephane Grosjean <s.grosjean@peak-system.com> + */ +#include "rtcan_dev.h" +#include "rtcan_raw.h" +#include "rtcan_peak_canfd_user.h" + +#define DRV_NAME "xeno_peak_canfd" + +#define RTCAN_DEV_NAME "rtcan%d" +#define RTCAN_CTRLR_NAME "peak_canfd" + +/* bittiming ranges of the PEAK-System PC CAN-FD interfaces */ +static const struct can_bittiming_const peak_canfd_nominal_const = { + .name = RTCAN_CTRLR_NAME, + .tseg1_min = 1, + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), + .tseg2_min = 1, + .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), + .brp_min = 1, + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), + .brp_inc = 1, +}; + +/* initialize the command area */ +static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv) +{ + priv->cmd_len = 0; + return priv; +} + +/* add command 'cmd_op' to the command area */ +static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op) +{ + struct pucan_command *cmd; + + if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen) + return NULL; + + cmd = priv->cmd_buffer + priv->cmd_len; + + /* reset all unused bit to default */ + memset(cmd, 0, sizeof(*cmd)); + + cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op); + priv->cmd_len += sizeof(*cmd); + + return cmd; +} + +/* send the command(s) to the IP core through the host-device interface */ +static int pucan_write_cmd(struct peak_canfd_priv *priv) +{ + int err; + + /* prepare environment before writing the command */ + if (priv->pre_cmd) { + err = priv->pre_cmd(priv); + if (err) + return err; + } + + err = priv->write_cmd(priv); + if (err) + return err; + + /* update environment after writing the command */ + if (priv->post_cmd) + err = priv->post_cmd(priv); + + return err; +} + +/* set the device in RESET mode */ +static int pucan_set_reset_mode(struct peak_canfd_priv *priv) +{ + int err; + + pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE); + err = pucan_write_cmd(priv); + if (!err) + priv->rdev->state = CAN_STATE_STOPPED; + + return err; +} + +/* set the device in NORMAL mode */ +static int pucan_set_normal_mode(struct peak_canfd_priv *priv) +{ + int err; + + pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE); + err = pucan_write_cmd(priv); + if (!err) + priv->rdev->state = CAN_STATE_ERROR_ACTIVE; + + return err; +} + +/* set the device in LISTEN_ONLY mode */ +static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv) +{ + int err; + + pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE); + err = pucan_write_cmd(priv); + 
if (!err) + priv->rdev->state = CAN_STATE_ERROR_ACTIVE; + + return err; +} + +/* set acceptance filters */ +static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask) +{ + struct pucan_std_filter *cmd; + + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER); + + /* All the 11-bit CAN ID values are represented by one bit in a + * 64 rows array of 32 columns: the upper 6 bit of the CAN ID select + * the row while the lowest 5 bit select the column in that row. + * + * bit filter + * 1 passed + * 0 discarded + */ + + /* select the row */ + cmd->idx = row; + + /* set/unset bits in the row */ + cmd->mask = cpu_to_le32(mask); + + return pucan_write_cmd(priv); +} + +/* request the device to stop transmission */ +static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags) +{ + struct pucan_tx_abort *cmd; + + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT); + + cmd->flags = cpu_to_le16(flags); + + return pucan_write_cmd(priv); +} + +/* request the device to clear rx/tx error counters */ +static int pucan_clr_err_counters(struct peak_canfd_priv *priv) +{ + struct pucan_wr_err_cnt *cmd; + + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT); + + cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE); + + /* write the counters new value */ + cmd->tx_counter = 0; + cmd->rx_counter = 0; + + return pucan_write_cmd(priv); +} + +/* set options to the device */ +static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask) +{ + struct pucan_options *cmd; + + cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION); + + cmd->options = cpu_to_le16(opt_mask); + + return pucan_write_cmd(priv); +} + +/* request the device to notify the driver when Tx path is ready */ +static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv) +{ + pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER); + + return pucan_write_cmd(priv); +} + +/* handle the reception of one CAN frame */ +static int pucan_handle_can_rx(struct peak_canfd_priv *priv, + struct pucan_rx_msg *msg) +{ + struct rtcan_skb skb = { .rb_frame_size = EMPTY_RB_FRAME_SIZE, }; + struct rtcan_rb_frame *cf = &skb.rb_frame; + struct rtcan_device *rdev = priv->rdev; + const u16 rx_msg_flags = le16_to_cpu(msg->flags); + + if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { + /* CAN-FD frames are silently discarded */ + return 0; + } + + cf->can_id = le32_to_cpu(msg->can_id); + cf->can_dlc = get_can_dlc(pucan_msg_get_dlc(msg)); + + if (rx_msg_flags & PUCAN_MSG_EXT_ID) + cf->can_id |= CAN_EFF_FLAG; + + if (rx_msg_flags & PUCAN_MSG_RTR) + cf->can_id |= CAN_RTR_FLAG; + else { + memcpy(cf->data, msg->d, cf->can_dlc); + skb.rb_frame_size += cf->can_dlc; + } + + cf->can_ifindex = rdev->ifindex; + + /* Pass received frame out to the sockets */ + rtcan_rcv(rdev, &skb); + + return 0; +} + +/* handle rx/tx error counters notification */ +static int pucan_handle_error(struct peak_canfd_priv *priv, + struct pucan_error_msg *msg) +{ + priv->bec.txerr = msg->tx_err_cnt; + priv->bec.rxerr = msg->rx_err_cnt; + + return 0; +} + +/* handle status notification */ +static int pucan_handle_status(struct peak_canfd_priv *priv, + struct pucan_status_msg *msg) +{ + struct rtcan_skb skb = { .rb_frame_size = EMPTY_RB_FRAME_SIZE, }; + struct rtcan_rb_frame *cf = &skb.rb_frame; + struct rtcan_device *rdev = priv->rdev; + + /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ + if (pucan_status_is_rx_barrier(msg)) { + if (priv->enable_tx_path) { + int err = 
priv->enable_tx_path(priv); + + if (err) + return err; + } + + /* unlock senders */ + rtdm_sem_up(&rdev->tx_sem); + return 0; + } + + /* otherwise, it's a BUS status */ + cf->can_id = CAN_ERR_FLAG; + cf->can_dlc = CAN_ERR_DLC; + + /* test state error bits according to their priority */ + if (pucan_status_is_busoff(msg)) { + rtdm_printk(DRV_NAME " CAN%u: Bus-off entry status\n", + priv->index+1); + rdev->state = CAN_STATE_BUS_OFF; + cf->can_id |= CAN_ERR_BUSOFF; + + /* wakeup waiting senders */ + rtdm_sem_destroy(&rdev->tx_sem); + + } else if (pucan_status_is_passive(msg)) { + rtdm_printk(DRV_NAME " CAN%u: Error passive status\n", + priv->index+1); + rdev->state = CAN_STATE_ERROR_PASSIVE; + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? + CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + cf->data[6] = priv->bec.txerr; + cf->data[7] = priv->bec.rxerr; + + } else if (pucan_status_is_warning(msg)) { + rtdm_printk(DRV_NAME " CAN%u: Error warning status\n", + priv->index+1); + rdev->state = CAN_STATE_ERROR_WARNING; + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = priv->bec.txerr; + cf->data[7] = priv->bec.rxerr; + + } else if (rdev->state != CAN_STATE_ERROR_ACTIVE) { + /* back to ERROR_ACTIVE */ + rtdm_printk(DRV_NAME " CAN%u: Error active status\n", + priv->index+1); + rdev->state = CAN_STATE_ERROR_ACTIVE; + } + + skb.rb_frame_size += cf->can_dlc; + cf->can_ifindex = rdev->ifindex; + + /* Pass received frame out to the sockets */ + rtcan_rcv(rdev, &skb); + + return 0; +} + +/* handle IP core Rx overflow notification */ +static int pucan_handle_cache_critical(struct peak_canfd_priv *priv) +{ + struct rtcan_skb skb = { .rb_frame_size = EMPTY_RB_FRAME_SIZE, }; + struct rtcan_rb_frame *cf = &skb.rb_frame; + struct rtcan_device *rdev = priv->rdev; + + cf->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL; + cf->can_dlc = CAN_ERR_DLC; + + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + cf->data[6] = priv->bec.txerr; + cf->data[7] = priv->bec.rxerr; + + skb.rb_frame_size += cf->can_dlc; + cf->can_ifindex = rdev->ifindex; + + /* Pass received frame out to the sockets */ + rtcan_rcv(rdev, &skb); + + return 0; +} + +/* handle a single uCAN message */ +int peak_canfd_handle_msg(struct peak_canfd_priv *priv, + struct pucan_rx_msg *msg) +{ + u16 msg_type = le16_to_cpu(msg->type); + int msg_size = le16_to_cpu(msg->size); + int err; + + if (!msg_size || !msg_type) { + /* null packet found: end of list */ + goto exit; + } + + switch (msg_type) { + case PUCAN_MSG_CAN_RX: + err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg); + break; + case PUCAN_MSG_ERROR: + err = pucan_handle_error(priv, (struct pucan_error_msg *)msg); + break; + case PUCAN_MSG_STATUS: + err = pucan_handle_status(priv, + (struct pucan_status_msg *)msg); + break; + case PUCAN_MSG_CACHE_CRITICAL: + err = pucan_handle_cache_critical(priv); + break; + default: + err = 0; + } + + if (err < 0) + return err; + +exit: + return msg_size; +} + +/* handle a list of rx_count messages from rx_msg memory address */ +int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, + struct pucan_rx_msg *msg_list, int msg_count) +{ + void *msg_ptr = msg_list; + int i, msg_size = 0; + + for (i = 0; i < msg_count; i++) { + msg_size = peak_canfd_handle_msg(priv, msg_ptr); + + /* a null packet can be found at the end of a list */ + if (msg_size <= 0) + break; + + msg_ptr += ALIGN(msg_size, 4); + } + + if (msg_size < 0) + return 
msg_size;
+
+	return i;
+}
+
+/* start the device (set the IP core in NORMAL or LISTEN-ONLY mode) */
+static int peak_canfd_start(struct rtcan_device *rdev,
+			    rtdm_lockctx_t *lock_ctx)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	int i, err = 0;
+
+	switch (rdev->state) {
+	case CAN_STATE_BUS_OFF:
+	case CAN_STATE_STOPPED:
+		err = pucan_set_reset_mode(priv);
+		if (err)
+			break;
+
+		/* set needed option: get rx/tx error counters */
+		err = pucan_set_options(priv, PUCAN_OPTION_ERROR);
+		if (err)
+			break;
+
+		/* accept all standard CAN ID */
+		for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++)
+			pucan_set_std_filter(priv, i, 0xffffffff);
+
+		/* clear device rx/tx error counters */
+		err = pucan_clr_err_counters(priv);
+		if (err)
+			break;
+
+		/* set requested mode */
+		if (priv->rdev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)
+			err = pucan_set_listen_only_mode(priv);
+		else
+			err = pucan_set_normal_mode(priv);
+
+		rtdm_sem_init(&rdev->tx_sem, 1);
+
+		/* receiving the RB status says when Tx path is ready */
+		err = pucan_setup_rx_barrier(priv);
+		break;
+
+	default:
+		break;
+	}
+
+	return err;
+}
+
+/* stop the device (set the IP core in RESET mode) */
+static int peak_canfd_stop(struct rtcan_device *rdev,
+			   rtdm_lockctx_t *lock_ctx)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	int err = 0;
+
+	switch (rdev->state) {
+	case CAN_STATE_BUS_OFF:
+	case CAN_STATE_STOPPED:
+		break;
+
+	default:
+		/* go back to RESET mode */
+		err = pucan_set_reset_mode(priv);
+		if (err) {
+			rtdm_printk(DRV_NAME " CAN%u: reset failed\n",
+				    priv->index+1);
+			break;
+		}
+
+		/* abort last Tx (MUST be done in RESET mode only!) */
+		pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH);
+
+		rtdm_sem_destroy(&rdev->tx_sem);
+		break;
+	}
+
+	return err;
+}
+
+/* RT-Socket-CAN driver interface */
+static int peak_canfd_set_mode(struct rtcan_device *rdev, can_mode_t mode,
+			       rtdm_lockctx_t *lock_ctx)
+{
+	int err = 0;
+
+	switch (mode) {
+	case CAN_MODE_STOP:
+		err = peak_canfd_stop(rdev, lock_ctx);
+		break;
+	case CAN_MODE_START:
+		err = peak_canfd_start(rdev, lock_ctx);
+		break;
+	case CAN_MODE_SLEEP:
+		/* controller must be operating, otherwise bail out */
+		if (!CAN_STATE_OPERATING(rdev->state)) {
+			err = -ENETDOWN;
+			break;
+		}
+		if (rdev->state == CAN_STATE_SLEEPING)
+			break;
+
+		fallthrough;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int peak_canfd_set_bittiming(struct rtcan_device *rdev,
+				    struct can_bittime *pbt,
+				    rtdm_lockctx_t *lock_ctx)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	struct pucan_timing_slow *cmd;
+
+	/* can't support BTR0BTR1 mode with clock greater than 8 MHz */
+	if (pbt->type != CAN_BITTIME_STD) {
+		rtdm_printk(DRV_NAME
+			    " CAN%u: unsupported bittiming mode %u\n",
+			    priv->index+1, pbt->type);
+		return -EINVAL;
+	}
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
+
+	cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->std.sjw - 1,
+				       priv->rdev->ctrl_mode &
+				       CAN_CTRLMODE_3_SAMPLES);
+
+	cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->std.prop_seg +
+				       pbt->std.phase_seg1 - 1);
+	cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->std.phase_seg2 - 1);
+	cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->std.brp - 1));
+
+	cmd->ewl = 96;	/* default */
+
+	rtdm_printk(DRV_NAME ": nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
+		    le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);
+
+	return pucan_write_cmd(priv);
+}
+
+/* hard transmit callback: write the CAN frame to the device */
+static netdev_tx_t peak_canfd_start_xmit(struct rtcan_device *rdev,
+					 can_frame_t *cf)
+{
+	struct peak_canfd_priv *priv =
rdev->priv; + struct pucan_tx_msg *msg; + u16 msg_size, msg_flags; + int room_left; + const u8 dlc = (cf->can_dlc > CAN_MAX_DLC) ? CAN_MAX_DLC : cf->can_dlc; + + msg_size = ALIGN(sizeof(*msg) + dlc, 4); + msg = priv->alloc_tx_msg(priv, msg_size, &room_left); + + /* should never happen except under bus-off condition and + * (auto-)restart mechanism + */ + if (!msg) { + rtdm_printk(DRV_NAME + " CAN%u: skb lost (No room left in tx buffer)\n", + priv->index+1); + return 0; + } + + msg->size = cpu_to_le16(msg_size); + msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); + msg_flags = 0; + if (cf->can_id & CAN_EFF_FLAG) { + msg_flags |= PUCAN_MSG_EXT_ID; + msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK); + } else { + msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK); + } + + if (cf->can_id & CAN_RTR_FLAG) + msg_flags |= PUCAN_MSG_RTR; + + /* set driver specific bit to differentiate with application + * loopback + */ + if (rdev->ctrl_mode & CAN_CTRLMODE_LOOPBACK) + msg_flags |= PUCAN_MSG_LOOPED_BACK; + + msg->flags = cpu_to_le16(msg_flags); + msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, dlc); + memcpy(msg->d, cf->data, dlc); + + /* write the skb on the interface */ + priv->write_tx_msg(priv, msg); + + /* control senders flow */ + if (room_left > (sizeof(*msg) + CAN_MAX_DLC)) + rtdm_sem_up(&rdev->tx_sem); + + return 0; +} + +/* allocate a rtcan device for channel #index, with enough space to store + * private information. + */ +struct rtcan_device *alloc_peak_canfd_dev(int sizeof_priv, int index) +{ + struct rtcan_device *rdev; + struct peak_canfd_priv *priv; + + /* allocate the candev object */ + rdev = rtcan_dev_alloc(sizeof_priv, 0); + if (!rdev) + return NULL; + + /* RTCAN part initialization */ + strncpy(rdev->name, RTCAN_DEV_NAME, IFNAMSIZ); + rdev->ctrl_name = RTCAN_CTRLR_NAME; + rdev->can_sys_clock = 80*1000*1000; /* default */ + rdev->state = CAN_STATE_STOPPED; + rdev->hard_start_xmit = peak_canfd_start_xmit; + rdev->do_set_mode = peak_canfd_set_mode; + rdev->do_set_bit_time = peak_canfd_set_bittiming; + rdev->bittiming_const = &peak_canfd_nominal_const; + + priv = rdev->priv; + + /* private part initialization */ + priv->rdev = rdev; + priv->index = index; + priv->cmd_len = 0; + priv->bec.txerr = 0; + priv->bec.rxerr = 0; + + return rdev; +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h new file mode 100644 index 0000000..f2b911e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * CAN driver for PEAK System micro-CAN based adapters. 
+ * + * Copyright (C) 2001-2021 PEAK System-Technik GmbH + * Copyright (C) 2019-2021 Stephane Grosjean <s.grosjean@peak-system.com> + */ +#ifndef PEAK_CANFD_USER_H +#define PEAK_CANFD_USER_H + +#include <linux/can/dev/peak_canfd.h> + +#define CAN_MAX_DLC 8 +#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) + +struct peak_berr_counter { + __u16 txerr; + __u16 rxerr; +}; + +/* data structure private to each uCAN interface */ +struct peak_canfd_priv { + struct rtcan_device *rdev; /* RTCAN device */ + int index; /* channel index */ + + struct peak_berr_counter bec; + + int cmd_len; + void *cmd_buffer; + int cmd_maxlen; + + int (*pre_cmd)(struct peak_canfd_priv *priv); + int (*write_cmd)(struct peak_canfd_priv *priv); + int (*post_cmd)(struct peak_canfd_priv *priv); + + int (*enable_tx_path)(struct peak_canfd_priv *priv); + void *(*alloc_tx_msg)(struct peak_canfd_priv *priv, u16 msg_size, + int *room_left); + int (*write_tx_msg)(struct peak_canfd_priv *priv, + struct pucan_tx_msg *msg); +}; + +struct rtcan_device *alloc_peak_canfd_dev(int sizeof_priv, int index); +void rtcan_peak_pciefd_remove_proc(struct rtcan_device *rdev); +int rtcan_peak_pciefd_create_proc(struct rtcan_device *rdev); + +int peak_canfd_handle_msg(struct peak_canfd_priv *priv, + struct pucan_rx_msg *msg); +int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, + struct pucan_rx_msg *rx_msg, int rx_count); +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c new file mode 100644 index 0000000..921182f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c @@ -0,0 +1,1001 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CAN driver PCI interface. + * + * Copyright (C) 2001-2021 PEAK System-Technik GmbH + * Copyright (C) 2019-2021 Stephane Grosjean <s.grosjean@peak-system.com> + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/io.h> + +#include <rtdm/driver.h> + +#include <rtdm/can.h> + +#include "rtcan_dev.h" +#include "rtcan_raw.h" +#include "rtcan_peak_canfd_user.h" + +#ifdef CONFIG_PCI_MSI +#define PCIEFD_USES_MSI +#endif + +#ifndef struct_size +#define struct_size(p, member, n) ((n)*sizeof(*(p)->member) + \ + sizeof(*(p))) +#endif + +#define DRV_NAME "xeno_peak_pciefd" + +static char *pciefd_board_name = "PEAK-PCIe FD"; + +MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); +MODULE_DESCRIPTION("RTCAN driver for PEAK PCAN PCIe/M.2 FD family cards"); +MODULE_LICENSE("GPL v2"); + +#define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */ +#define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */ +#define PCAN_CPCIEFD_ID 0x0014 /* for Compact-PCI Serial slot cards */ +#define PCAN_PCIE104FD_ID 0x0017 /* for PCIe-104 Express slot cards */ +#define PCAN_MINIPCIEFD_ID 0x0018 /* for mini-PCIe slot cards */ +#define PCAN_PCIEFD_OEM_ID 0x0019 /* for PCIe slot OEM cards */ +#define PCAN_M2_ID 0x001a /* for M2 slot cards */ + +/* supported device ids. 
*/ +static const struct pci_device_id peak_pciefd_tbl[] = { + {PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, peak_pciefd_tbl); + +/* PEAK PCIe board access description */ +#define PCIEFD_BAR0_SIZE (64 * 1024) +#define PCIEFD_RX_DMA_SIZE (4 * 1024) +#define PCIEFD_TX_DMA_SIZE (4 * 1024) + +#define PCIEFD_TX_PAGE_SIZE (2 * 1024) + +/* System Control Registers */ +#define PCIEFD_REG_SYS_CTL_SET 0x0000 /* set bits */ +#define PCIEFD_REG_SYS_CTL_CLR 0x0004 /* clear bits */ + +/* Version info registers */ +#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ +#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ + +#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \ + ((u32)(y) << 16) | \ + ((u32)(z) << 8)) + +/* System Control Registers Bits */ +#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ +#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ + +/* CAN-FD channel addresses */ +#define PCIEFD_CANX_OFF(c) (((c) + 1) * 0x1000) + +#define PCIEFD_ECHO_SKB_MAX PCANFD_ECHO_SKB_DEF + +/* CAN-FD channel registers */ +#define PCIEFD_REG_CAN_MISC 0x0000 /* Misc. control */ +#define PCIEFD_REG_CAN_CLK_SEL 0x0008 /* Clock selector */ +#define PCIEFD_REG_CAN_CMD_PORT_L 0x0010 /* 64-bits command port */ +#define PCIEFD_REG_CAN_CMD_PORT_H 0x0014 +#define PCIEFD_REG_CAN_TX_REQ_ACC 0x0020 /* Tx request accumulator */ +#define PCIEFD_REG_CAN_TX_CTL_SET 0x0030 /* Tx control set register */ +#define PCIEFD_REG_CAN_TX_CTL_CLR 0x0038 /* Tx control clear register */ +#define PCIEFD_REG_CAN_TX_DMA_ADDR_L 0x0040 /* 64-bits addr for Tx DMA */ +#define PCIEFD_REG_CAN_TX_DMA_ADDR_H 0x0044 +#define PCIEFD_REG_CAN_RX_CTL_SET 0x0050 /* Rx control set register */ +#define PCIEFD_REG_CAN_RX_CTL_CLR 0x0058 /* Rx control clear register */ +#define PCIEFD_REG_CAN_RX_CTL_WRT 0x0060 /* Rx control write register */ +#define PCIEFD_REG_CAN_RX_CTL_ACK 0x0068 /* Rx control ACK register */ +#define PCIEFD_REG_CAN_RX_DMA_ADDR_L 0x0070 /* 64-bits addr for Rx DMA */ +#define PCIEFD_REG_CAN_RX_DMA_ADDR_H 0x0074 + +/* CAN-FD channel misc register bits */ +#define CANFD_MISC_TS_RST 0x00000001 /* timestamp cnt rst */ + +/* CAN-FD channel Clock SELector Source & DIVider */ +#define CANFD_CLK_SEL_DIV_MASK 0x00000007 +#define CANFD_CLK_SEL_DIV_60MHZ 0x00000000 /* SRC=240MHz only */ +#define CANFD_CLK_SEL_DIV_40MHZ 0x00000001 /* SRC=240MHz only */ +#define CANFD_CLK_SEL_DIV_30MHZ 0x00000002 /* SRC=240MHz only */ +#define CANFD_CLK_SEL_DIV_24MHZ 0x00000003 /* SRC=240MHz only */ +#define CANFD_CLK_SEL_DIV_20MHZ 0x00000004 /* SRC=240MHz only */ + +#define CANFD_CLK_SEL_SRC_MASK 0x00000008 /* 0=80MHz, 1=240MHz */ +#define CANFD_CLK_SEL_SRC_240MHZ 0x00000008 +#define CANFD_CLK_SEL_SRC_80MHZ (~CANFD_CLK_SEL_SRC_240MHZ & \ + CANFD_CLK_SEL_SRC_MASK) + +#define CANFD_CLK_SEL_20MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ + CANFD_CLK_SEL_DIV_20MHZ) +#define CANFD_CLK_SEL_24MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ + CANFD_CLK_SEL_DIV_24MHZ) +#define CANFD_CLK_SEL_30MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ + CANFD_CLK_SEL_DIV_30MHZ) +#define CANFD_CLK_SEL_40MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ + CANFD_CLK_SEL_DIV_40MHZ) +#define CANFD_CLK_SEL_60MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ 
+ CANFD_CLK_SEL_DIV_60MHZ) +#define CANFD_CLK_SEL_80MHZ (CANFD_CLK_SEL_SRC_80MHZ) + +/* CAN-FD channel Rx/Tx control register bits */ +#define CANFD_CTL_UNC_BIT 0x00010000 /* Uncached DMA mem */ +#define CANFD_CTL_RST_BIT 0x00020000 /* reset DMA action */ +#define CANFD_CTL_IEN_BIT 0x00040000 /* IRQ enable */ + +/* Rx IRQ Count and Time Limits */ +#define CANFD_CTL_IRQ_CL_DEF 8 /* Rx msg max nb per IRQ in Rx DMA */ +#define CANFD_CTL_IRQ_TL_DEF 5 /* Time before IRQ if < CL (x100 us) */ + +#define CANFD_OPTIONS_SET (CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD) + +/* Tx anticipation window (link logical address should be aligned on 2K + * boundary) + */ +#define PCIEFD_TX_PAGE_COUNT (PCIEFD_TX_DMA_SIZE / PCIEFD_TX_PAGE_SIZE) + +#define CANFD_MSG_LNK_TX 0x1001 /* Tx msgs link */ + +/* 32-bit IRQ status fields, heading Rx DMA area */ +static inline int pciefd_irq_tag(u32 irq_status) +{ + return irq_status & 0x0000000f; +} + +static inline int pciefd_irq_rx_cnt(u32 irq_status) +{ + return (irq_status & 0x000007f0) >> 4; +} + +static inline int pciefd_irq_is_lnk(u32 irq_status) +{ + return irq_status & 0x00010000; +} + +/* Rx record */ +struct pciefd_rx_dma { + __le32 irq_status; + __le32 sys_time_low; + __le32 sys_time_high; + struct pucan_rx_msg msg[0]; +} __packed __aligned(4); + +/* Tx Link record */ +struct pciefd_tx_link { + __le16 size; + __le16 type; + __le32 laddr_lo; + __le32 laddr_hi; +} __packed __aligned(4); + +/* Tx page descriptor */ +struct pciefd_page { + void *vbase; /* page virtual address */ + dma_addr_t lbase; /* page logical address */ + u32 offset; + u32 size; +}; + +/* CAN channel object */ +struct pciefd_board; +struct pciefd_can { + struct peak_canfd_priv ucan; /* must be the first member */ + void __iomem *reg_base; /* channel config base addr */ + struct pciefd_board *board; /* reverse link */ + + struct pucan_command pucan_cmd; /* command buffer */ + + dma_addr_t rx_dma_laddr; /* DMA virtual and logical addr */ + void *rx_dma_vaddr; /* for Rx and Tx areas */ + dma_addr_t tx_dma_laddr; + void *tx_dma_vaddr; + + struct pciefd_page tx_pages[PCIEFD_TX_PAGE_COUNT]; + u16 tx_pages_free; /* free Tx pages counter */ + u16 tx_page_index; /* current page used for Tx */ + rtdm_lock_t tx_lock; + u32 irq_status; + u32 irq_tag; /* next irq tag */ + int irq; + + u32 flags; +}; + +/* PEAK-PCIe FD board object */ +struct pciefd_board { + void __iomem *reg_base; + struct pci_dev *pci_dev; + int can_count; + int irq_flags; /* RTDM_IRQTYPE_SHARED or 0 */ + rtdm_lock_t cmd_lock; /* 64-bits cmds must be atomic */ + struct pciefd_can *can[0]; /* array of network devices */ +}; + +#define CANFD_CTL_IRQ_CL_MIN 1 +#define CANFD_CTL_IRQ_CL_MAX 127 /* 7-bit field */ + +#define CANFD_CTL_IRQ_TL_MIN 1 +#define CANFD_CTL_IRQ_TL_MAX 15 /* 4-bit field */ + +static uint irqcl = CANFD_CTL_IRQ_CL_DEF; +module_param(irqcl, uint, 0644); +MODULE_PARM_DESC(irqcl, +" PCIe FD IRQ Count Limit (default=" __stringify(CANFD_CTL_IRQ_CL_DEF) ")"); + +static uint irqtl = CANFD_CTL_IRQ_TL_DEF; +module_param(irqtl, uint, 0644); +MODULE_PARM_DESC(irqtl, +" PCIe FD IRQ Time Limit (default=" __stringify(CANFD_CTL_IRQ_TL_DEF) ")"); + +#ifdef PCIEFD_USES_MSI + +#ifdef CONFIG_XENO_OPT_SHIRQ +/* default behaviour: run as mainline driver in INTx mode */ +#define PCIEFD_USEMSI_DEFAULT 0 +#else +/* default behaviour: run in MSI mode (one IRQ per channel) */ +#define PCIEFD_USEMSI_DEFAULT 1 +#endif + +static uint usemsi = PCIEFD_USEMSI_DEFAULT; +module_param(usemsi, uint, 0644); +MODULE_PARM_DESC(usemsi, +" 0=INTA; 1=MSI (def=" 
__stringify(PCIEFD_USEMSI_DEFAULT) ")"); +#endif + +/* read a 32 bit value from a SYS block register */ +static inline u32 pciefd_sys_readreg(const struct pciefd_board *priv, u16 reg) +{ + return readl(priv->reg_base + reg); +} + +/* write a 32 bit value into a SYS block register */ +static inline void pciefd_sys_writereg(const struct pciefd_board *priv, + u32 val, u16 reg) +{ + writel(val, priv->reg_base + reg); +} + +/* read a 32 bits value from CAN-FD block register */ +static inline u32 pciefd_can_readreg(const struct pciefd_can *priv, u16 reg) +{ + return readl(priv->reg_base + reg); +} + +/* write a 32 bits value into a CAN-FD block register */ +static inline void pciefd_can_writereg(const struct pciefd_can *priv, + u32 val, u16 reg) +{ + writel(val, priv->reg_base + reg); +} + +/* give a channel logical Rx DMA address to the board */ +static void pciefd_can_setup_rx_dma(struct pciefd_can *priv) +{ +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + const u32 dma_addr_h = (u32)(priv->rx_dma_laddr >> 32); +#else + const u32 dma_addr_h = 0; +#endif + + /* (DMA must be reset for Rx) */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, + PCIEFD_REG_CAN_RX_CTL_SET); + + /* write the logical address of the Rx DMA area for this channel */ + pciefd_can_writereg(priv, (u32)priv->rx_dma_laddr, + PCIEFD_REG_CAN_RX_DMA_ADDR_L); + pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_RX_DMA_ADDR_H); + + /* also indicates that Rx DMA is cacheable */ + pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, + PCIEFD_REG_CAN_RX_CTL_CLR); +} + +/* clear channel logical Rx DMA address from the board */ +static void pciefd_can_clear_rx_dma(struct pciefd_can *priv) +{ + /* DMA must be reset for Rx */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, + PCIEFD_REG_CAN_RX_CTL_SET); + + /* clear the logical address of the Rx DMA area for this channel */ + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_L); + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_H); +} + +/* give a channel logical Tx DMA address to the board */ +static void pciefd_can_setup_tx_dma(struct pciefd_can *priv) +{ +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + const u32 dma_addr_h = (u32)(priv->tx_dma_laddr >> 32); +#else + const u32 dma_addr_h = 0; +#endif + + /* (DMA must be reset for Tx) */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, + PCIEFD_REG_CAN_TX_CTL_SET); + + /* write the logical address of the Tx DMA area for this channel */ + pciefd_can_writereg(priv, (u32)priv->tx_dma_laddr, + PCIEFD_REG_CAN_TX_DMA_ADDR_L); + pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_TX_DMA_ADDR_H); + + /* also indicates that Tx DMA is cacheable */ + pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, + PCIEFD_REG_CAN_TX_CTL_CLR); +} + +/* clear channel logical Tx DMA address from the board */ +static void pciefd_can_clear_tx_dma(struct pciefd_can *priv) +{ + /* DMA must be reset for Tx */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, + PCIEFD_REG_CAN_TX_CTL_SET); + + /* clear the logical address of the Tx DMA area for this channel */ + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_L); + pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_H); +} + +/* acknowledge interrupt to the device */ +static void pciefd_can_ack_rx_dma(struct pciefd_can *priv) +{ + /* read value of current IRQ tag and inc it for next one */ + priv->irq_tag = le32_to_cpu(*(__le32 *)priv->rx_dma_vaddr); + priv->irq_tag++; + priv->irq_tag &= 0xf; + + /* write the next IRQ tag for this CAN */ + pciefd_can_writereg(priv, priv->irq_tag, PCIEFD_REG_CAN_RX_CTL_ACK); +} + +/* IRQ handler */ 
+static int pciefd_irq_handler(rtdm_irq_t *irq_handle) +{ + struct pciefd_can *priv = rtdm_irq_get_arg(irq_handle, void); + struct pciefd_rx_dma *rx_dma = priv->rx_dma_vaddr; + + /* INTA mode only, dummy read to sync with PCIe transaction */ + if (!pci_dev_msi_enabled(priv->board->pci_dev)) + (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1); + + /* read IRQ status from the first 32-bit of the Rx DMA area */ + priv->irq_status = le32_to_cpu(rx_dma->irq_status); + + /* check if this (shared) IRQ is for this CAN */ + if (pciefd_irq_tag(priv->irq_status) != priv->irq_tag) + return RTDM_IRQ_NONE; + + /* handle rx messages (if any) */ + peak_canfd_handle_msgs_list(&priv->ucan, + rx_dma->msg, + pciefd_irq_rx_cnt(priv->irq_status)); + + /* handle tx link interrupt (if any) */ + if (pciefd_irq_is_lnk(priv->irq_status)) { + rtdm_lock_get(&priv->tx_lock); + priv->tx_pages_free++; + rtdm_lock_put(&priv->tx_lock); + + /* Wake up a sender */ + rtdm_sem_up(&priv->ucan.rdev->tx_sem); + } + + /* re-enable Rx DMA transfer for this CAN */ + pciefd_can_ack_rx_dma(priv); + + return RTDM_IRQ_HANDLED; +} + +/* initialize structures used for sending CAN frames */ +static int pciefd_enable_tx_path(struct peak_canfd_priv *ucan) +{ + struct pciefd_can *priv = (struct pciefd_can *)ucan; + int i; + + /* initialize the Tx pages descriptors */ + priv->tx_pages_free = PCIEFD_TX_PAGE_COUNT - 1; + priv->tx_page_index = 0; + + priv->tx_pages[0].vbase = priv->tx_dma_vaddr; + priv->tx_pages[0].lbase = priv->tx_dma_laddr; + + for (i = 0; i < PCIEFD_TX_PAGE_COUNT; i++) { + priv->tx_pages[i].offset = 0; + priv->tx_pages[i].size = PCIEFD_TX_PAGE_SIZE - + sizeof(struct pciefd_tx_link); + if (i) { + priv->tx_pages[i].vbase = + priv->tx_pages[i - 1].vbase + + PCIEFD_TX_PAGE_SIZE; + priv->tx_pages[i].lbase = + priv->tx_pages[i - 1].lbase + + PCIEFD_TX_PAGE_SIZE; + } + } + + /* setup Tx DMA addresses into IP core */ + pciefd_can_setup_tx_dma(priv); + + /* start (TX_RST=0) Tx Path */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, + PCIEFD_REG_CAN_TX_CTL_CLR); + + return 0; +} + +/* board specific command pre-processing */ +static int pciefd_pre_cmd(struct peak_canfd_priv *ucan) +{ + struct pciefd_can *priv = (struct pciefd_can *)ucan; + u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd); + + /* pre-process command */ + switch (cmd) { + case PUCAN_CMD_NORMAL_MODE: + case PUCAN_CMD_LISTEN_ONLY_MODE: + + if (ucan->rdev->state == CAN_STATE_BUS_OFF) + break; + + /* setup Rx DMA address */ + pciefd_can_setup_rx_dma(priv); + + /* setup max count of msgs per IRQ */ + pciefd_can_writereg(priv, (irqtl << 8) | irqcl, + PCIEFD_REG_CAN_RX_CTL_WRT); + + /* clear DMA RST for Rx (Rx start) */ + pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, + PCIEFD_REG_CAN_RX_CTL_CLR); + + /* reset timestamps */ + pciefd_can_writereg(priv, !CANFD_MISC_TS_RST, + PCIEFD_REG_CAN_MISC); + + /* do an initial ACK */ + pciefd_can_ack_rx_dma(priv); + + /* enable IRQ for this CAN after having set next irq_tag */ + pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, + PCIEFD_REG_CAN_RX_CTL_SET); + + /* Tx path will be setup as soon as RX_BARRIER is received */ + break; + default: + break; + } + + return 0; +} + +/* write a command */ +static int pciefd_write_cmd(struct peak_canfd_priv *ucan) +{ + struct pciefd_can *priv = (struct pciefd_can *)ucan; + unsigned long flags; + + /* 64-bit command must be atomic */ + rtdm_lock_get_irqsave(&priv->board->cmd_lock, flags); + + pciefd_can_writereg(priv, *(u32 *)ucan->cmd_buffer, + PCIEFD_REG_CAN_CMD_PORT_L); + pciefd_can_writereg(priv, 
*(u32 *)(ucan->cmd_buffer + 4),
+			    PCIEFD_REG_CAN_CMD_PORT_H);
+
+	rtdm_lock_put_irqrestore(&priv->board->cmd_lock, flags);
+
+	return 0;
+}
+
+/* board specific command post-processing */
+static int pciefd_post_cmd(struct peak_canfd_priv *ucan)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd);
+
+	switch (cmd) {
+	case PUCAN_CMD_RESET_MODE:
+
+		if (ucan->rdev->state == CAN_STATE_STOPPED)
+			break;
+
+		/* controller now in reset mode: disable IRQ for this CAN */
+		pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+				    PCIEFD_REG_CAN_RX_CTL_CLR);
+
+		/* stop and reset DMA addresses in Tx/Rx engines */
+		pciefd_can_clear_tx_dma(priv);
+		pciefd_can_clear_rx_dma(priv);
+
+		/* wait for above commands to complete (read cycle) */
+		(void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
+
+		ucan->rdev->state = CAN_STATE_STOPPED;
+
+		break;
+	}
+
+	return 0;
+}
+
+/* allocate enough room into the Tx dma area to store a CAN message */
+static void *pciefd_alloc_tx_msg(struct peak_canfd_priv *ucan, u16 msg_size,
+				 int *room_left)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+	unsigned long flags;
+	void *msg;
+
+	rtdm_lock_get_irqsave(&priv->tx_lock, flags);
+
+	if (page->offset + msg_size > page->size) {
+		struct pciefd_tx_link *lk;
+
+		/* not enough space in this page: try another one */
+		if (!priv->tx_pages_free) {
+			rtdm_lock_put_irqrestore(&priv->tx_lock, flags);
+
+			/* Tx overflow */
+			return NULL;
+		}
+
+		priv->tx_pages_free--;
+
+		/* keep address of the very last free slot of current page */
+		lk = page->vbase + page->offset;
+
+		/* next, move on a new free page */
+		priv->tx_page_index = (priv->tx_page_index + 1) %
+				      PCIEFD_TX_PAGE_COUNT;
+		page = priv->tx_pages + priv->tx_page_index;
+
+		/* put link record to this new page at the end of prev one */
+		lk->size = cpu_to_le16(sizeof(*lk));
+		lk->type = cpu_to_le16(CANFD_MSG_LNK_TX);
+		lk->laddr_lo = cpu_to_le32(page->lbase);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		lk->laddr_hi = cpu_to_le32(page->lbase >> 32);
+#else
+		lk->laddr_hi = 0;
+#endif
+		/* next msgs will be put from the beginning of this new page */
+		page->offset = 0;
+	}
+
+	*room_left = priv->tx_pages_free * page->size;
+
+	rtdm_lock_put_irqrestore(&priv->tx_lock, flags);
+
+	msg = page->vbase + page->offset;
+
+	/* give back room left in the tx ring */
+	*room_left += page->size - (page->offset + msg_size);
+
+	return msg;
+}
+
+/* tell the IP core that a frame has been written into the Tx DMA area */
+static int pciefd_write_tx_msg(struct peak_canfd_priv *ucan,
+			       struct pucan_tx_msg *msg)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+
+	/* this slot is now reserved for writing the frame */
+	page->offset += le16_to_cpu(msg->size);
+
+	/* tell the board a frame has been written in Tx DMA area */
+	pciefd_can_writereg(priv, 1, PCIEFD_REG_CAN_TX_REQ_ACC);
+
+	return 0;
+}
+
+/* probe for CAN channel number #pciefd_board->can_count */
+static int pciefd_can_probe(struct pciefd_board *pciefd)
+{
+	struct rtcan_device *rdev;
+	struct pciefd_can *priv;
+	u32 clk;
+	int err;
+
+	/* allocate the RTCAN object */
+	rdev = alloc_peak_canfd_dev(sizeof(*priv), pciefd->can_count);
+	if (!rdev) {
+		dev_err(&pciefd->pci_dev->dev,
+			"failed to alloc RTCAN device object\n");
+		goto failure;
+	}
+
+	/* fill-in board specific parts */
+	rdev->board_name = pciefd_board_name;
+
+	/*
fill-in rtcan private object */ + priv = rdev->priv; + + /* setup PCIe-FD own callbacks */ + priv->ucan.pre_cmd = pciefd_pre_cmd; + priv->ucan.write_cmd = pciefd_write_cmd; + priv->ucan.post_cmd = pciefd_post_cmd; + priv->ucan.enable_tx_path = pciefd_enable_tx_path; + priv->ucan.alloc_tx_msg = pciefd_alloc_tx_msg; + priv->ucan.write_tx_msg = pciefd_write_tx_msg; + + /* setup PCIe-FD own command buffer */ + priv->ucan.cmd_buffer = &priv->pucan_cmd; + priv->ucan.cmd_maxlen = sizeof(priv->pucan_cmd); + + priv->board = pciefd; + + /* CAN config regs block address */ + priv->reg_base = pciefd->reg_base + PCIEFD_CANX_OFF(priv->ucan.index); + rdev->base_addr = (unsigned long)priv->reg_base; + + /* allocate non-cacheable DMA'able 4KB memory area for Rx */ + priv->rx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev, + PCIEFD_RX_DMA_SIZE, + &priv->rx_dma_laddr, + GFP_KERNEL); + if (!priv->rx_dma_vaddr) { + dev_err(&pciefd->pci_dev->dev, + "Rx dmam_alloc_coherent(%u) failure\n", + PCIEFD_RX_DMA_SIZE); + goto err_free_rtdev; + } + + /* allocate non-cacheable DMA'able 4KB memory area for Tx */ + priv->tx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev, + PCIEFD_TX_DMA_SIZE, + &priv->tx_dma_laddr, + GFP_KERNEL); + if (!priv->tx_dma_vaddr) { + dev_err(&pciefd->pci_dev->dev, + "Tx dmam_alloc_coherent(%u) failure\n", + PCIEFD_TX_DMA_SIZE); + goto err_free_rtdev; + } + + /* CAN clock in RST mode */ + pciefd_can_writereg(priv, CANFD_MISC_TS_RST, PCIEFD_REG_CAN_MISC); + + /* read current clock value */ + clk = pciefd_can_readreg(priv, PCIEFD_REG_CAN_CLK_SEL); + switch (clk) { + case CANFD_CLK_SEL_20MHZ: + priv->ucan.rdev->can_sys_clock = 20 * 1000 * 1000; + break; + case CANFD_CLK_SEL_24MHZ: + priv->ucan.rdev->can_sys_clock = 24 * 1000 * 1000; + break; + case CANFD_CLK_SEL_30MHZ: + priv->ucan.rdev->can_sys_clock = 30 * 1000 * 1000; + break; + case CANFD_CLK_SEL_40MHZ: + priv->ucan.rdev->can_sys_clock = 40 * 1000 * 1000; + break; + case CANFD_CLK_SEL_60MHZ: + priv->ucan.rdev->can_sys_clock = 60 * 1000 * 1000; + break; + default: + pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ, + PCIEFD_REG_CAN_CLK_SEL); + + fallthrough; + case CANFD_CLK_SEL_80MHZ: + priv->ucan.rdev->can_sys_clock = 80 * 1000 * 1000; + break; + } + +#ifdef PCIEFD_USES_MSI + priv->irq = (pciefd->irq_flags & RTDM_IRQTYPE_SHARED) ? 
+ pciefd->pci_dev->irq : + pci_irq_vector(pciefd->pci_dev, priv->ucan.index); +#else + priv->irq = pciefd->pci_dev->irq; +#endif + + /* setup irq handler */ + err = rtdm_irq_request(&rdev->irq_handle, + priv->irq, + pciefd_irq_handler, + pciefd->irq_flags, + DRV_NAME, + priv); + if (err) { + dev_err(&pciefd->pci_dev->dev, + "rtdm_irq_request(IRQ%u) failure err %d\n", + priv->irq, err); + goto err_free_rtdev; + } + + err = rtcan_dev_register(rdev); + if (err) { + dev_err(&pciefd->pci_dev->dev, + "couldn't register RTCAN device: %d\n", err); + goto err_free_irq; + } + + rtdm_lock_init(&priv->tx_lock); + + /* save the object address in the board structure */ + pciefd->can[pciefd->can_count] = priv; + + dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n", + rdev->name, priv->reg_base, priv->irq); + + return 0; + +err_free_irq: + rtdm_irq_free(&rdev->irq_handle); + +err_free_rtdev: + rtcan_dev_free(rdev); + +failure: + return -ENOMEM; +} + +/* wakeup all RT tasks that are blocked on read */ +static void pciefd_can_unlock_recv_tasks(struct rtcan_device *rdev) +{ + struct rtcan_recv *recv_listener = rdev->recv_list; + + while (recv_listener) { + struct rtcan_socket *sock = recv_listener->sock; + + /* wakeup any rx task */ + rtdm_sem_destroy(&sock->recv_sem); + + recv_listener = recv_listener->next; + } +} + +/* remove a CAN-FD channel by releasing all of its resources */ +static void pciefd_can_remove(struct pciefd_can *priv) +{ + struct rtcan_device *rdev = priv->ucan.rdev; + + /* unlock any tasks that wait for read on a socket bound to this CAN */ + pciefd_can_unlock_recv_tasks(rdev); + + /* in case the driver is removed when the interface is UP + * (device MUST be closed before being unregistered) + */ + rdev->do_set_mode(rdev, CAN_MODE_STOP, NULL); + + rtcan_dev_unregister(rdev); + rtdm_irq_disable(&rdev->irq_handle); + rtdm_irq_free(&rdev->irq_handle); + rtcan_dev_free(rdev); +} + +/* remove all CAN-FD channels by releasing their own resources */ +static void pciefd_can_remove_all(struct pciefd_board *pciefd) +{ + while (pciefd->can_count > 0) + pciefd_can_remove(pciefd->can[--pciefd->can_count]); +} + +/* probe for the entire device */ +static int peak_pciefd_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct pciefd_board *pciefd; + int err, can_count; + u16 sub_sys_id; + u8 hw_ver_major; + u8 hw_ver_minor; + u8 hw_ver_sub; + u32 v2; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto err_disable_pci; + + /* the number of channels depends on sub-system id */ + err = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sub_sys_id); + if (err) + goto err_release_regions; + + dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n", + pdev->vendor, pdev->device, sub_sys_id); + + if (sub_sys_id >= 0x0012) + can_count = 4; + else if (sub_sys_id >= 0x0010) + can_count = 3; + else if (sub_sys_id >= 0x0004) + can_count = 2; + else + can_count = 1; + + /* allocate board structure object */ + pciefd = devm_kzalloc(&pdev->dev, struct_size(pciefd, can, can_count), + GFP_KERNEL); + if (!pciefd) { + err = -ENOMEM; + goto err_release_regions; + } + + /* initialize the board structure */ + pciefd->pci_dev = pdev; + rtdm_lock_init(&pciefd->cmd_lock); + + /* save the PCI BAR0 virtual address for further system regs access */ + pciefd->reg_base = pci_iomap(pdev, 0, PCIEFD_BAR0_SIZE); + if (!pciefd->reg_base) { + dev_err(&pdev->dev, "failed to map PCI resource #0\n"); + err = -ENOMEM; + goto err_release_regions; + 
} + + /* read the firmware version number */ + v2 = pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER2); + + hw_ver_major = (v2 & 0x0000f000) >> 12; + hw_ver_minor = (v2 & 0x00000f00) >> 8; + hw_ver_sub = (v2 & 0x000000f0) >> 4; + + dev_info(&pdev->dev, + "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, + hw_ver_major, hw_ver_minor, hw_ver_sub); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + /* DMA logic doesn't handle mix of 32-bit and 64-bit logical addresses + * in fw <= 3.2.x + */ + if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) < + PCIEFD_FW_VERSION(3, 3, 0)) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + dev_warn(&pdev->dev, + "warning: can't set DMA mask %llxh (err %d)\n", + DMA_BIT_MASK(32), err); + } +#endif + + /* default interrupt mode is: shared INTx */ + pciefd->irq_flags = RTDM_IRQTYPE_SHARED; + +#ifdef PCIEFD_USES_MSI + if (usemsi) { + err = pci_msi_vec_count(pdev); + if (err > 0) { + int msi_maxvec = err; + + err = pci_alloc_irq_vectors_affinity(pdev, can_count, + msi_maxvec, + PCI_IRQ_MSI, + NULL); + dev_info(&pdev->dev, + "MSI[%u..%u] enabling status: %d\n", + can_count, msi_maxvec, err); + + /* if didn't get the requested count of MSI, fall back + * to INTx + */ + if (err >= can_count) + pciefd->irq_flags &= ~RTDM_IRQTYPE_SHARED; + else if (err >= 0) + pci_free_irq_vectors(pdev); + } + } +#endif + + /* stop system clock */ + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, + PCIEFD_REG_SYS_CTL_CLR); + + pci_set_master(pdev); + + /* create now the corresponding channels objects */ + while (pciefd->can_count < can_count) { + err = pciefd_can_probe(pciefd); + if (err) + goto err_free_canfd; + + pciefd->can_count++; + } + + /* set system timestamps counter in RST mode */ + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST, + PCIEFD_REG_SYS_CTL_SET); + + /* wait a bit (read cycle) */ + (void)pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER1); + + /* free all clocks */ + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST, + PCIEFD_REG_SYS_CTL_CLR); + + /* start system clock */ + pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, + PCIEFD_REG_SYS_CTL_SET); + + /* remember the board structure address in the device user data */ + pci_set_drvdata(pdev, pciefd); + + return 0; + +err_free_canfd: + pciefd_can_remove_all(pciefd); + +#ifdef PCIEFD_USES_MSI + pci_free_irq_vectors(pdev); +#endif + pci_iounmap(pdev, pciefd->reg_base); + +err_release_regions: + pci_release_regions(pdev); + +err_disable_pci: + pci_disable_device(pdev); + + /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while + * the probe() function must return a negative errno in case of failure + * (err is unchanged if negative) + */ + return pcibios_err_to_errno(err); +} + +/* free the board structure object, as well as its resources: */ +static void peak_pciefd_remove(struct pci_dev *pdev) +{ + struct pciefd_board *pciefd = pci_get_drvdata(pdev); + + /* release CAN-FD channels resources */ + pciefd_can_remove_all(pciefd); + +#ifdef PCIEFD_USES_MSI + pci_free_irq_vectors(pdev); +#endif + pci_iounmap(pdev, pciefd->reg_base); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver rtcan_peak_pciefd_driver = { + .name = DRV_NAME, + .id_table = peak_pciefd_tbl, + .probe = peak_pciefd_probe, + .remove = peak_pciefd_remove, +}; + +static int __init rtcan_peak_pciefd_init(void) +{ + if (!realtime_core_enabled()) + return 0; + + return pci_register_driver(&rtcan_peak_pciefd_driver); +} + +static void __exit rtcan_peak_pciefd_exit(void) +{ 
+ if (realtime_core_enabled()) + pci_unregister_driver(&rtcan_peak_pciefd_driver); +} + +module_init(rtcan_peak_pciefd_init); +module_exit(rtcan_peak_pciefd_exit); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c new file mode 100644 index 0000000..da64be7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c @@ -0,0 +1,305 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * Derived from RTnet project file stack/rtdev.c: + * + * Copyright (C) 1999 Lineo, Inc + * 1999, 2002 David A. Schleef <ds@schleef.org> + * 2002 Ulrich Marx <marx@kammer.uni-hannover.de> + * 2003-2005 Jan Kiszka <jan.kiszka@web.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/spinlock.h> +#include <linux/if.h> +#include <linux/if_arp.h> +#include <linux/netdevice.h> +#include <linux/module.h> + +#include "rtcan_internal.h" +#include "rtcan_dev.h" + + +static struct rtcan_device *rtcan_devices[RTCAN_MAX_DEVICES]; +static DEFINE_RTDM_LOCK(rtcan_devices_rt_lock); + +DEFINE_SEMAPHORE(rtcan_devices_nrt_lock); + +/* Spinlock for all reception lists and also for some members in + * struct rtcan_socket */ +DEFINE_RTDM_LOCK(rtcan_socket_lock); + +/* Spinlock for all reception lists and also for some members in + * struct rtcan_socket */ +DEFINE_RTDM_LOCK(rtcan_recv_list_lock); + +static inline struct rtcan_device *__rtcan_dev_get_by_name(const char *name) +{ + int i; + struct rtcan_device *dev; + + + for (i = 0; i < RTCAN_MAX_DEVICES; i++) { + dev = rtcan_devices[i]; + if ((dev != NULL) && (strncmp(dev->name, name, IFNAMSIZ) == 0)) + return dev; + } + return NULL; +} + + +struct rtcan_device *rtcan_dev_get_by_name(const char *name) +{ + struct rtcan_device *dev; +#ifdef RTCAN_USE_REFCOUNT + rtdm_lockctx_t context; +#endif + + +#ifdef RTCAN_USE_REFCOUNT + rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context); +#endif + + dev = __rtcan_dev_get_by_name(name); + +#ifdef RTCAN_USE_REFCOUNT + if (dev != NULL) + atomic_inc(&dev->refcount); + rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context); +#endif + + return dev; +} + + +static inline struct rtcan_device *__rtcan_dev_get_by_index(int ifindex) +{ + return rtcan_devices[ifindex - 1]; +} + + +struct rtcan_device *rtcan_dev_get_by_index(int ifindex) +{ + struct rtcan_device *dev; +#ifdef RTCAN_USE_REFCOUNT + rtdm_lockctx_t context; +#endif + + + if ((ifindex <= 0) || (ifindex > RTCAN_MAX_DEVICES)) + return NULL; + +#ifdef RTCAN_USE_REFCOUNT + rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context); +#endif + + dev = __rtcan_dev_get_by_index(ifindex); + +#ifdef RTCAN_USE_REFCOUNT + if (dev != NULL) + atomic_inc(&dev->refcount); + rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context); +#endif + + return dev; +} + + +void rtcan_dev_alloc_name(struct rtcan_device *dev, const char *mask) 
+{ + char buf[IFNAMSIZ]; + struct rtcan_device *tmp; + int i; + + + for (i = 0; i < RTCAN_MAX_DEVICES; i++) { + ksformat(buf, IFNAMSIZ, mask, i); + if ((tmp = rtcan_dev_get_by_name(buf)) == NULL) { + strncpy(dev->name, buf, IFNAMSIZ); + break; + } +#ifdef RTCAN_USE_REFCOUNT + else + rtcan_dev_dereference(tmp); +#endif + } +} + + +struct rtcan_device *rtcan_dev_alloc(int sizeof_priv, int sizeof_board_priv) +{ + struct rtcan_device *dev; + struct rtcan_recv *recv_list_elem; + int alloc_size; + int j; + + + alloc_size = sizeof(*dev) + sizeof_priv + sizeof_board_priv; + + dev = (struct rtcan_device *)kmalloc(alloc_size, GFP_KERNEL); + if (dev == NULL) { + printk(KERN_ERR "rtcan: cannot allocate rtcan device\n"); + return NULL; + } + + memset(dev, 0, alloc_size); + + sema_init(&dev->nrt_lock, 1); + + rtdm_lock_init(&dev->device_lock); + + /* Init TX Semaphore, will be destroyed forthwith + * when setting stop mode */ + rtdm_sem_init(&dev->tx_sem, 0); +#ifdef RTCAN_USE_REFCOUNT + atomic_set(&dev->refcount, 0); +#endif + + /* Initialize receive list */ + dev->empty_list = recv_list_elem = dev->receivers; + for (j = 0; j < RTCAN_MAX_RECEIVERS - 1; j++, recv_list_elem++) + recv_list_elem->next = recv_list_elem + 1; + recv_list_elem->next = NULL; + dev->free_entries = RTCAN_MAX_RECEIVERS; + + if (sizeof_priv) + dev->priv = (void *)((unsigned long)dev + sizeof(*dev)); + if (sizeof_board_priv) + dev->board_priv = (void *)((unsigned long)dev + sizeof(*dev) + sizeof_priv); + + return dev; +} + +void rtcan_dev_free (struct rtcan_device *dev) +{ + if (dev != NULL) { + rtdm_sem_destroy(&dev->tx_sem); + kfree(dev); + } +} + + +static inline int __rtcan_dev_new_index(void) +{ + int i; + + + for (i = 0; i < RTCAN_MAX_DEVICES; i++) + if (rtcan_devices[i] == NULL) + return i+1; + + return -ENOMEM; +} + + +int rtcan_dev_register(struct rtcan_device *dev) +{ + rtdm_lockctx_t context; + int ret; + + down(&rtcan_devices_nrt_lock); + + if ((ret = __rtcan_dev_new_index()) < 0) { + up(&rtcan_devices_nrt_lock); + return ret; + } + dev->ifindex = ret; + + if (strchr(dev->name,'%') != NULL) + rtcan_dev_alloc_name(dev, dev->name); + + if (__rtcan_dev_get_by_name(dev->name) != NULL) { + up(&rtcan_devices_nrt_lock); + return -EEXIST; + } + + rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context); + + rtcan_devices[dev->ifindex - 1] = dev; + + rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context); + rtcan_dev_create_proc(dev); + + up(&rtcan_devices_nrt_lock); + + printk("rtcan: registered %s\n", dev->name); + + return 0; +} + + +int rtcan_dev_unregister(struct rtcan_device *dev) +{ + rtdm_lockctx_t context; + + + RTCAN_ASSERT(dev->ifindex != 0, + printk("RTCAN: device %s/%p was not registered\n", + dev->name, dev); return -ENODEV;); + + /* If device is running, close it first. 
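+	 * Unregistering an operating device is refused with -EBUSY.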
*/ + if (CAN_STATE_OPERATING(dev->state)) + return -EBUSY; + + down(&rtcan_devices_nrt_lock); + + rtcan_dev_remove_proc(dev); + + rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context); + +#ifdef RTCAN_USE_REFCOUNT + while (atomic_read(&dev->refcount) > 0) { + rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context); + up(&rtcan_devices_nrt_lock); + + RTCAN_DBG("RTCAN: unregistering %s deferred (refcount = %d)\n", + dev->name, atomic_read(&dev->refcount)); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1*HZ); /* wait a second */ + + down(&rtcan_devices_nrt_lock); + rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context); + } +#endif + rtcan_devices[dev->ifindex - 1] = NULL; + + rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context); + up(&rtcan_devices_nrt_lock); + +#ifdef RTCAN_USE_REFCOUNT + RTCAN_ASSERT(atomic_read(&dev->refcount) == 0, + printk("RTCAN: dev reference counter < 0!\n");); +#endif + + printk("RTCAN: unregistered %s\n", dev->name); + + return 0; +} + + +EXPORT_SYMBOL_GPL(rtcan_socket_lock); +EXPORT_SYMBOL_GPL(rtcan_recv_list_lock); + +EXPORT_SYMBOL_GPL(rtcan_dev_free); + +EXPORT_SYMBOL_GPL(rtcan_dev_alloc); +EXPORT_SYMBOL_GPL(rtcan_dev_alloc_name); + +EXPORT_SYMBOL_GPL(rtcan_dev_register); +EXPORT_SYMBOL_GPL(rtcan_dev_unregister); + +EXPORT_SYMBOL_GPL(rtcan_dev_get_by_name); +EXPORT_SYMBOL_GPL(rtcan_dev_get_by_index); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h new file mode 100644 index 0000000..3642e92 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * Derived from RTnet project file stack/include/rtdev.h: + * + * Copyright (C) 1999 Lineo, Inc + * 1999, 2002 David A. Schleef <ds@schleef.org> + * 2003-2005 Jan Kiszka <jan.kiszka@web.de> + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */
+
+#ifndef __RTCAN_DEV_H_
+#define __RTCAN_DEV_H_
+
+
+#ifdef __KERNEL__
+
+#include <asm/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/semaphore.h>
+
+#include "rtcan_list.h"
+
+
+/* Number of CAN devices the driver can handle */
+#define RTCAN_MAX_DEVICES	CONFIG_XENO_DRIVERS_CAN_MAX_DEVICES
+
+/* Maximum number of single filters per controller which can be registered
+ * for reception at the same time using bind() */
+#define RTCAN_MAX_RECEIVERS	CONFIG_XENO_DRIVERS_CAN_MAX_RECEIVERS
+
+/* Suppress handling of refcount if module support is not enabled
+ * or modules cannot be unloaded */
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_MODULE_UNLOAD)
+#define RTCAN_USE_REFCOUNT
+#endif
+
+/*
+ * CAN hardware-dependent bit-timing constants
+ *
+ * Used for calculating and checking bit-timing parameters
+ */
+struct can_bittiming_const {
+	char name[16];		/* Name of the CAN controller hardware */
+	__u32 tseg1_min;	/* Time segment 1 = prop_seg + phase_seg1 */
+	__u32 tseg1_max;
+	__u32 tseg2_min;	/* Time segment 2 = phase_seg2 */
+	__u32 tseg2_max;
+	__u32 sjw_max;		/* Synchronisation jump width */
+	__u32 brp_min;		/* Bit-rate prescaler */
+	__u32 brp_max;
+	__u32 brp_inc;
+};
+
+struct rtcan_device {
+	unsigned int version;
+
+	char name[IFNAMSIZ];
+
+	char *ctrl_name;	/* Name of CAN controller */
+	char *board_name;	/* Name of CAN board */
+
+	unsigned long base_addr;	/* device I/O address */
+	rtdm_irq_t irq_handle;		/* RTDM IRQ handle */
+
+	int ifindex;
+#ifdef RTCAN_USE_REFCOUNT
+	atomic_t refcount;
+#endif
+
+	void *priv;		/* pointer to chip private data */
+
+	void *board_priv;	/* pointer to board private data */
+
+	struct semaphore nrt_lock;	/* non-real-time locking */
+
+	/* Spinlock for all devices (but not for all attributes) and also for HW
+	 * access to all CAN controllers
+	 */
+	rtdm_lock_t device_lock;
+
+	/* Acts as a mutex allowing only one sender to write to the CAN
+	 * controller simultaneously. Created when the controller goes into
+	 * operating mode, destroyed if it goes into reset mode. */
+	rtdm_sem_t tx_sem;
+
+	/* Clock frequency feeding the CAN controller. Protected by
+	 * device_lock in all device structures. */
+	unsigned int can_sys_clock;
+
+	/* Baudrate of this device. Protected by device_lock in all device
+	 * structures. */
+	can_baudrate_t baudrate;
+
+	struct can_bittime bit_time;
+	const struct can_bittiming_const *bittiming_const;
+
+	/* State which the controller is in. Protected by device_lock in all
+	 * device structures. */
+	can_state_t state;
+
+	/* State which the controller was in before sleeping. Protected by
+	 * device_lock in all device structures. */
+	can_state_t state_before_sleep;
+
+	/* Controller specific settings. Protected by device_lock in all
+	 * device structures. */
+	can_ctrlmode_t ctrl_mode;
+
+	/* Device operations */
+	int (*hard_start_xmit)(struct rtcan_device *dev,
+			       struct can_frame *frame);
+	int (*do_set_mode)(struct rtcan_device *dev,
+			   can_mode_t mode,
+			   rtdm_lockctx_t *lock_ctx);
+	can_state_t (*do_get_state)(struct rtcan_device *dev);
+	int (*do_set_bit_time)(struct rtcan_device *dev,
+			       struct can_bittime *bit_time,
+			       rtdm_lockctx_t *lock_ctx);
+#ifdef CONFIG_XENO_DRIVERS_CAN_BUS_ERR
+	void (*do_enable_bus_err)(struct rtcan_device *dev);
+#endif
+
+	/* Reception list head. This list contains all filters which have been
+	 * registered via a bind call. */
+	struct rtcan_recv *recv_list;
+
+	/* Empty list head. This list contains all empty entries not needed
+	 * by the reception list and therefore is disjoint from it.
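+	 * Entries migrate between this list and the reception list as
+	 * sockets bind and unbind filters.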
*/ + struct rtcan_recv *empty_list; + + /* Preallocated array for the list entries. To increase cache + * locality all list elements are kept in this array. */ + struct rtcan_recv receivers[RTCAN_MAX_RECEIVERS]; + + /* Indicates the length of the empty list */ + int free_entries; + + /* A few statistics counters */ + unsigned int tx_count; + unsigned int rx_count; + unsigned int err_count; + +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_root; +#endif +#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK + struct rtcan_skb tx_skb; + struct rtcan_socket *tx_socket; +#endif /* CONFIG_XENO_DRIVERS_CAN_LOOPBACK */ +}; + + +extern struct semaphore rtcan_devices_nrt_lock; + + +void rtcan_dev_free(struct rtcan_device *dev); + +int rtcan_dev_register(struct rtcan_device *dev); +int rtcan_dev_unregister(struct rtcan_device *dev); + +struct rtcan_device *rtcan_dev_alloc(int sizeof_priv, int sizeof_board_priv); +void rtcan_dev_alloc_name (struct rtcan_device *dev, const char *name_mask); + +struct rtcan_device *rtcan_dev_get_by_name(const char *if_name); +struct rtcan_device *rtcan_dev_get_by_index(int ifindex); + +#ifdef RTCAN_USE_REFCOUNT +#define rtcan_dev_reference(dev) atomic_inc(&(dev)->refcount) +#define rtcan_dev_dereference(dev) atomic_dec(&(dev)->refcount) +#else +#define rtcan_dev_reference(dev) do {} while(0) +#define rtcan_dev_dereference(dev) do {} while(0) +#endif + +#ifdef CONFIG_PROC_FS +int rtcan_dev_create_proc(struct rtcan_device* dev); +void rtcan_dev_remove_proc(struct rtcan_device* dev); +#else /* !CONFIG_PROC_FS */ +static inline int rtcan_dev_create_proc(struct rtcan_device* dev) +{ + return 0; +} +static inline void rtcan_dev_remove_proc(struct rtcan_device* dev) { } +#endif /* !CONFIG_PROC_FS */ + +#endif /* __KERNEL__ */ + +#endif /* __RTCAN_DEV_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c new file mode 100644 index 0000000..3348e8c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c @@ -0,0 +1,1538 @@ +/* + * RTDM-based FLEXCAN CAN controller driver + * + * Rebased on linux 4.14.58 flexcan driver: + * Copyright (c) 2018 Philippe Gerum <rpm@xenomai.org> + * + * Original port to RTDM: + * Copyright (c) 2012 Wolfgang Grandegger <wg@denx.de> + * + * Copyright (c) 2005-2006 Varma Electronics Oy + * Copyright (c) 2009 Sascha Hauer, Pengutronix + * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * Copyright (c) 2014 David Jander, Protonic Holland + * + * Based on code originally by Andrey Volkov <avolkov@varma-el.com> + * + * LICENCE: + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <rtdm/driver.h> +#include <rtdm/can.h> +#include "rtcan_dev.h" +#include "rtcan_raw.h" +#include "rtcan_internal.h" +#include <asm/unaligned.h> + +#define DRV_NAME "flexcan" +#define DEV_NAME "rtcan%d" + +#define CAN_MAX_DLC 8 +#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) + +/* 8 for RX fifo and 2 error handling */ +#define FLEXCAN_NAPI_WEIGHT (8 + 2) + +/* FLEXCAN module configuration register (CANMCR) bits */ +#define FLEXCAN_MCR_MDIS BIT(31) +#define FLEXCAN_MCR_FRZ BIT(30) +#define FLEXCAN_MCR_FEN BIT(29) +#define FLEXCAN_MCR_HALT BIT(28) +#define FLEXCAN_MCR_NOT_RDY BIT(27) +#define FLEXCAN_MCR_WAK_MSK BIT(26) +#define FLEXCAN_MCR_SOFTRST BIT(25) +#define FLEXCAN_MCR_FRZ_ACK BIT(24) +#define FLEXCAN_MCR_SUPV BIT(23) +#define FLEXCAN_MCR_SLF_WAK BIT(22) +#define FLEXCAN_MCR_WRN_EN BIT(21) +#define FLEXCAN_MCR_LPM_ACK BIT(20) +#define FLEXCAN_MCR_WAK_SRC BIT(19) +#define FLEXCAN_MCR_DOZE BIT(18) +#define FLEXCAN_MCR_SRX_DIS BIT(17) +#define FLEXCAN_MCR_IRMQ BIT(16) +#define FLEXCAN_MCR_LPRIO_EN BIT(13) +#define FLEXCAN_MCR_AEN BIT(12) +/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */ +#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) +#define FLEXCAN_MCR_IDAM_A (0x0 << 8) +#define FLEXCAN_MCR_IDAM_B (0x1 << 8) +#define FLEXCAN_MCR_IDAM_C (0x2 << 8) +#define FLEXCAN_MCR_IDAM_D (0x3 << 8) + +/* FLEXCAN control register (CANCTRL) bits */ +#define FLEXCAN_CTRL_PRESDIV(x) (((x) & 0xff) << 24) +#define FLEXCAN_CTRL_RJW(x) (((x) & 0x03) << 22) +#define FLEXCAN_CTRL_PSEG1(x) (((x) & 0x07) << 19) +#define FLEXCAN_CTRL_PSEG2(x) (((x) & 0x07) << 16) +#define FLEXCAN_CTRL_BOFF_MSK BIT(15) +#define FLEXCAN_CTRL_ERR_MSK BIT(14) +#define FLEXCAN_CTRL_CLK_SRC BIT(13) +#define FLEXCAN_CTRL_LPB BIT(12) +#define FLEXCAN_CTRL_TWRN_MSK BIT(11) +#define FLEXCAN_CTRL_RWRN_MSK BIT(10) +#define FLEXCAN_CTRL_SMP BIT(7) +#define FLEXCAN_CTRL_BOFF_REC BIT(6) +#define FLEXCAN_CTRL_TSYN BIT(5) +#define FLEXCAN_CTRL_LBUF BIT(4) +#define FLEXCAN_CTRL_LOM BIT(3) +#define FLEXCAN_CTRL_PROPSEG(x) ((x) & 0x07) +#define FLEXCAN_CTRL_ERR_BUS (FLEXCAN_CTRL_ERR_MSK) +#define FLEXCAN_CTRL_ERR_STATE \ + (FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \ + FLEXCAN_CTRL_BOFF_MSK) +#define FLEXCAN_CTRL_ERR_ALL \ + (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE) + +/* FLEXCAN control register 2 (CTRL2) bits */ +#define FLEXCAN_CTRL2_ECRWRE BIT(29) +#define FLEXCAN_CTRL2_WRMFRZ BIT(28) +#define FLEXCAN_CTRL2_RFFN(x) (((x) & 0x0f) << 24) +#define FLEXCAN_CTRL2_TASD(x) (((x) & 0x1f) << 19) +#define FLEXCAN_CTRL2_MRP BIT(18) +#define FLEXCAN_CTRL2_RRS BIT(17) +#define FLEXCAN_CTRL2_EACEN BIT(16) + +/* FLEXCAN memory error control register (MECR) bits */ +#define FLEXCAN_MECR_ECRWRDIS BIT(31) +#define FLEXCAN_MECR_HANCEI_MSK BIT(19) +#define FLEXCAN_MECR_FANCEI_MSK BIT(18) +#define FLEXCAN_MECR_CEI_MSK BIT(16) +#define FLEXCAN_MECR_HAERRIE BIT(15) +#define FLEXCAN_MECR_FAERRIE BIT(14) +#define FLEXCAN_MECR_EXTERRIE BIT(13) +#define FLEXCAN_MECR_RERRDIS BIT(9) +#define FLEXCAN_MECR_ECCDIS BIT(8) +#define FLEXCAN_MECR_NCEFAFRZ BIT(7) + +/* FLEXCAN error and status register (ESR) bits */ +#define FLEXCAN_ESR_TWRN_INT BIT(17) +#define FLEXCAN_ESR_RWRN_INT BIT(16) +#define FLEXCAN_ESR_BIT1_ERR BIT(15) +#define FLEXCAN_ESR_BIT0_ERR BIT(14) +#define FLEXCAN_ESR_ACK_ERR 
BIT(13) +#define FLEXCAN_ESR_CRC_ERR BIT(12) +#define FLEXCAN_ESR_FRM_ERR BIT(11) +#define FLEXCAN_ESR_STF_ERR BIT(10) +#define FLEXCAN_ESR_TX_WRN BIT(9) +#define FLEXCAN_ESR_RX_WRN BIT(8) +#define FLEXCAN_ESR_IDLE BIT(7) +#define FLEXCAN_ESR_TXRX BIT(6) +#define FLEXCAN_EST_FLT_CONF_SHIFT (4) +#define FLEXCAN_ESR_FLT_CONF_MASK (0x3 << FLEXCAN_EST_FLT_CONF_SHIFT) +#define FLEXCAN_ESR_FLT_CONF_ACTIVE (0x0 << FLEXCAN_EST_FLT_CONF_SHIFT) +#define FLEXCAN_ESR_FLT_CONF_PASSIVE (0x1 << FLEXCAN_EST_FLT_CONF_SHIFT) +#define FLEXCAN_ESR_BOFF_INT BIT(2) +#define FLEXCAN_ESR_ERR_INT BIT(1) +#define FLEXCAN_ESR_WAK_INT BIT(0) +#define FLEXCAN_ESR_ERR_BUS \ + (FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \ + FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \ + FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR) +#define FLEXCAN_ESR_ERR_STATE \ + (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT) +#define FLEXCAN_ESR_ERR_ALL \ + (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE) +#define FLEXCAN_ESR_ALL_INT \ + (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \ + FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) + +/* FLEXCAN interrupt flag register (IFLAG) bits */ +/* Errata ERR005829 step7: Reserve first valid MB */ +#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8 +#define FLEXCAN_TX_MB_OFF_FIFO 9 +#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0 +#define FLEXCAN_TX_MB_OFF_TIMESTAMP 1 +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1) +#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST 63 +#define FLEXCAN_RX_MB_TIMESTAMP_COUNT (FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST - \ + FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST + 1) +#define FLEXCAN_IFLAG_MB(x) BIT(x) +#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) +#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) +#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) + +/* FLEXCAN message buffers */ +#define FLEXCAN_MB_CODE_MASK (0xf << 24) +#define FLEXCAN_MB_CODE_RX_BUSY_BIT (0x1 << 24) +#define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) +#define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) +#define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) +#define FLEXCAN_MB_CODE_RX_OVERRUN (0x6 << 24) +#define FLEXCAN_MB_CODE_RX_RANSWER (0xa << 24) + +#define FLEXCAN_MB_CODE_TX_INACTIVE (0x8 << 24) +#define FLEXCAN_MB_CODE_TX_ABORT (0x9 << 24) +#define FLEXCAN_MB_CODE_TX_DATA (0xc << 24) +#define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24) + +#define FLEXCAN_MB_CNT_SRR BIT(22) +#define FLEXCAN_MB_CNT_IDE BIT(21) +#define FLEXCAN_MB_CNT_RTR BIT(20) +#define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) +#define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) + +#define FLEXCAN_TIMEOUT_US (50) + +/* FLEXCAN hardware feature flags + * + * Below is some version info we got: + * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re- + * Filter? connected? Passive detection ception in MB + * MX25 FlexCAN2 03.00.00.00 no no ? no no + * MX28 FlexCAN2 03.00.04.00 yes yes no no no + * MX35 FlexCAN2 03.00.00.00 no no ? no no + * MX53 FlexCAN2 03.00.00.00 yes no no no no + * MX6s FlexCAN3 10.00.12.00 yes yes no no yes + * VF610 FlexCAN3 ? no yes no yes yes? + * + * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 
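+ * Such cores are flagged below with FLEXCAN_QUIRK_BROKEN_WERR_STATE.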
+ */ +#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */ +#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ +#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ +#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ +#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ +#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */ + +/* Structure of the message buffer */ +struct flexcan_mb { + u32 can_ctrl; + u32 can_id; + u32 data[2]; +}; + +/* Structure of the hardware registers */ +struct flexcan_regs { + u32 mcr; /* 0x00 */ + u32 ctrl; /* 0x04 */ + u32 timer; /* 0x08 */ + u32 _reserved1; /* 0x0c */ + u32 rxgmask; /* 0x10 */ + u32 rx14mask; /* 0x14 */ + u32 rx15mask; /* 0x18 */ + u32 ecr; /* 0x1c */ + u32 esr; /* 0x20 */ + u32 imask2; /* 0x24 */ + u32 imask1; /* 0x28 */ + u32 iflag2; /* 0x2c */ + u32 iflag1; /* 0x30 */ + union { /* 0x34 */ + u32 gfwr_mx28; /* MX28, MX53 */ + u32 ctrl2; /* MX6, VF610 */ + }; + u32 esr2; /* 0x38 */ + u32 imeur; /* 0x3c */ + u32 lrfr; /* 0x40 */ + u32 crcr; /* 0x44 */ + u32 rxfgmask; /* 0x48 */ + u32 rxfir; /* 0x4c */ + u32 _reserved3[12]; /* 0x50 */ + struct flexcan_mb mb[64]; /* 0x80 */ + /* FIFO-mode: + * MB + * 0x080...0x08f 0 RX message buffer + * 0x090...0x0df 1-5 reserverd + * 0x0e0...0x0ff 6-7 8 entry ID table + * (mx25, mx28, mx35, mx53) + * 0x0e0...0x2df 6-7..37 8..128 entry ID table + * size conf'ed via ctrl2::RFFN + * (mx6, vf610) + */ + u32 _reserved4[256]; /* 0x480 */ + u32 rximr[64]; /* 0x880 */ + u32 _reserved5[24]; /* 0x980 */ + u32 gfwr_mx6; /* 0x9e0 - MX6 */ + u32 _reserved6[63]; /* 0x9e4 */ + u32 mecr; /* 0xae0 */ + u32 erriar; /* 0xae4 */ + u32 erridpr; /* 0xae8 */ + u32 errippr; /* 0xaec */ + u32 rerrar; /* 0xaf0 */ + u32 rerrdr; /* 0xaf4 */ + u32 rerrsynr; /* 0xaf8 */ + u32 errsr; /* 0xafc */ +}; + +struct flexcan_devtype_data { + u32 quirks; /* quirks needed for different IP cores */ +}; + +struct flexcan_timestamped_frame { + struct rtcan_skb skb; + u32 timestamp; + struct list_head next; +}; + +struct flexcan_priv { + unsigned int irq; + unsigned int mb_first; + unsigned int mb_last; + struct can_bittime bittiming; + struct flexcan_timestamped_frame *ts_frames; + + struct flexcan_regs __iomem *regs; + struct flexcan_mb __iomem *tx_mb; + struct flexcan_mb __iomem *tx_mb_reserved; + u8 tx_mb_idx; + u32 reg_ctrl_default; + u32 reg_imask1_default; + u32 reg_imask2_default; + + struct clk *clk_ipg; + struct clk *clk_per; + const struct flexcan_devtype_data *devtype_data; + struct regulator *reg_xceiver; + + unsigned long bus_errors; +}; + +static const struct flexcan_devtype_data fsl_p1010_devtype_data = { + .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | + FLEXCAN_QUIRK_BROKEN_PERR_STATE, +}; + +static const struct flexcan_devtype_data fsl_imx28_devtype_data = { + .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE, +}; + +static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE, +}; + +static const struct flexcan_devtype_data fsl_vf610_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | + FLEXCAN_QUIRK_BROKEN_PERR_STATE, +}; + +static const struct can_bittiming_const flexcan_bittiming_const = { + .name = DRV_NAME, + .tseg1_min = 4, 
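+	/* The limits below mirror the widths of the CTRL bit-fields
+	 * (PRESDIV, PSEG1/PSEG2, PROPSEG, RJW) defined above. */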
+ .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +/* Abstract off the read/write for arm versus ppc. This + * assumes that PPC uses big-endian registers and everything + * else uses little-endian registers, independent of CPU + * endianness. + */ +#if defined(CONFIG_PPC) +static inline u32 flexcan_read(void __iomem *addr) +{ + return in_be32(addr); +} + +static inline void flexcan_write(u32 val, void __iomem *addr) +{ + out_be32(addr, val); +} +#else +static inline u32 flexcan_read(void __iomem *addr) +{ + return readl(addr); +} + +static inline void flexcan_write(u32 val, void __iomem *addr) +{ + writel(val, addr); +} +#endif + +static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK); + + flexcan_write(reg_ctrl, ®s->ctrl); +} + +static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK); + + flexcan_write(reg_ctrl, ®s->ctrl); +} + +static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) +{ + if (!priv->reg_xceiver) + return 0; + + return regulator_enable(priv->reg_xceiver); +} + +static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) +{ + if (!priv->reg_xceiver) + return 0; + + return regulator_disable(priv->reg_xceiver); +} + +static int flexcan_chip_enable(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + u32 reg; + + reg = flexcan_read(®s->mcr); + reg &= ~FLEXCAN_MCR_MDIS; + flexcan_write(reg, ®s->mcr); + + while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_chip_disable(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + u32 reg; + + reg = flexcan_read(®s->mcr); + reg |= FLEXCAN_MCR_MDIS; + flexcan_write(reg, ®s->mcr); + + while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + udelay(10); + + if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_chip_freeze(struct rtcan_device *dev) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = 1000 * 1000 * 10 / dev->baudrate; + u32 reg; + + reg = flexcan_read(®s->mcr); + reg |= FLEXCAN_MCR_HALT; + flexcan_write(reg, ®s->mcr); + + while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) + udelay(100); + + if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_chip_unfreeze(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + u32 reg; + + reg = flexcan_read(®s->mcr); + reg &= ~FLEXCAN_MCR_HALT; + flexcan_write(reg, ®s->mcr); + + while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) + udelay(10); + + if (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_chip_softreset(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->regs; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + 
flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); + while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST)) + udelay(10); + + if (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_start_xmit(struct rtcan_device *dev, struct can_frame *cf) +{ + const struct flexcan_priv *priv = rtcan_priv(dev); + u32 can_id, data, ctrl; + + ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16); + if (cf->can_id & CAN_EFF_FLAG) { + can_id = cf->can_id & CAN_EFF_MASK; + ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR; + } else { + can_id = (cf->can_id & CAN_SFF_MASK) << 18; + } + + if (cf->can_id & CAN_RTR_FLAG) + ctrl |= FLEXCAN_MB_CNT_RTR; + + if (cf->can_dlc > CAN_MAX_DLC) + cf->can_dlc = CAN_MAX_DLC; + + if (cf->can_dlc > 0) { + data = be32_to_cpup((__be32 *)&cf->data[0]); + flexcan_write(data, &priv->tx_mb->data[0]); + } + if (cf->can_dlc > 4) { + data = be32_to_cpup((__be32 *)&cf->data[4]); + flexcan_write(data, &priv->tx_mb->data[1]); + } + + flexcan_write(can_id, &priv->tx_mb->can_id); + flexcan_write(ctrl, &priv->tx_mb->can_ctrl); + + /* Errata ERR005829 step8: + * Write twice INACTIVE(0x8) code to first MB. + */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &priv->tx_mb_reserved->can_ctrl); + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &priv->tx_mb_reserved->can_ctrl); + + return 0; +} + +static void init_err_skb(struct rtcan_skb *skb) +{ + struct rtcan_rb_frame *cf = &skb->rb_frame; + + skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + CAN_ERR_DLC; + cf->can_id = CAN_ERR_FLAG; + cf->can_dlc = CAN_ERR_DLC; + memset(&cf->data[0], 0, cf->can_dlc); +} + +static void flexcan_irq_bus_err(struct rtcan_device *dev, + u32 reg_esr, struct rtcan_skb *skb) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + struct rtcan_rb_frame *cf = &skb->rb_frame; + + init_err_skb(skb); + + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + if (reg_esr & FLEXCAN_ESR_BIT1_ERR) { + rtcandev_dbg(dev, "BIT1_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_BIT1; + } + if (reg_esr & FLEXCAN_ESR_BIT0_ERR) { + rtcandev_dbg(dev, "BIT0_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_BIT0; + } + if (reg_esr & FLEXCAN_ESR_ACK_ERR) { + rtcandev_dbg(dev, "ACK_ERR irq\n"); + cf->can_id |= CAN_ERR_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + } + if (reg_esr & FLEXCAN_ESR_CRC_ERR) { + rtcandev_dbg(dev, "CRC_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_BIT; + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + } + if (reg_esr & FLEXCAN_ESR_FRM_ERR) { + rtcandev_dbg(dev, "FRM_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_FORM; + } + if (reg_esr & FLEXCAN_ESR_STF_ERR) { + rtcandev_dbg(dev, "STF_ERR irq\n"); + cf->data[2] |= CAN_ERR_PROT_STUFF; + } + + priv->bus_errors++; +} + +struct berr_counter { + u16 txerr; + u16 rxerr; +}; + +static void flexcan_change_state(struct rtcan_device *dev, + struct rtcan_rb_frame *cf, + struct berr_counter *bec, + can_state_t new_state) +{ + switch (dev->state) { + case CAN_STATE_ERROR_ACTIVE: + /* + * from: ERROR_ACTIVE + * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF + * => : there was a warning int + */ + if (new_state >= CAN_STATE_ERROR_WARNING && + new_state <= CAN_STATE_BUS_OFF) { + rtcandev_dbg(dev, "Error Warning IRQ\n"); + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec->txerr > bec->rxerr) ? 
+ CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + } + fallthrough; + case CAN_STATE_ERROR_WARNING: + /* + * from: ERROR_ACTIVE, ERROR_WARNING + * to : ERROR_PASSIVE, BUS_OFF + * => : error passive int + */ + if (new_state >= CAN_STATE_ERROR_PASSIVE && + new_state <= CAN_STATE_BUS_OFF) { + rtcandev_dbg(dev, "Error Passive IRQ\n"); + + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec->txerr > bec->rxerr) ? + CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + } + break; + case CAN_STATE_BUS_OFF: + rtcandev_err(dev, "BUG! " + "hardware recovered automatically from BUS_OFF\n"); + break; + default: + break; + } + + /* process state changes depending on the new state */ + switch (new_state) { + case CAN_STATE_ERROR_ACTIVE: + rtcandev_dbg(dev, "Error Active\n"); + cf->can_id |= CAN_ERR_PROT; + cf->data[2] = CAN_ERR_PROT_ACTIVE; + break; + case CAN_STATE_BUS_OFF: + cf->can_id |= CAN_ERR_BUSOFF; + /* Wake up waiting senders */ + rtdm_sem_destroy(&dev->tx_sem); + break; + default: + break; + } + + dev->state = new_state; +} + +static bool flexcan_irq_state(struct rtcan_device *dev, u32 reg_esr, + struct rtcan_skb *skb) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + enum CAN_STATE new_state, rx_state, tx_state; + struct rtcan_rb_frame *cf = &skb->rb_frame; + struct berr_counter bec; + u32 reg; + int flt; + + reg = flexcan_read(®s->ecr); + bec.txerr = (reg >> 0) & 0xff; + bec.rxerr = (reg >> 8) & 0xff; + + flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; + if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { + tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ? + CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; + rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? + CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; + new_state = max(tx_state, rx_state); + } else + new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ? + CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF; + + /* state hasn't changed */ + if (likely(new_state == dev->state)) + return false; + + init_err_skb(skb); + + flexcan_change_state(dev, cf, &bec, new_state); + + return true; +} + +static unsigned int flexcan_mailbox_read(struct rtcan_device *dev, + struct rtcan_skb *skb, + u32 *timestamp, unsigned int n) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + struct flexcan_mb __iomem *mb = ®s->mb[n]; + u32 reg_ctrl, reg_id, reg_iflag1, code; + struct rtcan_rb_frame *cf = &skb->rb_frame; + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + do { + reg_ctrl = flexcan_read(&mb->can_ctrl); + } while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT); + + /* is this MB empty? 
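+		 * Only the FULL and OVERRUN codes carry a received frame.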
		 */
+		code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
+		if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
+		    (code != FLEXCAN_MB_CODE_RX_OVERRUN))
+			return 0;
+	} else {
+		reg_iflag1 = flexcan_read(&regs->iflag1);
+		if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
+			return 0;
+
+		reg_ctrl = flexcan_read(&mb->can_ctrl);
+	}
+
+	/* extend the timestamp to full 32 bit */
+	*timestamp = reg_ctrl << 16;
+
+	cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
+	reg_id = flexcan_read(&mb->can_id);
+	if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
+		cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		cf->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE;
+
+	if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	else
+		skb->rb_frame_size += cf->can_dlc;
+
+	put_unaligned_be32(flexcan_read(&mb->data[0]), cf->data + 0);
+	put_unaligned_be32(flexcan_read(&mb->data[1]), cf->data + 4);
+
+	cf->can_ifindex = dev->ifindex;
+
+	/* mark as read */
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		/* Clear IRQ */
+		if (n < 32)
+			flexcan_write(BIT(n), &regs->iflag1);
+		else
+			flexcan_write(BIT(n - 32), &regs->iflag2);
+	} else {
+		flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+		flexcan_read(&regs->timer);
+	}
+
+	return 1;
+}
+
+static inline bool flexcan_rx_le(struct flexcan_priv *priv, unsigned int a, unsigned int b)
+{
+	if (priv->mb_first < priv->mb_last)
+		return a <= b;
+
+	return a >= b;
+}
+
+static inline unsigned int flexcan_rx_inc(struct flexcan_priv *priv, unsigned int *val)
+{
+	if (priv->mb_first < priv->mb_last)
+		return (*val)++;
+
+	return (*val)--;
+}
+
+static int flexcan_mailbox_read_timestamp(struct rtcan_device *dev, u64 pending)
+{
+	struct flexcan_timestamped_frame *new, *pos, *tmp;
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct list_head q, *head;
+	int i, count = 0;
+
+	INIT_LIST_HEAD(&q);
+
+	for (i = priv->mb_first;
+	     flexcan_rx_le(priv, i, priv->mb_last);
+	     flexcan_rx_inc(priv, &i)) {
+		if (!(pending & BIT_ULL(i)))
+			continue;
+
+		new = priv->ts_frames + (i - priv->mb_first);
+		if (!flexcan_mailbox_read(dev, &new->skb, &new->timestamp, i))
+			break;
+
+		head = &q;
+		if (list_empty(&q))
+			goto add;
+
+		list_for_each_entry_reverse(pos, &q, next) {
+			/*
+			 * Subtract two u32 and return the result as int,
+			 * to keep the difference steady around the u32
+			 * overflow.
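+			 * This keeps the assembled list ordered oldest-first
+			 * even across a timer wrap.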
+			 */
+			if (((int)(new->timestamp - pos->timestamp)) >= 0) {
+				head = &pos->next;
+				break;
+			}
+		}
+	add:
+		list_add(&new->next, head);
+		count++;
+	}
+
+	if (list_empty(&q))
+		return 0;
+
+	list_for_each_entry_safe(pos, tmp, &q, next)
+		rtcan_rcv(dev, &pos->skb);
+
+	return count;
+}
+
+static void flexcan_mailbox_read_fifo(struct rtcan_device *dev)
+{
+	struct rtcan_skb skb;
+	u32 timestamp;
+
+	for (;;) {
+		if (!flexcan_mailbox_read(dev, &skb, &timestamp, 0))
+			break;
+		rtcan_rcv(dev, &skb);
+	}
+}
+
+static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 iflag1, iflag2;
+
+	iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
+	iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+		~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+
+	return (u64)iflag2 << 32 | iflag1;
+}
+
+static int flexcan_do_rx(struct rtcan_device *dev, u32 reg_iflag1)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	struct rtcan_skb skb;
+	struct rtcan_rb_frame *cf = &skb.rb_frame;
+	bool input = false;
+	u64 reg;
+	int ret;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		while ((reg = flexcan_read_reg_iflag_rx(priv))) {
+			input = true;
+			ret = flexcan_mailbox_read_timestamp(dev, reg);
+			if (!ret)
+				break;
+		}
+	} else {
+		if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
+			flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+			init_err_skb(&skb);
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+			input = true;
+		} else if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
+			flexcan_mailbox_read_fifo(dev);
+			input = true;
+		}
+	}
+
+	return input;
+}
+
+static int flexcan_irq(rtdm_irq_t *irq_handle)
+{
+	struct rtcan_device *dev = rtdm_irq_get_arg(irq_handle, void);
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_iflag1, reg_esr;
+	struct rtcan_skb skb;
+	int handled = RTDM_IRQ_NONE;
+
+	rtdm_lock_get(&dev->device_lock);
+	rtdm_lock_get(&rtcan_recv_list_lock);
+	rtdm_lock_get(&rtcan_socket_lock);
+
+	reg_iflag1 = flexcan_read(&regs->iflag1);
+
+	/* reception interrupt */
+	if (flexcan_do_rx(dev, reg_iflag1))
+		handled = RTDM_IRQ_HANDLED;
+
+	/* transmission complete interrupt */
+	if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+		/* after sending an RTR frame, the MB is in RX mode */
+		flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+			      &priv->tx_mb->can_ctrl);
+		flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+		rtdm_sem_up(&dev->tx_sem);
+		dev->tx_count++;
+		if (rtcan_loopback_pending(dev))
+			rtcan_loopback(dev);
+		handled = RTDM_IRQ_HANDLED;
+	}
+
+	reg_esr = flexcan_read(&regs->esr);
+
+	/* ACK all bus error and state change IRQ sources */
+	if (reg_esr & FLEXCAN_ESR_ALL_INT) {
+		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+		handled = RTDM_IRQ_HANDLED;
+	}
+
+	/* state change interrupt or broken error state quirk fix is enabled */
+	if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+		handled = RTDM_IRQ_HANDLED;
+	else if (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+					       FLEXCAN_QUIRK_BROKEN_PERR_STATE))
+		goto esr_err;
+
+	if (reg_esr & FLEXCAN_ESR_ERR_STATE) {
+	esr_err:
+		if (flexcan_irq_state(dev, reg_esr, &skb)) {
+			rtcan_rcv(dev, &skb);
+		}
+	}
+
+	/* bus error IRQ - report unconditionally */
+	if (reg_esr & FLEXCAN_ESR_ERR_BUS) {
+		flexcan_irq_bus_err(dev, reg_esr, &skb);
+		rtcan_rcv(dev, &skb);
+		handled = RTDM_IRQ_HANDLED;
+	}
+
+	rtdm_lock_put(&rtcan_socket_lock);
+
rtdm_lock_put(&rtcan_recv_list_lock); + rtdm_lock_put(&dev->device_lock); + + return handled; +} + +static void flexcan_set_bittiming(struct rtcan_device *dev) +{ + const struct flexcan_priv *priv = rtcan_priv(dev); + const struct can_bittime *bt = &priv->bittiming; + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg; + + reg = flexcan_read(®s->ctrl); + reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) | + FLEXCAN_CTRL_RJW(0x3) | + FLEXCAN_CTRL_PSEG1(0x7) | + FLEXCAN_CTRL_PSEG2(0x7) | + FLEXCAN_CTRL_PROPSEG(0x7) | + FLEXCAN_CTRL_LPB | + FLEXCAN_CTRL_SMP | + FLEXCAN_CTRL_LOM); + + reg |= FLEXCAN_CTRL_PRESDIV(bt->std.brp - 1) | + FLEXCAN_CTRL_PSEG1(bt->std.phase_seg1 - 1) | + FLEXCAN_CTRL_PSEG2(bt->std.phase_seg2 - 1) | + FLEXCAN_CTRL_RJW(bt->std.sjw - 1) | + FLEXCAN_CTRL_PROPSEG(bt->std.prop_seg - 1); + + if (dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK) + reg |= FLEXCAN_CTRL_LPB; + if (dev->ctrl_mode & CAN_CTRLMODE_LISTENONLY) + reg |= FLEXCAN_CTRL_LOM; + if (dev->ctrl_mode & CAN_CTRLMODE_3_SAMPLES) + reg |= FLEXCAN_CTRL_SMP; + + rtcandev_dbg(dev, "writing ctrl=0x%08x\n", reg); + flexcan_write(reg, ®s->ctrl); + + /* print chip status */ + rtcandev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__, + flexcan_read(®s->mcr), flexcan_read(®s->ctrl)); +} + +/* flexcan_chip_start + * + * this functions is entered with clocks enabled + * + */ +static int flexcan_chip_start(struct rtcan_device *dev) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr; + int err, i; + + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + + err = clk_prepare_enable(priv->clk_per); + if (err) + goto out_disable_ipg; + + /* enable module */ + err = flexcan_chip_enable(priv); + if (err) + goto out_disable_per; + + /* soft reset */ + err = flexcan_chip_softreset(priv); + if (err) + goto out_chip_disable; + + flexcan_set_bittiming(dev); + + /* MCR + * + * enable freeze + * enable fifo + * halt now + * only supervisor access + * enable warning int + * disable local echo + * enable individual RX masking + * choose format C + * set max mailbox number + */ + reg_mcr = flexcan_read(®s->mcr); + reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); + reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV | + FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ | + FLEXCAN_MCR_IDAM_C; + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + reg_mcr &= ~FLEXCAN_MCR_FEN; + reg_mcr |= FLEXCAN_MCR_MAXMB(priv->mb_last); + } else { + reg_mcr |= FLEXCAN_MCR_FEN | + FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); + } + rtcandev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); + flexcan_write(reg_mcr, ®s->mcr); + + /* CTRL + * + * disable timer sync feature + * + * disable auto busoff recovery + * transmit lowest buffer first + * + * enable tx and rx warning interrupt + * enable bus off interrupt + * (== FLEXCAN_CTRL_ERR_STATE) + */ + reg_ctrl = flexcan_read(®s->ctrl); + reg_ctrl &= ~FLEXCAN_CTRL_TSYN; + reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF | + FLEXCAN_CTRL_ERR_STATE; + + /* enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK), + * on most Flexcan cores, too. Otherwise we don't get + * any error warning or passive interrupts. 
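+	 * The value saved as reg_ctrl_default below therefore carries
+	 * FLEXCAN_CTRL_ERR_MSK only when FLEXCAN_QUIRK_BROKEN_WERR_STATE
+	 * applies.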
+ */ + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE) + reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; + else + reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK; + + /* save for later use */ + priv->reg_ctrl_default = reg_ctrl; + /* leave interrupts disabled for now */ + reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL; + rtcandev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); + flexcan_write(reg_ctrl, ®s->ctrl); + + if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) { + reg_ctrl2 = flexcan_read(®s->ctrl2); + reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS; + flexcan_write(reg_ctrl2, ®s->ctrl2); + } + + /* clear and invalidate all mailboxes first */ + for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) { + flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, + ®s->mb[i].can_ctrl); + } + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + for (i = priv->mb_first; i <= priv->mb_last; i++) + flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY, + ®s->mb[i].can_ctrl); + } + + /* Errata ERR005829: mark first TX mailbox as INACTIVE */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &priv->tx_mb_reserved->can_ctrl); + + /* mark TX mailbox as INACTIVE */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &priv->tx_mb->can_ctrl); + + /* acceptance mask/acceptance code (accept everything) */ + flexcan_write(0x0, ®s->rxgmask); + flexcan_write(0x0, ®s->rx14mask); + flexcan_write(0x0, ®s->rx15mask); + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG) + flexcan_write(0x0, ®s->rxfgmask); + + /* clear acceptance filters */ + for (i = 0; i < ARRAY_SIZE(regs->mb); i++) + flexcan_write(0, ®s->rximr[i]); + + /* On Vybrid, disable memory error detection interrupts + * and freeze mode. + * This also works around errata e5295 which generates + * false positive memory errors and put the device in + * freeze mode. 
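+	 * Writes to MECR are only accepted after unlocking it through
+	 * CTRL2[ECRWRE] and MECR[ECRWRDIS], which the quirk handler
+	 * below does first.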
+ */ + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) { + /* Follow the protocol as described in "Detection + * and Correction of Memory Errors" to write to + * MECR register + */ + reg_ctrl2 = flexcan_read(®s->ctrl2); + reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE; + flexcan_write(reg_ctrl2, ®s->ctrl2); + + reg_mecr = flexcan_read(®s->mecr); + reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; + flexcan_write(reg_mecr, ®s->mecr); + reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | + FLEXCAN_MECR_FANCEI_MSK); + flexcan_write(reg_mecr, ®s->mecr); + } + + err = flexcan_transceiver_enable(priv); + if (err) + goto out_chip_disable; + + /* synchronize with the can bus */ + err = flexcan_chip_unfreeze(priv); + if (err) + goto out_transceiver_disable; + + dev->state = CAN_STATE_ERROR_ACTIVE; + + /* enable interrupts atomically */ + rtdm_irq_disable(&dev->irq_handle); + flexcan_write(priv->reg_ctrl_default, ®s->ctrl); + flexcan_write(priv->reg_imask1_default, ®s->imask1); + flexcan_write(priv->reg_imask2_default, ®s->imask2); + rtdm_irq_enable(&dev->irq_handle); + + /* print chip status */ + rtcandev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__, + flexcan_read(®s->mcr), flexcan_read(®s->ctrl)); + + return 0; + + out_transceiver_disable: + flexcan_transceiver_disable(priv); + out_chip_disable: + flexcan_chip_disable(priv); + out_disable_per: + clk_disable_unprepare(priv->clk_per); + out_disable_ipg: + clk_disable_unprepare(priv->clk_ipg); + + return err; +} + +/* flexcan_chip_stop + * + * this functions is entered with clocks enabled + */ +static void flexcan_chip_stop(struct rtcan_device *dev) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + + /* freeze + disable module */ + flexcan_chip_freeze(dev); + flexcan_chip_disable(priv); + + /* Disable all interrupts */ + flexcan_write(0, ®s->imask2); + flexcan_write(0, ®s->imask1); + flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, + ®s->ctrl); + + flexcan_transceiver_disable(priv); + + clk_disable_unprepare(priv->clk_per); + clk_disable_unprepare(priv->clk_ipg); +} + +static int flexcan_mode_start(struct rtcan_device *dev, + rtdm_lockctx_t *lock_ctx) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + int err = 0; + + rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx); + + switch (dev->state) { + + case CAN_STATE_ACTIVE: + case CAN_STATE_BUS_WARNING: + case CAN_STATE_BUS_PASSIVE: + break; + + case CAN_STATE_STOPPED: + /* Register IRQ handler and pass device structure as arg */ + err = rtdm_irq_request(&dev->irq_handle, priv->irq, + flexcan_irq, 0, DRV_NAME, + dev); + if (err) { + rtcandev_err(dev, "couldn't request irq %d\n", + priv->irq); + goto out; + } + + /* Set up sender "mutex" */ + rtdm_sem_init(&dev->tx_sem, 1); + + /* start chip and queuing */ + err = flexcan_chip_start(dev); + if (err) { + rtdm_irq_free(&dev->irq_handle); + rtdm_sem_destroy(&dev->tx_sem); + goto out; + } + break; + + case CAN_STATE_BUS_OFF: + /* Set up sender "mutex" */ + rtdm_sem_init(&dev->tx_sem, 1); + /* start chip and queuing */ + err = flexcan_chip_start(dev); + if (err) { + rtdm_sem_destroy(&dev->tx_sem); + goto out; + } + break; + + case CAN_STATE_SLEEPING: + default: + err = 0; + break; + } + +out: + rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx); + + return err; +} + +static int flexcan_mode_stop(struct rtcan_device *dev, + rtdm_lockctx_t *lock_ctx) +{ + if (!CAN_STATE_OPERATING(dev->state)) + return 0; + + dev->state = CAN_STATE_STOPPED; + + 
rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx); + + flexcan_chip_stop(dev); + rtdm_irq_free(&dev->irq_handle); + rtdm_sem_destroy(&dev->tx_sem); + + rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx); + + return 0; +} + +static int flexcan_set_mode(struct rtcan_device *dev, can_mode_t mode, + rtdm_lockctx_t *lock_ctx) +{ + if (mode == CAN_MODE_START) + return flexcan_mode_start(dev, lock_ctx); + + if (mode == CAN_MODE_STOP) + return flexcan_mode_stop(dev, lock_ctx); + + return -EOPNOTSUPP; +} + +static int flexcan_copy_bittiming(struct rtcan_device *dev, + struct can_bittime *bt, + rtdm_lockctx_t *lock_ctx) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + + memcpy(&priv->bittiming, bt, sizeof(*bt)); + + return 0; +} + +static int register_flexcandev(struct rtcan_device *dev) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + u32 reg, err; + + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + + err = clk_prepare_enable(priv->clk_per); + if (err) + goto out_disable_ipg; + + /* select "bus clock", chip must be disabled */ + err = flexcan_chip_disable(priv); + if (err) + goto out_disable_per; + reg = flexcan_read(®s->ctrl); + reg |= FLEXCAN_CTRL_CLK_SRC; + flexcan_write(reg, ®s->ctrl); + + err = flexcan_chip_enable(priv); + if (err) + goto out_chip_disable; + + /* set freeze, halt and activate FIFO, restrict register access */ + reg = flexcan_read(®s->mcr); + reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | + FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; + flexcan_write(reg, ®s->mcr); + + /* Currently we only support newer versions of this core + * featuring a RX hardware FIFO (although this driver doesn't + * make use of it on some cores). Older cores, found on some + * Coldfire derivates are not tested. 
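+	 * The read-back check below rejects cores whose FEN bit does
+	 * not stick, returning -ENODEV.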
+ */ + reg = flexcan_read(®s->mcr); + if (!(reg & FLEXCAN_MCR_FEN)) { + rtcandev_err(dev, "Could not enable RX FIFO, unsupported core\n"); + err = -ENODEV; + goto out_chip_disable; + } + + err = rtcan_dev_register(dev); + + /* disable core and turn off clocks */ + out_chip_disable: + flexcan_chip_disable(priv); + out_disable_per: + clk_disable_unprepare(priv->clk_per); + out_disable_ipg: + clk_disable_unprepare(priv->clk_ipg); + + return err; +} + +static void unregister_flexcandev(struct rtcan_device *dev) +{ + struct flexcan_priv *priv = rtcan_priv(dev); + + rtcan_dev_unregister(dev); + if (priv->ts_frames) + kfree(priv->ts_frames); +} + +static const struct of_device_id flexcan_of_match[] = { + { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, + { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, + { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, + { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, flexcan_of_match); + +static const struct platform_device_id flexcan_id_table[] = { + { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(platform, flexcan_id_table); + +static int flexcan_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + const struct flexcan_devtype_data *devtype_data; + struct rtcan_device *dev; + struct flexcan_priv *priv; + struct regulator *reg_xceiver; + struct resource *mem; + struct clk *clk_ipg = NULL, *clk_per = NULL; + struct flexcan_regs __iomem *regs; + int err, irq; + u32 clock_freq = 0; + + reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); + if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else if (IS_ERR(reg_xceiver)) + reg_xceiver = NULL; + + if (pdev->dev.of_node) + of_property_read_u32(pdev->dev.of_node, + "clock-frequency", &clock_freq); + + if (!clock_freq) { + clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(clk_ipg)) { + dev_err(&pdev->dev, "no ipg clock defined\n"); + return PTR_ERR(clk_ipg); + } + + clk_per = devm_clk_get(&pdev->dev, "per"); + if (IS_ERR(clk_per)) { + dev_err(&pdev->dev, "no per clock defined\n"); + return PTR_ERR(clk_per); + } + clock_freq = clk_get_rate(clk_per); + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -ENODEV; + + regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + of_id = of_match_device(flexcan_of_match, &pdev->dev); + if (of_id) { + devtype_data = of_id->data; + } else if (platform_get_device_id(pdev)->driver_data) { + devtype_data = (struct flexcan_devtype_data *) + platform_get_device_id(pdev)->driver_data; + } else { + return -ENODEV; + } + + dev = rtcan_dev_alloc(sizeof(struct flexcan_priv), 0); + if (!dev) + return -ENOMEM; + + platform_set_drvdata(pdev, dev); + + priv = rtcan_priv(dev); + priv->regs = regs; + priv->irq = irq; + priv->clk_ipg = clk_ipg; + priv->clk_per = clk_per; + priv->devtype_data = devtype_data; + priv->reg_xceiver = reg_xceiver; + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP; + priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; + } else { + priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO; + priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; + } + priv->tx_mb = ®s->mb[priv->tx_mb_idx]; + + priv->reg_imask1_default 
= FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + priv->reg_imask2_default = 0; + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + u64 imask; + + priv->mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST; + priv->mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST; + priv->ts_frames = kzalloc(sizeof(*priv->ts_frames) * + FLEXCAN_RX_MB_TIMESTAMP_COUNT, GFP_KERNEL); + if (priv->ts_frames == NULL) { + err = -ENOMEM; + goto failed_fralloc; + } + + imask = GENMASK_ULL(priv->mb_last, priv->mb_first); + priv->reg_imask1_default |= imask; + priv->reg_imask2_default |= imask >> 32; + } else { + priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | + FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; + priv->ts_frames = NULL; + } + + dev->ctrl_name = "FLEXCAN"; + dev->board_name = "FLEXCAN"; + dev->base_addr = (unsigned long)regs; + dev->can_sys_clock = clock_freq; + dev->hard_start_xmit = flexcan_start_xmit; + dev->do_set_mode = flexcan_set_mode; + dev->do_set_bit_time = flexcan_copy_bittiming; + dev->bittiming_const = &flexcan_bittiming_const; + dev->state = CAN_STATE_STOPPED; + strncpy(dev->name, DEV_NAME, IFNAMSIZ); + + err = register_flexcandev(dev); + if (err) { + dev_err(&pdev->dev, "registering netdev failed\n"); + goto failed_register; + } + + dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", + priv->regs, priv->irq); + + return 0; + + failed_register: + if (priv->ts_frames) + kfree(priv->ts_frames); + failed_fralloc: + rtcan_dev_free(dev); + return err; +} + +static int flexcan_remove(struct platform_device *pdev) +{ + struct rtcan_device *dev = platform_get_drvdata(pdev); + + unregister_flexcandev(dev); + rtcan_dev_free(dev); + + return 0; +} + +static struct platform_driver flexcan_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = flexcan_of_match, + }, + .probe = flexcan_probe, + .remove = flexcan_remove, + .id_table = flexcan_id_table, +}; + +module_platform_driver(flexcan_driver); + +MODULE_AUTHOR("Wolfgang Grandegger <wg@denx.de>, " + "Sascha Hauer <kernel@pengutronix.de>, " + "Marc Kleine-Budde <kernel@pengutronix.de>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("RT-CAN port driver for flexcan based chip"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h new file mode 100644 index 0000000..b290005 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * Derived from RTnet project file stack/include/rtnet_internal.h: + * + * Copyright (C) 1999 Lineo, Inc + * 1999, 2002 David A. Schleef <ds@schleef.org> + * 2002 Ulrich Marx <marx@kammer.uni-hannover.de> + * 2003-2005 Jan Kiszka <jan.kiszka@web.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __RTCAN_INTERNAL_H_ +#define __RTCAN_INTERNAL_H_ + +#include <linux/module.h> +#include <rtdm/driver.h> + +#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG +#define RTCAN_ASSERT(expr, func) \ + if (!(expr)) { \ + rtdm_printk("Assertion failed! %s:%s:%d %s\n", \ + __FILE__, __FUNCTION__, __LINE__, (#expr)); \ + func \ + } +#else +#define RTCAN_ASSERT(expr, func) +#endif /* CONFIG_RTCAN_CHECKED */ + +#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG +# define RTCAN_DBG(fmt,args...) do { printk(fmt ,##args); } while (0) +# define RTCAN_RTDM_DBG(fmt,args...) do { rtdm_printk(fmt ,##args); } while (0) +#else +# define RTCAN_DBG(fmt,args...) do {} while (0) +# define RTCAN_RTDM_DBG(fmt,args...) do {} while (0) +#endif + +#define rtcan_priv(dev) (dev)->priv +#define rtcandev_dbg(dev, fmt, args...) \ + printk(KERN_DEBUG "%s: " fmt, (dev)->name, ##args) +#define rtcandev_info(dev, fmt, args...) \ + printk(KERN_INFO "%s: " fmt, (dev)->name, ##args) +#define rtcandev_warn(dev, fmt, args...) \ + printk(KERN_WARNING "%s: " fmt, (dev)->name, ##args) +#define rtcandev_err(dev, fmt, args...) \ + printk(KERN_ERR "%s: " fmt, (dev)->name, ##args) + +#endif /* __RTCAN_INTERNAL_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h new file mode 100644 index 0000000..17a4fbd --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h @@ -0,0 +1,68 @@ +/* + * List management for the RTDM RTCAN device driver + * + * Copyright (C) 2005,2006 Sebastian Smolorz + * <Sebastian.Smolorz@stud.uni-hannover.de> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __RTCAN_LIST_H_ +#define __RTCAN_LIST_H_ + +#include "rtcan_socket.h" + + +/* + * List element in a single linked list used for registering reception sockets. + * Every single struct can_filter which was bound to a socket gets such a + * list entry. There is no member for the CAN interface because there is one + * reception list for every CAN controller. This is because when a CAN message + * is received it is clear from which interface and therefore minimizes + * searching time. + */ +struct rtcan_recv { + can_filter_t can_filter; /* filter used for deciding if + * a socket wants to get a CAN + * message */ + unsigned int match_count; /* count accepted messages */ + struct rtcan_socket *sock; /* pointer to registered socket + */ + struct rtcan_recv *next; /* pointer to next list element + */ +}; + + +/* + * Element in a TX wait queue. + * + * Every socket holds a TX wait queue where all RT tasks are queued when they + * are blocked while waiting to be able to transmit a message via this socket. + * + * Every sender holds its own element. 
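+ * The rt_task handle lets the wakeup path identify which blocked
+ * sender to resume.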
+ */ +struct tx_wait_queue { + struct list_head tx_wait_list; /* List pointers */ + rtdm_task_t *rt_task; /* Pointer to task handle */ +}; + + +/* Spinlock for all reception lists and also for some members in + * struct rtcan_socket */ +extern rtdm_lock_t rtcan_recv_list_lock; + + +#endif /* __RTCAN_LIST_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c new file mode 100644 index 0000000..c071e12 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c @@ -0,0 +1,439 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * Derived from RTnet project file stack/rtcan_module.c: + * + * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de> + * 2003-2006 Jan Kiszka <jan.kiszka@web.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> + +#include <rtdm/driver.h> +#include <rtdm/can.h> +#include <rtcan_version.h> +#include <rtcan_internal.h> +#include <rtcan_dev.h> +#include <rtcan_raw.h> + +MODULE_LICENSE("GPL"); + + +const char rtcan_rtdm_provider_name[] = + "(C) 2006 RT-Socket-CAN Development Team"; + + +#ifdef CONFIG_PROC_FS + +struct proc_dir_entry *rtcan_proc_root; + +static void rtcan_dev_get_ctrlmode_name(can_ctrlmode_t ctrlmode, + char* name, int max_len) +{ + snprintf(name, max_len, "%s%s", + ctrlmode & CAN_CTRLMODE_LISTENONLY ? "listen-only " : "", + ctrlmode & CAN_CTRLMODE_LOOPBACK ? 
"loopback " : ""); +} + +static char *rtcan_state_names[] = { + "active", "warning", "passive" , "bus-off", + "scanning", "stopped", "sleeping" +}; + +static void rtcan_dev_get_state_name(can_state_t state, + char* name, int max_len) +{ + if (state >= CAN_STATE_ACTIVE && + state <= CAN_STATE_SLEEPING) + strncpy(name, rtcan_state_names[state], max_len); + else + strncpy(name, "unknown", max_len); +} + +static void rtcan_dev_get_baudrate_name(can_baudrate_t baudrate, + char* name, int max_len) +{ + switch (baudrate) { + case CAN_BAUDRATE_UNCONFIGURED: + strncpy(name, "undefined", max_len); + break; + case CAN_BAUDRATE_UNKNOWN: + strncpy(name, "unknown", max_len); + break; + default: + ksformat(name, max_len, "%d", baudrate); + break; + } +} + +static void rtcan_dev_get_bittime_name(struct can_bittime *bit_time, + char* name, int max_len) +{ + switch (bit_time->type) { + case CAN_BITTIME_STD: + ksformat(name, max_len, + "brp=%d prop_seg=%d phase_seg1=%d " + "phase_seg2=%d sjw=%d sam=%d", + bit_time->std.brp, + bit_time->std.prop_seg, + bit_time->std.phase_seg1, + bit_time->std.phase_seg2, + bit_time->std.sjw, + bit_time->std.sam); + break; + case CAN_BITTIME_BTR: + ksformat(name, max_len, "btr0=0x%02x btr1=0x%02x", + bit_time->btr.btr0, bit_time->btr.btr1); + break; + default: + strncpy(name, "unknown", max_len); + break; + } +} + +static void rtcan_get_timeout_name(nanosecs_rel_t timeout, + char* name, int max_len) +{ + if (timeout == RTDM_TIMEOUT_INFINITE) + strncpy(name, "infinite", max_len); + else + ksformat(name, max_len, "%lld", (long long)timeout); +} + +static int rtcan_read_proc_devices(struct seq_file *p, void *data) +{ + int i; + struct rtcan_device *dev; + char state_name[20], baudrate_name[20]; + + if (down_interruptible(&rtcan_devices_nrt_lock)) + return -ERESTARTSYS; + + /* Name___________ _Baudrate State___ _TX_Counts _TX_Counts ____Errors + * rtcan0 125000 stopped 1234567890 1234567890 1234567890 + * rtcan1 undefined warning 1234567890 1234567890 1234567890 + * rtcan2 undefined scanning 1234567890 1234567890 1234567890 + */ + seq_printf(p, "Name___________ _Baudrate State___ TX_Counter RX_Counter " + "____Errors\n"); + + for (i = 1; i <= RTCAN_MAX_DEVICES; i++) { + if ((dev = rtcan_dev_get_by_index(i)) != NULL) { + rtcan_dev_get_state_name(dev->state, + state_name, sizeof(state_name)); + rtcan_dev_get_baudrate_name(dev->baudrate, + baudrate_name, sizeof(baudrate_name)); + seq_printf(p, "%-15s %9s %-8s %10d %10d %10d\n", + dev->name, baudrate_name, state_name, dev->tx_count, + dev->rx_count, dev->err_count); + rtcan_dev_dereference(dev); + } + } + + up(&rtcan_devices_nrt_lock); + + return 0; +} + +static int rtcan_proc_devices_open(struct inode *inode, struct file *file) +{ + return single_open(file, rtcan_read_proc_devices, NULL); +} + +static const DEFINE_PROC_OPS(rtcan_proc_devices_ops, + rtcan_proc_devices_open, + single_release, + seq_read, + NULL); + +static int rtcan_read_proc_sockets(struct seq_file *p, void *data) +{ + struct rtcan_socket *sock; + struct rtdm_fd *fd; + struct rtcan_device *dev; + char name[IFNAMSIZ] = "not-bound"; + char rx_timeout[20], tx_timeout[20]; + rtdm_lockctx_t lock_ctx; + int ifindex; + + if (down_interruptible(&rtcan_devices_nrt_lock)) + return -ERESTARTSYS; + + /* Name___________ Filter ErrMask RX_Timeout TX_Timeout RX_BufFull TX_Lo + * rtcan0 1 0x00010 1234567890 1234567890 1234567890 12345 + */ + seq_printf(p, "Name___________ Filter ErrMask RX_Timeout_ns " + "TX_Timeout_ns RX_BufFull TX_Lo\n"); + + 
rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx); + + list_for_each_entry(sock, &rtcan_socket_list, socket_list) { + fd = rtcan_socket_to_fd(sock); + if (rtcan_sock_is_bound(sock)) { + ifindex = atomic_read(&sock->ifindex); + if (ifindex) { + dev = rtcan_dev_get_by_index(ifindex); + if (dev) { + strncpy(name, dev->name, IFNAMSIZ); + rtcan_dev_dereference(dev); + } + } else + ksformat(name, sizeof(name), "%d", ifindex); + } + rtcan_get_timeout_name(sock->tx_timeout, + tx_timeout, sizeof(tx_timeout)); + rtcan_get_timeout_name(sock->rx_timeout, + rx_timeout, sizeof(rx_timeout)); + seq_printf(p, "%-15s %6d 0x%05x %13s %13s %10d %5d\n", + name, sock->flistlen, sock->err_mask, + rx_timeout, tx_timeout, sock->rx_buf_full, + rtcan_loopback_enabled(sock)); + } + + rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx); + + up(&rtcan_devices_nrt_lock); + + return 0; +} + +static int rtcan_proc_sockets_open(struct inode *inode, struct file *file) +{ + return single_open(file, rtcan_read_proc_sockets, NULL); +} + +static const DEFINE_PROC_OPS(rtcan_proc_sockets_ops, + rtcan_proc_sockets_open, + single_release, + seq_read, + NULL); + +static int rtcan_read_proc_info(struct seq_file *p, void *data) +{ + struct rtcan_device *dev = p->private; + char state_name[20], baudrate_name[20]; + char ctrlmode_name[80], bittime_name[80]; + + if (down_interruptible(&rtcan_devices_nrt_lock)) + return -ERESTARTSYS; + + rtcan_dev_get_state_name(dev->state, + state_name, sizeof(state_name)); + rtcan_dev_get_ctrlmode_name(dev->ctrl_mode, + ctrlmode_name, sizeof(ctrlmode_name)); + rtcan_dev_get_baudrate_name(dev->baudrate, + baudrate_name, sizeof(baudrate_name)); + rtcan_dev_get_bittime_name(&dev->bit_time, + bittime_name, sizeof(bittime_name)); + + seq_printf(p, "Device %s\n", dev->name); + seq_printf(p, "Controller %s\n", dev->ctrl_name); + seq_printf(p, "Board %s\n", dev->board_name); + seq_printf(p, "Clock-Hz %d\n", dev->can_sys_clock); + seq_printf(p, "Baudrate %s\n", baudrate_name); + seq_printf(p, "Bit-time %s\n", bittime_name); + seq_printf(p, "Ctrl-Mode %s\n", ctrlmode_name); + seq_printf(p, "State %s\n", state_name); + seq_printf(p, "TX-Counter %d\n", dev->tx_count); + seq_printf(p, "RX-Counter %d\n", dev->rx_count); + seq_printf(p, "Errors %d\n", dev->err_count); +#ifdef RTCAN_USE_REFCOUNT + seq_printf(p, "Refcount %d\n", atomic_read(&dev->refcount)); +#endif + + up(&rtcan_devices_nrt_lock); + + return 0; +} + +static int rtcan_proc_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, rtcan_read_proc_info, pde_data(inode)); +} + +static const DEFINE_PROC_OPS(rtcan_proc_info_ops, + rtcan_proc_info_open, + single_release, + seq_read, + NULL); + +static int rtcan_read_proc_filter(struct seq_file *p, void *data) +{ + struct rtcan_device *dev = p->private; + struct rtcan_recv *recv_listener = dev->recv_list; + struct rtdm_fd *fd; + rtdm_lockctx_t lock_ctx; + + /* __CAN_ID__ _CAN_Mask_ Inv MatchCount + * 0x12345678 0x12345678 no 1234567890 + */ + + seq_printf(p, "__CAN_ID__ _CAN_Mask_ Inv MatchCount\n"); + + rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx); + + /* Loop over the reception list of the device */ + while (recv_listener != NULL) { + fd = rtcan_socket_to_fd(recv_listener->sock); + + seq_printf(p, "0x%08x 0x%08x %s %10d\n", + recv_listener->can_filter.can_id, + recv_listener->can_filter.can_mask & ~CAN_INV_FILTER, + (recv_listener->can_filter.can_mask & CAN_INV_FILTER) ? 
+ "yes" : " no", + recv_listener->match_count); + + recv_listener = recv_listener->next; + } + + rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx); + + return 0; +} + +static int rtcan_proc_filter_open(struct inode *inode, struct file *file) +{ + return single_open(file, rtcan_read_proc_filter, pde_data(inode)); +} + +static const DEFINE_PROC_OPS(rtcan_proc_filter_ops, + rtcan_proc_filter_open, + single_release, + seq_read, + NULL); + +static int rtcan_read_proc_version(struct seq_file *p, void *data) +{ + seq_printf(p, "RT-Socket-CAN %d.%d.%d\n", + RTCAN_MAJOR_VER, RTCAN_MINOR_VER, RTCAN_BUGFIX_VER); + + return 0; +} + +static int rtcan_proc_version_open(struct inode *inode, struct file *file) +{ + return single_open(file, rtcan_read_proc_version, NULL); +} + +static const DEFINE_PROC_OPS(rtcan_proc_version_ops, + rtcan_proc_version_open, + single_release, + seq_read, + NULL); + +void rtcan_dev_remove_proc(struct rtcan_device* dev) +{ + if (!dev->proc_root) + return; + + remove_proc_entry("info", dev->proc_root); + remove_proc_entry("filters", dev->proc_root); + remove_proc_entry(dev->name, rtcan_proc_root); + + dev->proc_root = NULL; +} + +int rtcan_dev_create_proc(struct rtcan_device* dev) +{ + if (!rtcan_proc_root) + return -EINVAL; + + dev->proc_root = proc_mkdir(dev->name, rtcan_proc_root); + if (!dev->proc_root) { + printk("%s: unable to create /proc device entries\n", dev->name); + return -1; + } + + proc_create_data("info", S_IFREG | S_IRUGO | S_IWUSR, dev->proc_root, + &rtcan_proc_info_ops, dev); + proc_create_data("filters", S_IFREG | S_IRUGO | S_IWUSR, dev->proc_root, + &rtcan_proc_filter_ops, dev); + return 0; + +} + + +static int rtcan_proc_register(void) +{ + rtcan_proc_root = proc_mkdir("rtcan", NULL); + if (!rtcan_proc_root) { + printk("rtcan: unable to initialize /proc entries\n"); + return -1; + } + + proc_create("devices", S_IFREG | S_IRUGO | S_IWUSR, rtcan_proc_root, + &rtcan_proc_devices_ops); + proc_create("version", S_IFREG | S_IRUGO | S_IWUSR, rtcan_proc_root, + &rtcan_proc_version_ops); + proc_create("sockets", S_IFREG | S_IRUGO | S_IWUSR, rtcan_proc_root, + &rtcan_proc_sockets_ops); + return 0; +} + + + +static void rtcan_proc_unregister(void) +{ + remove_proc_entry("devices", rtcan_proc_root); + remove_proc_entry("version", rtcan_proc_root); + remove_proc_entry("sockets", rtcan_proc_root); + remove_proc_entry("rtcan", 0); +} +#endif /* CONFIG_PROC_FS */ + + + +int __init rtcan_init(void) +{ + int err = 0; + + if (!rtdm_available()) + return -ENOSYS; + + printk("RT-Socket-CAN %d.%d.%d - %s\n", + RTCAN_MAJOR_VER, RTCAN_MINOR_VER, RTCAN_BUGFIX_VER, + rtcan_rtdm_provider_name); + + if ((err = rtcan_raw_proto_register()) != 0) + goto out; + +#ifdef CONFIG_PROC_FS + if ((err = rtcan_proc_register()) != 0) + goto out; +#endif + + out: + return err; +} + + +void __exit rtcan_exit(void) +{ + rtcan_raw_proto_unregister(); +#ifdef CONFIG_PROC_FS + rtcan_proc_unregister(); +#endif + + printk("rtcan: unloaded\n"); +} + + +module_init(rtcan_init); +module_exit(rtcan_exit); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c new file mode 100644 index 0000000..441bfbc --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c @@ -0,0 +1,994 @@ +/* + * Copyright (C) 2005, 2006 Sebastian Smolorz + * <Sebastian.Smolorz@stud.uni-hannover.de> + * + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * + * Parts of this software are based on the following: + * + * - RTAI CAN 
device driver for SJA1000 controllers by Jan Kiszka + * + * - linux-can.patch, a CAN socket framework for Linux, + * Copyright (C) 2004, 2005, Robert Schwebel, Benedikt Spranger, + * Marc Kleine-Budde, Sascha Hauer, Pengutronix + * + * - RTnet (www.rtnet.org) + * + * - serial device driver and profile included in Xenomai (RTDM), + * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>. + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/stringify.h> + +#include <rtdm/driver.h> + +#include <rtdm/can.h> +#include <rtdm/compat.h> +#include "rtcan_version.h" +#include "rtcan_socket.h" +#include "rtcan_list.h" +#include "rtcan_dev.h" +#include "rtcan_raw.h" +#include "rtcan_internal.h" + + +/* + * Set if socket wants to receive a high precision timestamp together with + * CAN frames + */ +#define RTCAN_GET_TIMESTAMP 0 + + +MODULE_AUTHOR("RT-Socket-CAN Development Team"); +MODULE_DESCRIPTION("RTDM CAN raw socket device driver"); +MODULE_VERSION(__stringify(RTCAN_MAJOR_VER) + __stringify(RTCAN_MINOR_VER) + __stringify(RTCAN_BUGFIX_VER)); +MODULE_LICENSE("GPL"); + +void rtcan_tx_push(struct rtcan_device *dev, struct rtcan_socket *sock, + can_frame_t *frame); + +static inline int rtcan_accept_msg(uint32_t can_id, can_filter_t *filter) +{ + if ((filter->can_mask & CAN_INV_FILTER)) + return ((can_id & filter->can_mask) != filter->can_id); + else + return ((can_id & filter->can_mask) == filter->can_id); +} + + +static void rtcan_rcv_deliver(struct rtcan_recv *recv_listener, + struct rtcan_skb *skb) +{ + int size_free; + size_t cpy_size, first_part_size; + struct rtcan_rb_frame *frame = &skb->rb_frame; + struct rtdm_fd *fd = rtdm_private_to_fd(recv_listener->sock); + struct rtcan_socket *sock; + + if (rtdm_fd_lock(fd) < 0) + return; + + sock = recv_listener->sock; + + cpy_size = skb->rb_frame_size; + /* Check if socket wants to receive a timestamp */ + if (test_bit(RTCAN_GET_TIMESTAMP, &sock->flags)) { + cpy_size += RTCAN_TIMESTAMP_SIZE; + frame->can_dlc |= RTCAN_HAS_TIMESTAMP; + } else + frame->can_dlc &= RTCAN_HAS_NO_TIMESTAMP; + + /* Calculate free size in the ring buffer */ + size_free = sock->recv_head - sock->recv_tail; + if (size_free <= 0) + size_free += RTCAN_RXBUF_SIZE; + + /* Test if ring buffer has enough space. 
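+	 * Example: with RTCAN_RXBUF_SIZE = 1024, recv_head = 96 and
+	 * recv_tail = 100, size_free = 96 - 100 + 1024 = 1020 bytes.
+	 * The strict size_free > cpy_size test below keeps at least one
+	 * byte unused, so recv_head == recv_tail can only mean "buffer
+	 * empty", never "buffer full".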
*/ + if (size_free > cpy_size) { + /* Check if we must wrap around the end of buffer */ + if ((sock->recv_tail + cpy_size) > RTCAN_RXBUF_SIZE) { + /* Wrap around: Two memcpy operations */ + + first_part_size = RTCAN_RXBUF_SIZE - sock->recv_tail; + + memcpy(&sock->recv_buf[sock->recv_tail], (void *)frame, + first_part_size); + memcpy(&sock->recv_buf[0], (void *)frame + + first_part_size, cpy_size - first_part_size); + } else + memcpy(&sock->recv_buf[sock->recv_tail], (void *)frame, + cpy_size); + + /* Adjust tail */ + sock->recv_tail = (sock->recv_tail + cpy_size) & + (RTCAN_RXBUF_SIZE - 1); + + /*Notify the delivery of the message */ + rtdm_sem_up(&sock->recv_sem); + + } else { + /* Overflow of socket's ring buffer! */ + sock->rx_buf_full++; + RTCAN_RTDM_DBG("rtcan: socket buffer overflow, message discarded\n"); + } + + rtdm_fd_unlock(fd); +} + + +void rtcan_rcv(struct rtcan_device *dev, struct rtcan_skb *skb) +{ + nanosecs_abs_t timestamp = rtdm_clock_read(); + /* Entry in reception list, begin with head */ + struct rtcan_recv *recv_listener = dev->recv_list; + struct rtcan_rb_frame *frame = &skb->rb_frame; + + /* Copy timestamp to skb */ + memcpy((void *)&skb->rb_frame + skb->rb_frame_size, + ×tamp, RTCAN_TIMESTAMP_SIZE); + + if ((frame->can_id & CAN_ERR_FLAG)) { + dev->err_count++; + while (recv_listener != NULL) { + if ((frame->can_id & recv_listener->sock->err_mask)) { + recv_listener->match_count++; + rtcan_rcv_deliver(recv_listener, skb); + } + recv_listener = recv_listener->next; + } + } else { + dev->rx_count++; + while (recv_listener != NULL) { + if (rtcan_accept_msg(frame->can_id, &recv_listener->can_filter)) { + recv_listener->match_count++; + rtcan_rcv_deliver(recv_listener, skb); + } + recv_listener = recv_listener->next; + } + } +} + +#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK + +void rtcan_tx_push(struct rtcan_device *dev, struct rtcan_socket *sock, + can_frame_t *frame) +{ + struct rtcan_rb_frame *rb_frame = &dev->tx_skb.rb_frame; + + RTCAN_ASSERT(dev->tx_socket == 0, + rtdm_printk("(%d) TX skb still in use", dev->ifindex);); + + rb_frame->can_id = frame->can_id; + rb_frame->can_dlc = frame->can_dlc; + dev->tx_skb.rb_frame_size = EMPTY_RB_FRAME_SIZE; + if (frame->can_dlc && !(frame->can_id & CAN_RTR_FLAG)) { + memcpy(rb_frame->data, frame->data, frame->can_dlc); + dev->tx_skb.rb_frame_size += frame->can_dlc; + } + rb_frame->can_ifindex = dev->ifindex; + dev->tx_socket = sock; +} + +void rtcan_loopback(struct rtcan_device *dev) +{ + nanosecs_abs_t timestamp = rtdm_clock_read(); + /* Entry in reception list, begin with head */ + struct rtcan_recv *recv_listener = dev->recv_list; + struct rtcan_rb_frame *frame = &dev->tx_skb.rb_frame; + + memcpy((void *)&dev->tx_skb.rb_frame + dev->tx_skb.rb_frame_size, + ×tamp, RTCAN_TIMESTAMP_SIZE); + + while (recv_listener != NULL) { + dev->rx_count++; + if ((dev->tx_socket != recv_listener->sock) && + rtcan_accept_msg(frame->can_id, &recv_listener->can_filter)) { + recv_listener->match_count++; + rtcan_rcv_deliver(recv_listener, &dev->tx_skb); + } + recv_listener = recv_listener->next; + } + dev->tx_socket = NULL; +} + +EXPORT_SYMBOL_GPL(rtcan_loopback); + +#endif /* CONFIG_XENO_DRIVERS_CAN_LOOPBACK */ + + +int rtcan_raw_socket(struct rtdm_fd *fd, int protocol) +{ + /* Only protocol CAN_RAW is supported */ + if (protocol != CAN_RAW && protocol != 0) + return -EPROTONOSUPPORT; + + rtcan_socket_init(fd); + + return 0; +} + + +static inline void rtcan_raw_unbind(struct rtcan_socket *sock) +{ + rtcan_raw_remove_filter(sock); + if 
(!rtcan_flist_no_filter(sock->flist) && sock->flist) + rtdm_free(sock->flist); + sock->flist = NULL; + sock->flistlen = RTCAN_SOCK_UNBOUND; + atomic_set(&sock->ifindex, 0); +} + + +static void rtcan_raw_close(struct rtdm_fd *fd) +{ + struct rtcan_socket *sock = rtdm_fd_to_private(fd); + rtdm_lockctx_t lock_ctx; + + /* Get lock for reception lists */ + rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx); + + /* Check if socket is bound */ + if (rtcan_sock_is_bound(sock)) + rtcan_raw_unbind(sock); + + rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx); + + + rtcan_socket_cleanup(fd); +} + + +int rtcan_raw_bind(struct rtdm_fd *fd, + struct sockaddr_can *scan) +{ + struct rtcan_socket *sock = rtdm_fd_to_private(fd); + rtdm_lockctx_t lock_ctx; + int ret = 0; + + /* Check address family and + check if given length of filter list is plausible */ + if (scan->can_family != AF_CAN) + return -EINVAL; + /* Check range of ifindex, must be between 0 and RTCAN_MAX_DEVICES */ + if (scan->can_ifindex < 0 || scan->can_ifindex > RTCAN_MAX_DEVICES) + return -ENODEV; + + /* Get lock for reception lists */ + rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx); + + if ((ret = rtcan_raw_check_filter(sock, scan->can_ifindex, + sock->flist))) + goto out; + rtcan_raw_remove_filter(sock); + /* Add filter and mark socket as bound */ + sock->flistlen = rtcan_raw_add_filter(sock, scan->can_ifindex); + + /* Set new interface index the socket is now bound to */ + atomic_set(&sock->ifindex, scan->can_ifindex); + + out: + rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx); + + return ret; +} + + +static int rtcan_raw_setsockopt(struct rtdm_fd *fd, + struct _rtdm_setsockopt_args *so) +{ + struct rtcan_socket *sock = rtdm_fd_to_private(fd); + struct rtcan_filter_list *flist; + int ifindex = atomic_read(&sock->ifindex); + rtdm_lockctx_t lock_ctx; + can_err_mask_t err_mask; + int val, ret = 0; + + if (so->level != SOL_CAN_RAW) + return -ENOPROTOOPT; + + switch (so->optname) { + + case CAN_RAW_FILTER: + if (so->optlen == 0) { + flist = RTCAN_FLIST_NO_FILTER; + } else { + int flistlen; + flistlen = so->optlen / sizeof(struct can_filter); + if (flistlen < 1 || flistlen > RTCAN_MAX_RECEIVERS || + so->optlen % sizeof(struct can_filter) != 0) + return -EINVAL; + + flist = (struct rtcan_filter_list *)rtdm_malloc(so->optlen + sizeof(int)); + if (flist == NULL) + return -ENOMEM; + if (rtdm_fd_is_user(fd)) { + if (!rtdm_read_user_ok(fd, so->optval, so->optlen) || + rtdm_copy_from_user(fd, flist->flist, + so->optval, so->optlen)) { + rtdm_free(flist); + return -EFAULT; + } + } else + memcpy(flist->flist, so->optval, so->optlen); + flist->flistlen = flistlen; + } + + /* Get lock for reception lists */ + rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx); + + /* Check if there is space for the filter list if already bound */ + if (rtcan_sock_is_bound(sock)) { + if (!rtcan_flist_no_filter(flist) && + (ret = rtcan_raw_check_filter(sock, ifindex, flist))) { + rtdm_free(flist); + goto out_filter; + } + rtcan_raw_remove_filter(sock); + } + + /* Remove previous list and attach the new one */ + if (!rtcan_flist_no_filter(flist) && sock->flist) + rtdm_free(sock->flist); + sock->flist = flist; + + if (rtcan_sock_is_bound(sock)) + sock->flistlen = rtcan_raw_add_filter(sock, ifindex); + + out_filter: + /* Release lock for reception lists */ + rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx); + break; + + case CAN_RAW_ERR_FILTER: + + if (so->optlen != sizeof(can_err_mask_t)) + return -EINVAL; + + if 
(rtdm_fd_is_user(fd)) { + if (!rtdm_read_user_ok(fd, so->optval, so->optlen) || + rtdm_copy_from_user(fd, &err_mask, so->optval, so->optlen)) + return -EFAULT; + } else + memcpy(&err_mask, so->optval, so->optlen); + + /* Get lock for reception lists */ + rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx); + sock->err_mask = err_mask; + rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx); + + break; + + case CAN_RAW_LOOPBACK: + + if (so->optlen != sizeof(int)) + return -EINVAL; + + if (rtdm_fd_is_user(fd)) { + if (!rtdm_read_user_ok(fd, so->optval, so->optlen) || + rtdm_copy_from_user(fd, &val, so->optval, so->optlen)) + return -EFAULT; + } else + memcpy(&val, so->optval, so->optlen); + +#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK + sock->loopback = val; +#else + if (val) + return -EOPNOTSUPP; +#endif + break; + + default: + ret = -ENOPROTOOPT; + } + + return ret; +} + + +int rtcan_raw_ioctl(struct rtdm_fd *fd, + unsigned int request, void *arg) +{ + int ret = 0; + + switch (request) { + COMPAT_CASE(_RTIOC_BIND): { + + struct _rtdm_setsockaddr_args *setaddr, setaddr_buf; + struct sockaddr_can *sockaddr, sockaddr_buf; + + if (rtdm_fd_is_user(fd)) { + ret = rtdm_fd_get_setsockaddr_args(fd, &setaddr_buf, arg); + if (ret) + return ret; + + setaddr = &setaddr_buf; + + /* Check size */ + if (setaddr->addrlen != sizeof(struct sockaddr_can)) + return -EINVAL; + + /* Copy argument structure from userspace */ + if (!rtdm_read_user_ok(fd, arg, + sizeof(struct sockaddr_can)) || + rtdm_copy_from_user(fd, &sockaddr_buf, setaddr->addr, + sizeof(struct sockaddr_can))) + return -EFAULT; + sockaddr = &sockaddr_buf; + } else { + setaddr = (struct _rtdm_setsockaddr_args *)arg; + sockaddr = (struct sockaddr_can *)setaddr->addr; + } + + /* Now, all required data are in kernel space */ + ret = rtcan_raw_bind(fd, sockaddr); + + break; + } + + COMPAT_CASE(_RTIOC_SETSOCKOPT): { + struct _rtdm_setsockopt_args *setopt; + struct _rtdm_setsockopt_args setopt_buf; + + if (rtdm_fd_is_user(fd)) { + ret = rtdm_fd_get_setsockopt_args(fd, &setopt_buf, arg); + if (ret) + return ret; + + setopt = &setopt_buf; + } else + setopt = (struct _rtdm_setsockopt_args *)arg; + + return rtcan_raw_setsockopt(fd, setopt); + } + + case RTCAN_RTIOC_TAKE_TIMESTAMP: { + struct rtcan_socket *sock = rtdm_fd_to_private(fd); + long timestamp_switch = (long)arg; + + if (timestamp_switch == RTCAN_TAKE_TIMESTAMPS) + set_bit(RTCAN_GET_TIMESTAMP, &sock->flags); + else + clear_bit(RTCAN_GET_TIMESTAMP, &sock->flags); + break; + } + + case RTCAN_RTIOC_RCV_TIMEOUT: + case RTCAN_RTIOC_SND_TIMEOUT: { + /* Do some work these requests have in common. */ + struct rtcan_socket *sock = rtdm_fd_to_private(fd); + + nanosecs_rel_t *timeout = (nanosecs_rel_t *)arg; + nanosecs_rel_t timeo_buf; + + if (rtdm_fd_is_user(fd)) { + /* Copy 64 bit timeout value from userspace */ + if (!rtdm_read_user_ok(fd, arg, + sizeof(nanosecs_rel_t)) || + rtdm_copy_from_user(fd, &timeo_buf, + arg, sizeof(nanosecs_rel_t))) + return -EFAULT; + + timeout = &timeo_buf; + } + + /* Now the differences begin between the requests. 
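+	 * RTDM_TIMEOUT_INFINITE (the per-socket default set in
+	 * rtcan_socket_init()) means "block forever", while
+	 * RTDM_TIMEOUT_NONE turns the next receive or send into a
+	 * non-blocking attempt.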
*/ + if (request == RTCAN_RTIOC_RCV_TIMEOUT) + sock->rx_timeout = *timeout; + else + sock->tx_timeout = *timeout; + + break; + } + + default: + ret = rtcan_raw_ioctl_dev(fd, request, arg); + break; + } + + return ret; +} + + +#define MEMCPY_FROM_RING_BUF(to, len) \ +do { \ + if (unlikely((recv_buf_index + len) > RTCAN_RXBUF_SIZE)) { \ + /* Wrap around end of buffer */ \ + first_part_size = RTCAN_RXBUF_SIZE - recv_buf_index; \ + memcpy(to, &recv_buf[recv_buf_index], first_part_size); \ + memcpy((void *)to + first_part_size, recv_buf, \ + len - first_part_size); \ + } else \ + memcpy(to, &recv_buf[recv_buf_index], len); \ + recv_buf_index = (recv_buf_index + len) & (RTCAN_RXBUF_SIZE - 1); \ +} while (0) + +ssize_t rtcan_raw_recvmsg(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags) +{ + struct rtcan_socket *sock = rtdm_fd_to_private(fd); + struct sockaddr_can scan; + nanosecs_rel_t timeout; + struct iovec *iov = (struct iovec *)msg->msg_iov; + struct iovec iov_buf; + can_frame_t frame; + nanosecs_abs_t timestamp = 0; + unsigned char ifindex; + unsigned char can_dlc; + unsigned char *recv_buf; + int recv_buf_index; + size_t first_part_size; + size_t payload_size; + rtdm_lockctx_t lock_ctx; + int ret; + + /* Clear frame memory location */ + memset(&frame, 0, sizeof(can_frame_t)); + + /* Check flags */ + if (flags & ~(MSG_DONTWAIT | MSG_PEEK)) + return -EINVAL; + + + /* Check if msghdr entries are sane */ + + if (msg->msg_name != NULL) { + if (msg->msg_namelen < sizeof(struct sockaddr_can)) + return -EINVAL; + + if (rtdm_fd_is_user(fd)) { + if (!rtdm_rw_user_ok(fd, msg->msg_name, msg->msg_namelen)) + return -EFAULT; + } + + } else { + if (msg->msg_namelen != 0) + return -EINVAL; + } + + /* Check msg_iovlen, only one buffer allowed */ + if (msg->msg_iovlen != 1) + return -EMSGSIZE; + + if (rtdm_fd_is_user(fd)) { + /* Copy IO vector from userspace */ + ret = rtdm_fd_get_iovec(fd, &iov_buf, msg, true); + if (ret) + return -EFAULT; + + iov = &iov_buf; + } + + /* Check size of buffer */ + if (iov->iov_len < sizeof(can_frame_t)) + return -EMSGSIZE; + + /* Check buffer if in user space */ + if (rtdm_fd_is_user(fd)) { + if (!rtdm_rw_user_ok(fd, iov->iov_base, iov->iov_len)) + return -EFAULT; + } + + if (msg->msg_control != NULL) { + if (msg->msg_controllen < sizeof(nanosecs_abs_t)) + return -EINVAL; + + if (rtdm_fd_is_user(fd)) { + if (!rtdm_rw_user_ok(fd, msg->msg_control, + msg->msg_controllen)) + return -EFAULT; + } + + } else { + if (msg->msg_controllen != 0) + return -EINVAL; + } + + rtcan_raw_enable_bus_err(sock); + + /* Set RX timeout */ + timeout = (flags & MSG_DONTWAIT) ? RTDM_TIMEOUT_NONE : sock->rx_timeout; + + /* Fetch message (ok, try it ...) */ + ret = rtdm_sem_timeddown(&sock->recv_sem, timeout, NULL); + + /* Error code returned? */ + if (unlikely(ret)) { + /* Which error code? */ + + if (ret == -EIDRM) + /* Socket was closed */ + return -EBADF; + + else if (ret == -EWOULDBLOCK) + /* We would block but don't want to */ + return -EAGAIN; + + else + /* Return all other error codes unmodified. */ + return ret; + } + + + /* OK, we've got mail. 
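+	 * Each message sits in the ring buffer in the packed layout of
+	 * struct rtcan_rb_frame: 4 bytes CAN ID, 1 byte interface index,
+	 * 1 byte DLC (whose RTCAN_HAS_TIMESTAMP bit announces a trailing
+	 * timestamp), up to 8 data bytes and, if flagged, an 8 byte
+	 * timestamp. The code below unpacks exactly these fields in this
+	 * order.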
*/ + + rtdm_lock_get_irqsave(&rtcan_socket_lock, lock_ctx); + + + /* Construct a struct can_frame with data from socket's ring buffer */ + recv_buf_index = sock->recv_head; + recv_buf = sock->recv_buf; + + + /* Begin with CAN ID */ + MEMCPY_FROM_RING_BUF(&frame.can_id, sizeof(uint32_t)); + + + /* Fetch interface index */ + ifindex = recv_buf[recv_buf_index]; + recv_buf_index = (recv_buf_index + 1) & (RTCAN_RXBUF_SIZE - 1); + + + /* Fetch DLC (with indicator if a timestamp exists) */ + can_dlc = recv_buf[recv_buf_index]; + recv_buf_index = (recv_buf_index + 1) & (RTCAN_RXBUF_SIZE - 1); + + frame.can_dlc = can_dlc & RTCAN_HAS_NO_TIMESTAMP; + payload_size = (frame.can_dlc > 8) ? 8 : frame.can_dlc; + + + /* If frame is an RTR or one with no payload it's not necessary + * to copy the data bytes. */ + if (!(frame.can_id & CAN_RTR_FLAG) && payload_size) + /* Copy data bytes */ + MEMCPY_FROM_RING_BUF(frame.data, payload_size); + + /* Is a timestamp available and is the caller actually interested? */ + if (msg->msg_controllen && (can_dlc & RTCAN_HAS_TIMESTAMP)) + /* Copy timestamp */ + MEMCPY_FROM_RING_BUF(×tamp, RTCAN_TIMESTAMP_SIZE); + + /* Message completely read from the socket's ring buffer. Now check if + * caller is just peeking. */ + if (flags & MSG_PEEK) + /* Next one, please! */ + rtdm_sem_up(&sock->recv_sem); + else + /* Adjust begin of first message in the ring buffer. */ + sock->recv_head = recv_buf_index; + + + /* Release lock */ + rtdm_lock_put_irqrestore(&rtcan_socket_lock, lock_ctx); + + + /* Create CAN socket address to give back */ + if (msg->msg_namelen) { + scan.can_family = AF_CAN; + scan.can_ifindex = ifindex; + } + + + /* Last duty: Copy all back to the caller's buffers. */ + + if (rtdm_fd_is_user(fd)) { + /* Copy to user space */ + + /* Copy socket address */ + if (msg->msg_namelen) { + if (rtdm_copy_to_user(fd, msg->msg_name, &scan, + sizeof(struct sockaddr_can))) + return -EFAULT; + + msg->msg_namelen = sizeof(struct sockaddr_can); + } + + /* Copy CAN frame */ + if (rtdm_copy_to_user(fd, iov->iov_base, &frame, + sizeof(can_frame_t))) + return -EFAULT; + /* Adjust iovec in the common way */ + iov->iov_base += sizeof(can_frame_t); + iov->iov_len -= sizeof(can_frame_t); + /* ... and copy it, too. 
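+	 * For reference, a matching (purely illustrative) user space
+	 * receiver passes one iovec of sizeof(struct can_frame) and, if
+	 * it enabled RTCAN_RTIOC_TAKE_TIMESTAMP, an 8 byte msg_control
+	 * buffer:
+	 *
+	 *	struct can_frame frame;
+	 *	nanosecs_abs_t ts;
+	 *	struct iovec iov = { &frame, sizeof(frame) };
+	 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
+	 *			      .msg_control = &ts,
+	 *			      .msg_controllen = sizeof(ts) };
+	 *
+	 *	recvmsg(s, &msg, 0);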
*/ + ret = rtdm_fd_put_iovec(fd, iov, msg); + if (ret) + return -EFAULT; + + /* Copy timestamp if existent and wanted */ + if (msg->msg_controllen) { + if (can_dlc & RTCAN_HAS_TIMESTAMP) { + if (rtdm_copy_to_user(fd, msg->msg_control, + ×tamp, RTCAN_TIMESTAMP_SIZE)) + return -EFAULT; + + msg->msg_controllen = RTCAN_TIMESTAMP_SIZE; + } else + msg->msg_controllen = 0; + } + + } else { + /* Kernel space */ + + /* Copy socket address */ + if (msg->msg_namelen) { + memcpy(msg->msg_name, &scan, sizeof(struct sockaddr_can)); + msg->msg_namelen = sizeof(struct sockaddr_can); + } + + /* Copy CAN frame */ + memcpy(iov->iov_base, &frame, sizeof(can_frame_t)); + /* Adjust iovec in the common way */ + iov->iov_base += sizeof(can_frame_t); + iov->iov_len -= sizeof(can_frame_t); + + /* Copy timestamp if existent and wanted */ + if (msg->msg_controllen) { + if (can_dlc & RTCAN_HAS_TIMESTAMP) { + memcpy(msg->msg_control, ×tamp, RTCAN_TIMESTAMP_SIZE); + msg->msg_controllen = RTCAN_TIMESTAMP_SIZE; + } else + msg->msg_controllen = 0; + } + } + + + return sizeof(can_frame_t); +} + + +ssize_t rtcan_raw_sendmsg(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags) +{ + struct rtcan_socket *sock = rtdm_fd_to_private(fd); + struct sockaddr_can *scan = (struct sockaddr_can *)msg->msg_name; + struct sockaddr_can scan_buf; + struct iovec *iov = (struct iovec *)msg->msg_iov; + struct iovec iov_buf; + can_frame_t *frame; + can_frame_t frame_buf; + rtdm_lockctx_t lock_ctx; + nanosecs_rel_t timeout = 0; + struct tx_wait_queue tx_wait; + struct rtcan_device *dev; + int ifindex = 0; + int ret = 0; + spl_t s; + + + if (flags & MSG_OOB) /* Mirror BSD error message compatibility */ + return -EOPNOTSUPP; + + /* Only MSG_DONTWAIT is a valid flag. */ + if (flags & ~MSG_DONTWAIT) + return -EINVAL; + + /* Check msg_iovlen, only one buffer allowed */ + if (msg->msg_iovlen != 1) + return -EMSGSIZE; + + if (scan == NULL) { + /* No socket address. Will use bound interface for sending */ + + if (msg->msg_namelen != 0) + return -EINVAL; + + + /* We only want a consistent value here, a spin lock would be + * overkill. Nevertheless, the binding could change till we have + * the chance to send. Blame the user, though. */ + ifindex = atomic_read(&sock->ifindex); + + if (!ifindex) + /* Socket isn't bound or bound to all interfaces. Go out. 
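+	 * For reference, a minimal and purely illustrative user space
+	 * sender on a socket s = socket(PF_CAN, SOCK_RAW, CAN_RAW)
+	 * that was bound beforehand:
+	 *
+	 *	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
+	 *				   .data = { 0xde, 0xad } };
+	 *	struct iovec iov = { &frame, sizeof(frame) };
+	 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
+	 *
+	 *	sendmsg(s, &msg, 0);
+	 *
+	 * matching the msg_iovlen and iov_len checks enforced below.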
*/ + return -ENXIO; + } else { + /* Socket address given */ + if (msg->msg_namelen < sizeof(struct sockaddr_can)) + return -EINVAL; + + if (rtdm_fd_is_user(fd)) { + /* Copy socket address from userspace */ + if (!rtdm_read_user_ok(fd, msg->msg_name, + sizeof(struct sockaddr_can)) || + rtdm_copy_from_user(fd, &scan_buf, msg->msg_name, + sizeof(struct sockaddr_can))) + return -EFAULT; + + scan = &scan_buf; + } + + /* Check address family */ + if (scan->can_family != AF_CAN) + return -EINVAL; + + ifindex = scan->can_ifindex; + } + + if (rtdm_fd_is_user(fd)) { + /* Copy IO vector from userspace */ + if (rtdm_fd_get_iovec(fd, &iov_buf, msg, false)) + return -EFAULT; + + iov = &iov_buf; + } + + /* Check size of buffer */ + if (iov->iov_len != sizeof(can_frame_t)) + return -EMSGSIZE; + + frame = (can_frame_t *)iov->iov_base; + + if (rtdm_fd_is_user(fd)) { + /* Copy CAN frame from userspace */ + if (!rtdm_read_user_ok(fd, iov->iov_base, + sizeof(can_frame_t)) || + rtdm_copy_from_user(fd, &frame_buf, iov->iov_base, + sizeof(can_frame_t))) + return -EFAULT; + + frame = &frame_buf; + } + + /* Adjust iovec in the common way */ + iov->iov_base += sizeof(can_frame_t); + iov->iov_len -= sizeof(can_frame_t); + /* ... and copy it back to userspace if necessary */ + if (rtdm_fd_is_user(fd)) { + if (rtdm_copy_to_user(fd, msg->msg_iov, iov, + sizeof(struct iovec))) + return -EFAULT; + } + + /* At last, we've got the frame ... */ + + /* Check if DLC between 0 and 15 */ + if (frame->can_dlc > 15) + return -EINVAL; + + /* Check if it is a standard frame and the ID between 0 and 2031 */ + if (!(frame->can_id & CAN_EFF_FLAG)) { + u32 id = frame->can_id & CAN_EFF_MASK; + if (id > (CAN_SFF_MASK - 16)) + return -EINVAL; + } + + if ((dev = rtcan_dev_get_by_index(ifindex)) == NULL) + return -ENXIO; + + timeout = (flags & MSG_DONTWAIT) ? RTDM_TIMEOUT_NONE : sock->tx_timeout; + + tx_wait.rt_task = rtdm_task_current(); + + /* Register the task at the socket's TX wait queue and decrement + * the TX semaphore. This must be atomic. Finally, the task must + * be deregistered again (also atomic). */ + cobalt_atomic_enter(s); + + list_add(&tx_wait.tx_wait_list, &sock->tx_wait_head); + + /* Try to pass the guard in order to access the controller */ + ret = rtdm_sem_timeddown(&dev->tx_sem, timeout, NULL); + + /* Only dequeue task again if socket isn't being closed i.e. if + * this task was not unblocked within the close() function. */ + if (likely(!list_empty(&tx_wait.tx_wait_list))) + /* Dequeue this task from the TX wait queue */ + list_del_init(&tx_wait.tx_wait_list); + else + /* The socket was closed. */ + ret = -EBADF; + + cobalt_atomic_leave(s); + + /* Error code returned? */ + if (ret != 0) { + /* Which error code? */ + switch (ret) { + case -EIDRM: + /* Controller is stopped or bus-off */ + ret = -ENETDOWN; + goto send_out1; + + case -EWOULDBLOCK: + /* We would block but don't want to */ + ret = -EAGAIN; + goto send_out1; + + default: + /* Return all other error codes unmodified. 
*/ + goto send_out1; + } + } + + /* We got access */ + + + /* Push message onto stack for loopback when TX done */ + if (rtcan_loopback_enabled(sock)) + rtcan_tx_push(dev, sock, frame); + + rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx); + + /* Controller should be operating */ + if (!CAN_STATE_OPERATING(dev->state)) { + if (dev->state == CAN_STATE_SLEEPING) { + ret = -ECOMM; + rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx); + rtdm_sem_up(&dev->tx_sem); + goto send_out1; + } + ret = -ENETDOWN; + goto send_out2; + } + + ret = dev->hard_start_xmit(dev, frame); + + /* Return number of bytes sent upon successful completion */ + if (ret == 0) + ret = sizeof(can_frame_t); + + send_out2: + rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx); + send_out1: + rtcan_dev_dereference(dev); + return ret; +} + + +static struct rtdm_driver rtcan_driver = { + .profile_info = RTDM_PROFILE_INFO(rtcan, + RTDM_CLASS_CAN, + RTDM_SUBCLASS_GENERIC, + RTCAN_PROFILE_VER), + .device_flags = RTDM_PROTOCOL_DEVICE, + .device_count = 1, + .context_size = sizeof(struct rtcan_socket), + .protocol_family = PF_CAN, + .socket_type = SOCK_RAW, + .ops = { + .socket = rtcan_raw_socket, + .close = rtcan_raw_close, + .ioctl_nrt = rtcan_raw_ioctl, + .recvmsg_rt = rtcan_raw_recvmsg, + .sendmsg_rt = rtcan_raw_sendmsg, + }, +}; + +static struct rtdm_device rtcan_device = { + .driver = &rtcan_driver, + .label = "rtcan", +}; + +int __init rtcan_raw_proto_register(void) +{ + return rtdm_dev_register(&rtcan_device); +} + +void __exit rtcan_raw_proto_unregister(void) +{ + rtdm_dev_unregister(&rtcan_device); +} + + +EXPORT_SYMBOL_GPL(rtcan_rcv); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h new file mode 100644 index 0000000..cd1523e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __RTCAN_RAW_H_ +#define __RTCAN_RAW_H_ + +#ifdef __KERNEL__ + +int rtcan_raw_ioctl_dev(struct rtdm_fd *fd, int request, void *arg); + +int rtcan_raw_check_filter(struct rtcan_socket *sock, + int ifindex, struct rtcan_filter_list *flist); +int rtcan_raw_add_filter(struct rtcan_socket *sock, int ifindex); +void rtcan_raw_remove_filter(struct rtcan_socket *sock); + +void rtcan_rcv(struct rtcan_device *rtcandev, struct rtcan_skb *skb); + +void rtcan_loopback(struct rtcan_device *rtcandev); +#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK +#define rtcan_loopback_enabled(sock) (sock->loopback) +#define rtcan_loopback_pending(dev) (dev->tx_socket) +#else /* !CONFIG_XENO_DRIVERS_CAN_LOOPBACK */ +#define rtcan_loopback_enabled(sock) (0) +#define rtcan_loopback_pending(dev) (0) +#endif /* CONFIG_XENO_DRIVERS_CAN_LOOPBACK */ + +#ifdef CONFIG_XENO_DRIVERS_CAN_BUS_ERR +void __rtcan_raw_enable_bus_err(struct rtcan_socket *sock); +static inline void rtcan_raw_enable_bus_err(struct rtcan_socket *sock) +{ + if ((sock->err_mask & CAN_ERR_BUSERROR)) + __rtcan_raw_enable_bus_err(sock); +} +#else +#define rtcan_raw_enable_bus_err(sock) +#endif + +int __init rtcan_raw_proto_register(void); +void __exit rtcan_raw_proto_unregister(void); + +#endif /* __KERNEL__ */ + +#endif /* __RTCAN_RAW_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c new file mode 100644 index 0000000..d1ff640 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c @@ -0,0 +1,455 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger, <wg@grandegger.com> + * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix + * Copyright (C) 2006 Andrey Volkov, Varma Electronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/delay.h> + +#include <rtdm/driver.h> + +#include <rtdm/can.h> +#include "rtcan_dev.h" +#include "rtcan_raw.h" +#include "rtcan_internal.h" + +#ifdef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD + +#define RTCAN_MAX_TSEG1 15 +#define RTCAN_MAX_TSEG2 7 + +/* + * Calculate standard bit-time values for odd bitrates. 
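+ *
+ * The search scans all tseg = tseg1 + tseg2 candidates and picks the
+ * prescaler so that clock / (brp * (1 + tseg / 2)) comes closest to
+ * the requested rate; tseg is then split around the target sample
+ * point (90 % by default, 75 % for very high or very low rates, as
+ * set up by the heuristics at the top of the function).
+ *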
+ * Most parts of this code is from Arnaud Westenberg <arnaud@wanadoo.nl> + */ +static int rtcan_calc_bit_time(struct rtcan_device *dev, + can_baudrate_t rate, + struct can_bittime_std *bit_time) +{ + int best_error = 1000000000; + int error; + int best_tseg=0, best_brp=0, best_rate=0, brp=0; + int tseg=0, tseg1=0, tseg2=0; + int clock = dev->can_sys_clock; + int sjw = 0; + int sampl_pt = 90; + + /* some heuristic specials */ + if (rate > ((1000000 + 500000) / 2)) + sampl_pt = 75; + + if (rate < ((12500 + 10000) / 2)) + sampl_pt = 75; + + if (rate < ((100000 + 125000) / 2)) + sjw = 1; + + /* tseg even = round down, odd = round up */ + for (tseg = (0 + 0 + 2) * 2; + tseg <= (RTCAN_MAX_TSEG2 + RTCAN_MAX_TSEG1 + 2) * 2 + 1; + tseg++) { + brp = clock / ((1 + tseg / 2) * rate) + tseg % 2; + if ((brp == 0) || (brp > 64)) + continue; + + error = rate - clock / (brp * (1 + tseg / 2)); + if (error < 0) + error = -error; + + if (error <= best_error) { + best_error = error; + best_tseg = tseg/2; + best_brp = brp - 1; + best_rate = clock / (brp * (1 + tseg / 2)); + } + } + + if (best_error && (rate / best_error < 10)) { + RTCAN_RTDM_DBG("%s: bitrate %d is not possible with %d Hz clock\n", + dev->name, rate, clock); + return -EDOM; + } + + tseg2 = best_tseg - (sampl_pt * (best_tseg + 1)) / 100; + + if (tseg2 < 0) + tseg2 = 0; + + if (tseg2 > RTCAN_MAX_TSEG2) + tseg2 = RTCAN_MAX_TSEG2; + + tseg1 = best_tseg - tseg2 - 2; + + if (tseg1 > RTCAN_MAX_TSEG1) { + tseg1 = RTCAN_MAX_TSEG1; + tseg2 = best_tseg-tseg1-2; + } + + bit_time->brp = best_brp + 1; + bit_time->prop_seg = 0; + bit_time->phase_seg1 = tseg1 + 1; + bit_time->phase_seg2 = tseg2 + 1; + bit_time->sjw = sjw + 1; + bit_time->sam = 0; + + return 0; +} + +#else /* !CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD */ + +/* This is the bit-time calculation method from the Linux kernel */ + +#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ + +static int can_update_spt(const struct can_bittiming_const *btc, + unsigned int sampl_pt, unsigned int tseg, + unsigned int *tseg1, unsigned int *tseg2) +{ + *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000; + *tseg2 = clamp(*tseg2, btc->tseg2_min, btc->tseg2_max); + *tseg1 = tseg - *tseg2; + if (*tseg1 > btc->tseg1_max) { + *tseg1 = btc->tseg1_max; + *tseg2 = tseg - *tseg1; + } + + return 1000 * (tseg + 1 - *tseg2) / (tseg + 1); +} + +static int rtcan_calc_bit_time(struct rtcan_device *dev, + can_baudrate_t bitrate, + struct can_bittime_std *bt) +{ + const struct can_bittiming_const *btc = dev->bittiming_const; + long rate; /* current bitrate */ + long rate_error;/* difference between current and target value */ + long best_rate_error = 1000000000; + int spt; /* current sample point in thousandth */ + int spt_error; /* difference between current and target value */ + int best_spt_error = 1000; + int sampl_pt; /* target sample point */ + int best_tseg = 0, best_brp = 0; /* current best values for tseg and brp */ + unsigned int brp, tsegall, tseg, tseg1, tseg2; + u64 v64; + + if (!dev->bittiming_const) + return -ENOTSUPP; + + /* Use CIA recommended sample points */ + if (bitrate > 800000) + sampl_pt = 750; + else if (bitrate > 500000) + sampl_pt = 800; + else + sampl_pt = 875; + + /* tseg even = round down, odd = round up */ + for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1; + tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) { + tsegall = 1 + tseg / 2; + + /* Compute all possible tseg choices (tseg=tseg1+tseg2) */ + brp = dev->can_sys_clock / (tsegall * bitrate) + tseg % 2; + + /* chose brp step 
which is possible in system */ + brp = (brp / btc->brp_inc) * btc->brp_inc; + if ((brp < btc->brp_min) || (brp > btc->brp_max)) + continue; + + rate = dev->can_sys_clock / (brp * tsegall); + rate_error = abs((long)(bitrate - rate)); + + /* tseg brp biterror */ + if (rate_error > best_rate_error) + continue; + + /* reset sample point error if we have a better bitrate */ + if (rate_error < best_rate_error) + best_spt_error = 1000; + + spt = can_update_spt(btc, sampl_pt, tseg / 2, &tseg1, &tseg2); + spt_error = abs((long)(sampl_pt - spt)); + if (spt_error > best_spt_error) + continue; + + best_spt_error = spt_error; + best_rate_error = rate_error; + best_tseg = tseg / 2; + best_brp = brp; + + if (rate_error == 0 && spt_error == 0) + break; + } + + if (best_rate_error) { + /* Error in one-tenth of a percent */ + rate_error = (best_rate_error * 1000) / bitrate; + if (rate_error > CAN_CALC_MAX_ERROR) { + rtcandev_err(dev, + "bitrate error %ld.%ld%% too high\n", + rate_error / 10, rate_error % 10); + return -EDOM; + } else { + rtcandev_warn(dev, "bitrate error %ld.%ld%%\n", + rate_error / 10, rate_error % 10); + } + } + + /* real sample point */ + sampl_pt = can_update_spt(btc, sampl_pt, best_tseg, &tseg1, &tseg2); + + v64 = (u64)best_brp * 1000000000UL; + do_div(v64, dev->can_sys_clock); + bt->prop_seg = tseg1 / 2; + bt->phase_seg1 = tseg1 - bt->prop_seg; + bt->phase_seg2 = tseg2; + bt->sjw = 1; + bt->sam = 0; + bt->brp = best_brp; + + /* real bit-rate */ + rate = dev->can_sys_clock / (bt->brp * (tseg1 + tseg2 + 1)); + + rtcandev_dbg(dev, "real bitrate %ld, sampling point %d.%d%%\n", + rate, sampl_pt/10, sampl_pt%10); + + return 0; +} + +#endif /* CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD */ + +static inline int rtcan_raw_ioctl_dev_get(struct rtcan_device *dev, + int request, struct can_ifreq *ifr) +{ + rtdm_lockctx_t lock_ctx; + + switch (request) { + + case SIOCGIFINDEX: + ifr->ifr_ifindex = dev->ifindex; + break; + + case SIOCGCANSTATE: + rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx); + if (dev->do_get_state) + dev->state = dev->do_get_state(dev); + ifr->ifr_ifru.state = dev->state; + rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx); + break; + + case SIOCGCANCTRLMODE: + ifr->ifr_ifru.ctrlmode = dev->ctrl_mode; + break; + + case SIOCGCANBAUDRATE: + ifr->ifr_ifru.baudrate = dev->baudrate; + break; + + case SIOCGCANCUSTOMBITTIME: + ifr->ifr_ifru.bittime = dev->bit_time; + break; + } + + return 0; +} + +static inline int rtcan_raw_ioctl_dev_set(struct rtcan_device *dev, + int request, struct can_ifreq *ifr) +{ + rtdm_lockctx_t lock_ctx; + int ret = 0, started = 0; + struct can_bittime bit_time, *bt; + + switch (request) { + case SIOCSCANBAUDRATE: + if (!dev->do_set_bit_time) + return 0; + ret = rtcan_calc_bit_time(dev, ifr->ifr_ifru.baudrate, &bit_time.std); + if (ret) + break; + bit_time.type = CAN_BITTIME_STD; + break; + } + + rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx); + + if (dev->do_get_state) + dev->state = dev->do_get_state(dev); + + switch (request) { + case SIOCSCANCTRLMODE: + case SIOCSCANBAUDRATE: + case SIOCSCANCUSTOMBITTIME: + if ((started = CAN_STATE_OPERATING(dev->state))) { + if ((ret = dev->do_set_mode(dev, CAN_MODE_STOP, &lock_ctx))) + goto out; + } + break; + } + + switch (request) { + case SIOCSCANMODE: + if (dev->do_set_mode && + !(ifr->ifr_ifru.mode == CAN_MODE_START && + CAN_STATE_OPERATING(dev->state))) + ret = dev->do_set_mode(dev, ifr->ifr_ifru.mode, &lock_ctx); + break; + + case SIOCSCANCTRLMODE: + dev->ctrl_mode = ifr->ifr_ifru.ctrlmode; + break; + + 
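+
+	/* For SIOCSCANBAUDRATE, bit_time was already computed by
+	 * rtcan_calc_bit_time() in the first switch above; here it is
+	 * only handed down to the controller. */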
case SIOCSCANBAUDRATE: + ret = dev->do_set_bit_time(dev, &bit_time, &lock_ctx); + if (!ret) { + dev->baudrate = ifr->ifr_ifru.baudrate; + dev->bit_time = bit_time; + } + break; + + case SIOCSCANCUSTOMBITTIME: + bt = &ifr->ifr_ifru.bittime; + ret = dev->do_set_bit_time(dev, bt, &lock_ctx); + if (!ret) { + dev->bit_time = *bt; + if (bt->type == CAN_BITTIME_STD && bt->std.brp) + dev->baudrate = (dev->can_sys_clock / + (bt->std.brp * (1 + bt->std.prop_seg + + bt->std.phase_seg1 + + bt->std.phase_seg2))); + else + dev->baudrate = CAN_BAUDRATE_UNKNOWN; + } + break; + + default: + ret = -EOPNOTSUPP; + break; + } + + out: + if (started) + dev->do_set_mode(dev, CAN_MODE_START, &lock_ctx); + + rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx); + + return ret; +} + +int rtcan_raw_ioctl_dev(struct rtdm_fd *fd, int request, void *arg) +{ + struct can_ifreq *ifr; + int ret = 0, get = 0; + union { + /* + * We need to deal with callers still passing struct ifreq + * instead of can_ifreq, which might have a larger memory + * footprint (but can't be smaller though). Field offsets + * will be the same regardless. + */ + struct ifreq ifr_legacy; + struct can_ifreq ifr_can; + } ifr_buf; + struct rtcan_device *dev; + + switch (request) { + + case SIOCGIFINDEX: + case SIOCGCANSTATE: + case SIOCGCANBAUDRATE: + case SIOCGCANCUSTOMBITTIME: + get = 1; + fallthrough; + case SIOCSCANMODE: + case SIOCSCANCTRLMODE: + case SIOCSCANBAUDRATE: + case SIOCSCANCUSTOMBITTIME: + + if (rtdm_fd_is_user(fd)) { + /* Copy struct can_ifreq from userspace */ + if (!rtdm_read_user_ok(fd, arg, + sizeof(struct can_ifreq)) || + rtdm_copy_from_user(fd, &ifr_buf, arg, + sizeof(struct can_ifreq))) + return -EFAULT; + + ifr = &ifr_buf.ifr_can; + } else + ifr = (struct can_ifreq *)arg; + + /* Get interface index and data */ + dev = rtcan_dev_get_by_name(ifr->ifr_name); + if (dev == NULL) + return -ENODEV; + + if (get) { + ret = rtcan_raw_ioctl_dev_get(dev, request, ifr); + rtcan_dev_dereference(dev); + if (ret == 0 && rtdm_fd_is_user(fd)) { + /* + * Since we yet tested if user memory is rw safe, + * we can copy to user space directly. 
+ */
+			if (rtdm_copy_to_user(fd, arg, ifr,
+					      sizeof(struct can_ifreq)))
+				return -EFAULT;
+		}
+	} else {
+		ret = rtcan_raw_ioctl_dev_set(dev, request, ifr);
+		rtcan_dev_dereference(dev);
+	}
+	break;
+
+    default:
+	ret = -EOPNOTSUPP;
+	break;
+
+    }
+
+    return ret;
+}
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_BUS_ERR
+void __rtcan_raw_enable_bus_err(struct rtcan_socket *sock)
+{
+	int i, begin, end;
+	struct rtcan_device *dev;
+	rtdm_lockctx_t lock_ctx;
+	int ifindex = atomic_read(&sock->ifindex);
+
+	if (ifindex) {
+		begin = ifindex;
+		end = ifindex;
+	} else {
+		begin = 1;
+		end = RTCAN_MAX_DEVICES;
+	}
+
+	for (i = begin; i <= end; i++) {
+		if ((dev = rtcan_dev_get_by_index(i)) == NULL)
+			continue;
+
+		if (dev->do_enable_bus_err) {
+			rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx);
+			dev->do_enable_bus_err(dev);
+			rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+		}
+		rtcan_dev_dereference(dev);
+	}
+}
+#endif /* CONFIG_XENO_DRIVERS_CAN_BUS_ERR */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c
new file mode 100644
index 0000000..e121061
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ + +#include <linux/module.h> +#include <linux/delay.h> + +#include <rtdm/driver.h> + +#include <rtdm/can.h> +#include "rtcan_internal.h" +#include "rtcan_socket.h" +#include "rtcan_list.h" +#include "rtcan_dev.h" +#include "rtcan_raw.h" + + +#if 0 +void rtcan_raw_print_filter(struct rtcan_device *dev) +{ + int i; + struct rtcan_recv *r = dev->receivers; + + rtdm_printk("%s: recv_list=%p empty_list=%p free_entries=%d\n", + dev->name, dev->recv_list, dev->empty_list, dev->free_entries); + for (i = 0; i < RTCAN_MAX_RECEIVERS; i++, r++) { + rtdm_printk("%2d %p sock=%p next=%p id=%x mask=%x\n", + i, r, r->sock, r->next, + r->can_filter.can_id, r->can_filter.can_mask); + } +} +#else +#define rtcan_raw_print_filter(dev) +#endif + + +static inline void rtcan_raw_mount_filter(can_filter_t *recv_filter, + can_filter_t *filter) +{ + if (filter->can_id & CAN_INV_FILTER) { + recv_filter->can_id = filter->can_id & ~CAN_INV_FILTER; + recv_filter->can_mask = filter->can_mask | CAN_INV_FILTER; + } else { + recv_filter->can_id = filter->can_id; + recv_filter->can_mask = filter->can_mask & ~CAN_INV_FILTER; + } + + /* Apply mask for fast filter check */ + recv_filter->can_id &= recv_filter->can_mask; +} + + +int rtcan_raw_check_filter(struct rtcan_socket *sock, int ifindex, + struct rtcan_filter_list *flist) +{ + int old_ifindex = 0, old_flistlen_all = 0; + int free_entries, i, begin, end; + struct rtcan_device *dev; + int flistlen; + + if (rtcan_flist_no_filter(flist)) + return 0; + + /* Check if filter list has been defined by user */ + flistlen = (flist) ? flist->flistlen : 1; + + /* Now we check if a reception list would overflow. This takes some + * preparation, so let's go ... */ + + /* Check current bind status */ + if (rtcan_sock_has_filter(sock)) { + /* Socket is bound */ + i = atomic_read(&sock->ifindex); + + if (i == 0) + /* Socket was bound to ALL interfaces */ + old_flistlen_all = sock->flistlen; + else /* Socket was bound to only one interface */ + old_ifindex = i; + } + + if (ifindex) { + /* We bind the socket to only one interface. */ + begin = ifindex; + end = ifindex; + } else { + /* Socket must be bound to all interfaces. */ + begin = 1; + end = RTCAN_MAX_DEVICES; + } + + /* Check if there is space for the new binding */ + for (i = begin; i <= end; i++) { + if ((dev = rtcan_dev_get_by_index(i)) == NULL) + continue; + free_entries = dev->free_entries + old_flistlen_all; + rtcan_dev_dereference(dev); + if (i == old_ifindex) + free_entries += sock->flistlen; + /* Compare free list space to new filter list length */ + if (free_entries < flistlen) + return -ENOSPC; + } + + return 0; +} + + +int rtcan_raw_add_filter(struct rtcan_socket *sock, int ifindex) +{ + int i, j, begin, end; + struct rtcan_recv *first, *last; + struct rtcan_device *dev; + /* Check if filter list has been defined by user */ + int flistlen; + + if (rtcan_flist_no_filter(sock->flist)) { + return 0; + } + + flistlen = (sock->flist) ? sock->flist->flistlen : 0; + + if (ifindex) { + /* We bind the socket to only one interface. */ + begin = ifindex; + end = ifindex; + } else { + /* Socket must be bound to all interfaces. 
*/ + begin = 1; + end = RTCAN_MAX_DEVICES; + } + + for (i = begin; i <= end; i++) { + if ((dev = rtcan_dev_get_by_index(i)) == NULL) + continue; + + /* Take first entry of empty list */ + first = last = dev->empty_list; + /* Check if filter list is empty */ + if (flistlen) { + /* Filter list is not empty */ + /* Register first filter */ + rtcan_raw_mount_filter(&last->can_filter, + &sock->flist->flist[0]); + last->match_count = 0; + last->sock = sock; + for (j = 1; j < flistlen; j++) { + /* Register remaining filters */ + last = last->next; + rtcan_raw_mount_filter(&last->can_filter, + &sock->flist->flist[j]); + last->sock = sock; + last->match_count = 0; + } + /* Decrease free entries counter by length of filter list */ + dev->free_entries -= flistlen; + + } else { + /* Filter list is empty. Socket must be bound to all CAN IDs. */ + /* Fill list entry members */ + last->can_filter.can_id = last->can_filter.can_mask = 0; + last->sock = sock; + last->match_count = 0; + /* Decrease free entries counter by 1 + * (one filter for all CAN frames) */ + dev->free_entries--; + } + + /* Set new empty list header */ + dev->empty_list = last->next; + /* Add new partial recv list to the head of reception list */ + last->next = dev->recv_list; + /* Adjust rececption list pointer */ + dev->recv_list = first; + + rtcan_raw_print_filter(dev); + rtcan_dev_dereference(dev); + } + + return (flistlen) ? flistlen : 1; +} + + +void rtcan_raw_remove_filter(struct rtcan_socket *sock) +{ + int i, j, begin, end; + struct rtcan_recv *first, *next, *last; + int ifindex = atomic_read(&sock->ifindex); + struct rtcan_device *dev; + + if (!rtcan_sock_has_filter(sock)) /* nothing to do */ + return; + + if (ifindex) { + /* Socket was bound to one interface only. */ + begin = ifindex; + end = ifindex; + } else { + /* Socket was bound to all interfaces */ + begin = 1; + end = RTCAN_MAX_DEVICES; + } + + for (i = begin; i <= end; i++) { + + if ((dev = rtcan_dev_get_by_index(i)) == NULL) + continue; + + /* Search for first list entry pointing to this socket */ + first = NULL; + next = dev->recv_list; + while (next->sock != sock) { + first = next; + next = first->next; + } + + /* Now go to the end of the old filter list */ + last = next; + for (j = 1; j < sock->flistlen; j++) + last = last->next; + + /* Detach found first list entry from reception list */ + if (first) + first->next = last->next; + else + dev->recv_list = last->next; + /* Add partial list to the head of empty list */ + last->next = dev->empty_list; + /* Adjust empty list pointer */ + dev->empty_list = next; + + /* Increase free entries counter by length of old filter list */ + dev->free_entries += sock->flistlen; + + rtcan_raw_print_filter(dev); + rtcan_dev_dereference(dev); + } +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c new file mode 100644 index 0000000..edd4619 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2005,2006 Sebastian Smolorz + * <Sebastian.Smolorz@stud.uni-hannover.de> + * + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * + * Based on stack/socket.c - sockets implementation for RTnet + * + * Copyright (C) 1999 Lineo, Inc + * 1999, 2002 David A. 
Schleef <ds@schleef.org> + * 2002 Ulrich Marx <marx@kammer.uni-hannover.de> + * 2003-2005 Jan Kiszka <jan.kiszka@web.de> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include "rtcan_socket.h" +#include "rtcan_list.h" + + +LIST_HEAD(rtcan_socket_list); + +void rtcan_socket_init(struct rtdm_fd *fd) +{ + struct rtcan_socket *sock = rtdm_fd_to_private(fd); + rtdm_lockctx_t lock_ctx; + + + rtdm_sem_init(&sock->recv_sem, 0); + + sock->recv_head = 0; + sock->recv_tail = 0; + atomic_set(&sock->ifindex, 0); + sock->flistlen = RTCAN_SOCK_UNBOUND; + sock->flist = NULL; + sock->err_mask = 0; + sock->rx_buf_full = 0; + sock->flags = 0; +#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK + sock->loopback = 1; +#endif + + sock->tx_timeout = RTDM_TIMEOUT_INFINITE; + sock->rx_timeout = RTDM_TIMEOUT_INFINITE; + + INIT_LIST_HEAD(&sock->tx_wait_head); + + rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx); + list_add(&sock->socket_list, &rtcan_socket_list); + rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx); +} + + +void rtcan_socket_cleanup(struct rtdm_fd *fd) +{ + struct rtcan_socket *sock = rtdm_fd_to_private(fd); + struct tx_wait_queue *tx_waiting; + rtdm_lockctx_t lock_ctx; + int tx_list_empty; + + /* Wake up sleeping senders. This is re-entrant-safe. */ + do { + cobalt_atomic_enter(lock_ctx); + /* Is someone there? */ + if (list_empty(&sock->tx_wait_head)) + tx_list_empty = 1; + else { + tx_list_empty = 0; + + /* Get next entry pointing to a waiting task */ + tx_waiting = list_entry(sock->tx_wait_head.next, + struct tx_wait_queue, tx_wait_list); + + /* Remove it from list */ + list_del_init(&tx_waiting->tx_wait_list); + + /* Wake task up (atomic section is left implicitly) */ + rtdm_task_unblock(tx_waiting->rt_task); + } + cobalt_atomic_leave(lock_ctx); + } while (!tx_list_empty); + + rtdm_sem_destroy(&sock->recv_sem); + + rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx); + if (sock->socket_list.next) { + list_del(&sock->socket_list); + sock->socket_list.next = NULL; + } + rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx); +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h new file mode 100644 index 0000000..cf4422a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2005,2006 Sebastian Smolorz + * <Sebastian.Smolorz@stud.uni-hannover.de> + * + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * + * Derived from RTnet project file include/stack/socket.h: + * + * Copyright (C) 1999 Lineo, Inc + * 1999, 2002 David A. 
Schleef <ds@schleef.org>
+ *               2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_SOCKET_H_
+#define __RTCAN_SOCKET_H_
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+
+
+
+/* This MUST BE 2^N */
+#define RTCAN_RXBUF_SIZE CONFIG_XENO_DRIVERS_CAN_RXBUF_SIZE
+
+/* Size of timestamp */
+#define RTCAN_TIMESTAMP_SIZE sizeof(nanosecs_abs_t)
+
+/* Bit in the can_dlc member of struct ring_buffer_frame used to indicate
+ * whether a frame has got a timestamp or not */
+#define RTCAN_HAS_TIMESTAMP 0x80
+
+/* Mask for clearing bit RTCAN_HAS_TIMESTAMP */
+#define RTCAN_HAS_NO_TIMESTAMP 0x7F
+
+#define RTCAN_SOCK_UNBOUND -1
+#define RTCAN_FLIST_NO_FILTER (struct rtcan_filter_list *)-1
+#define rtcan_flist_no_filter(f) ((f) == RTCAN_FLIST_NO_FILTER)
+#define rtcan_sock_has_filter(s) ((s)->flistlen > 0)
+#define rtcan_sock_is_bound(s) ((s)->flistlen >= 0)
+
+/*
+ * Internal frame representation within the ring buffer of a
+ * struct rtcan_socket.
+ *
+ * The data array is of arbitrary size when the frame is actually
+ * stored in a socket's ring buffer. The timestamp member exists if the
+ * socket was set to take timestamps (then it follows directly after the
+ * arbitrary-sized data array), otherwise it does not exist.
+ */
+struct rtcan_rb_frame {
+
+    /* CAN ID representation equal to struct can_frame */
+    uint32_t can_id;
+
+    /* Interface index from which the frame originates */
+    unsigned char can_ifindex;
+
+    /* DLC (between 0 and 15) and mark if frame has got a timestamp. The
+     * existence of a timestamp is indicated by the RTCAN_HAS_TIMESTAMP
+     * bit. */
+    unsigned char can_dlc;
+
+    /* Data bytes */
+    uint8_t data[8];
+
+    /* High precision timestamp indicating when the frame was received.
+     * Exists when RTCAN_HAS_TIMESTAMP bit in can_dlc is set. */
+    nanosecs_abs_t timestamp;
+
+} __attribute__ ((packed));
+
+
+/* Size of struct rtcan_rb_frame without any data bytes and timestamp */
+#define EMPTY_RB_FRAME_SIZE \
+    sizeof(struct rtcan_rb_frame) - 8 - RTCAN_TIMESTAMP_SIZE
+
+
+/*
+ * Wrapper structure around a struct rtcan_rb_frame with actual size
+ * of the frame.
+ *
+ * This isn't really a socket buffer but only a sort of one. It is constructed
+ * within the interrupt routine when a CAN frame is read from
+ * the controller. Then it's passed to the reception handler where only
+ * rb_frame finds its way to the sockets' ring buffers.
+ */
+struct rtcan_skb {
+    /* Actual size of following rb_frame (without timestamp) */
+    size_t rb_frame_size;
+    /* Frame to be stored in the sockets' ring buffers (as is) */
+    struct rtcan_rb_frame rb_frame;
+};
+
+struct rtcan_filter_list {
+    int flistlen;
+    struct can_filter flist[1];
+};
+
+/*
+ * Internal CAN socket structure.
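+ * (One instance lives in the private area of each open RTDM file
+ * descriptor; see rtdm_fd_to_private() in rtcan_socket_init().)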
+ * + * Every socket has an internal ring buffer for incoming messages. A message + * is not stored as a struct can_frame (in order to save buffer space) + * but as struct rtcan_rb_frame of arbitrary length depending on the + * actual payload. + */ +struct rtcan_socket { + + struct list_head socket_list; + + unsigned long flags; + + /* Transmission timeout in ns. Protected by rtcan_socket_lock + * in all socket structures. */ + nanosecs_rel_t tx_timeout; + + /* Reception timeout in ns. Protected by rtcan_socket_lock + * in all socket structures. */ + nanosecs_rel_t rx_timeout; + + + /* Begin of first frame data in the ring buffer. Protected by + * rtcan_socket_lock in all socket structures. */ + int recv_head; + + /* End of last frame data in the ring buffer. I.e. position of first + * free byte in the ring buffer. Protected by + * rtcan_socket_lock in all socket structures. */ + int recv_tail; + + /* Ring buffer for incoming CAN frames. Protected by + * rtcan_socket_lock in all socket structures. */ + unsigned char recv_buf[RTCAN_RXBUF_SIZE]; + + /* Semaphore for receivers and incoming messages */ + rtdm_sem_t recv_sem; + + + /* All senders waiting to be able to send + * via this socket are queued here */ + struct list_head tx_wait_head; + + + /* Interface index the socket is bound to. Protected by + * rtcan_recv_list_lock in all socket structures. */ + atomic_t ifindex; + + /* Length of filter list. I.e. how many entries does this socket occupy in + * the reception list. 0 if unbound. Protected by + * rtcan_recv_list_lock in all socket structures. */ + int flistlen; + + uint32_t err_mask; + + uint32_t rx_buf_full; + + struct rtcan_filter_list *flist; + +#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK + int loopback; +#endif +}; + + + +/* + * Get the RTDM context from a struct rtcan_socket + * + * @param[in] sock Pointer to socket structure + * + * @return Pointer to a file descriptor of type struct rtdm_fd this socket + * belongs to + */ +/* FIXME: to be replaced with container_of */ +static inline struct rtdm_fd *rtcan_socket_to_fd(struct rtcan_socket *sock) +{ + return rtdm_private_to_fd(sock); +} + +/* Spinlock protecting the ring buffers and the timeouts of all + * rtcan_sockets */ +extern rtdm_lock_t rtcan_socket_lock; +extern struct list_head rtcan_socket_list; + +extern void rtcan_socket_init(struct rtdm_fd *fd); +extern void rtcan_socket_cleanup(struct rtdm_fd *fd); + + +#endif /* __RTCAN_SOCKET_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h new file mode 100644 index 0000000..c2ced56 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __RTCAN_VERSION_H_ +#define __RTCAN_VERSION_H_ + +#define RTCAN_MAJOR_VER 0 +#define RTCAN_MINOR_VER 90 +#define RTCAN_BUGFIX_VER 2 + +#endif /* __RTCAN_VERSION_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c new file mode 100644 index 0000000..28e06a9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c @@ -0,0 +1,199 @@ +/* + * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + + +#include <linux/module.h> +#include <rtdm/driver.h> +#include <rtdm/can.h> +#include "rtcan_dev.h" +#include "rtcan_raw.h" + +#define RTCAN_DEV_NAME "rtcan%d" +#define RTCAN_DRV_NAME "VIRT" +#define RTCAN_MAX_VIRT_DEVS 8 + +#define VIRT_TX_BUFS 1 + +static char *virt_ctlr_name = "<virtual>"; +static char *virt_board_name = "<virtual>"; + +MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>"); +MODULE_DESCRIPTION("Virtual RT-Socket-CAN driver"); +MODULE_LICENSE("GPL"); + +static unsigned int devices = 2; + +module_param(devices, uint, 0400); +MODULE_PARM_DESC(devices, "Number of devices on the virtual bus"); + +static struct rtcan_device *rtcan_virt_devs[RTCAN_MAX_VIRT_DEVS]; + + +static int rtcan_virt_start_xmit(struct rtcan_device *tx_dev, + can_frame_t *tx_frame) +{ + int i; + struct rtcan_device *rx_dev; + struct rtcan_skb skb; + struct rtcan_rb_frame *rx_frame = &skb.rb_frame; + rtdm_lockctx_t lock_ctx; + + /* we can transmit immediately again */ + rtdm_sem_up(&tx_dev->tx_sem); + tx_dev->tx_count++; + + skb.rb_frame_size = EMPTY_RB_FRAME_SIZE; + + rx_frame->can_dlc = tx_frame->can_dlc; + rx_frame->can_id = tx_frame->can_id; + + if (!(tx_frame->can_id & CAN_RTR_FLAG)) { + memcpy(rx_frame->data, tx_frame->data, tx_frame->can_dlc); + skb.rb_frame_size += tx_frame->can_dlc; + } + + rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx); + rtdm_lock_get(&rtcan_socket_lock); + + + /* Deliver to all other devices on the virtual bus */ + for (i = 0; i < devices; i++) { + rx_dev = rtcan_virt_devs[i]; + if (rx_dev->state == CAN_STATE_ACTIVE) { + if (tx_dev != rx_dev) { + rx_frame->can_ifindex = rx_dev->ifindex; + rtcan_rcv(rx_dev, &skb); + } else if (rtcan_loopback_pending(tx_dev)) + rtcan_loopback(tx_dev); + } + } + rtdm_lock_put(&rtcan_socket_lock); + rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx); + + return 0; +} + + +static int rtcan_virt_set_mode(struct rtcan_device *dev, can_mode_t mode, + rtdm_lockctx_t *lock_ctx) +{ + int err = 0; + + switch (mode) { + case CAN_MODE_STOP: + dev->state = CAN_STATE_STOPPED; + /* Wake up waiting senders */ + rtdm_sem_destroy(&dev->tx_sem); + break; + + case CAN_MODE_START: + rtdm_sem_init(&dev->tx_sem, VIRT_TX_BUFS); + dev->state = CAN_STATE_ACTIVE; + break; + + default: + err = -EOPNOTSUPP; + } + + return err; +} + + +static int 
__init rtcan_virt_init_one(int idx)
+{
+    struct rtcan_device *dev;
+    int err;
+
+    if ((dev = rtcan_dev_alloc(0, 0)) == NULL)
+        return -ENOMEM;
+
+    dev->ctrl_name = virt_ctlr_name;
+    dev->board_name = virt_board_name;
+
+    rtcan_virt_set_mode(dev, CAN_MODE_STOP, NULL);
+
+    strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+    dev->hard_start_xmit = rtcan_virt_start_xmit;
+    dev->do_set_mode = rtcan_virt_set_mode;
+
+    /* Register RTDM device */
+    err = rtcan_dev_register(dev);
+    if (err) {
+        printk(KERN_ERR "ERROR %d while trying to register RTCAN device!\n", err);
+        goto error_out;
+    }
+
+    /* Remember initialized devices */
+    rtcan_virt_devs[idx] = dev;
+
+    printk("%s: %s driver loaded\n", dev->name, RTCAN_DRV_NAME);
+
+    return 0;
+
+ error_out:
+    rtcan_dev_free(dev);
+    return err;
+}
+
+
+/** Init module */
+static int __init rtcan_virt_init(void)
+{
+    int i, err = 0;
+
+    if (!rtdm_available())
+        return -ENOSYS;
+
+    for (i = 0; i < devices; i++) {
+        err = rtcan_virt_init_one(i);
+        if (err) {
+            while (--i >= 0) {
+                struct rtcan_device *dev = rtcan_virt_devs[i];
+
+                rtcan_dev_unregister(dev);
+                rtcan_dev_free(dev);
+            }
+            break;
+        }
+    }
+
+    return err;
+}
+
+
+/** Cleanup module */
+static void __exit rtcan_virt_exit(void)
+{
+    int i;
+    struct rtcan_device *dev;
+
+    for (i = 0; i < devices; i++) {
+        dev = rtcan_virt_devs[i];
+
+        printk("Unloading %s device %s\n", RTCAN_DRV_NAME, dev->name);
+
+        rtcan_virt_set_mode(dev, CAN_MODE_STOP, NULL);
+        rtcan_dev_unregister(dev);
+        rtcan_dev_free(dev);
+    }
+}
+
+module_init(rtcan_virt_init);
+module_exit(rtcan_virt_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig
new file mode 100644
index 0000000..9fab4a4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig
@@ -0,0 +1,100 @@
+config XENO_DRIVERS_CAN_SJA1000
+	depends on XENO_DRIVERS_CAN
+	tristate "Philips SJA1000 CAN controller"
+	select XENO_DRIVERS_CAN_BUS_ERR
+
+config XENO_DRIVERS_CAN_SJA1000_ISA
+	depends on XENO_DRIVERS_CAN_SJA1000
+	tristate "Standard ISA controllers"
+	help
+
+	This driver is for CAN devices connected to the ISA bus of a PC
+	or a PC/104 system. The I/O port, interrupt number and a few other
+	hardware specific parameters can be defined via module parameters.
+
+config XENO_DRIVERS_CAN_SJA1000_MEM
+	depends on XENO_DRIVERS_CAN_SJA1000
+	tristate "Memory mapped controllers"
+	help
+
+	This driver is for memory mapped CAN devices. The memory address,
+	interrupt number and a few other hardware specific parameters can
+	be defined via module parameters.
+
+config XENO_DRIVERS_CAN_SJA1000_PEAK_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "PEAK PCI Card"
+	help
+
+	This driver is for the PCAN PCI, the PC-PCI CAN plug-in card (1 or
+	2 channel) from PEAK Systems (http://www.peak-system.com). To get
+	the second channel working, Xenomai's shared interrupt support
+	must be enabled.
+
+config XENO_DRIVERS_CAN_SJA1000_IXXAT_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "IXXAT PCI Card"
+	help
+
+	This driver is for the IXXAT PC-I 04/PCI card (1 or 2 channel)
+	from the IXXAT Automation GmbH (http://www.ixxat.de). To get
+	the second channel working, Xenomai's shared interrupt support
+	must be enabled.
+
+config XENO_DRIVERS_CAN_SJA1000_ADV_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "ADVANTECH PCI Cards"
+	help
+
+	This driver is for the ADVANTECH PCI cards (1 or more channels).
+	It supports the 1680U and some other ones.
+
+
+config XENO_DRIVERS_CAN_SJA1000_PLX_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "PLX90xx PCI-bridge based Cards"
+	help
+
+	This driver is for CAN interface cards based on
+	the PLX90xx PCI bridge.
+	The driver currently supports:
+	 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
+	 - Adlink PCI-7841/cPCI-7841 SE card
+	 - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/)
+	 - esd CAN-PCI/PMC/266
+	 - esd CAN-PCIe/2000
+	 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
+	 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config XENO_DRIVERS_CAN_SJA1000_EMS_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "EMS CPC PCI Card"
+	help
+
+	This driver is for the 2 channel CPC PCI card from EMS Dr. Thomas
+	Wünsche (http://www.ems-wuensche.de). To get the second channel
+	working, Xenomai's shared interrupt support must be enabled.
+
+config XENO_DRIVERS_CAN_SJA1000_ESD_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "ESD PCI Cards (DEPRECATED)"
+	help
+
+	This driver supports the esd PCI CAN cards CAN-PCI/200,
+	CAN-PCI/266, CAN-PMC/266 (PMC), CAN-CPCI/200 (CompactPCI),
+	CAN-PCIe2000 (PCI Express) and CAN-PCI104/200 (PCI104)
+	from the esd electronic system design gmbh (http://www.esd.eu).
+
+	This driver is deprecated. Its functionality is now provided by
+	the "PLX90xx PCI-bridge based Cards" driver.
+
+config XENO_DRIVERS_CAN_SJA1000_PEAK_DNG
+	depends on XENO_DRIVERS_CAN_SJA1000 && !PARPORT
+	tristate "PEAK Parallel Port Dongle"
+	help
+
+	This driver is for the PCAN Dongle, the PC parallel port to CAN
+	converter from PEAK Systems (http://www.peak-system.com). You need
+	to disable parallel port support in the kernel (CONFIG_PARPORT) for
+	proper operation. The interface type (sp or epp), I/O port and
+	interrupt number should be defined via module parameters.
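For illustration only (not part of the patch): each tristate above maps to one xeno_can_* module in the Makefile that follows, so a kernel configuration fragment building the SJA1000 core plus, say, the ISA and PEAK PCI drivers as modules would read:

    CONFIG_XENO_DRIVERS_CAN_SJA1000=m
    CONFIG_XENO_DRIVERS_CAN_SJA1000_ISA=m
    CONFIG_XENO_DRIVERS_CAN_SJA1000_PEAK_PCI=m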
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile new file mode 100644 index 0000000..ff67155 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile @@ -0,0 +1,24 @@ +ccflags-y += -I$(srctree)/drivers/xenomai/can -I$(srctree)/drivers/xenomai/can/sja1000 + +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000) += xeno_can_sja1000.o +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_PEAK_PCI) += xeno_can_peak_pci.o +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_PEAK_DNG) += xeno_can_peak_dng.o +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_PLX_PCI) += xeno_can_plx_pci.o +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_IXXAT_PCI) += xeno_can_ixxat_pci.o +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_ADV_PCI) += xeno_can_adv_pci.o +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_EMS_PCI) += xeno_can_ems_pci.o +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_ESD_PCI) += xeno_can_esd_pci.o +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_ISA) += xeno_can_isa.o +obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_MEM) += xeno_can_mem.o + +xeno_can_sja1000-y := rtcan_sja1000.o +xeno_can_sja1000-$(CONFIG_FS_PROCFS) += rtcan_sja1000_proc.o +xeno_can_peak_pci-y := rtcan_peak_pci.o +xeno_can_peak_dng-y := rtcan_peak_dng.o +xeno_can_plx_pci-y := rtcan_plx_pci.o +xeno_can_ixxat_pci-y := rtcan_ixxat_pci.o +xeno_can_adv_pci-y := rtcan_adv_pci.o +xeno_can_ems_pci-y := rtcan_ems_pci.o +xeno_can_esd_pci-y := rtcan_esd_pci.o +xeno_can_isa-y := rtcan_isa.o +xeno_can_mem-y := rtcan_mem.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c new file mode 100644 index 0000000..f09be05 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c @@ -0,0 +1,360 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * Copyright (C) 2012 Thierry Bultel <thierry.bultel@basystemes.fr> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/io.h> + +#include <rtdm/driver.h> + +#define ADV_PCI_BASE_SIZE 0x80 + +/* CAN device profile */ +#include <rtdm/can.h> +#include <rtcan_dev.h> +#include <rtcan_raw.h> +#include <rtcan_internal.h> +#include <rtcan_sja1000.h> +#include <rtcan_sja1000_regs.h> + +#define RTCAN_DEV_NAME "rtcan%d" +#define RTCAN_DRV_NAME "ADV-PCI-CAN" + +static char *adv_pci_board_name = "ADV-PCI"; + +MODULE_AUTHOR("Thierry Bultel <thierry.bultel@basystemes.fr>"); +MODULE_DESCRIPTION("RTCAN board driver for Advantech PCI cards"); +MODULE_LICENSE("GPL"); + +struct rtcan_adv_pci { + struct pci_dev *pci_dev; + struct rtcan_device *slave_dev; + void __iomem *conf_addr; + void __iomem *base_addr; +}; + +/* + * According to the datasheet, + * internal clock is 1/2 of the external oscillator frequency + * which is 16 MHz + */ +#define ADV_PCI_CAN_CLOCK (16000000 / 2) + +/* + * Output control register + Depends on the board configuration + */ + +#define ADV_PCI_OCR (SJA_OCR_MODE_NORMAL |\ + SJA_OCR_TX0_PUSHPULL |\ + SJA_OCR_TX1_PUSHPULL |\ + SJA_OCR_TX1_INVERT) + +/* + * In the CDR register, you should set CBP to 1. + */ +#define ADV_PCI_CDR (SJA_CDR_CBP | SJA_CDR_CAN_MODE) + +#define ADV_PCI_VENDOR_ID 0x13fe + +#define CHANNEL_SINGLE 0 /* this is a single channel device */ +#define CHANNEL_MASTER 1 /* multi channel device, this device is master */ +#define CHANNEL_SLAVE 2 /* multi channel device, this is slave */ + +#define ADV_PCI_DEVICE(device_id)\ + { ADV_PCI_VENDOR_ID, device_id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 } + +static const struct pci_device_id adv_pci_tbl[] = { + ADV_PCI_DEVICE(0x1680), + ADV_PCI_DEVICE(0x3680), + ADV_PCI_DEVICE(0x2052), + ADV_PCI_DEVICE(0x1681), + ADV_PCI_DEVICE(0xc001), + ADV_PCI_DEVICE(0xc002), + ADV_PCI_DEVICE(0xc004), + ADV_PCI_DEVICE(0xc101), + ADV_PCI_DEVICE(0xc102), + ADV_PCI_DEVICE(0xc104), + /* required last entry */ + { } +}; + +MODULE_DEVICE_TABLE(pci, adv_pci_tbl); + +static u8 rtcan_adv_pci_read_reg(struct rtcan_device *dev, int port) +{ + struct rtcan_adv_pci *board = (struct rtcan_adv_pci *)dev->board_priv; + + return ioread8(board->base_addr + port); +} + +static void rtcan_adv_pci_write_reg(struct rtcan_device *dev, int port, u8 data) +{ + struct rtcan_adv_pci *board = (struct rtcan_adv_pci *)dev->board_priv; + + iowrite8(data, board->base_addr + port); +} + +static void rtcan_adv_pci_del_chan(struct pci_dev *pdev, + struct rtcan_device *dev) +{ + struct rtcan_adv_pci *board; + + if (!dev) + return; + + board = (struct rtcan_adv_pci *)dev->board_priv; + + rtcan_sja1000_unregister(dev); + + pci_iounmap(pdev, board->base_addr); + + rtcan_dev_free(dev); +} + + +static int rtcan_adv_pci_add_chan(struct pci_dev *pdev, + int channel, + unsigned int bar, + unsigned int offset, + struct rtcan_device **master_dev) +{ + struct rtcan_device *dev; + struct rtcan_sja1000 *chip; + struct rtcan_adv_pci *board; + void __iomem *base_addr; + int ret; + + dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), + sizeof(struct rtcan_adv_pci)); + if (dev == NULL) + return -ENOMEM; + + chip = (struct rtcan_sja1000 *)dev->priv; + board = (struct rtcan_adv_pci *)dev->board_priv; + + if (channel == CHANNEL_SLAVE) { + struct rtcan_adv_pci *master_board = + (struct rtcan_adv_pci *)(*master_dev)->board_priv; + master_board->slave_dev = dev; + + if (offset) { + base_addr = master_board->base_addr+offset; + } else { + base_addr = pci_iomap(pdev, bar, ADV_PCI_BASE_SIZE); + if 
(!base_addr) { + ret = -EIO; + goto failure; + } + } + } else { + base_addr = pci_iomap(pdev, bar, ADV_PCI_BASE_SIZE) + offset; + if (!base_addr) { + ret = -EIO; + goto failure; + } + } + + board->pci_dev = pdev; + board->conf_addr = NULL; + board->base_addr = base_addr; + + dev->board_name = adv_pci_board_name; + + chip->read_reg = rtcan_adv_pci_read_reg; + chip->write_reg = rtcan_adv_pci_write_reg; + + /* Clock frequency in Hz */ + dev->can_sys_clock = ADV_PCI_CAN_CLOCK; + + /* Output control register */ + chip->ocr = ADV_PCI_OCR; + + /* Clock divider register */ + chip->cdr = ADV_PCI_CDR; + + strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ); + + /* Make sure SJA1000 is in reset mode */ + chip->write_reg(dev, SJA_MOD, SJA_MOD_RM); + /* Set PeliCAN mode */ + chip->write_reg(dev, SJA_CDR, SJA_CDR_CAN_MODE); + + /* check if mode is set */ + ret = chip->read_reg(dev, SJA_CDR); + if (ret != SJA_CDR_CAN_MODE) { + ret = -EIO; + goto failure_iounmap; + } + + /* Register and setup interrupt handling */ + chip->irq_flags = RTDM_IRQTYPE_SHARED; + chip->irq_num = pdev->irq; + + RTCAN_DBG("%s: base_addr=%p conf_addr=%p irq=%d ocr=%#x cdr=%#x\n", + RTCAN_DRV_NAME, board->base_addr, board->conf_addr, + chip->irq_num, chip->ocr, chip->cdr); + + /* Register SJA1000 device */ + ret = rtcan_sja1000_register(dev); + if (ret) { + printk(KERN_ERR "ERROR %d while trying to register SJA1000 device!\n", + ret); + goto failure_iounmap; + } + + if (channel != CHANNEL_SLAVE) + *master_dev = dev; + + return 0; + +failure_iounmap: + if (channel != CHANNEL_SLAVE || !offset) + pci_iounmap(pdev, base_addr); +failure: + rtcan_dev_free(dev); + + return ret; +} + +static int adv_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret, channel; + unsigned int nb_ports = 0; + unsigned int bar = 0; + unsigned int bar_flag = 0; + unsigned int offset = 0; + unsigned int ix; + + struct rtcan_device *master_dev = NULL; + + if (!rtdm_available()) + return -ENODEV; + + dev_info(&pdev->dev, "RTCAN Registering card"); + + ret = pci_enable_device(pdev); + if (ret) + goto failure; + + dev_info(&pdev->dev, "RTCAN detected Advantech PCI card at slot #%i\n", + PCI_SLOT(pdev->devfn)); + + ret = pci_request_regions(pdev, RTCAN_DRV_NAME); + if (ret) + goto failure_device; + + switch (pdev->device) { + case 0xc001: + case 0xc002: + case 0xc004: + case 0xc101: + case 0xc102: + case 0xc104: + nb_ports = pdev->device & 0x7; + offset = 0x100; + bar = 0; + break; + case 0x1680: + case 0x2052: + nb_ports = 2; + bar = 2; + bar_flag = 1; + break; + case 0x1681: + nb_ports = 1; + bar = 2; + bar_flag = 1; + break; + default: + goto failure_regions; + } + + if (nb_ports > 1) + channel = CHANNEL_MASTER; + else + channel = CHANNEL_SINGLE; + + RTCAN_DBG("%s: Initializing device %04x:%04x:%04x\n", + RTCAN_DRV_NAME, + pdev->vendor, + pdev->device, + pdev->subsystem_device); + + ret = rtcan_adv_pci_add_chan(pdev, channel, bar, offset, &master_dev); + if (ret) + goto failure_iounmap; + + /* register slave channel, if any */ + + for (ix = 1; ix < nb_ports; ix++) { + ret = rtcan_adv_pci_add_chan(pdev, + CHANNEL_SLAVE, + bar + (bar_flag ? 
ix : 0), + offset * ix, + &master_dev); + if (ret) + goto failure_iounmap; + } + + pci_set_drvdata(pdev, master_dev); + + return 0; + +failure_iounmap: + if (master_dev) + rtcan_adv_pci_del_chan(pdev, master_dev); + +failure_regions: + pci_release_regions(pdev); + +failure_device: + pci_disable_device(pdev); + +failure: + return ret; +} + +static void adv_pci_remove_one(struct pci_dev *pdev) +{ + struct rtcan_device *dev = pci_get_drvdata(pdev); + struct rtcan_adv_pci *board = (struct rtcan_adv_pci *)dev->board_priv; + + if (board->slave_dev) + rtcan_adv_pci_del_chan(pdev, board->slave_dev); + + rtcan_adv_pci_del_chan(pdev, dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver rtcan_adv_pci_driver = { + .name = RTCAN_DRV_NAME, + .id_table = adv_pci_tbl, + .probe = adv_pci_init_one, + .remove = adv_pci_remove_one, +}; + +module_pci_driver(rtcan_adv_pci_driver); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c new file mode 100644 index 0000000..2e3001b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c @@ -0,0 +1,393 @@ +/* + * Copyright (C) 2007, 2016 Wolfgang Grandegger <wg@grandegger.com> + * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com> + * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com> + * + * Derived from Linux CAN SJA1000 PCI driver "ems_pci". + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/io.h> + +#include <rtdm/driver.h> + +/* CAN device profile */ +#include <rtdm/can.h> +#include <rtcan_dev.h> +#include <rtcan_raw.h> +#include <rtcan_internal.h> +#include <rtcan_sja1000.h> +#include <rtcan_sja1000_regs.h> + +#define RTCAN_DEV_NAME "rtcan%d" +#define RTCAN_DRV_NAME "EMS-CPC-PCI-CAN" + +static char *ems_pci_board_name = "EMS-CPC-PCI"; + +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); +MODULE_DESCRIPTION("RTCAN board driver for EMS CPC-PCI/PCIe/104P CAN cards"); +MODULE_LICENSE("GPL v2"); + +#define EMS_PCI_V1_MAX_CHAN 2 +#define EMS_PCI_V2_MAX_CHAN 4 +#define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN + +struct ems_pci_card { + int version; + int channels; + + struct pci_dev *pci_dev; + struct rtcan_device *rtcan_dev[EMS_PCI_MAX_CHAN]; + + void __iomem *conf_addr; + void __iomem *base_addr; +}; + +#define EMS_PCI_CAN_CLOCK (16000000 / 2) + +/* + * Register definitions and descriptions are from LinCAN 0.3.3. 
+ * + * PSB4610 PITA-2 bridge control registers + */ +#define PITA2_ICR 0x00 /* Interrupt Control Register */ +#define PITA2_ICR_INT0 0x00000002 /* [RC] INT0 Active/Clear */ +#define PITA2_ICR_INT0_EN 0x00020000 /* [RW] Enable INT0 */ + +#define PITA2_MISC 0x1c /* Miscellaneous Register */ +#define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */ + +/* + * Register definitions for the PLX 9030 + */ +#define PLX_ICSR 0x4c /* Interrupt Control/Status register */ +#define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */ +#define PLX_ICSR_PCIINT_ENA 0x0040 /* PCI Interrupt Enable */ +#define PLX_ICSR_LINTI1_CLR 0x0400 /* Local Edge Triggerable Interrupt Clear */ +#define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \ + PLX_ICSR_LINTI1_CLR) + +/* + * The board configuration is probably following: + * RX1 is connected to ground. + * TX1 is not connected. + * CLKO is not connected. + * Setting the OCR register to 0xDA is a good idea. + * This means normal output mode, push-pull and the correct polarity. + */ +#define EMS_PCI_OCR (SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL) + +/* + * In the CDR register, you should set CBP to 1. + * You will probably also want to set the clock divider value to 7 + * (meaning direct oscillator output) because the second SJA1000 chip + * is driven by the first one CLKOUT output. + */ +#define EMS_PCI_CDR (SJA_CDR_CBP | SJA_CDR_CLKOUT_MASK) + +#define EMS_PCI_V1_BASE_BAR 1 +#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */ +#define EMS_PCI_V2_BASE_BAR 2 +#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */ +#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */ +#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */ + +#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ + +static const struct pci_device_id ems_pci_tbl[] = { + /* CPC-PCI v1 */ + {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, + /* CPC-PCI v2 */ + {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000}, + /* CPC-104P v2 */ + {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002}, + {0,} +}; +MODULE_DEVICE_TABLE(pci, ems_pci_tbl); + +/* + * Helper to read internal registers from card logic (not CAN) + */ +static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port) +{ + return readb((void __iomem *)card->base_addr + (port * 4)); +} + +static u8 ems_pci_v1_read_reg(struct rtcan_device *dev, int port) +{ + return readb((void __iomem *)dev->base_addr + (port * 4)); +} + +static void ems_pci_v1_write_reg(struct rtcan_device *dev, + int port, u8 val) +{ + writeb(val, (void __iomem *)dev->base_addr + (port * 4)); +} + +static void ems_pci_v1_post_irq(struct rtcan_device *dev) +{ + struct ems_pci_card *card = (struct ems_pci_card *)dev->board_priv; + + /* reset int flag of pita */ + writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, + card->conf_addr + PITA2_ICR); +} + +static u8 ems_pci_v2_read_reg(struct rtcan_device *dev, int port) +{ + return readb((void __iomem *)dev->base_addr + port); +} + +static void ems_pci_v2_write_reg(struct rtcan_device *dev, + int port, u8 val) +{ + writeb(val, (void __iomem *)dev->base_addr + port); +} + +static void ems_pci_v2_post_irq(struct rtcan_device *dev) +{ + struct ems_pci_card *card = (struct ems_pci_card *)dev->board_priv; + + writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR); +} + +/* + * Check if a CAN controller is present at the specified location + * by trying to set 'em into the PeliCAN mode + */ +static 
inline int ems_pci_check_chan(struct rtcan_device *dev) +{ + struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv; + unsigned char res; + + /* Make sure SJA1000 is in reset mode */ + chip->write_reg(dev, SJA_MOD, 1); + + chip->write_reg(dev, SJA_CDR, SJA_CDR_CAN_MODE); + + /* read reset-values */ + res = chip->read_reg(dev, SJA_CDR); + + if (res == SJA_CDR_CAN_MODE) + return 1; + + return 0; +} + +static void ems_pci_del_card(struct pci_dev *pdev) +{ + struct ems_pci_card *card = pci_get_drvdata(pdev); + struct rtcan_device *dev; + int i = 0; + + for (i = 0; i < card->channels; i++) { + dev = card->rtcan_dev[i]; + + if (!dev) + continue; + + dev_info(&pdev->dev, "Removing %s.\n", dev->name); + rtcan_sja1000_unregister(dev); + rtcan_dev_free(dev); + } + + if (card->base_addr != NULL) + pci_iounmap(card->pci_dev, card->base_addr); + + if (card->conf_addr != NULL) + pci_iounmap(card->pci_dev, card->conf_addr); + + kfree(card); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static void ems_pci_card_reset(struct ems_pci_card *card) +{ + /* Request board reset */ + writeb(0, card->base_addr); +} + +/* + * Probe PCI device for EMS CAN signature and register each available + * CAN channel to RTCAN subsystem. + */ +static int ems_pci_add_card(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct rtcan_sja1000 *chip; + struct rtcan_device *dev; + struct ems_pci_card *card; + int max_chan, conf_size, base_bar; + int err, i; + + if (!rtdm_available()) + return -ENODEV; + + /* Enabling PCI device */ + if (pci_enable_device(pdev) < 0) { + dev_err(&pdev->dev, "Enabling PCI device failed\n"); + return -ENODEV; + } + + /* Allocating card structures to hold addresses, ... */ + card = kzalloc(sizeof(*card), GFP_KERNEL); + if (card == NULL) { + pci_disable_device(pdev); + return -ENOMEM; + } + + pci_set_drvdata(pdev, card); + + card->pci_dev = pdev; + + card->channels = 0; + + if (pdev->vendor == PCI_VENDOR_ID_PLX) { + card->version = 2; /* CPC-PCI v2 */ + max_chan = EMS_PCI_V2_MAX_CHAN; + base_bar = EMS_PCI_V2_BASE_BAR; + conf_size = EMS_PCI_V2_CONF_SIZE; + } else { + card->version = 1; /* CPC-PCI v1 */ + max_chan = EMS_PCI_V1_MAX_CHAN; + base_bar = EMS_PCI_V1_BASE_BAR; + conf_size = EMS_PCI_V1_CONF_SIZE; + } + + /* Remap configuration space and controller memory area */ + card->conf_addr = pci_iomap(pdev, 0, conf_size); + if (card->conf_addr == NULL) { + err = -ENOMEM; + goto failure_cleanup; + } + + card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE); + if (card->base_addr == NULL) { + err = -ENOMEM; + goto failure_cleanup; + } + + if (card->version == 1) { + /* Configure PITA-2 parallel interface (enable MUX) */ + writel(PITA2_MISC_CONFIG, card->conf_addr + PITA2_MISC); + + /* Check for unique EMS CAN signature */ + if (ems_pci_v1_readb(card, 0) != 0x55 || + ems_pci_v1_readb(card, 1) != 0xAA || + ems_pci_v1_readb(card, 2) != 0x01 || + ems_pci_v1_readb(card, 3) != 0xCB || + ems_pci_v1_readb(card, 4) != 0x11) { + dev_err(&pdev->dev, + "Not EMS Dr. 
Thomas Wuensche interface\n"); + err = -ENODEV; + goto failure_cleanup; + } + } + + ems_pci_card_reset(card); + + for (i = 0; i < max_chan; i++) { + dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), 0); + if (!dev) { + err = -ENOMEM; + goto failure_cleanup; + } + + strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ); + dev->board_name = ems_pci_board_name; + dev->board_priv = card; + + card->rtcan_dev[i] = dev; + chip = card->rtcan_dev[i]->priv; + chip->irq_flags = RTDM_IRQTYPE_SHARED; + chip->irq_num = pdev->irq; + + dev->base_addr = (unsigned long)card->base_addr + + EMS_PCI_CAN_BASE_OFFSET + (i * EMS_PCI_CAN_CTRL_SIZE); + if (card->version == 1) { + chip->read_reg = ems_pci_v1_read_reg; + chip->write_reg = ems_pci_v1_write_reg; + chip->irq_ack = ems_pci_v1_post_irq; + } else { + chip->read_reg = ems_pci_v2_read_reg; + chip->write_reg = ems_pci_v2_write_reg; + chip->irq_ack = ems_pci_v2_post_irq; + } + + /* Check if channel is present */ + if (ems_pci_check_chan(dev)) { + dev->can_sys_clock = EMS_PCI_CAN_CLOCK; + chip->ocr = EMS_PCI_OCR | SJA_OCR_MODE_NORMAL; + chip->cdr = EMS_PCI_CDR | SJA_CDR_CAN_MODE; + + if (card->version == 1) + /* reset int flag of pita */ + writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, + card->conf_addr + PITA2_ICR); + else + /* enable IRQ in PLX 9030 */ + writel(PLX_ICSR_ENA_CLR, + card->conf_addr + PLX_ICSR); + + /* Register SJA1000 device */ + err = rtcan_sja1000_register(dev); + if (err) { + dev_err(&pdev->dev, "Registering device failed " + "(err=%d)\n", err); + rtcan_dev_free(dev); + goto failure_cleanup; + } + + card->channels++; + + dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d " + "registered as %s\n", i + 1, + (void* __iomem)dev->base_addr, chip->irq_num, + dev->name); + } else { + dev_err(&pdev->dev, "Channel #%d not detected\n", + i + 1); + rtcan_dev_free(dev); + } + } + + if (!card->channels) { + err = -ENODEV; + goto failure_cleanup; + } + + return 0; + +failure_cleanup: + dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err); + + ems_pci_del_card(pdev); + + return err; +} + +static struct pci_driver ems_pci_driver = { + .name = RTCAN_DRV_NAME, + .id_table = ems_pci_tbl, + .probe = ems_pci_add_card, + .remove = ems_pci_del_card, +}; + +module_pci_driver(ems_pci_driver); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c new file mode 100644 index 0000000..2b5a19c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c @@ -0,0 +1,344 @@ +/* + * Copyright (C) 2009 Sebastian Smolorz <sesmo@gmx.net> + * + * This driver is based on the Socket-CAN driver esd_pci.c, + * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com> + * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix + * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME "rtcan%d"
+#define RTCAN_DRV_NAME "ESD-PCI-CAN"
+
+static char *esd_pci_board_name = "ESD-PCI";
+
+MODULE_AUTHOR("Sebastian Smolorz <sesmo@gmx.net>");
+MODULE_DESCRIPTION("RTCAN board driver for esd PCI/PMC/CPCI/PCIe/PCI104 " \
+                   "CAN cards");
+MODULE_LICENSE("GPL v2");
+
+struct rtcan_esd_pci {
+    struct pci_dev *pci_dev;
+    struct rtcan_device *slave_dev;
+    void __iomem *conf_addr;
+    void __iomem *base_addr;
+};
+
+#define ESD_PCI_CAN_CLOCK (16000000 / 2)
+
+#define ESD_PCI_OCR (SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL | \
+                     SJA_OCR_TX1_INVERT | SJA_OCR_MODE_CLOCK)
+#define ESD_PCI_CDR (SJA_CDR_CLK_OFF | SJA_CDR_CBP | \
+                     SJA_CDR_CAN_MODE)
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+#define CHANNEL_OFFSET 0x100
+
+#define INTCSR_OFFSET 0x4c /* Offset in PLX9050 conf registers */
+#define INTCSR_LINTI1 (1 << 0)
+#define INTCSR_PCI    (1 << 6)
+
+#define INTCSR9056_OFFSET 0x68 /* Offset in PLX9056 conf registers */
+#define INTCSR9056_LINTI  (1 << 11)
+#define INTCSR9056_PCI    (1 << 8)
+
+#ifndef PCI_DEVICE_ID_PLX_9056
+# define PCI_DEVICE_ID_PLX_9056 0x9056
+#endif
+
+/* PCI subsystem IDs of esd's SJA1000 based CAN cards */
+
+/* CAN-PCI/200: PCI, 33MHz only, bridge: PLX9050 */
+#define ESD_PCI_SUB_SYS_ID_PCI200 0x0004
+
+/* CAN-PCI/266: PCI, 33/66MHz, bridge: PLX9056 */
+#define ESD_PCI_SUB_SYS_ID_PCI266 0x0009
+
+/* CAN-PMC/266: PMC module, 33/66MHz, bridge: PLX9056 */
+#define ESD_PCI_SUB_SYS_ID_PMC266 0x000e
+
+/* CAN-CPCI/200: Compact PCI, 33MHz only, bridge: PLX9030 */
+#define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b
+
+/* CAN-PCIE/2000: PCI Express 1x, bridge: PEX8311 = PEX8111 + PLX9056 */
+#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
+
+/* CAN-PCI/104: PCI104 module, 33MHz only, bridge: PLX9030 */
+#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
+
+static struct pci_device_id esd_pci_tbl[] = {
+    {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+     PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200},
+    {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+     PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266},
+    {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+     PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266},
+    {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+     PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200},
+    {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+     PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000},
+    {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+     PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200},
+    {0,}
+};
+
+#define ESD_PCI_BASE_SIZE 0x200
+
+MODULE_DEVICE_TABLE(pci, esd_pci_tbl);
+
+
+static u8 rtcan_esd_pci_read_reg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_esd_pci *board = (struct rtcan_esd_pci *)dev->board_priv;
+    return readb(board->base_addr + port);
+}
+
+static void rtcan_esd_pci_write_reg(struct rtcan_device *dev, int port, u8 val)
+{
+    struct rtcan_esd_pci *board = (struct rtcan_esd_pci *)dev->board_priv;
+    writeb(val, board->base_addr + port);
+}
+
+static void rtcan_esd_pci_del_chan(struct rtcan_device *dev)
+{
+    struct rtcan_esd_pci *board;
+
+    if
(!dev) + return; + + board = (struct rtcan_esd_pci *)dev->board_priv; + + printk("Removing %s %s device %s\n", + esd_pci_board_name, dev->ctrl_name, dev->name); + + rtcan_sja1000_unregister(dev); + + rtcan_dev_free(dev); +} + +static int rtcan_esd_pci_add_chan(struct pci_dev *pdev, int channel, + struct rtcan_device **master_dev, + void __iomem *conf_addr, + void __iomem *base_addr) +{ + struct rtcan_device *dev; + struct rtcan_sja1000 *chip; + struct rtcan_esd_pci *board; + int ret; + + dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), + sizeof(struct rtcan_esd_pci)); + if (dev == NULL) + return -ENOMEM; + + chip = (struct rtcan_sja1000 *)dev->priv; + board = (struct rtcan_esd_pci *)dev->board_priv; + + board->pci_dev = pdev; + board->conf_addr = conf_addr; + board->base_addr = base_addr; + + if (channel == CHANNEL_SLAVE) { + struct rtcan_esd_pci *master_board = + (struct rtcan_esd_pci *)(*master_dev)->board_priv; + master_board->slave_dev = dev; + } + + dev->board_name = esd_pci_board_name; + + chip->read_reg = rtcan_esd_pci_read_reg; + chip->write_reg = rtcan_esd_pci_write_reg; + + dev->can_sys_clock = ESD_PCI_CAN_CLOCK; + + chip->ocr = ESD_PCI_OCR; + chip->cdr = ESD_PCI_CDR; + + strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ); + + chip->irq_flags = RTDM_IRQTYPE_SHARED; + chip->irq_num = pdev->irq; + + RTCAN_DBG("%s: base_addr=0x%p conf_addr=0x%p irq=%d ocr=%#x cdr=%#x\n", + RTCAN_DRV_NAME, board->base_addr, board->conf_addr, + chip->irq_num, chip->ocr, chip->cdr); + + /* Register SJA1000 device */ + ret = rtcan_sja1000_register(dev); + if (ret) { + printk(KERN_ERR "ERROR %d while trying to register SJA1000 " + "device!\n", ret); + goto failure; + } + + if (channel != CHANNEL_SLAVE) + *master_dev = dev; + + return 0; + + +failure: + rtcan_dev_free(dev); + return ret; +} + +static int esd_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret, channel; + void __iomem *base_addr; + void __iomem *conf_addr; + struct rtcan_device *master_dev = NULL; + + if (!rtdm_available()) + return -ENODEV; + + if ((ret = pci_enable_device (pdev))) + goto failure; + + if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME))) + goto failure; + + RTCAN_DBG("%s: Initializing device %04x:%04x %04x:%04x\n", + RTCAN_DRV_NAME, pdev->vendor, pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); + + conf_addr = pci_iomap(pdev, 0, ESD_PCI_BASE_SIZE); + if (conf_addr == NULL) { + ret = -ENODEV; + goto failure_release_pci; + } + + base_addr = pci_iomap(pdev, 2, ESD_PCI_BASE_SIZE); + if (base_addr == NULL) { + ret = -ENODEV; + goto failure_iounmap_conf; + } + + /* Check if second channel is available */ + writeb(SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD); + writeb(SJA_CDR_CBP, base_addr + CHANNEL_OFFSET + SJA_CDR); + writeb(SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD); + if (readb(base_addr + CHANNEL_OFFSET + SJA_MOD) == 0x21) { + writeb(SJA_MOD_SM | SJA_MOD_AFM | SJA_MOD_STM | SJA_MOD_LOM | + SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD); + if (readb(base_addr + CHANNEL_OFFSET + SJA_MOD) == 0x3f) + channel = CHANNEL_MASTER; + else { + writeb(SJA_MOD_RM, + base_addr + CHANNEL_OFFSET + SJA_MOD); + channel = CHANNEL_SINGLE; + } + } else { + writeb(SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD); + channel = CHANNEL_SINGLE; + } + + if ((ret = rtcan_esd_pci_add_chan(pdev, channel, &master_dev, + conf_addr, base_addr))) + goto failure_iounmap_base; + + if (channel != CHANNEL_SINGLE) { + channel = CHANNEL_SLAVE; + if ((ret = rtcan_esd_pci_add_chan(pdev, channel, 
&master_dev,
+                                          conf_addr, base_addr + CHANNEL_OFFSET)))
+            goto failure_iounmap_base;
+    }
+
+    if ((pdev->device == PCI_DEVICE_ID_PLX_9050) ||
+        (pdev->device == PCI_DEVICE_ID_PLX_9030)) {
+        /* Enable interrupts in PLX9050 */
+        writel(INTCSR_LINTI1 | INTCSR_PCI, conf_addr + INTCSR_OFFSET);
+    } else {
+        /* Enable interrupts in PLX9056 */
+        writel(INTCSR9056_LINTI | INTCSR9056_PCI,
+               conf_addr + INTCSR9056_OFFSET);
+    }
+
+    pci_set_drvdata(pdev, master_dev);
+
+    return 0;
+
+
+failure_iounmap_base:
+    if (master_dev)
+        rtcan_esd_pci_del_chan(master_dev);
+    pci_iounmap(pdev, base_addr);
+
+failure_iounmap_conf:
+    pci_iounmap(pdev, conf_addr);
+
+failure_release_pci:
+    pci_release_regions(pdev);
+
+failure:
+    return ret;
+}
+
+static void esd_pci_remove_one(struct pci_dev *pdev)
+{
+    struct rtcan_device *dev = pci_get_drvdata(pdev);
+    struct rtcan_esd_pci *board = (struct rtcan_esd_pci *)dev->board_priv;
+
+    if ((pdev->device == PCI_DEVICE_ID_PLX_9050) ||
+        (pdev->device == PCI_DEVICE_ID_PLX_9030)) {
+        /* Disable interrupts in PLX9050 */
+        writel(0, board->conf_addr + INTCSR_OFFSET);
+    } else {
+        /* Disable interrupts in PLX9056 */
+        writel(0, board->conf_addr + INTCSR9056_OFFSET);
+    }
+
+    if (board->slave_dev)
+        rtcan_esd_pci_del_chan(board->slave_dev);
+    rtcan_esd_pci_del_chan(dev);
+
+
+    pci_iounmap(pdev, board->base_addr);
+    pci_iounmap(pdev, board->conf_addr);
+
+    pci_release_regions(pdev);
+    pci_disable_device(pdev);
+    pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rtcan_esd_pci_driver = {
+    .name = RTCAN_DRV_NAME,
+    .id_table = esd_pci_tbl,
+    .probe = esd_pci_init_one,
+    .remove = esd_pci_remove_one,
+};
+
+module_pci_driver(rtcan_esd_pci_driver);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c
new file mode 100644
index 0000000..a0e49fe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006, 2009 Sebastian Smolorz
+ *                                <smolorz@rts.uni-hannover.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME "rtcan%d"
+#define RTCAN_DRV_NAME "sja1000-isa"
+
+#define RTCAN_ISA_MAX_DEV 4
+
+static char *isa_board_name = "ISA-Board";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for standard ISA boards");
+MODULE_LICENSE("GPL");
+
+static u16 io[RTCAN_ISA_MAX_DEV];
+static int irq[RTCAN_ISA_MAX_DEV];
+static u32 can_clock[RTCAN_ISA_MAX_DEV];
+static u8 ocr[RTCAN_ISA_MAX_DEV];
+static u8 cdr[RTCAN_ISA_MAX_DEV];
+
+module_param_array(io, ushort, NULL, 0444);
+module_param_array(irq, int, NULL, 0444);
+module_param_array(can_clock, uint, NULL, 0444);
+module_param_array(ocr, byte, NULL, 0444);
+module_param_array(cdr, byte, NULL, 0444);
+
+MODULE_PARM_DESC(io, "The I/O port address");
+MODULE_PARM_DESC(irq, "The interrupt number");
+MODULE_PARM_DESC(can_clock, "External clock frequency (default 16 MHz)");
+MODULE_PARM_DESC(ocr, "Value of output control register (default 0x1a)");
+MODULE_PARM_DESC(cdr, "Value of clock divider register (default 0xc8)");
+
+#define RTCAN_ISA_PORT_SIZE 32
+
+struct rtcan_isa
+{
+    u16 io;
+};
+
+static struct rtcan_device *rtcan_isa_devs[RTCAN_ISA_MAX_DEV];
+
+static u8 rtcan_isa_readreg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_isa *board = (struct rtcan_isa *)dev->board_priv;
+    return inb(board->io + port);
+}
+
+static void rtcan_isa_writereg(struct rtcan_device *dev, int port, u8 val)
+{
+    struct rtcan_isa *board = (struct rtcan_isa *)dev->board_priv;
+    outb(val, board->io + port);
+}
+
+
+int __init rtcan_isa_init_one(int idx)
+{
+    struct rtcan_device *dev;
+    struct rtcan_sja1000 *chip;
+    struct rtcan_isa *board;
+    int ret;
+
+    if ((dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+                               sizeof(struct rtcan_isa))) == NULL)
+        return -ENOMEM;
+
+    chip = (struct rtcan_sja1000 *)dev->priv;
+    board = (struct rtcan_isa *)dev->board_priv;
+
+    dev->board_name = isa_board_name;
+
+    board->io = io[idx];
+
+    chip->irq_num = irq[idx];
+    chip->irq_flags = RTDM_IRQTYPE_SHARED | RTDM_IRQTYPE_EDGE;
+
+    chip->read_reg = rtcan_isa_readreg;
+    chip->write_reg = rtcan_isa_writereg;
+
+    /* Check and request I/O ports */
+    if (!request_region(board->io, RTCAN_ISA_PORT_SIZE, RTCAN_DRV_NAME)) {
+        ret = -EBUSY;
+        goto out_dev_free;
+    }
+
+    /* Clock frequency in Hz */
+    if (can_clock[idx])
+        dev->can_sys_clock = can_clock[idx] / 2;
+    else
+        dev->can_sys_clock = 8000000; /* 16/2 MHz */
+
+    /* Output control register */
+    if (ocr[idx])
+        chip->ocr = ocr[idx];
+    else
+        chip->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL;
+
+    if (cdr[idx])
+        chip->cdr = cdr[idx];
+    else
+        chip->cdr = SJA_CDR_CAN_MODE | SJA_CDR_CLK_OFF | SJA_CDR_CBP;
+
+    strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+    ret = rtcan_sja1000_register(dev);
+    if (ret) {
+        printk(KERN_ERR "ERROR %d while trying to register SJA1000 "
+               "device!\n", ret);
+        goto out_free_region;
+    }
+
+    rtcan_isa_devs[idx] = dev;
+    return 0;
+
+ out_free_region:
+    release_region(board->io, RTCAN_ISA_PORT_SIZE);
+
+ out_dev_free:
+    rtcan_dev_free(dev);
+
+    return ret;
+}
+
+static void rtcan_isa_exit(void);
+
+/** Init module */
+static int __init rtcan_isa_init(void)
+{
+    int i, err;
+    int devices = 0;
+
+    if (!rtdm_available())
+        return -ENOSYS;
+
+    for (i = 0; i < RTCAN_ISA_MAX_DEV &&
io[i] != 0; i++) {
+        err = rtcan_isa_init_one(i);
+        if (err) {
+            rtcan_isa_exit();
+            return err;
+        }
+        devices++;
+    }
+    if (devices)
+        return 0;
+
+    printk(KERN_ERR "ERROR! No devices specified! "
+           "Use io=<port1>[,...] irq=<irq1>[,...]\n");
+    return -EINVAL;
+}
+
+
+/** Cleanup module */
+static void rtcan_isa_exit(void)
+{
+    int i;
+    struct rtcan_device *dev;
+
+    for (i = 0; i < RTCAN_ISA_MAX_DEV; i++) {
+        dev = rtcan_isa_devs[i];
+        if (!dev)
+            continue;
+        rtcan_sja1000_unregister(dev);
+        release_region(io[i], RTCAN_ISA_PORT_SIZE);
+        rtcan_dev_free(dev);
+    }
+}
+
+module_init(rtcan_isa_init);
+module_exit(rtcan_isa_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c
new file mode 100644
index 0000000..9c5197f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME "rtcan%d"
+#define RTCAN_DRV_NAME "IXXAT-PCI-CAN"
+
+static char *ixxat_pci_board_name = "IXXAT-PCI";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for IXXAT-PCI cards");
+MODULE_LICENSE("GPL");
+
+struct rtcan_ixxat_pci
+{
+    struct pci_dev *pci_dev;
+    struct rtcan_device *slave_dev;
+    int conf_addr;
+    void __iomem *base_addr;
+};
+
+#define IXXAT_PCI_CAN_SYS_CLOCK (16000000 / 2)
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+#define CHANNEL_OFFSET       0x200
+#define CHANNEL_MASTER_RESET 0x110
+#define CHANNEL_SLAVE_RESET  (CHANNEL_MASTER_RESET + CHANNEL_OFFSET)
+
+#define IXXAT_INTCSR_OFFSET 0x4c /* Offset in PLX9050 conf registers */
+#define IXXAT_INTCSR_SLAVE  0x41 /* LINT1 and PCI interrupt enabled */
+#define IXXAT_INTCSR_MASTER 0x08 /* LINT2 enabled */
+#define IXXAT_SJA_MOD_MASK  0xa1 /* Mask for reading dual/single channel */
+
+/* PCI vendor, device and sub-device ID */
+#define IXXAT_PCI_VENDOR_ID  0x10b5
+#define IXXAT_PCI_DEVICE_ID  0x9050
+#define IXXAT_PCI_SUB_SYS_ID 0x2540
+
+#define IXXAT_CONF_PORT_SIZE 0x0080
+#define IXXAT_BASE_PORT_SIZE 0x0400
+
+static struct pci_device_id ixxat_pci_tbl[] = {
+    {IXXAT_PCI_VENDOR_ID, IXXAT_PCI_DEVICE_ID,
+     IXXAT_PCI_VENDOR_ID, IXXAT_PCI_SUB_SYS_ID, 0, 0, 0},
+    { }
+};
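+/* The all-zero entry terminates the ID table above. MODULE_DEVICE_TABLE()
+ * below exports the table so module tooling can match the PLX 9050 bridge
+ * carrying IXXAT's subsystem ID and autoload this driver. */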
+MODULE_DEVICE_TABLE (pci, ixxat_pci_tbl); + + +static u8 rtcan_ixxat_pci_read_reg(struct rtcan_device *dev, int port) +{ + struct rtcan_ixxat_pci *board = (struct rtcan_ixxat_pci *)dev->board_priv; + return readb(board->base_addr + port); +} + +static void rtcan_ixxat_pci_write_reg(struct rtcan_device *dev, int port, u8 data) +{ + struct rtcan_ixxat_pci *board = (struct rtcan_ixxat_pci *)dev->board_priv; + writeb(data, board->base_addr + port); +} + +static void rtcan_ixxat_pci_del_chan(struct rtcan_device *dev) +{ + struct rtcan_ixxat_pci *board; + u8 intcsr; + + if (!dev) + return; + + board = (struct rtcan_ixxat_pci *)dev->board_priv; + + printk("Removing %s %s device %s\n", + ixxat_pci_board_name, dev->ctrl_name, dev->name); + + rtcan_sja1000_unregister(dev); + + /* Disable PCI interrupts */ + intcsr = inb(board->conf_addr + IXXAT_INTCSR_OFFSET); + if (board->slave_dev) { + intcsr &= ~IXXAT_INTCSR_MASTER; + outb(intcsr, board->conf_addr + IXXAT_INTCSR_OFFSET); + writeb(0x1, board->base_addr + CHANNEL_MASTER_RESET); + iounmap(board->base_addr); + } else { + intcsr &= ~IXXAT_INTCSR_SLAVE; + outb(intcsr, board->conf_addr + IXXAT_INTCSR_OFFSET); + writeb(0x1, board->base_addr + CHANNEL_SLAVE_RESET ); + } + rtcan_dev_free(dev); +} + +static int rtcan_ixxat_pci_add_chan(struct pci_dev *pdev, + int channel, + struct rtcan_device **master_dev, + int conf_addr, + void __iomem *base_addr) +{ + struct rtcan_device *dev; + struct rtcan_sja1000 *chip; + struct rtcan_ixxat_pci *board; + u8 intcsr; + int ret; + + dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), + sizeof(struct rtcan_ixxat_pci)); + if (dev == NULL) + return -ENOMEM; + + chip = (struct rtcan_sja1000 *)dev->priv; + board = (struct rtcan_ixxat_pci *)dev->board_priv; + + board->pci_dev = pdev; + board->conf_addr = conf_addr; + board->base_addr = base_addr; + + if (channel == CHANNEL_SLAVE) { + struct rtcan_ixxat_pci *master_board = + (struct rtcan_ixxat_pci *)(*master_dev)->board_priv; + master_board->slave_dev = dev; + } + + dev->board_name = ixxat_pci_board_name; + + chip->read_reg = rtcan_ixxat_pci_read_reg; + chip->write_reg = rtcan_ixxat_pci_write_reg; + + /* Clock frequency in Hz */ + dev->can_sys_clock = IXXAT_PCI_CAN_SYS_CLOCK; + + /* Output control register */ + chip->ocr = (SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_INVERT | + SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL); + + /* Clock divider register */ + chip->cdr = SJA_CDR_CAN_MODE; + + strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ); + + /* Enable PCI interrupts */ + intcsr = inb(board->conf_addr + IXXAT_INTCSR_OFFSET); + if (channel == CHANNEL_SLAVE) + intcsr |= IXXAT_INTCSR_SLAVE; + else + intcsr |= IXXAT_INTCSR_MASTER; + outb(intcsr, board->conf_addr + IXXAT_INTCSR_OFFSET); + + /* Register and setup interrupt handling */ + chip->irq_flags = RTDM_IRQTYPE_SHARED; + chip->irq_num = pdev->irq; + + RTCAN_DBG("%s: base_addr=0x%p conf_addr=%#x irq=%d ocr=%#x cdr=%#x\n", + RTCAN_DRV_NAME, board->base_addr, board->conf_addr, + chip->irq_num, chip->ocr, chip->cdr); + + /* Register SJA1000 device */ + ret = rtcan_sja1000_register(dev); + if (ret) { + printk(KERN_ERR "ERROR %d while trying to register SJA1000 device!\n", + ret); + goto failure; + } + + if (channel != CHANNEL_SLAVE) + *master_dev = dev; + + return 0; + + failure: + rtcan_dev_free(dev); + return ret; +} + +static int ixxat_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret, channel, conf_addr; + unsigned long addr; + void __iomem *base_addr; + struct rtcan_device *master_dev = NULL; + + if 
(!rtdm_available()) + return -ENODEV; + + if ((ret = pci_enable_device (pdev))) + goto failure; + + if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME))) + goto failure; + + RTCAN_DBG("%s: Initializing device %04x:%04x:%04x\n", + RTCAN_DRV_NAME, pdev->vendor, pdev->device, + pdev->subsystem_device); + + /* Enable memory and I/O space */ + if ((ret = pci_write_config_word(pdev, 0x04, 0x3))) + goto failure_release_pci; + + conf_addr = pci_resource_start(pdev, 1); + + addr = pci_resource_start(pdev, 2); + base_addr = ioremap(addr, IXXAT_BASE_PORT_SIZE); + if (base_addr == 0) { + ret = -ENODEV; + goto failure_release_pci; + } + + /* Check if second channel is available after reset */ + writeb(0x1, base_addr + CHANNEL_MASTER_RESET); + writeb(0x1, base_addr + CHANNEL_SLAVE_RESET); + udelay(100); + if ( (readb(base_addr + CHANNEL_OFFSET + SJA_MOD) & IXXAT_SJA_MOD_MASK ) != 0x21 || + readb(base_addr + CHANNEL_OFFSET + SJA_SR ) != 0x0c || + readb(base_addr + CHANNEL_OFFSET + SJA_IR ) != 0xe0) + channel = CHANNEL_SINGLE; + else + channel = CHANNEL_MASTER; + + if ((ret = rtcan_ixxat_pci_add_chan(pdev, channel, &master_dev, + conf_addr, base_addr))) + goto failure_iounmap; + + if (channel != CHANNEL_SINGLE) { + channel = CHANNEL_SLAVE; + if ((ret = rtcan_ixxat_pci_add_chan(pdev, channel, + &master_dev, conf_addr, + base_addr + CHANNEL_OFFSET))) + goto failure_iounmap; + } + + pci_set_drvdata(pdev, master_dev); + return 0; + +failure_iounmap: + if (master_dev) + rtcan_ixxat_pci_del_chan(master_dev); + iounmap(base_addr); + +failure_release_pci: + pci_release_regions(pdev); + +failure: + return ret; +} + +static void ixxat_pci_remove_one(struct pci_dev *pdev) +{ + struct rtcan_device *dev = pci_get_drvdata(pdev); + struct rtcan_ixxat_pci *board = (struct rtcan_ixxat_pci *)dev->board_priv; + + if (board->slave_dev) + rtcan_ixxat_pci_del_chan(board->slave_dev); + rtcan_ixxat_pci_del_chan(dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver rtcan_ixxat_pci_driver = { + .name = RTCAN_DRV_NAME, + .id_table = ixxat_pci_tbl, + .probe = ixxat_pci_init_one, + .remove = ixxat_pci_remove_one, +}; + +module_pci_driver(rtcan_ixxat_pci_driver); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c new file mode 100644 index 0000000..965735e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2006 Matthias Fuchs <matthias.fuchs@esd-electronics.com>, + * Jan Kiszka <jan.kiszka@web.de> + * + * RTCAN driver for memory mapped SJA1000 CAN controller + * This code has been tested on esd's CPCI405/EPPC405 PPC405 systems. + * + * This driver is derived from the rtcan-isa driver by + * Wolfgang Grandegger and Sebastian Smolorz. + * + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * Copyright (C) 2005, 2006 Sebastian Smolorz + * <Sebastian.Smolorz@stud.uni-hannover.de> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/delay.h> + +#include <rtdm/driver.h> + +#include <rtdm/can.h> +#include <rtcan_dev.h> +#include <rtcan_raw.h> +#include <rtcan_internal.h> +#include <rtcan_sja1000.h> +#include <rtcan_sja1000_regs.h> + +#define RTCAN_DEV_NAME "rtcan%d" +#define RTCAN_DRV_NAME "sja1000-mem" + +#define RTCAN_MEM_MAX_DEV 4 + +static char *mem_board_name = "mem mapped"; + +MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd-electronics.com>"); +MODULE_DESCRIPTION("RTCAN driver for memory mapped SJA1000 controller"); +MODULE_LICENSE("GPL"); + +static u32 mem[RTCAN_MEM_MAX_DEV]; +static int irq[RTCAN_MEM_MAX_DEV]; +static u32 can_clock[RTCAN_MEM_MAX_DEV]; +static u8 ocr[RTCAN_MEM_MAX_DEV]; +static u8 cdr[RTCAN_MEM_MAX_DEV]; + +module_param_array(mem, uint, NULL, 0444); +module_param_array(irq, int, NULL, 0444); +module_param_array(can_clock, uint, NULL, 0444); +module_param_array(ocr, byte, NULL, 0444); +module_param_array(cdr, byte, NULL, 0444); + +MODULE_PARM_DESC(mem, "The io-memory address"); +MODULE_PARM_DESC(irq, "The interrupt number"); +MODULE_PARM_DESC(can_clock, "External clock frequency (default 16 MHz)"); +MODULE_PARM_DESC(ocr, "Value of output control register (default 0x1a)"); +MODULE_PARM_DESC(cdr, "Value of clock divider register (default 0xc8)"); + +#define RTCAN_MEM_RANGE 0x80 + +struct rtcan_mem +{ + volatile void __iomem *vmem; +}; + +static struct rtcan_device *rtcan_mem_devs[RTCAN_MEM_MAX_DEV]; + +static u8 rtcan_mem_readreg(struct rtcan_device *dev, int reg) +{ + struct rtcan_mem *board = (struct rtcan_mem *)dev->board_priv; + return readb(board->vmem + reg); +} + +static void rtcan_mem_writereg(struct rtcan_device *dev, int reg, u8 val) +{ + struct rtcan_mem *board = (struct rtcan_mem *)dev->board_priv; + writeb(val, board->vmem + reg); +} + +int __init rtcan_mem_init_one(int idx) +{ + struct rtcan_device *dev; + struct rtcan_sja1000 *chip; + struct rtcan_mem *board; + int ret; + + if ((dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), + sizeof(struct rtcan_mem))) == NULL) + return -ENOMEM; + + chip = (struct rtcan_sja1000 *)dev->priv; + board = (struct rtcan_mem *)dev->board_priv; + + dev->board_name = mem_board_name; + + chip->irq_num = irq[idx]; + chip->irq_flags = RTDM_IRQTYPE_SHARED; + chip->read_reg = rtcan_mem_readreg; + chip->write_reg = rtcan_mem_writereg; + + if (!request_mem_region(mem[idx], RTCAN_MEM_RANGE, RTCAN_DRV_NAME)) { + ret = -EBUSY; + goto out_dev_free; + } + + /* ioremap io memory */ + if (!(board->vmem = ioremap(mem[idx], RTCAN_MEM_RANGE))) { + ret = -EBUSY; + goto out_release_mem; + } + + /* Clock frequency in Hz */ + if (can_clock[idx]) + dev->can_sys_clock = can_clock[idx] / 2; + else + dev->can_sys_clock = 8000000; /* 16/2 MHz */ + + /* Output control register */ + if (ocr[idx]) + chip->ocr = ocr[idx]; + else + chip->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL; + + if (cdr[idx]) + chip->cdr = cdr[idx]; + else + chip->cdr = SJA_CDR_CAN_MODE | SJA_CDR_CLK_OFF | SJA_CDR_CBP; + + strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ); + + ret = rtcan_sja1000_register(dev); + if (ret) { + printk(KERN_ERR "ERROR %d while trying to register SJA1000 " + "device!\n", ret); + goto out_iounmap; + } + + rtcan_mem_devs[idx] = dev; + return 0; + + out_iounmap: + iounmap((void *)board->vmem);
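+ /* The labels below unwind in reverse order of setup: release the I/O memory region, then free the device. */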
+ + out_release_mem: + release_mem_region(mem[idx], RTCAN_MEM_RANGE); + + out_dev_free: + rtcan_dev_free(dev); + + return ret; +} + +static void rtcan_mem_exit(void); + +/** Init module */ +static int __init rtcan_mem_init(void) +{ + int i, err; + int devices = 0; + + if (!rtdm_available()) + return -ENOSYS; + + for (i = 0; i < RTCAN_MEM_MAX_DEV && mem[i] != 0; i++) { + err = rtcan_mem_init_one(i); + if (err) { + rtcan_mem_exit(); + return err; + } + devices++; + } + if (devices) + return 0; + + printk(KERN_ERR "ERROR! No devices specified! " + "Use mem=<port1>[,...] irq=<irq1>[,...]\n"); + return -EINVAL; +} + + +/** Cleanup module */ +static void rtcan_mem_exit(void) +{ + int i; + struct rtcan_device *dev; + volatile void __iomem *vmem; + + for (i = 0; i < RTCAN_MEM_MAX_DEV; i++) { + dev = rtcan_mem_devs[i]; + if (!dev) + continue; + vmem = ((struct rtcan_mem *)dev->board_priv)->vmem; + rtcan_sja1000_unregister(dev); + iounmap((void *)vmem); + release_mem_region(mem[i], RTCAN_MEM_RANGE); + rtcan_dev_free(dev); + } +} + +module_init(rtcan_mem_init); +module_exit(rtcan_mem_exit); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c new file mode 100644 index 0000000..d1e3f44 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c @@ -0,0 +1,389 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * Derived from the PCAN project file driver/src/pcan_dongle.c: + * + * Copyright (C) 2001-2006 PEAK System-Technik GmbH + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/version.h> +#include <linux/delay.h> +#include <linux/pnp.h> + +#include <rtdm/driver.h> + +/* CAN device profile */ +#include <rtdm/can.h> +#include <rtcan_dev.h> +#include <rtcan_raw.h> +#include <rtcan_sja1000.h> +#include <rtcan_sja1000_regs.h> + +#define RTCAN_DEV_NAME "rtcan%d" +#define RTCAN_DRV_NAME "PEAK-Dongle" + +#define RTCAN_PEAK_DNG_MAX_DEV 1 + +static char *dongle_board_name = "PEAK-Dongle"; + +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); +MODULE_DESCRIPTION("RTCAN board driver for PEAK-Dongle"); +MODULE_LICENSE("GPL"); + +static char *type[RTCAN_PEAK_DNG_MAX_DEV]; +static ushort io[RTCAN_PEAK_DNG_MAX_DEV]; +static char irq[RTCAN_PEAK_DNG_MAX_DEV]; + +module_param_array(type, charp, NULL, 0444); +module_param_array(io, ushort, NULL, 0444); +module_param_array(irq, byte, NULL, 0444); + +MODULE_PARM_DESC(type, "The type of interface (sp, epp)"); +MODULE_PARM_DESC(io, "The io-port address"); +MODULE_PARM_DESC(irq, "The interrupt number"); + +#define DONGLE_TYPE_SP 0 +#define DONGLE_TYPE_EPP 1 + +#define DNG_PORT_SIZE 4 /* the address range of the dongle-port */ +#define ECR_PORT_SIZE 1 /* size of the associated ECR register */ + +struct rtcan_peak_dng +{ + u16 ioport; + u16 ecr; /* ECR register in case of EPP */ + u8 old_data; /* the overwritten contents of the port registers */ + u8 old_ctrl; + u8 old_ecr; + u8 type; +}; + +static struct rtcan_device *rtcan_peak_dng_devs[RTCAN_PEAK_DNG_MAX_DEV]; + +static u16 dng_ports[] = {0x378, 0x278, 0x3bc, 0x2bc}; +static u8 dng_irqs[] = {7, 5, 7, 5}; + +static unsigned char nibble_decode[32] = +{ + 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, + 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, + 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, + 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 +}; + +/* Enable and disable irqs */ +static inline void rtcan_parport_disable_irq(u32 port) +{ + u32 pc = port + 2; + outb(inb(pc) & ~0x10, pc); +} + +static inline void rtcan_parport_enable_irq(u32 port) +{ + u32 pc = port + 2; + outb(inb(pc) | 0x10, pc); +} + +/* Functions for SP port */ +static u8 rtcan_peak_dng_sp_readreg(struct rtcan_device *dev, int port) +{ + struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv; + u32 pa = dng->ioport; + u32 pb = pa + 1; + u32 pc = pb + 1; + u8 b0, b1 ; + u8 irq_enable = inb(pc) & 0x10; /* don't influence irq_enable */ + + outb((0x0B ^ 0x0D) | irq_enable, pc); + outb((port & 0x1F) | 0x80, pa); + outb((0x0B ^ 0x0C) | irq_enable, pc); + b1=nibble_decode[inb(pb)>>3]; + outb(0x40, pa); + b0=nibble_decode[inb(pb)>>3]; + outb((0x0B ^ 0x0D) | irq_enable, pc); + + return (b1 << 4) | b0 ; +} + +static void rtcan_peak_dng_writereg(struct rtcan_device *dev, int port, u8 data) +{ + struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv; + u32 pa = dng->ioport; + u32 pc = pa + 2; + u8 irq_enable = inb(pc) & 0x10; /* don't influence irq_enable */ + + outb((0x0B ^ 0x0D) | irq_enable, pc); + outb(port & 0x1F, pa); + outb((0x0B ^ 0x0C) | irq_enable, pc); + outb(data, pa); + outb((0x0B ^ 0x0D) | irq_enable, pc); +} + +/* Functions for EPP port */ +static u8 rtcan_peak_dng_epp_readreg(struct rtcan_device *dev, int port) +{ + struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv; + u32 pa = dng->ioport; + u32 pc = pa + 2; + u8 val; + u8 irq_enable = inb(pc) & 0x10; /* don't influence irq_enable */ + + outb((0x0B ^ 0x0F) | irq_enable, pc); + outb((port & 0x1F) | 0x80, pa); + outb((0x0B ^ 0x2E) | irq_enable, pc); + val = 
inb(pa); + outb((0x0B ^ 0x0F) | irq_enable, pc); + + return val; +} + + +/* to switch epp on or restore register */ +static void dongle_set_ecr(u16 port, struct rtcan_peak_dng *dng) +{ + u32 ecr = dng->ecr; + + dng->old_ecr = inb(ecr); + outb((dng->old_ecr & 0x1F) | 0x20, ecr); + + if (dng->old_ecr == 0xff) + printk(KERN_DEBUG "%s: really ECP mode configured?\n", RTCAN_DRV_NAME); +} + +static void dongle_restore_ecr(u16 port, struct rtcan_peak_dng *dng) +{ + u32 ecr = dng->ecr; + + outb(dng->old_ecr, ecr); + + printk(KERN_DEBUG "%s: restore ECR\n", RTCAN_DRV_NAME); +} + +static inline void rtcan_peak_dng_enable(struct rtcan_device *dev) +{ + struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv; + u32 port = dng->ioport; + + /* save old port contents */ + dng->old_data = inb(port); + dng->old_ctrl = inb(port + 2); + + /* switch to epp mode if possible */ + if (dng->type == DONGLE_TYPE_EPP) + dongle_set_ecr(port, dng); + + rtcan_parport_enable_irq(port); +} + +static inline void rtcan_peak_dng_disable(struct rtcan_device *dev) +{ + struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv; + u32 port = dng->ioport; + + rtcan_parport_disable_irq(port); + + if (dng->type == DONGLE_TYPE_EPP) + dongle_restore_ecr(port, dng); + + /* restore port state */ + outb(dng->old_data, port); + outb(dng->old_ctrl, port + 2); +} + +/** Init module */ +int __init rtcan_peak_dng_init_one(int idx) +{ + int ret, dtype; + struct rtcan_device *dev; + struct rtcan_sja1000 *sja; + struct rtcan_peak_dng *dng; + + if (strncmp(type[idx], "sp", 2) == 0) + dtype = DONGLE_TYPE_SP; + else if (strncmp(type[idx], "epp", 3) == 0) + dtype = DONGLE_TYPE_EPP; + else { + printk(KERN_ERR "%s: type %s is invalid, use \"sp\" or \"epp\".\n", + RTCAN_DRV_NAME, type[idx]); + return -EINVAL; + } + + if ((dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), + sizeof(struct rtcan_peak_dng))) == NULL) + return -ENOMEM; + + sja = (struct rtcan_sja1000 *)dev->priv; + dng = (struct rtcan_peak_dng *)dev->board_priv; + + /* remember the dongle type; the EPP-specific setup and teardown below rely on it */ + dng->type = dtype; + + dev->board_name = dongle_board_name; + + if (io[idx]) + dng->ioport = io[idx]; + else + dng->ioport = dng_ports[idx]; + + if (irq[idx]) + sja->irq_num = irq[idx]; + else + sja->irq_num = dng_irqs[idx]; + sja->irq_flags = 0; + + if (dtype == DONGLE_TYPE_SP) { + sja->read_reg = rtcan_peak_dng_sp_readreg; + sja->write_reg = rtcan_peak_dng_writereg; + dng->ecr = 0; /* set to anything */ + } else { + sja->read_reg = rtcan_peak_dng_epp_readreg; + sja->write_reg = rtcan_peak_dng_writereg; + dng->ecr = dng->ioport + 0x402; + } + + /* Check and request I/O ports */ + if (!request_region(dng->ioport, DNG_PORT_SIZE, RTCAN_DRV_NAME)) { + ret = -EBUSY; + goto out_dev_free; + } + + if (dng->type == DONGLE_TYPE_EPP) { + if (!request_region(dng->ecr, ECR_PORT_SIZE, RTCAN_DRV_NAME)) { + ret = -EBUSY; + goto out_free_region; + } + } + + /* Clock frequency in Hz */ + dev->can_sys_clock = 8000000; /* 16/2 MHz */ + + /* Output control register */ + sja->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL; + + sja->cdr = SJA_CDR_CAN_MODE; + + strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ); + + rtcan_peak_dng_enable(dev); + + /* Register RTDM device */ + ret = rtcan_sja1000_register(dev); + if (ret) { + printk(KERN_ERR "ERROR while trying to register SJA1000 device %d!\n", + ret); + goto out_free_region2; + } + + rtcan_peak_dng_devs[idx] = dev; + return 0; + + out_free_region2: + if (dng->type == DONGLE_TYPE_EPP) + release_region(dng->ecr, ECR_PORT_SIZE); + + out_free_region: + release_region(dng->ioport, DNG_PORT_SIZE); + + out_dev_free:
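+ /* nothing was registered before this point, so dropping the allocation is the only cleanup left */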
+ rtcan_dev_free(dev); + + return ret; +} + +void rtcan_peak_dng_exit_one(struct rtcan_device *dev) +{ + struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv; + + rtcan_sja1000_unregister(dev); + rtcan_peak_dng_disable(dev); + if (dng->type == DONGLE_TYPE_EPP) + release_region(dng->ecr, ECR_PORT_SIZE); + release_region(dng->ioport, DNG_PORT_SIZE); + rtcan_dev_free(dev); +} + +static const struct pnp_device_id rtcan_peak_dng_pnp_tbl[] = { + /* Standard LPT Printer Port */ + {.id = "PNP0400", .driver_data = 0}, + /* ECP Printer Port */ + {.id = "PNP0401", .driver_data = 0}, + { } +}; + +static int rtcan_peak_dng_pnp_probe(struct pnp_dev *dev, + const struct pnp_device_id *id) +{ + return 0; +} + +static struct pnp_driver rtcan_peak_dng_pnp_driver = { + .name = RTCAN_DRV_NAME, + .id_table = rtcan_peak_dng_pnp_tbl, + .probe = rtcan_peak_dng_pnp_probe, +}; + +static int pnp_registered; + +/** Cleanup module */ +static void rtcan_peak_dng_exit(void) +{ + int i; + struct rtcan_device *dev; + + for (i = 0; i < RTCAN_PEAK_DNG_MAX_DEV; i++) { + dev = rtcan_peak_dng_devs[i]; + if (dev) + rtcan_peak_dng_exit_one(dev); + } + + if (pnp_registered) + pnp_unregister_driver(&rtcan_peak_dng_pnp_driver); +} + +/** Init module */ +static int __init rtcan_peak_dng_init(void) +{ + int i, ret = -EINVAL, done = 0; + + if (!rtdm_available()) + return -ENOSYS; + + if (pnp_register_driver(&rtcan_peak_dng_pnp_driver) == 0) + pnp_registered = 1; + + for (i = 0; + i < RTCAN_PEAK_DNG_MAX_DEV && type[i] != 0; + i++) { + + if ((ret = rtcan_peak_dng_init_one(i)) != 0) { + printk(KERN_ERR "%s: Init failed with %d\n", RTCAN_DRV_NAME, ret); + goto cleanup; + } + done++; + } + if (done) + return 0; + + printk(KERN_ERR "%s: Please specify type=epp or type=sp\n", + RTCAN_DRV_NAME); + +cleanup: + rtcan_peak_dng_exit(); + return ret; +} + +module_init(rtcan_peak_dng_init); +module_exit(rtcan_peak_dng_exit); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c new file mode 100644 index 0000000..19f728b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c @@ -0,0 +1,361 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * Derived from the PCAN project file driver/src/pcan_pci.c: + * + * Copyright (C) 2001-2006 PEAK System-Technik GmbH + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <asm/io.h> + +#include <rtdm/driver.h> + +/* CAN device profile */ +#include <rtdm/can.h> +#include <rtcan_dev.h> +#include <rtcan_raw.h> +#include <rtcan_sja1000.h> +#include <rtcan_sja1000_regs.h> + +#define RTCAN_DEV_NAME "rtcan%d" +#define RTCAN_DRV_NAME "PEAK-PCI-CAN" + +static char *peak_pci_board_name = "PEAK-PCI"; + +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); +MODULE_DESCRIPTION("RTCAN board driver for PEAK-PCI cards"); +MODULE_LICENSE("GPL"); + +struct rtcan_peak_pci +{ + struct pci_dev *pci_dev; + struct rtcan_device *slave_dev; + int channel; + volatile void __iomem *base_addr; + volatile void __iomem *conf_addr; +}; + +#define PEAK_PCI_CAN_SYS_CLOCK (16000000 / 2) + +#define PELICAN_SINGLE (SJA_CDR_CAN_MODE | SJA_CDR_CBP | 0x07 | SJA_CDR_CLK_OFF) +#define PELICAN_MASTER (SJA_CDR_CAN_MODE | SJA_CDR_CBP | 0x07 ) +#define PELICAN_DEFAULT (SJA_CDR_CAN_MODE ) + +#define CHANNEL_SINGLE 0 /* this is a single channel device */ +#define CHANNEL_MASTER 1 /* multi channel device, this device is master */ +#define CHANNEL_SLAVE 2 /* multi channel device, this is slave */ + +// important PITA registers +#define PITA_ICR 0x00 // interrupt control register +#define PITA_GPIOICR 0x18 // general purpose IO interface control register +#define PITA_MISC 0x1C // miscellaneous register + +#define PEAK_PCI_VENDOR_ID 0x001C // the PCI device and vendor IDs +#define PEAK_PCI_DEVICE_ID 0x0001 // Device ID for PCI and older PCIe cards +#define PEAK_PCIE_DEVICE_ID 0x0003 // Device ID for newer PCIe cards (IPEH-003027) +#define PEAK_CPCI_DEVICE_ID 0x0004 // for nextgen cPCI slot cards +#define PEAK_MPCI_DEVICE_ID 0x0005 // for nextgen miniPCI slot cards +#define PEAK_PC_104P_DEVICE_ID 0x0006 // PCAN-PC/104+ cards +#define PEAK_PCI_104E_DEVICE_ID 0x0007 // PCAN-PCI/104 Express cards +#define PEAK_MPCIE_DEVICE_ID 0x0008 // The miniPCIe slot cards +#define PEAK_PCIE_OEM_ID 0x0009 // PCAN-PCI Express OEM + +#define PCI_CONFIG_PORT_SIZE 0x1000 // size of the config io-memory +#define PCI_PORT_SIZE 0x0400 // size of a channel io-memory + +static struct pci_device_id peak_pci_tbl[] = { + {PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, + { } +}; +MODULE_DEVICE_TABLE (pci, peak_pci_tbl); + + +static u8 rtcan_peak_pci_read_reg(struct rtcan_device *dev, int port) +{ + struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv; + return readb(board->base_addr + ((unsigned long)port << 2)); +} + +static void rtcan_peak_pci_write_reg(struct rtcan_device *dev, int port, u8 data) +{ + struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv; + writeb(data, board->base_addr + ((unsigned long)port << 2)); +} + +static void rtcan_peak_pci_irq_ack(struct rtcan_device *dev) +{ + struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv; + u16 pita_icr_low; + + /* Select and clear the interrupt pending in the PITA */ + pita_icr_low = readw(board->conf_addr + 
PITA_ICR); + if (board->channel == CHANNEL_SLAVE) { + if (pita_icr_low & 0x0001) + writew(0x0001, board->conf_addr + PITA_ICR); + } + else { + if (pita_icr_low & 0x0002) + writew(0x0002, board->conf_addr + PITA_ICR); + } +} + +static void rtcan_peak_pci_del_chan(struct rtcan_device *dev, + int init_step) +{ + struct rtcan_peak_pci *board; + u16 pita_icr_high; + + if (!dev) + return; + + board = (struct rtcan_peak_pci *)dev->board_priv; + + switch (init_step) { + case 0: /* Full cleanup */ + printk("Removing %s %s device %s\n", + peak_pci_board_name, dev->ctrl_name, dev->name); + rtcan_sja1000_unregister(dev); + fallthrough; + case 5: + pita_icr_high = readw(board->conf_addr + PITA_ICR + 2); + if (board->channel == CHANNEL_SLAVE) { + pita_icr_high &= ~0x0001; + } else { + pita_icr_high &= ~0x0002; + } + writew(pita_icr_high, board->conf_addr + PITA_ICR + 2); + fallthrough; + case 4: + iounmap((void *)board->base_addr); + fallthrough; + case 3: + if (board->channel != CHANNEL_SLAVE) + iounmap((void *)board->conf_addr); + fallthrough; + case 2: + rtcan_dev_free(dev); + fallthrough; + case 1: + break; + } + +} + +static int rtcan_peak_pci_add_chan(struct pci_dev *pdev, int channel, + struct rtcan_device **master_dev) +{ + struct rtcan_device *dev; + struct rtcan_sja1000 *chip; + struct rtcan_peak_pci *board; + u16 pita_icr_high; + unsigned long addr; + int ret, init_step = 1; + + dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), + sizeof(struct rtcan_peak_pci)); + if (dev == NULL) + return -ENOMEM; + init_step = 2; + + chip = (struct rtcan_sja1000 *)dev->priv; + board = (struct rtcan_peak_pci *)dev->board_priv; + + board->pci_dev = pdev; + board->channel = channel; + + if (channel != CHANNEL_SLAVE) { + + addr = pci_resource_start(pdev, 0); + board->conf_addr = ioremap(addr, PCI_CONFIG_PORT_SIZE); + if (board->conf_addr == 0) { + ret = -ENODEV; + goto failure; + } + init_step = 3; + + /* Set GPIO control register */ + writew(0x0005, board->conf_addr + PITA_GPIOICR + 2); + + if (channel == CHANNEL_MASTER) + writeb(0x00, board->conf_addr + PITA_GPIOICR); /* enable both */ + else + writeb(0x04, board->conf_addr + PITA_GPIOICR); /* enable single */ + + writeb(0x05, board->conf_addr + PITA_MISC + 3); /* toggle reset */ + mdelay(5); + writeb(0x04, board->conf_addr + PITA_MISC + 3); /* leave parport mux mode */ + } else { + struct rtcan_peak_pci *master_board = + (struct rtcan_peak_pci *)(*master_dev)->board_priv; + master_board->slave_dev = dev; + board->conf_addr = master_board->conf_addr; + } + + addr = pci_resource_start(pdev, 1); + if (channel == CHANNEL_SLAVE) + addr += 0x400; + + board->base_addr = ioremap(addr, PCI_PORT_SIZE); + if (board->base_addr == 0) { + ret = -ENODEV; + goto failure; + } + init_step = 4; + + dev->board_name = peak_pci_board_name; + + chip->read_reg = rtcan_peak_pci_read_reg; + chip->write_reg = rtcan_peak_pci_write_reg; + chip->irq_ack = rtcan_peak_pci_irq_ack; + + /* Clock frequency in Hz */ + dev->can_sys_clock = PEAK_PCI_CAN_SYS_CLOCK; + + /* Output control register */ + chip->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL; + + /* Clock divider register */ + if (channel == CHANNEL_MASTER) + chip->cdr = PELICAN_MASTER; + else + chip->cdr = PELICAN_SINGLE; + + strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ); + + /* Register and setup interrupt handling */ + chip->irq_flags = RTDM_IRQTYPE_SHARED; + chip->irq_num = pdev->irq; + pita_icr_high = readw(board->conf_addr + PITA_ICR + 2); + if (channel == CHANNEL_SLAVE) { + pita_icr_high |= 0x0001; + } else { + pita_icr_high 
|= 0x0002; + } + writew(pita_icr_high, board->conf_addr + PITA_ICR + 2); + init_step = 5; + + printk("%s: base_addr=%p conf_addr=%p irq=%d\n", RTCAN_DRV_NAME, + board->base_addr, board->conf_addr, chip->irq_num); + + /* Register SJA1000 device */ + ret = rtcan_sja1000_register(dev); + if (ret) { + printk(KERN_ERR + "ERROR %d while trying to register SJA1000 device!\n", ret); + goto failure; + } + + if (channel != CHANNEL_SLAVE) + *master_dev = dev; + + return 0; + + failure: + rtcan_peak_pci_del_chan(dev, init_step); + return ret; +} + +static int peak_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret; + u16 sub_sys_id; + struct rtcan_device *master_dev = NULL; + + if (!rtdm_available()) + return -ENODEV; + + printk("%s: initializing device %04x:%04x\n", + RTCAN_DRV_NAME, pdev->vendor, pdev->device); + + if ((ret = pci_enable_device (pdev))) + goto failure; + + if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME))) + goto failure; + + if ((ret = pci_read_config_word(pdev, 0x2e, &sub_sys_id))) + goto failure_cleanup; + + /* Enable memory space */ + if ((ret = pci_write_config_word(pdev, 0x04, 2))) + goto failure_cleanup; + + if ((ret = pci_write_config_word(pdev, 0x44, 0))) + goto failure_cleanup; + + if (sub_sys_id > 3) { + if ((ret = rtcan_peak_pci_add_chan(pdev, CHANNEL_MASTER, + &master_dev))) + goto failure_cleanup; + if ((ret = rtcan_peak_pci_add_chan(pdev, CHANNEL_SLAVE, + &master_dev))) + goto failure_cleanup; + } else { + if ((ret = rtcan_peak_pci_add_chan(pdev, CHANNEL_SINGLE, + &master_dev))) + goto failure_cleanup; + } + + pci_set_drvdata(pdev, master_dev); + return 0; + + failure_cleanup: + if (master_dev) + rtcan_peak_pci_del_chan(master_dev, 0); + + pci_release_regions(pdev); + + failure: + return ret; + +} + +static void peak_pci_remove_one(struct pci_dev *pdev) +{ + struct rtcan_device *dev = pci_get_drvdata(pdev); + struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv; + + if (board->slave_dev) + rtcan_peak_pci_del_chan(board->slave_dev, 0); + rtcan_peak_pci_del_chan(dev, 0); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver rtcan_peak_pci_driver = { + .name = RTCAN_DRV_NAME, + .id_table = peak_pci_tbl, + .probe = peak_pci_init_one, + .remove = peak_pci_remove_one, +}; + +module_pci_driver(rtcan_peak_pci_driver); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c new file mode 100644 index 0000000..4da14f2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c @@ -0,0 +1,593 @@ +/* + * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su> + * + * Derived from the ems_pci.c driver: + * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com> + * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com> + * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the version 2 of the GNU General Public License + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/io.h> + +#include <rtdm/driver.h> + +/* CAN device profile */ +#include <rtdm/can.h> +#include <rtcan_dev.h> +#include <rtcan_raw.h> +#include <rtcan_internal.h> +#include <rtcan_sja1000.h> +#include <rtcan_sja1000_regs.h> + +#define RTCAN_DRV_NAME "rt_sja1000_plx_pci" +#define RTCAN_DEV_NAME "rtcan%d" + +MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>"); +MODULE_DESCRIPTION("RTCAN driver for PLX90xx PCI-bridge cards with " + "the SJA1000 chips"); +MODULE_LICENSE("GPL v2"); + +#define PLX_PCI_MAX_CHAN 2 + +struct plx_pci_card { + int channels; /* detected channels count */ + struct rtcan_device *rtcan_dev[PLX_PCI_MAX_CHAN]; + void __iomem *conf_addr; + + /* Pointer to device-dependent reset function */ + void (*reset_func)(struct pci_dev *pdev); +}; + +#define PLX_PCI_CAN_CLOCK (16000000 / 2) + +/* PLX9030/9050/9052 registers */ +#define PLX_INTCSR 0x4c /* Interrupt Control/Status */ +#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response, + * Serial EEPROM, and Initialization + * Control register + */ + +#define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */ +#define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */ +#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */ +#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */ + +/* PLX9056 registers */ +#define PLX9056_INTCSR 0x68 /* Interrupt Control/Status */ +#define PLX9056_CNTRL 0x6c /* Control / Software Reset */ + +#define PLX9056_LINTI (1 << 11) +#define PLX9056_PCI_INT_EN (1 << 8) +#define PLX9056_PCI_RCR (1 << 29) /* Read Configuration Registers */ + +/* + * The board configuration is probably as follows: + * RX1 is connected to ground. + * TX1 is not connected. + * CLKO is not connected. + * Setting the OCR register to 0xDA is a good idea. + * This means normal output mode, push-pull and the correct polarity. + */ +#define PLX_PCI_OCR (SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL) + +/* + * In the CDR register, you should set CBP to 1. + * You will probably also want to set the clock divider value to 7 + * (meaning direct oscillator output) because the second SJA1000 chip + * is driven by the first one's CLKOUT output.
+ */ +#define PLX_PCI_CDR (SJA_CDR_CBP | SJA_CDR_CAN_MODE) + +/* SJA1000 Control Register in the BasicCAN Mode */ +#define SJA_CR 0x00 + +/* States of some SJA1000 registers after hardware reset in the BasicCAN mode*/ +#define REG_CR_BASICCAN_INITIAL 0x21 +#define REG_CR_BASICCAN_INITIAL_MASK 0xa1 +#define REG_SR_BASICCAN_INITIAL 0x0c +#define REG_IR_BASICCAN_INITIAL 0xe0 + +/* States of some SJA1000 registers after hardware reset in the PeliCAN mode*/ +#define REG_MOD_PELICAN_INITIAL 0x01 +#define REG_SR_PELICAN_INITIAL 0x3c +#define REG_IR_PELICAN_INITIAL 0x00 + +#define ADLINK_PCI_VENDOR_ID 0x144A +#define ADLINK_PCI_DEVICE_ID 0x7841 + +#define ESD_PCI_SUB_SYS_ID_PCI200 0x0004 +#define ESD_PCI_SUB_SYS_ID_PCI266 0x0009 +#define ESD_PCI_SUB_SYS_ID_PMC266 0x000e +#define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b +#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200 +#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501 + +#define MARATHON_PCI_DEVICE_ID 0x2715 + +#define TEWS_PCI_VENDOR_ID 0x1498 +#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A + +static void plx_pci_reset_common(struct pci_dev *pdev); +static void plx_pci_reset_marathon(struct pci_dev *pdev); +static void plx9056_pci_reset_common(struct pci_dev *pdev); + +struct plx_pci_channel_map { + u32 bar; + u32 offset; + u32 size; /* 0x00 - auto, e.g. length of entire bar */ +}; + +struct plx_pci_card_info { + const char *name; + int channel_count; + u32 can_clock; + u8 ocr; /* output control register */ + u8 cdr; /* clock divider register */ + + /* Parameters for mapping local configuration space */ + struct plx_pci_channel_map conf_map; + + /* Parameters for mapping the SJA1000 chips */ + struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN]; + + /* Pointer to device-dependent reset function */ + void (*reset_func)(struct pci_dev *pdev); +}; + +static struct plx_pci_card_info plx_pci_card_info_adlink = { + "Adlink PCI-7841/cPCI-7841", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} }, + &plx_pci_reset_common + /* based on PLX9052 */ +}; + +static struct plx_pci_card_info plx_pci_card_info_adlink_se = { + "Adlink PCI-7841/cPCI-7841 SE", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} }, + &plx_pci_reset_common + /* based on PLX9052 */ +}; + +static struct plx_pci_card_info plx_pci_card_info_esd200 = { + "esd CAN-PCI/CPCI/PCI104/200", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, + &plx_pci_reset_common + /* based on PLX9030/9050 */ +}; + +static struct plx_pci_card_info plx_pci_card_info_esd266 = { + "esd CAN-PCI/PMC/266", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, + &plx9056_pci_reset_common + /* based on PLX9056 */ +}; + +static struct plx_pci_card_info plx_pci_card_info_esd2000 = { + "esd CAN-PCIe/2000", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, + &plx9056_pci_reset_common + /* based on PEX8311 */ +}; + +static struct plx_pci_card_info plx_pci_card_info_marathon = { + "Marathon CAN-bus-PCI", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} }, + &plx_pci_reset_marathon + /* based on PLX9052 */ +}; + +static struct plx_pci_card_info plx_pci_card_info_tews = { + "TEWS TECHNOLOGIES TPMC810", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} }, + 
&plx_pci_reset_common + /* based on PLX9030 */ +}; + +static const struct pci_device_id plx_pci_tbl[] = { + { + /* Adlink PCI-7841/cPCI-7841 */ + ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_NETWORK_OTHER << 8, ~0, + (kernel_ulong_t)&plx_pci_card_info_adlink + }, + { + /* Adlink PCI-7841/cPCI-7841 SE */ + ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_OTHER << 8, ~0, + (kernel_ulong_t)&plx_pci_card_info_adlink_se + }, + { + /* esd CAN-PCI/200 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd200 + }, + { + /* esd CAN-CPCI/200 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd200 + }, + { + /* esd CAN-PCI104/200 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd200 + }, + { + /* esd CAN-PCI/266 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd266 + }, + { + /* esd CAN-PMC/266 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd266 + }, + { + /* esd CAN-PCIE/2000 */ + PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056, + PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_esd2000 + }, + { + /* Marathon CAN-bus-PCI card */ + PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_marathon + }, + { + /* TEWS TECHNOLOGIES TPMC810 card */ + TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_tews + }, + { 0,} +}; +MODULE_DEVICE_TABLE(pci, plx_pci_tbl); + +static u8 plx_pci_read_reg(struct rtcan_device *dev, int port) +{ + return ioread8((void* __iomem)dev->base_addr + port); +} + +static void plx_pci_write_reg(struct rtcan_device *dev, int port, u8 val) +{ + iowrite8(val, (void* __iomem)dev->base_addr + port); +} + +/* + * Check if a CAN controller is present at the specified location + * by trying to switch it from the BasicCAN mode into the PeliCAN mode. + * Also check states of some registers in reset mode. + */ +static inline int plx_pci_check_sja1000(struct rtcan_device *dev) +{ + int flag = 0; + + struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv; + + /* + * Check registers after hardware reset (the Basic mode) + * See states on p. 10 of the Datasheet. + */ + if ((chip->read_reg(dev, SJA_CR) & REG_CR_BASICCAN_INITIAL_MASK) == + REG_CR_BASICCAN_INITIAL && + (chip->read_reg(dev, SJA_SR) == REG_SR_BASICCAN_INITIAL) && + (chip->read_reg(dev, SJA_IR) == REG_IR_BASICCAN_INITIAL)) + flag = 1; + + /* Bring the SJA1000 into the PeliCAN mode */ + chip->write_reg(dev, SJA_CDR, SJA_CDR_CAN_MODE); + + /* + * Check registers after reset in the PeliCAN mode. + * See states on p. 23 of the Datasheet. + */ + if (chip->read_reg(dev, SJA_MOD) == REG_MOD_PELICAN_INITIAL && + chip->read_reg(dev, SJA_SR) == REG_SR_PELICAN_INITIAL && + chip->read_reg(dev, SJA_IR) == REG_IR_PELICAN_INITIAL) + return flag; + + return 0; +} + +/* + * PLX9030/50/52 software reset + * This also asserts LRESET# and resets the device on the Local Bus (if wired).
+ * For most cards this is enough to reset the SJA1000 chips. + */ +static void plx_pci_reset_common(struct pci_dev *pdev) +{ + struct plx_pci_card *card = pci_get_drvdata(pdev); + u32 cntrl; + + cntrl = ioread32(card->conf_addr + PLX_CNTRL); + cntrl |= PLX_PCI_RESET; + iowrite32(cntrl, card->conf_addr + PLX_CNTRL); + udelay(100); + cntrl ^= PLX_PCI_RESET; + iowrite32(cntrl, card->conf_addr + PLX_CNTRL); +}; + +/* + * PLX9056 software reset + * Assert LRESET# and reset device(s) on the Local Bus (if wired). + */ +static void plx9056_pci_reset_common(struct pci_dev *pdev) +{ + struct plx_pci_card *card = pci_get_drvdata(pdev); + u32 cntrl; + + /* issue a local bus reset */ + cntrl = ioread32(card->conf_addr + PLX9056_CNTRL); + cntrl |= PLX_PCI_RESET; + iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); + udelay(100); + cntrl ^= PLX_PCI_RESET; + iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); + + /* reload local configuration from EEPROM */ + cntrl |= PLX9056_PCI_RCR; + iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); + + /* + * There is no safe way to poll for the end + * of the reconfiguration process. Waiting for 10ms + * is safe. + */ + mdelay(10); + + cntrl ^= PLX9056_PCI_RCR; + iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); +}; + +/* Special reset function for Marathon card */ +static void plx_pci_reset_marathon(struct pci_dev *pdev) +{ + void __iomem *reset_addr; + int i; + int reset_bar[2] = {3, 5}; + + plx_pci_reset_common(pdev); + + for (i = 0; i < 2; i++) { + reset_addr = pci_iomap(pdev, reset_bar[i], 0); + if (!reset_addr) { + dev_err(&pdev->dev, "Failed to remap reset " + "space %d (BAR%d)\n", i, reset_bar[i]); + } else { + /* reset the SJA1000 chip */ + iowrite8(0x1, reset_addr); + udelay(100); + pci_iounmap(pdev, reset_addr); + } + } +} + +static void plx_pci_del_card(struct pci_dev *pdev) +{ + struct plx_pci_card *card = pci_get_drvdata(pdev); + struct rtcan_device *dev; + int i = 0; + + for (i = 0; i < card->channels; i++) { + dev = card->rtcan_dev[i]; + if (!dev) + continue; + + dev_info(&pdev->dev, "Removing %s\n", dev->name); + rtcan_sja1000_unregister(dev); + if (dev->base_addr) + pci_iounmap(pdev, (void* __iomem)dev->base_addr); + rtcan_dev_free(dev); + } + + card->reset_func(pdev); + + /* + * Disable interrupts from PCI-card and disable local + * interrupts + */ + if (pdev->device != PCI_DEVICE_ID_PLX_9056) + iowrite32(0x0, card->conf_addr + PLX_INTCSR); + else + iowrite32(0x0, card->conf_addr + PLX9056_INTCSR); + + if (card->conf_addr) + pci_iounmap(pdev, card->conf_addr); + + kfree(card); + + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +/* + * Probe PLX90xx based device for the SJA1000 chips and register each + * available CAN channel to SJA1000 Socket-CAN subsystem. + */ +static int plx_pci_add_card(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct rtcan_sja1000 *chip; + struct rtcan_device *dev; + struct plx_pci_card *card; + struct plx_pci_card_info *ci; + int err, i; + u32 val; + void __iomem *addr; + + if (!rtdm_available()) + return -ENODEV; + + ci = (struct plx_pci_card_info *)ent->driver_data; + + if (pci_enable_device(pdev) < 0) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return -ENODEV; + } + + dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n", + ci->name, PCI_SLOT(pdev->devfn)); + + /* Allocate card structures to hold addresses, ... 
*/ + card = kzalloc(sizeof(*card), GFP_KERNEL); + if (!card) { + dev_err(&pdev->dev, "Unable to allocate memory\n"); + pci_disable_device(pdev); + return -ENOMEM; + } + + pci_set_drvdata(pdev, card); + + card->channels = 0; + + /* Remap PLX90xx configuration space */ + addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size); + if (!addr) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to remap configuration space " + "(BAR%d)\n", ci->conf_map.bar); + goto failure_cleanup; + } + card->conf_addr = addr + ci->conf_map.offset; + + ci->reset_func(pdev); + card->reset_func = ci->reset_func; + + /* Detect available channels */ + for (i = 0; i < ci->channel_count; i++) { + struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i]; + + dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), + sizeof(struct plx_pci_card)); + if (!dev) { + err = -ENOMEM; + goto failure_cleanup; + } + + strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ); + dev->board_name = (char *)ci->name; + + card->rtcan_dev[i] = dev; + chip = card->rtcan_dev[i]->priv; + chip->irq_flags = RTDM_IRQTYPE_SHARED; + chip->irq_num = pdev->irq; + + /* + * Remap IO space of the SJA1000 chips + * This is device-dependent mapping + */ + addr = pci_iomap(pdev, cm->bar, cm->size); + if (!addr) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar); + goto failure_cleanup; + } + + dev->base_addr = (unsigned long)(addr + cm->offset); + chip->read_reg = plx_pci_read_reg; + chip->write_reg = plx_pci_write_reg; + + /* Check if channel is present */ + if (plx_pci_check_sja1000(dev)) { + dev->can_sys_clock = ci->can_clock; + chip->ocr = ci->ocr; + chip->cdr = ci->cdr; + + /* Register SJA1000 device */ + err = rtcan_sja1000_register(dev); + if (err) { + dev_err(&pdev->dev, "Registering device failed " + "(err=%d)\n", err); + rtcan_dev_free(dev); + goto failure_cleanup; + } + + card->channels++; + + dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d " + "registered as %s\n", i + 1, + (void* __iomem)dev->base_addr, chip->irq_num, + dev->name); + } else { + dev_err(&pdev->dev, "Channel #%d not detected\n", + i + 1); + rtcan_dev_free(dev); + } + } + + if (!card->channels) { + err = -ENODEV; + goto failure_cleanup; + } + + /* + * Enable interrupts from PCI-card (PLX90xx) and enable Local_1, + * Local_2 interrupts from the SJA1000 chips + */ + if (pdev->device != PCI_DEVICE_ID_PLX_9056) { + val = ioread32(card->conf_addr + PLX_INTCSR); + if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH) + val |= PLX_LINT1_EN | PLX_PCI_INT_EN; + else + val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN; + iowrite32(val, card->conf_addr + PLX_INTCSR); + } else { + iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN, + card->conf_addr + PLX9056_INTCSR); + } + return 0; + +failure_cleanup: + dev_err(&pdev->dev, "Error: %d. 
Cleaning Up.\n", err); + + plx_pci_del_card(pdev); + + return err; +} + +static struct pci_driver plx_pci_driver = { + .name = RTCAN_DRV_NAME, + .id_table = plx_pci_tbl, + .probe = plx_pci_add_card, + .remove = plx_pci_del_card, +}; + +module_pci_driver(plx_pci_driver); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c new file mode 100644 index 0000000..0f49551 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c @@ -0,0 +1,842 @@ +/* + * Copyright (C) 2005, 2006 Sebastian Smolorz + * <Sebastian.Smolorz@stud.uni-hannover.de> + * + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * + * Parts of this software are based on the following: + * + * - RTAI CAN device driver for SJA1000 controllers by Jan Kiszka + * + * - linux-can.patch, a CAN socket framework for Linux, + * Copyright (C) 2004, 2005, Robert Schwebel, Benedikt Spranger, + * Marc Kleine-Budde, Sascha Hauer, Pengutronix + * + * - RTnet (www.rtnet.org) + * + * - serial device driver and profile included in Xenomai (RTDM), + * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>. + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/module.h> + +#include <rtdm/driver.h> +#include <rtdm/can.h> + +#include <rtcan_socket.h> +#include <rtcan_dev.h> +#include <rtcan_raw.h> +#include <rtcan_list.h> +#include <rtcan_sja1000.h> +#include <rtcan_sja1000_regs.h> + + +#define BTR0_BRP_MASK 0x3f +#define BTR0_SJW_SHIFT 6 +#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT) + +#define BTR1_TSEG1_MASK 0xf +#define BTR1_TSEG2_SHIFT 4 +#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT) +#define BTR1_SAM_SHIFT 7 + +#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK) +#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & BTR0_SJW_MASK) + +#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK) +#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & BTR1_TSEG2_MASK) +#define BTR1_SET_SAM(sam) (((sam) & 1) << BTR1_SAM_SHIFT) + +/* Value for the interrupt enable register */ +#define SJA1000_IER SJA_IER_RIE | SJA_IER_TIE | \ + SJA_IER_EIE | SJA_IER_WUIE | \ + SJA_IER_EPIE | SJA_IER_BEIE | \ + SJA_IER_ALIE | SJA_IER_DOIE + +static char *sja_ctrl_name = "SJA1000"; + +#define STATE_OPERATING(state) \ + ((state) != CAN_STATE_STOPPED && (state) != CAN_STATE_BUS_OFF) + +#define STATE_RESET(state) \ + ((state) == CAN_STATE_STOPPED || (state) == CAN_STATE_BUS_OFF) + + +MODULE_AUTHOR("Sebastian.Smolorz@stud.uni-hannover.de"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RT-Socket-CAN driver for SJA1000"); + +#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD +static struct can_bittiming_const sja1000_bittiming_const = { + .name = "sja1000", + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 64, + .brp_inc = 1, +}; +#endif + +static inline void rtcan_sja_rx_interrupt(struct rtcan_device *dev, + struct rtcan_skb *skb) +{ + int i; + /* "Real" size of the payload */ + u8 size; + /* Content of frame information register */ + u8 fir; + /* Ring buffer frame within skb */ + struct rtcan_rb_frame *frame = &skb->rb_frame; + struct rtcan_sja1000 *chip = dev->priv; + + /* Read out frame information register */ + fir = chip->read_reg(dev, SJA_FIR); + + /* Extract data length code */ + frame->can_dlc = fir & SJA_FIR_DLC_MASK; + + /* If DLC exceeds 8 bytes adjust it to 8 (for the payload size) */ + size = (frame->can_dlc > 8) ? 8 : frame->can_dlc; + + + if (fir & SJA_FIR_EFF) { + /* Extended frame */ + frame->can_id = CAN_EFF_FLAG; + + /* Read ID */ + frame->can_id |= chip->read_reg(dev, SJA_ID1) << 21; + frame->can_id |= chip->read_reg(dev, SJA_ID2) << 13; + frame->can_id |= chip->read_reg(dev, SJA_ID3) << 5; + frame->can_id |= chip->read_reg(dev, SJA_ID4) >> 3; + + if (!(fir & SJA_FIR_RTR)) { + /* No RTR, read data bytes */ + for (i = 0; i < size; i++) + frame->data[i] = chip->read_reg(dev, + SJA_DATA_EFF(i)); + } + + } else { + /* Standard frame */ + + /* Read ID */ + frame->can_id = chip->read_reg(dev, SJA_ID1) << 3; + frame->can_id |= chip->read_reg(dev, SJA_ID2) >> 5; + + if (!(fir & SJA_FIR_RTR)) { + /* No RTR, read data bytes */ + for (i = 0; i < size; i++) + frame->data[i] = chip->read_reg(dev, SJA_DATA_SFF(i)); + } + } + + /* Release Receive Buffer */ + chip->write_reg(dev, SJA_CMR, SJA_CMR_RRB); + + + /* RTR? 
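(a remote frame carries no data bytes, so only the frame header is queued below)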
*/ + if (fir & SJA_FIR_RTR) { + frame->can_id |= CAN_RTR_FLAG; + skb->rb_frame_size = EMPTY_RB_FRAME_SIZE; + } else + skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + size; + + /* Store the interface index */ + frame->can_ifindex = dev->ifindex; +} + + +static inline void rtcan_sja_err_interrupt(struct rtcan_device *dev, + struct rtcan_sja1000 *chip, + struct rtcan_skb *skb, + u8 irq_source) +{ + struct rtcan_rb_frame *frame = &skb->rb_frame; + can_state_t state = dev->state; + u8 status, txerr, rxerr; + + status = chip->read_reg(dev, SJA_SR); + txerr = chip->read_reg(dev, SJA_TXERR); + rxerr = chip->read_reg(dev, SJA_RXERR); + + skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + CAN_ERR_DLC; + + frame->can_id = CAN_ERR_FLAG; + frame->can_dlc = CAN_ERR_DLC; + + memset(&frame->data[0], 0, frame->can_dlc); + + /* Data overrun interrupt? */ + if (irq_source & SJA_IR_DOI) { + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } + + /* Arbitration lost interrupt? */ + if (irq_source & SJA_IR_ALI) { + frame->can_id |= CAN_ERR_LOSTARB; + frame->data[0] = chip->read_reg(dev, SJA_ALC) & 0x1f; + } + + /* Bus error interrupt? */ + if (irq_source & SJA_IR_BEI) { + u8 ecc = chip->read_reg(dev, SJA_ECC); + + frame->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + switch (ecc & SJA_ECC_ERR_MASK) { + case SJA_ECC_ERR_BIT: + frame->data[2] |= CAN_ERR_PROT_BIT; + break; + case SJA_ECC_ERR_FORM: + frame->data[2] |= CAN_ERR_PROT_FORM; + break; + case SJA_ECC_ERR_STUFF: + frame->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + frame->data[2] |= CAN_ERR_PROT_UNSPEC; + frame->data[3] = ecc & SJA_ECC_SEG_MASK; + break; + } + /* Error occurred during transmission? */ + if ((ecc & SJA_ECC_DIR) == 0) + frame->data[2] |= CAN_ERR_PROT_TX; + } + + /* Error passive interrupt? */ + if (unlikely(irq_source & SJA_IR_EPI)) { + if (state == CAN_STATE_BUS_WARNING) { + state = CAN_STATE_BUS_PASSIVE; + } else { + state = CAN_STATE_BUS_WARNING; + } + } + + /* Error warning interrupt? */ + if (irq_source & SJA_IR_EI) { + + /* Test bus status (bus-off condition) */ + if (status & SJA_SR_BS) { + /* Bus-off */ + state = CAN_STATE_BUS_OFF; + frame->can_id |= CAN_ERR_BUSOFF; + /* Only allow error warning interrupts + (otherwise an EPI would arise during bus-off + recovery) */ + chip->write_reg(dev, SJA_IER, SJA_IER_EIE); + /* Wake up waiting senders */ + rtdm_sem_destroy(&dev->tx_sem); + } + + /* Test error status (error warning limit) */ + else if (status & SJA_SR_ES) + /* error warning limit reached */ + state = CAN_STATE_BUS_WARNING; + + /* Re-entrance into error active state from bus-warn? */ + else if (state == CAN_STATE_BUS_WARNING) + state = CAN_STATE_ACTIVE; + + else + /* Bus-off recovery complete, enable all interrupts again */ + chip->write_reg(dev, SJA_IER, SJA1000_IER); + } + + if (state != dev->state && + (state == CAN_STATE_BUS_WARNING || state == CAN_STATE_BUS_PASSIVE)) { + frame->can_id |= CAN_ERR_PROT; + if (txerr > rxerr) + frame->data[1] = CAN_ERR_CRTL_TX_WARNING; + else + frame->data[1] = CAN_ERR_CRTL_RX_WARNING; + } + + dev->state = state; + frame->can_ifindex = dev->ifindex; +} + +static int rtcan_sja_interrupt(rtdm_irq_t *irq_handle) +{ + struct rtcan_device *dev; + struct rtcan_sja1000 *chip; + struct rtcan_skb skb; + int recv_lock_free = 1; + int irq_count = 0; + int ret = RTDM_IRQ_NONE; + u8 irq_source; + + + /* Get the ID of the device which registered this IRQ. 
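rtdm_irq_get_arg() returns the cookie that was passed to rtdm_irq_request(), here the rtcan_device.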
*/ + dev = (struct rtcan_device *)rtdm_irq_get_arg(irq_handle, void); + chip = (struct rtcan_sja1000 *)dev->priv; + + /* Take spinlock protecting HW register access and device structures. */ + rtdm_lock_get(&dev->device_lock); + + /* Loop as long as the device reports an event */ + while ((irq_source = chip->read_reg(dev, SJA_IR))) { + ret = RTDM_IRQ_HANDLED; + irq_count++; + + /* Now look up which interrupts appeared */ + + /* Wake-up interrupt? */ + if (irq_source & SJA_IR_WUI) + dev->state = dev->state_before_sleep; + + /* Error Interrupt? */ + if (irq_source & (SJA_IR_EI | SJA_IR_DOI | SJA_IR_EPI | + SJA_IR_ALI | SJA_IR_BEI)) { + + /* Check error condition and fill error frame */ + if (!((irq_source & SJA_IR_BEI) && (chip->bus_err_on-- < 2))) { + rtcan_sja_err_interrupt(dev, chip, &skb, irq_source); + + if (recv_lock_free) { + recv_lock_free = 0; + rtdm_lock_get(&rtcan_recv_list_lock); + rtdm_lock_get(&rtcan_socket_lock); + } + /* Pass error frame out to the sockets */ + rtcan_rcv(dev, &skb); + } + } + + /* Transmit Interrupt? */ + if (irq_source & SJA_IR_TI) { + /* Wake up a sender */ + rtdm_sem_up(&dev->tx_sem); + dev->tx_count++; + + if (rtcan_loopback_pending(dev)) { + + if (recv_lock_free) { + recv_lock_free = 0; + rtdm_lock_get(&rtcan_recv_list_lock); + rtdm_lock_get(&rtcan_socket_lock); + } + + rtcan_loopback(dev); + } + } + + /* Receive Interrupt? */ + if (irq_source & SJA_IR_RI) { + + /* Read out HW registers */ + rtcan_sja_rx_interrupt(dev, &skb); + + /* Take more locks. Ensure that they are taken and + * released only once in the IRQ handler. */ + /* WARNING: Nested locks are dangerous! But they are + * nested only in this routine so a deadlock should + * not be possible. */ + if (recv_lock_free) { + recv_lock_free = 0; + rtdm_lock_get(&rtcan_recv_list_lock); + rtdm_lock_get(&rtcan_socket_lock); + } + + /* Pass received frame out to the sockets */ + rtcan_rcv(dev, &skb); + } + } + + if (chip->irq_ack) + chip->irq_ack(dev); + + /* Release spinlocks */ + if (!recv_lock_free) { + rtdm_lock_put(&rtcan_socket_lock); + rtdm_lock_put(&rtcan_recv_list_lock); + } + rtdm_lock_put(&dev->device_lock); + + return ret; +} + + + +/* + * Inline function to decide if controller is operating + * + * Catch the very unlikely case that setting stop mode + * returned without success before this call but in the + * meantime the controller went into reset mode. + */ +static inline int rtcan_sja_is_operating(struct rtcan_device *dev, + can_state_t *state) +{ + int is_operating = STATE_OPERATING(*state); + struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv; + + if (unlikely(is_operating && chip->read_reg(dev, SJA_MOD) & SJA_MOD_RM)) { + *state = CAN_STATE_STOPPED; + is_operating = 0; + /* Disable the controller's interrupts */ + chip->write_reg(dev, SJA_IER, 0x00); + /* Wake up waiting senders */ + rtdm_sem_destroy(&dev->tx_sem); + } + + return is_operating; +} + + +/* + * Set controller into reset mode. + * + * According to the SJA1000 specification, it is necessary to check the + * reset mode bit in PeliCAN mode after having set it. So we do. But if + * using an ISA card like the PHYTEC eNET card this should not be necessary + * because the CAN controller clock of this card (16 MHz) is twice as high + * as the ISA bus clock. + */ +static int rtcan_sja_mode_stop(struct rtcan_device *dev, + rtdm_lockctx_t *lock_ctx) +{ + int ret = 0; + /* Max. 50 loops busy sleep. If the controller is stopped while in + * sleep mode 20-40 loops are needed (tested on PHYTEC eNET). 
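+ * Each loop busy-waits for one microsecond, so the bound is roughly 50 us.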
*/ + int wait_loop = 50; + can_state_t state; + struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv; + + state = dev->state; + /* If controller is not operating anyway, go out */ + if (STATE_RESET(state)) + goto out; + + /* Disable the controller's interrupts */ + chip->write_reg(dev, SJA_IER, 0x00); + + /* Set reset mode bit */ + chip->write_reg(dev, SJA_MOD, SJA_MOD_RM); + + /* Read reset mode bit, multiple tests */ + do { + if (chip->read_reg(dev, SJA_MOD) & SJA_MOD_RM) + break; + + if (lock_ctx) + rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx); + /* Busy sleep 1 microsecond */ + rtdm_task_busy_sleep(1000); + if (lock_ctx) + rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx); + } while(--wait_loop); + + + if (wait_loop) { + /* Volatile state could have changed while we slept busy. */ + dev->state = CAN_STATE_STOPPED; + /* Wake up waiting senders */ + rtdm_sem_destroy(&dev->tx_sem); + } else { + ret = -EAGAIN; + /* Enable interrupts again as we did not succeed */ + chip->write_reg(dev, SJA_IER, SJA1000_IER); + } + + out: + return ret; +} + + + +/* + * Set controller into operating mode. + * + * If coming from CAN_STATE_SLEEPING, the controller must wait + * some time to avoid bus errors. Measured on a PHYTEC eNET card, + * this time was 110 microseconds. + */ +static int rtcan_sja_mode_start(struct rtcan_device *dev, + rtdm_lockctx_t *lock_ctx) +{ + int ret = 0; + u8 mod_reg; + struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv; + + /* We won't forget that state in the device structure is volatile and + * access to it will not be optimized by the compiler. So ... */ + + mod_reg = 0; + if (dev->ctrl_mode & CAN_CTRLMODE_LISTENONLY) + mod_reg |= SJA_MOD_LOM; + if (dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK) + mod_reg |= SJA_MOD_STM; + + switch (dev->state) { + + case CAN_STATE_ACTIVE: + case CAN_STATE_BUS_WARNING: + case CAN_STATE_BUS_PASSIVE: + break; + + case CAN_STATE_STOPPED: + /* Clear error counters */ + chip->write_reg(dev, SJA_RXERR, 0); + chip->write_reg(dev, SJA_TXERR, 0); + /* Clear error code capture (i.e. read it) */ + chip->read_reg(dev, SJA_ECC); + /* Set error active state */ + dev->state = CAN_STATE_ACTIVE; + /* Set up sender "mutex" */ + rtdm_sem_init(&dev->tx_sem, 1); + /* Enable interrupts */ + chip->write_reg(dev, SJA_IER, SJA1000_IER); + + /* Clear reset mode bit in SJA1000 */ + chip->write_reg(dev, SJA_MOD, mod_reg); + + break; + + case CAN_STATE_SLEEPING: + /* Trigger Wake-up interrupt */ + chip->write_reg(dev, SJA_MOD, mod_reg); + + /* Ok, coming from sleep mode is problematic. We have to wait + * for the SJA1000 to get on both feet again. */ + rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx); + rtdm_task_busy_sleep(110000); + rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx); + + /* Meanwhile, the Wake-up interrupt was serviced and has set the + * right state. As we don't want to set it back, jump out. */ + goto out; + + break; + + case CAN_STATE_BUS_OFF: + /* Trigger bus-off recovery */ + chip->write_reg(dev, SJA_MOD, mod_reg); + /* Set up sender "mutex" */ + rtdm_sem_init(&dev->tx_sem, 1); + /* Set error active state */ + dev->state = CAN_STATE_ACTIVE; + + break; + + default: + /* Never reached, but we don't want nasty compiler warnings ... 
*/ + break; + } + + out: + return ret; +} + +can_state_t rtcan_sja_get_state(struct rtcan_device *dev) +{ + can_state_t state = dev->state; + rtcan_sja_is_operating(dev, &state); + return state; +} + +int rtcan_sja_set_mode(struct rtcan_device *dev, + can_mode_t mode, + rtdm_lockctx_t *lock_ctx) +{ + int ret = 0; + can_state_t state; + struct rtcan_sja1000 *chip = (struct rtcan_sja1000*)dev->priv; + + switch (mode) { + + case CAN_MODE_STOP: + ret = rtcan_sja_mode_stop(dev, lock_ctx); + break; + + case CAN_MODE_START: + ret = rtcan_sja_mode_start(dev, lock_ctx); + break; + + case CAN_MODE_SLEEP: + + state = dev->state; + + /* Controller must operate, otherwise go out */ + if (!rtcan_sja_is_operating(dev, &state)) { + ret = -ENETDOWN; + goto mode_sleep_out; + } + + /* Is controller sleeping yet? If yes, go out */ + if (state == CAN_STATE_SLEEPING) + goto mode_sleep_out; + + /* Remember into which state to return when we + * wake up */ + dev->state_before_sleep = state; + + /* Let's take a nap. (Now I REALLY understand + * the meaning of interrupts ...) */ + state = CAN_STATE_SLEEPING; + chip->write_reg(dev, SJA_MOD, + chip->read_reg(dev, SJA_MOD) | SJA_MOD_SM); + + mode_sleep_out: + dev->state = state; + break; + + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +int rtcan_sja_set_bit_time(struct rtcan_device *dev, + struct can_bittime *bit_time, + rtdm_lockctx_t *lock_ctx) +{ + struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv; + u8 btr0, btr1; + + switch (bit_time->type) { + case CAN_BITTIME_BTR: + btr0 = bit_time->btr.btr0; + btr1 = bit_time->btr.btr1; + break; + + case CAN_BITTIME_STD: + btr0 = (BTR0_SET_BRP(bit_time->std.brp) | + BTR0_SET_SJW(bit_time->std.sjw)); + btr1 = (BTR1_SET_TSEG1(bit_time->std.prop_seg + + bit_time->std.phase_seg1) | + BTR1_SET_TSEG2(bit_time->std.phase_seg2) | + BTR1_SET_SAM(bit_time->std.sam)); + + break; + + default: + return -EINVAL; + } + + printk("%s: btr0=%#x btr1=%#x\n", __func__, btr0, btr1); + chip->write_reg(dev, SJA_BTR0, btr0); + chip->write_reg(dev, SJA_BTR1, btr1); + + return 0; +} + +void rtcan_sja_enable_bus_err(struct rtcan_device *dev) +{ + struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv; + + if (chip->bus_err_on < 2) { + if (chip->bus_err_on < 1) + chip->read_reg(dev, SJA_ECC); + chip->bus_err_on = 2; + } +} + +/* + * Start a transmission to a SJA1000 device + */ +static int rtcan_sja_start_xmit(struct rtcan_device *dev, + can_frame_t *frame) +{ + int i; + /* "Real" size of the payload */ + u8 size; + /* Content of frame information register */ + u8 fir; + struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv; + + /* Get DLC */ + fir = frame->can_dlc; + + /* If DLC exceeds 8 bytes adjust it to 8 (for the payload) */ + size = (fir > 8) ? 8 : fir; + + + if (frame->can_id & CAN_EFF_FLAG) { + /* Send extended frame */ + fir |= SJA_FIR_EFF; + + /* Write ID */ + chip->write_reg(dev, SJA_ID1, frame->can_id >> 21); + chip->write_reg(dev, SJA_ID2, frame->can_id >> 13); + chip->write_reg(dev, SJA_ID3, frame->can_id >> 5); + chip->write_reg(dev, SJA_ID4, frame->can_id << 3); + + /* RTR? */ + if (frame->can_id & CAN_RTR_FLAG) + fir |= SJA_FIR_RTR; + + else { + /* No RTR, write data bytes */ + for (i = 0; i < size; i++) + chip->write_reg(dev, SJA_DATA_EFF(i), + frame->data[i]); + } + + } else { + /* Send standard frame */ + + /* Write ID */ + chip->write_reg(dev, SJA_ID1, frame->can_id >> 3); + chip->write_reg(dev, SJA_ID2, frame->can_id << 5); + + /* RTR? 
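+ * For a remote frame only the RTR bit is set in the FIR; no data
+ * registers are written.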
*/ + if (frame->can_id & CAN_RTR_FLAG) + fir |= SJA_FIR_RTR; + + else { + /* No RTR, write data bytes */ + for (i = 0; i < size; i++) + chip->write_reg(dev, SJA_DATA_SFF(i), + frame->data[i]); + } + } + + + /* Write frame information register */ + chip->write_reg(dev, SJA_FIR, fir); + + /* Push the 'send' button */ + if (dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK) + chip->write_reg(dev, SJA_CMR, SJA_CMR_SRR); + else + chip->write_reg(dev, SJA_CMR, SJA_CMR_TR); + + return 0; +} + + + +/* + * SJA1000 chip configuration + */ +static void sja1000_chip_config(struct rtcan_device *dev) +{ + struct rtcan_sja1000 *chip = (struct rtcan_sja1000* )dev->priv; + + chip->write_reg(dev, SJA_CDR, chip->cdr); + chip->write_reg(dev, SJA_OCR, chip->ocr); + + chip->write_reg(dev, SJA_AMR0, 0xFF); + chip->write_reg(dev, SJA_AMR1, 0xFF); + chip->write_reg(dev, SJA_AMR2, 0xFF); + chip->write_reg(dev, SJA_AMR3, 0xFF); +} + + +int rtcan_sja1000_register(struct rtcan_device *dev) +{ + int ret; + struct rtcan_sja1000 *chip = dev->priv; + + if (chip == NULL) + return -EINVAL; + + /* Set dummy state for following call */ + dev->state = CAN_STATE_ACTIVE; + /* Enter reset mode */ + rtcan_sja_mode_stop(dev, NULL); + + if ((chip->read_reg(dev, SJA_SR) & + (SJA_SR_RBS | SJA_SR_DOS | SJA_SR_TBS)) != SJA_SR_TBS) { + printk("ERROR! No SJA1000 device found!\n"); + return -ENODEV; + } + + dev->ctrl_name = sja_ctrl_name; + + dev->hard_start_xmit = rtcan_sja_start_xmit; + dev->do_set_mode = rtcan_sja_set_mode; + dev->do_get_state = rtcan_sja_get_state; + dev->do_set_bit_time = rtcan_sja_set_bit_time; + dev->do_enable_bus_err = rtcan_sja_enable_bus_err; +#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD + dev->bittiming_const = &sja1000_bittiming_const; +#endif + + chip->bus_err_on = 1; + + ret = rtdm_irq_request(&dev->irq_handle, + chip->irq_num, rtcan_sja_interrupt, + chip->irq_flags, sja_ctrl_name, dev); + if (ret) { + printk(KERN_ERR "ERROR %d: IRQ %d is %s!\n", + ret, chip->irq_num, ret == -EBUSY ? 
+ "busy, check shared interrupt support" : "invalid"); + return ret; + } + + sja1000_chip_config(dev); + + /* Register RTDM device */ + ret = rtcan_dev_register(dev); + if (ret) { + printk(KERN_ERR + "ERROR %d while trying to register RTCAN device!\n", ret); + goto out_irq_free; + } + + rtcan_sja_create_proc(dev); + + return 0; + + out_irq_free: + rtdm_irq_free(&dev->irq_handle); + + return ret; +} + + +/* Cleanup module */ +void rtcan_sja1000_unregister(struct rtcan_device *dev) +{ + printk("Unregistering SJA1000 device %s\n", dev->name); + + rtdm_irq_disable(&dev->irq_handle); + rtcan_sja_mode_stop(dev, NULL); + rtdm_irq_free(&dev->irq_handle); + rtcan_sja_remove_proc(dev); + rtcan_dev_unregister(dev); +} + +int __init rtcan_sja_init(void) +{ + if (!rtdm_available()) + return -ENOSYS; + + printk("RTCAN SJA1000 driver initialized\n"); + return 0; +} + + +void __exit rtcan_sja_exit(void) +{ + printk("%s removed\n", sja_ctrl_name); +} + +module_init(rtcan_sja_init); +module_exit(rtcan_sja_exit); + +EXPORT_SYMBOL_GPL(rtcan_sja1000_register); +EXPORT_SYMBOL_GPL(rtcan_sja1000_unregister); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h new file mode 100644 index 0000000..84eb41b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2006, Wolfgang Grandegger <wg@grandegger.com> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __SJA1000_H_ +#define __SJA1000_H_ + +#include <rtcan_dev.h> + +struct rtcan_sja1000 { + unsigned char (*read_reg)(struct rtcan_device *dev, int off); + void (*write_reg)(struct rtcan_device *dev, int off, unsigned char val); + void (*irq_ack)(struct rtcan_device *dev); + unsigned short irq_num; + unsigned short irq_flags; + unsigned char ocr; + unsigned char cdr; + char bus_err_on; +}; + +#ifdef CONFIG_FS_PROCFS +int rtcan_sja_create_proc(struct rtcan_device* dev); +void rtcan_sja_remove_proc(struct rtcan_device* dev); +#else +static inline int rtcan_sja_create_proc(struct rtcan_device* dev) +{ return 0; } +static inline void rtcan_sja_remove_proc(struct rtcan_device* dev) { } +#endif +int rtcan_sja1000_register(struct rtcan_device *dev); +void rtcan_sja1000_unregister(struct rtcan_device *dev); + + +#endif /* __SJA1000_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c new file mode 100644 index 0000000..57fd807 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/delay.h> + +#include <rtdm/driver.h> + +#include <rtcan_dev.h> +#include <rtcan_internal.h> +#include <rtcan_sja1000.h> + +#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG + +static int rtcan_sja_proc_regs(struct seq_file *p, void *data) +{ + struct rtcan_device *dev = (struct rtcan_device *)data; + struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv; + int i; + + seq_printf(p, "SJA1000 registers"); + for (i = 0; i < 0x20; i++) { + if ((i % 0x10) == 0) + seq_printf(p, "\n%02x:", i); + seq_printf(p, " %02x", chip->read_reg(dev, i)); + } + seq_printf(p, "\n"); + return 0; +} + +static int rtcan_sja_proc_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, rtcan_sja_proc_regs, pde_data(inode)); +} + +static const DEFINE_PROC_OPS(rtcan_sja_proc_regs_ops, + rtcan_sja_proc_regs_open, + single_release, + seq_read, + NULL); + +int rtcan_sja_create_proc(struct rtcan_device* dev) +{ + if (!dev->proc_root) + return -EINVAL; + + proc_create_data("registers", S_IFREG | S_IRUGO | S_IWUSR, dev->proc_root, + &rtcan_sja_proc_regs_ops, dev); + return 0; +} + +void rtcan_sja_remove_proc(struct rtcan_device* dev) +{ + if (!dev->proc_root) + return; + + remove_proc_entry("registers", dev->proc_root); +} + +#else /* !CONFIG_XENO_DRIVERS_CAN_DEBUG */ + +void rtcan_sja_remove_proc(struct rtcan_device* dev) +{ +} + +int rtcan_sja_create_proc(struct rtcan_device* dev) +{ + return 0; +} +#endif /* CONFIG_XENO_DRIVERS_CAN_DEBUG */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h new file mode 100644 index 0000000..9f2f871 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2005,2006 Sebastian Smolorz + * <Sebastian.Smolorz@stud.uni-hannover.de> + * + * Based on drivers/can/sja1000.h in linux-can.patch, a CAN socket + * framework for Linux: + * + * Copyright (C) 2005, Sascha Hauer, Pengutronix + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __SJA1000_REGS_H_ +#define __SJA1000_REGS_H_ + + +/* PeliCAN mode address map */ + +/* reset and operating mode */ +#define SJA_MOD 0 /* Mode register */ +#define SJA_CMR 1 /* Command register */ +#define SJA_SR 2 /* Status register */ +#define SJA_IR 3 /* Interrupt register */ +#define SJA_IER 4 /* Interrupt enable register */ +#define SJA_BTR0 6 /* Bus timing register 0 */ +#define SJA_BTR1 7 /* Bus timing register 1 */ +#define SJA_OCR 8 /* Output control register */ +#define SJA_ALC 11 /* Arbitration lost capture */ +#define SJA_ECC 12 /* Error code capture register */ +#define SJA_RXERR 14 /* Receive error counter */ +#define SJA_TXERR 15 /* Transmit error counter */ +#define SJA_CDR 31 /* Clock divider register */ + +/* reset mode */ +#define SJA_ACR0 16 /* Acceptance code register 0 */ +#define SJA_ACR1 17 /* Acceptance code register 1 */ +#define SJA_ACR2 18 /* Acceptance code register 2 */ +#define SJA_ACR3 19 /* Acceptance code register 3 */ +#define SJA_AMR0 20 /* Acceptance mask register 0 */ +#define SJA_AMR1 21 /* Acceptance mask register 1 */ +#define SJA_AMR2 22 /* Acceptance mask register 2 */ +#define SJA_AMR3 23 /* Acceptance mask register 3 */ + +/* operating mode */ +#define SJA_FIR 16 /* Frame information register */ +#define SJA_ID1 17 /* Identifier 1 */ +#define SJA_ID2 18 /* Identifier 2 */ +#define SJA_ID3 19 /* Identifier 3 (EFF only) */ +#define SJA_ID4 20 /* Identifier 4 (EFF only) */ + +#define SJA_DATA_SFF(x) (19 + (x)) /* Data registers in case of standard + * frame format; 0 <= x <= 7 */ +#define SJA_DATA_EFF(x) (21 + (x)) /* Data registers in case of extended + * frame format; 0 <= x <= 7 */ + +/* Mode register */ +enum SJA1000_PELI_MOD { + SJA_MOD_RM = 1, /* Reset Mode */ + SJA_MOD_LOM = 1<<1, /* Listen Only Mode */ + SJA_MOD_STM = 1<<2, /* Self Test Mode */ + SJA_MOD_AFM = 1<<3, /* Acceptance Filter Mode */ + SJA_MOD_SM = 1<<4 /* Sleep Mode */ +}; + +/* Command register */ +enum SJA1000_PELI_CMR { + SJA_CMR_TR = 1, /* Transmission request */ + SJA_CMR_AT = 1<<1, /* Abort Transmission */ + SJA_CMR_RRB = 1<<2, /* Release Receive Buffer */ + SJA_CMR_CDO = 1<<3, /* Clear Data Overrun */ + SJA_CMR_SRR = 1<<4 /* Self reception request */ +}; + +/* Status register */ +enum SJA1000_PELI_SR { + SJA_SR_RBS = 1, /* Receive Buffer Status */ + SJA_SR_DOS = 1<<1, /* Data Overrun Status */ + SJA_SR_TBS = 1<<2, /* Transmit Buffer Status */ + SJA_SR_ES = 1<<6, /* Error Status */ + SJA_SR_BS = 1<<7 /* Bus Status */ +}; + +/* Interrupt register */ +enum SJA1000_PELI_IR { + SJA_IR_RI = 1, /* Receive Interrupt */ + SJA_IR_TI = 1<<1, /* Transmit Interrupt */ + SJA_IR_EI = 1<<2, /* Error Warning Interrupt */ + SJA_IR_DOI = 1<<3, /* Data Overrun Interrupt */ + SJA_IR_WUI = 1<<4, /* Wake-Up Interrupt */ + SJA_IR_EPI = 1<<5, /* Error Passive Interrupt */ + SJA_IR_ALI = 1<<6, /* Arbitration Lost Interrupt */ + SJA_IR_BEI = 1<<7, /* Bus Error Interrupt */ +}; + +/* Interrupt enable register */ +enum SJA1000_PELI_IER { + SJA_IER_RIE = 1, /* Receive Interrupt Enable */ + SJA_IER_TIE = 1<<1, /* Transmit Interrupt Enable */ + SJA_IER_EIE = 1<<2, /* Error Warning Interrupt Enable */ + SJA_IER_DOIE = 1<<3, /* Data Overrun Interrupt Enable */ + SJA_IER_WUIE = 1<<4, /* Wake-Up Interrupt Enable */ + SJA_IER_EPIE = 1<<5, /* Error Passive Interrupt Enable */ + SJA_IER_ALIE = 1<<6, /* Arbitration Lost Interrupt Enable */ + SJA_IER_BEIE = 1<<7, /* Bus Error Interrupt Enable */ +}; + +/* Bus timing register 0 */ +enum SJA1000_PELI_BTR0 { + /* Period of the CAN system clock t_SCl 
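+ * as selected by the baud rate prescaler; e.g. a 16 MHz crystal gives
+ * t_CLK = 62.5 ns, so the lowest setting yields t_SCl = 125 ns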
+ * (t_CLK = time period of XTAL frequency) */ + SJA_BTR0_T_SCL_2_T_CLK = 0, /* t_SCl = 2 x t_CLK */ + SJA_BTR0_T_SCL_4_T_CLK = 1, /* t_SCl = 4 x t_CLK */ + SJA_BTR0_T_SCL_6_T_CLK = 2, /* t_SCl = 6 x t_CLK */ + SJA_BTR0_T_SCL_8_T_CLK = 3, /* t_SCl = 8 x t_CLK */ + SJA_BTR0_T_SCL_10_T_CLK = 4, /* t_SCl = 10 x t_CLK */ + SJA_BTR0_T_SCL_12_T_CLK = 5, /* t_SCl = 12 x t_CLK */ + SJA_BTR0_T_SCL_14_T_CLK = 6, /* t_SCl = 14 x t_CLK */ + SJA_BTR0_T_SCL_16_T_CLK = 7, /* t_SCl = 16 x t_CLK */ + SJA_BTR0_T_SCL_20_T_CLK = 9, /* t_SCl = 20 x t_CLK */ + SJA_BTR0_T_SCL_40_T_CLK = 19, /* t_SCl = 40 x t_CLK */ + SJA_BTR0_T_SCL_100_T_CLK = 49, /* t_SCl = 100 x t_CLK */ + +}; + +/* Bus timing register 1 */ +enum SJA1000_PELI_BTR1 { + /* Time segment 1 */ + SJA_BTR1_T_SEG1_1_T_SCL = 0, /* t_SEG1 = 1 x t_SCl */ + SJA_BTR1_T_SEG1_2_T_SCL = 1, /* t_SEG1 = 2 x t_SCl */ + SJA_BTR1_T_SEG1_3_T_SCL = 2, /* t_SEG1 = 3 x t_SCl */ + SJA_BTR1_T_SEG1_4_T_SCL = 3, /* t_SEG1 = 4 x t_SCl */ + SJA_BTR1_T_SEG1_5_T_SCL = 4, /* t_SEG1 = 5 x t_SCl */ + SJA_BTR1_T_SEG1_6_T_SCL = 5, /* t_SEG1 = 6 x t_SCl */ + SJA_BTR1_T_SEG1_7_T_SCL = 6, /* t_SEG1 = 7 x t_SCl */ + SJA_BTR1_T_SEG1_8_T_SCL = 7, /* t_SEG1 = 8 x t_SCl */ + /* Time segment 2 */ + SJA_BTR1_T_SEG2_1_T_SCL = 0<<4, /* t_SEG2 = 1 x t_SCl */ + SJA_BTR1_T_SEG2_2_T_SCL = 1<<4, /* t_SEG2 = 2 x t_SCl */ + SJA_BTR1_T_SEG2_3_T_SCL = 2<<4, /* t_SEG2 = 3 x t_SCl */ + SJA_BTR1_T_SEG2_4_T_SCL = 3<<4, /* t_SEG2 = 4 x t_SCl */ + SJA_BTR1_T_SEG2_5_T_SCL = 4<<4, /* t_SEG2 = 5 x t_SCl */ + SJA_BTR1_T_SEG2_6_T_SCL = 5<<4, /* t_SEG2 = 6 x t_SCl */ + SJA_BTR1_T_SEG2_7_T_SCL = 6<<4, /* t_SEG2 = 7 x t_SCl */ + SJA_BTR1_T_SEG2_8_T_SCL = 7<<4, /* t_SEG2 = 8 x t_SCl */ +}; + +/* One bit time = t_SCl + t_SEG1 + t_SEG2 */ + + +/* Output control register */ +enum SJA1000_PELI_OCR { + SJA_OCR_MODE_BIPHASE = 0, + SJA_OCR_MODE_TEST = 1, + SJA_OCR_MODE_NORMAL = 2, + SJA_OCR_MODE_CLOCK = 3, + SJA_OCR_TX0_INVERT = 1<<2, + SJA_OCR_TX0_PULLDOWN = 1<<3, + SJA_OCR_TX0_PULLUP = 2<<3, + SJA_OCR_TX0_PUSHPULL = 3<<3, + SJA_OCR_TX1_INVERT = 1<<5, + SJA_OCR_TX1_PULLDOWN = 1<<6, + SJA_OCR_TX1_PULLUP = 2<<6, + SJA_OCR_TX1_PUSHPULL = 3<<6 +}; + +/* Error code capture register */ +enum SJA1000_PELI_ECC { + /* The segmentation field gives information about the location of + * errors on the bus */ + SJA_ECC_SEG_MASK = 31, /* Segmentation field mask */ + SJA_ECC_DIR = 1<<5, /* Transfer direction */ + SJA_ECC_ERR_BIT = 0<<6, + SJA_ECC_ERR_FORM = 1<<6, + SJA_ECC_ERR_STUFF = 2<<6, + SJA_ECC_ERR_MASK = 3<<6 /* Error code mask */ +}; + +/* Frame information register */ +enum SJA1000_PELI_FIR { + SJA_FIR_DLC_MASK = 15, /* Data length code mask */ + SJA_FIR_RTR = 1<<6, /* Remote transmission request */ + SJA_FIR_EFF = 1<<7 /* Extended frame format */ +}; + +/* Clock divider register */ +enum SJA1000_PELI_CDR { + SJA_CDR_CLKOUT_MASK = 0x07, + SJA_CDR_CLK_OFF = 1<<3, /* Clock off (CLKOUT pin) */ + SJA_CDR_CBP = 1<<6, /* CAN input comparator bypass */ + SJA_CDR_CAN_MODE = 1<<7 /* CAN mode: 1 = PeliCAN */ +}; + +#endif /* __SJA1000_REGS_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig new file mode 100644 index 0000000..c257444 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig @@ -0,0 +1,72 @@ +menu "Real-time GPIO drivers" + +config XENO_DRIVERS_GPIO + bool "GPIO controller" + depends on GPIOLIB + help + + Real-time capable GPIO module. 
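For illustration only, not part of the patch: a minimal user-space sketch of driving one pin exported by gpio-core.c below, assuming the GPIO_RTIOC_* requests from Xenomai's <rtdm/uapi/gpio.h>, a hypothetical chip label "gpiochip0" and pin 23; the node path follows the /dev/rtdm/<chip-label>/gpio<N> scheme set up by gpio_pin_devnode():

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rtdm/gpio.h>	/* assumed to provide the GPIO_RTIOC_* requests */

int main(void)
{
	int value = 1;
	/* hypothetical chip label and pin number */
	int fd = open("/dev/rtdm/gpiochip0/gpio23", O_RDWR);

	if (fd < 0)
		return 1;

	/* switch the pin to output, initially driving 'value' */
	if (ioctl(fd, GPIO_RTIOC_DIR_OUT, &value) == 0) {
		value = 0;
		write(fd, &value, sizeof(value));	/* drive it low */
	}

	close(fd);
	return 0;
}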
+ +if XENO_DRIVERS_GPIO + +config XENO_DRIVERS_GPIO_BCM2835 + depends on MACH_BCM2708 || ARCH_BCM2835 + tristate "Support for BCM2835 GPIOs" + help + + Enables support for the GPIO controller available from + Broadcom's BCM2835 SoC. + +config XENO_DRIVERS_GPIO_MXC + depends on GPIO_MXC + tristate "Support for MXC GPIOs" + help + + Suitable for the GPIO controller available from + Freescale/NXP's MXC architecture. + +config XENO_DRIVERS_GPIO_SUN8I_H3 + depends on MACH_SUN8I && PINCTRL_SUN8I_H3 + tristate "Support for SUN8I H3 GPIOs" + help + + Suitable for the GPIO controller available from Allwinner's H3 + SoC, as found on the NanoPI boards. + +config XENO_DRIVERS_GPIO_ZYNQ7000 + depends on ARCH_ZYNQ || ARCH_ZYNQMP + tristate "Support for Zynq7000 GPIOs" + help + + Enables support for the GPIO controller available from + Xilinx's Zynq7000 SoC. + +config XENO_DRIVERS_GPIO_XILINX + depends on ARCH_ZYNQ || ARCH_ZYNQMP + tristate "Support for Xilinx GPIOs" + help + + Enables support for the GPIO controller available from + Xilinx's softcore IP. + +config XENO_DRIVERS_GPIO_OMAP + depends on ARCH_OMAP2PLUS || ARCH_OMAP + tristate "Support for OMAP GPIOs" + help + + Enables support for the GPIO controller available from + OMAP family SOC. + +config XENO_DRIVERS_GPIO_CHERRYVIEW + depends on PINCTRL_CHERRYVIEW + tristate "Support for Cherryview GPIOs" + help + + Enables support for the Intel Cherryview GPIO controller + +config XENO_DRIVERS_GPIO_DEBUG + bool "Enable GPIO core debugging features" + +endif + +endmenu diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile new file mode 100644 index 0000000..e534eab --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile @@ -0,0 +1,18 @@ +ccflags-$(CONFIG_XENO_DRIVERS_GPIO_DEBUG) := -DDEBUG + +obj-$(CONFIG_XENO_DRIVERS_GPIO_BCM2835) += xeno-gpio-bcm2835.o +obj-$(CONFIG_XENO_DRIVERS_GPIO_MXC) += xeno-gpio-mxc.o +obj-$(CONFIG_XENO_DRIVERS_GPIO_SUN8I_H3) += xeno-gpio-sun8i-h3.o +obj-$(CONFIG_XENO_DRIVERS_GPIO_ZYNQ7000) += xeno-gpio-zynq7000.o +obj-$(CONFIG_XENO_DRIVERS_GPIO_XILINX) += xeno-gpio-xilinx.o +obj-$(CONFIG_XENO_DRIVERS_GPIO_OMAP) += xeno-gpio-omap.o +obj-$(CONFIG_XENO_DRIVERS_GPIO_CHERRYVIEW) += xeno-gpio-cherryview.o +obj-$(CONFIG_XENO_DRIVERS_GPIO) += gpio-core.o + +xeno-gpio-bcm2835-y := gpio-bcm2835.o +xeno-gpio-mxc-y := gpio-mxc.o +xeno-gpio-sun8i-h3-y := gpio-sun8i-h3.o +xeno-gpio-zynq7000-y := gpio-zynq7000.o +xeno-gpio-xilinx-y := gpio-xilinx.o +xeno-gpio-omap-y := gpio-omap.o +xeno-gpio-cherryview-y := gpio-cherryview.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c new file mode 100644 index 0000000..c379e6c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c @@ -0,0 +1,37 @@ +/** + * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <rtdm/gpio.h> + +#define RTDM_SUBCLASS_BCM2835 1 + +static int __init bcm2835_gpio_init(void) +{ + return rtdm_gpiochip_scan_of(NULL, "brcm,bcm2835-gpio", + RTDM_SUBCLASS_BCM2835); +} +module_init(bcm2835_gpio_init); + +static void __exit bcm2835_gpio_exit(void) +{ + rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_BCM2835); +} +module_exit(bcm2835_gpio_exit); + +MODULE_LICENSE("GPL"); + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c new file mode 100644 index 0000000..1234a3e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * @note Copyright (C) 2021 Hongzhan Chen <hongzhan.chen@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/module.h> +#include <rtdm/gpio.h> + +#define RTDM_SUBCLASS_CHERRYVIEW 7 + +static const char *label_array[] = { + "INT33FF:00", + "INT33FF:01", + "INT33FF:02", + "INT33FF:03", +}; + +static int __init cherryview_gpio_init(void) +{ + return rtdm_gpiochip_array_find(NULL, label_array, + ARRAY_SIZE(label_array), + RTDM_SUBCLASS_CHERRYVIEW); +} +module_init(cherryview_gpio_init); + +static void __exit cherryview_gpio_exit(void) +{ + rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_CHERRYVIEW); +} +module_exit(cherryview_gpio_exit); + +MODULE_LICENSE("GPL"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c new file mode 100644 index 0000000..f67a5bf --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c @@ -0,0 +1,691 @@ +/** + * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/gpio.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <rtdm/gpio.h> + +struct rtdm_gpio_chan { + int requested : 1, + has_direction : 1, + is_output : 1, + is_interrupt : 1, + want_timestamp : 1; +}; + +static LIST_HEAD(rtdm_gpio_chips); + +static DEFINE_MUTEX(chip_lock); + +static int gpio_pin_interrupt(rtdm_irq_t *irqh) +{ + struct rtdm_gpio_pin *pin; + + pin = rtdm_irq_get_arg(irqh, struct rtdm_gpio_pin); + + if (pin->monotonic_timestamp) + pin->timestamp = rtdm_clock_read_monotonic(); + else + pin->timestamp = rtdm_clock_read(); + rtdm_event_signal(&pin->event); + + return RTDM_IRQ_HANDLED; +} + +static int request_gpio_irq(unsigned int gpio, struct rtdm_gpio_pin *pin, + struct rtdm_gpio_chan *chan, + int trigger) +{ + int ret, irq_trigger, irq; + + if (trigger & ~GPIO_TRIGGER_MASK) + return -EINVAL; + + if (!chan->requested) { + ret = gpio_request(gpio, pin->name); + if (ret) { + if (ret != -EPROBE_DEFER) + printk(XENO_ERR + "can not request GPIO%d\n", gpio); + return ret; + } + chan->requested = true; + } + + ret = gpio_direction_input(gpio); + if (ret) { + printk(XENO_ERR "cannot set GPIO%d as input\n", gpio); + goto fail; + } + + chan->has_direction = true; + gpio_export(gpio, true); + + rtdm_event_clear(&pin->event); + + /* + * Attempt to hook the interrupt associated to that pin. We + * might fail getting a valid IRQ number, in case the GPIO + * chip did not define any mapping handler (->to_irq). If so, + * just assume that either we have no IRQ indeed, or interrupt + * handling may be open coded elsewhere. + */ + irq = gpio_to_irq(gpio); + if (irq < 0) + goto done; + + irq_trigger = 0; + if (trigger & GPIO_TRIGGER_EDGE_RISING) + irq_trigger |= IRQ_TYPE_EDGE_RISING; + if (trigger & GPIO_TRIGGER_EDGE_FALLING) + irq_trigger |= IRQ_TYPE_EDGE_FALLING; + if (trigger & GPIO_TRIGGER_LEVEL_HIGH) + irq_trigger |= IRQ_TYPE_LEVEL_HIGH; + if (trigger & GPIO_TRIGGER_LEVEL_LOW) + irq_trigger |= IRQ_TYPE_LEVEL_LOW; + + if (irq_trigger) + irq_set_irq_type(irq, irq_trigger); + + ret = rtdm_irq_request(&pin->irqh, irq, gpio_pin_interrupt, + 0, pin->name, pin); + if (ret) { + printk(XENO_ERR "cannot request GPIO%d interrupt\n", gpio); + goto fail; + } + + + rtdm_irq_enable(&pin->irqh); +done: + chan->is_interrupt = true; + + return 0; +fail: + gpio_free(gpio); + chan->requested = false; + + return ret; +} + +static void release_gpio_irq(unsigned int gpio, struct rtdm_gpio_pin *pin, + struct rtdm_gpio_chan *chan) +{ + if (chan->is_interrupt) { + rtdm_irq_free(&pin->irqh); + chan->is_interrupt = false; + } + gpio_free(gpio); + chan->requested = false; +} + +static int gpio_pin_ioctl_nrt(struct rtdm_fd *fd, + unsigned int request, void *arg) +{ + struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd); + struct rtdm_device *dev = rtdm_fd_device(fd); + unsigned int gpio = rtdm_fd_minor(fd); + int ret = 0, val, trigger; + struct rtdm_gpio_pin *pin; + + pin = container_of(dev, struct rtdm_gpio_pin, dev); + + switch (request) { + case GPIO_RTIOC_DIR_OUT: + ret = rtdm_safe_copy_from_user(fd, &val, arg, sizeof(val)); + if (ret) + return ret; + ret = gpio_direction_output(gpio, val); + if (ret == 0) { + chan->has_direction = true; + chan->is_output = true; + } + break; + case GPIO_RTIOC_DIR_IN: + ret = gpio_direction_input(gpio); + if (ret == 0) + chan->has_direction = true; + break; + case GPIO_RTIOC_IRQEN: + if (chan->is_interrupt) { + return -EBUSY; + } + ret = 
rtdm_safe_copy_from_user(fd, &trigger, + arg, sizeof(trigger)); + if (ret) + return ret; + ret = request_gpio_irq(gpio, pin, chan, trigger); + break; + case GPIO_RTIOC_IRQDIS: + if (chan->is_interrupt) { + release_gpio_irq(gpio, pin, chan); + chan->requested = false; + chan->is_interrupt = false; + } + break; + case GPIO_RTIOC_REQS: + ret = gpio_request(gpio, pin->name); + if (ret) + return ret; + else + chan->requested = true; + break; + case GPIO_RTIOC_RELS: + gpio_free(gpio); + chan->requested = false; + break; + case GPIO_RTIOC_TS_MONO: + case GPIO_RTIOC_TS_REAL: + ret = rtdm_safe_copy_from_user(fd, &val, arg, sizeof(val)); + if (ret) + return ret; + chan->want_timestamp = !!val; + pin->monotonic_timestamp = request == GPIO_RTIOC_TS_MONO; + break; + default: + return -EINVAL; + } + + return ret; +} + +static ssize_t gpio_pin_read_rt(struct rtdm_fd *fd, + void __user *buf, size_t len) +{ + struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd); + struct rtdm_device *dev = rtdm_fd_device(fd); + struct rtdm_gpio_readout rdo; + struct rtdm_gpio_pin *pin; + int ret; + + if (!chan->has_direction) + return -EAGAIN; + + if (chan->is_output) + return -EINVAL; + + pin = container_of(dev, struct rtdm_gpio_pin, dev); + + if (chan->want_timestamp) { + if (len < sizeof(rdo)) + return -EINVAL; + + if (!(fd->oflags & O_NONBLOCK)) { + ret = rtdm_event_wait(&pin->event); + if (ret) + return ret; + rdo.timestamp = pin->timestamp; + } else if (pin->monotonic_timestamp) { + rdo.timestamp = rtdm_clock_read_monotonic(); + } else { + rdo.timestamp = rtdm_clock_read(); + } + + len = sizeof(rdo); + rdo.value = gpiod_get_raw_value(pin->desc); + ret = rtdm_safe_copy_to_user(fd, buf, &rdo, len); + } else { + if (len < sizeof(rdo.value)) + return -EINVAL; + + if (!(fd->oflags & O_NONBLOCK)) { + ret = rtdm_event_wait(&pin->event); + if (ret) + return ret; + } + + len = sizeof(rdo.value); + rdo.value = gpiod_get_raw_value(pin->desc); + ret = rtdm_safe_copy_to_user(fd, buf, &rdo.value, len); + } + + return ret ?: len; +} + +static ssize_t gpio_pin_write_rt(struct rtdm_fd *fd, + const void __user *buf, size_t len) +{ + struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd); + struct rtdm_device *dev = rtdm_fd_device(fd); + struct rtdm_gpio_pin *pin; + int value, ret; + + if (len < sizeof(value)) + return -EINVAL; + + if (!chan->has_direction) + return -EAGAIN; + + if (!chan->is_output) + return -EINVAL; + + ret = rtdm_safe_copy_from_user(fd, &value, buf, sizeof(value)); + if (ret) + return ret; + + pin = container_of(dev, struct rtdm_gpio_pin, dev); + gpiod_set_raw_value(pin->desc, value); + + return sizeof(value); +} + +static int gpio_pin_select(struct rtdm_fd *fd, struct xnselector *selector, + unsigned int type, unsigned int index) +{ + struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd); + struct rtdm_device *dev = rtdm_fd_device(fd); + struct rtdm_gpio_pin *pin; + + if (!chan->has_direction) + return -EAGAIN; + + if (chan->is_output) + return -EINVAL; + + pin = container_of(dev, struct rtdm_gpio_pin, dev); + + return rtdm_event_select(&pin->event, selector, type, index); +} + +int gpio_pin_open(struct rtdm_fd *fd, int oflags) +{ + struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd); + struct rtdm_device *dev = rtdm_fd_device(fd); + unsigned int gpio = rtdm_fd_minor(fd); + int ret = 0; + struct rtdm_gpio_pin *pin; + + pin = container_of(dev, struct rtdm_gpio_pin, dev); + ret = gpio_request(gpio, pin->name); + if (ret) { + printk(XENO_ERR "failed to request pin %d : %d\n", gpio, ret); + return ret; + } else { + 
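+		/* Remember the request so gpio_pin_close() can release the pin. */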
chan->requested = true; + } + + return 0; +} + +static void gpio_pin_close(struct rtdm_fd *fd) +{ + struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd); + struct rtdm_device *dev = rtdm_fd_device(fd); + unsigned int gpio = rtdm_fd_minor(fd); + struct rtdm_gpio_pin *pin; + + if (chan->requested) { + pin = container_of(dev, struct rtdm_gpio_pin, dev); + release_gpio_irq(gpio, pin, chan); + } +} + +static void delete_pin_devices(struct rtdm_gpio_chip *rgc) +{ + struct rtdm_gpio_pin *pin; + struct rtdm_device *dev; + int offset; + + for (offset = 0; offset < rgc->gc->ngpio; offset++) { + pin = rgc->pins + offset; + dev = &pin->dev; + rtdm_dev_unregister(dev); + rtdm_event_destroy(&pin->event); + kfree(dev->label); + kfree(pin->name); + } +} + +static int create_pin_devices(struct rtdm_gpio_chip *rgc) +{ + struct gpio_chip *gc = rgc->gc; + struct rtdm_gpio_pin *pin; + struct rtdm_device *dev; + int offset, ret, gpio; + + for (offset = 0; offset < gc->ngpio; offset++) { + ret = -ENOMEM; + gpio = gc->base + offset; + pin = rgc->pins + offset; + pin->name = kasprintf(GFP_KERNEL, "gpio%d", gpio); + if (pin->name == NULL) + goto fail_name; + pin->desc = gpio_to_desc(gpio); + if (pin->desc == NULL) { + ret = -ENODEV; + goto fail_desc; + } + dev = &pin->dev; + dev->driver = &rgc->driver; + dev->label = kasprintf(GFP_KERNEL, "%s/gpio%%d", gc->label); + if (dev->label == NULL) + goto fail_label; + dev->minor = gpio; + dev->device_data = rgc; + ret = rtdm_dev_register(dev); + if (ret) + goto fail_register; + rtdm_event_init(&pin->event, 0); + } + + return 0; + +fail_register: + kfree(dev->label); +fail_desc: +fail_label: + kfree(pin->name); +fail_name: + delete_pin_devices(rgc); + + return ret; +} + +static char *gpio_pin_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "rtdm/%s/%s", + dev->class->name, + dev_name(dev)); +} + +int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc, + struct gpio_chip *gc, int gpio_subclass) +{ + int ret; + + rgc->devclass = class_create(gc->owner, gc->label); + if (IS_ERR(rgc->devclass)) { + printk(XENO_ERR "cannot create sysfs class\n"); + return PTR_ERR(rgc->devclass); + } + rgc->devclass->devnode = gpio_pin_devnode; + + rgc->driver.profile_info = (struct rtdm_profile_info) + RTDM_PROFILE_INFO(rtdm_gpio_chip, + RTDM_CLASS_GPIO, + gpio_subclass, + 0); + rgc->driver.device_flags = RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR; + rgc->driver.base_minor = gc->base; + rgc->driver.device_count = gc->ngpio; + rgc->driver.context_size = sizeof(struct rtdm_gpio_chan); + rgc->driver.ops = (struct rtdm_fd_ops){ + .open = gpio_pin_open, + .close = gpio_pin_close, + .ioctl_nrt = gpio_pin_ioctl_nrt, + .read_rt = gpio_pin_read_rt, + .write_rt = gpio_pin_write_rt, + .select = gpio_pin_select, + }; + + rtdm_drv_set_sysclass(&rgc->driver, rgc->devclass); + + rgc->gc = gc; + rtdm_lock_init(&rgc->lock); + + ret = create_pin_devices(rgc); + if (ret) + class_destroy(rgc->devclass); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_add); + +struct rtdm_gpio_chip * +rtdm_gpiochip_alloc(struct gpio_chip *gc, int gpio_subclass) +{ + struct rtdm_gpio_chip *rgc; + size_t asize; + int ret; + + if (gc->ngpio == 0) + return ERR_PTR(-EINVAL); + + asize = sizeof(*rgc) + gc->ngpio * sizeof(struct rtdm_gpio_pin); + rgc = kzalloc(asize, GFP_KERNEL); + if (rgc == NULL) + return ERR_PTR(-ENOMEM); + + ret = rtdm_gpiochip_add(rgc, gc, gpio_subclass); + if (ret) { + kfree(rgc); + return ERR_PTR(ret); + } + + mutex_lock(&chip_lock); + list_add(&rgc->next, &rtdm_gpio_chips); + 
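+	/* Entries on this list are reclaimed by rtdm_gpiochip_remove_by_type(). */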
mutex_unlock(&chip_lock); + + return rgc; +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_alloc); + +void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc) +{ + mutex_lock(&chip_lock); + list_del(&rgc->next); + mutex_unlock(&chip_lock); + delete_pin_devices(rgc); + class_destroy(rgc->devclass); +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_remove); + +int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc, + unsigned int offset) +{ + struct rtdm_gpio_pin *pin; + + if (offset >= rgc->gc->ngpio) + return -EINVAL; + + pin = rgc->pins + offset; + if (pin->monotonic_timestamp) + pin->timestamp = rtdm_clock_read_monotonic(); + else + pin->timestamp = rtdm_clock_read(); + rtdm_event_signal(&pin->event); + + return 0; +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_post_event); + +static int gpiochip_match_name(struct gpio_chip *chip, void *data) +{ + const char *name = data; + + return !strcmp(chip->label, name); +} + +static struct gpio_chip *find_chip_by_name(const char *name) +{ + return gpiochip_find((void *)name, gpiochip_match_name); +} + +int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc, + const char *label, int gpio_subclass) +{ + struct gpio_chip *gc = find_chip_by_name(label); + + if (gc == NULL) + return -EPROBE_DEFER; + + return rtdm_gpiochip_add(rgc, gc, gpio_subclass); +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_add_by_name); + +int rtdm_gpiochip_find(struct device_node *from, const char *label, int type) +{ + struct rtdm_gpio_chip *rgc; + struct gpio_chip *chip; + int ret = -ENODEV; + + if (!rtdm_available()) + return -ENOSYS; + + chip = find_chip_by_name(label); + if (chip == NULL) + return ret; + + ret = 0; + rgc = rtdm_gpiochip_alloc(chip, type); + if (IS_ERR(rgc)) + ret = PTR_ERR(rgc); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_find); + +int rtdm_gpiochip_array_find(struct device_node *from, const char *label[], + int nentries, int type) +{ + int ret = -ENODEV, _ret, n; + + for (n = 0; n < nentries; n++) { + _ret = rtdm_gpiochip_find(from, label[n], type); + if (_ret) { + if (_ret != -ENODEV) + return _ret; + } else + ret = 0; + } + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_array_find); + +#ifdef CONFIG_OF + +#include <linux/of_platform.h> + +struct gpiochip_holder { + struct gpio_chip *chip; + struct list_head next; +}; + +struct gpiochip_match_data { + struct device *parent; + struct list_head list; +}; + +static int match_gpio_chip(struct gpio_chip *gc, void *data) +{ + struct gpiochip_match_data *d = data; + struct gpiochip_holder *h; + + if (cobalt_gpiochip_dev(gc) == d->parent) { + h = kmalloc(sizeof(*h), GFP_KERNEL); + if (h) { + h->chip = gc; + list_add(&h->next, &d->list); + } + } + + /* + * Iterate over all existing GPIO chips, we may have several + * hosted by the same pin controller mapping different ranges. 
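+ * Returning 0 here keeps gpiochip_find() iterating instead of
+ * stopping at the first match.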
+ */ + return 0; +} + +int rtdm_gpiochip_scan_of(struct device_node *from, const char *compat, + int type) +{ + struct gpiochip_match_data match; + struct gpiochip_holder *h, *n; + struct device_node *np = from; + struct platform_device *pdev; + struct rtdm_gpio_chip *rgc; + int ret = -ENODEV, _ret; + + if (!rtdm_available()) + return -ENOSYS; + + for (;;) { + np = of_find_compatible_node(np, NULL, compat); + if (np == NULL) + break; + pdev = of_find_device_by_node(np); + of_node_put(np); + if (pdev == NULL) + break; + match.parent = &pdev->dev; + INIT_LIST_HEAD(&match.list); + gpiochip_find(&match, match_gpio_chip); + if (!list_empty(&match.list)) { + ret = 0; + list_for_each_entry_safe(h, n, &match.list, next) { + list_del(&h->next); + _ret = 0; + rgc = rtdm_gpiochip_alloc(h->chip, type); + if (IS_ERR(rgc)) + _ret = PTR_ERR(rgc); + kfree(h); + if (_ret && !ret) + ret = _ret; + } + if (ret) + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_scan_of); + +int rtdm_gpiochip_scan_array_of(struct device_node *from, + const char *compat[], + int nentries, int type) +{ + int ret = -ENODEV, _ret, n; + + for (n = 0; n < nentries; n++) { + _ret = rtdm_gpiochip_scan_of(from, compat[n], type); + if (_ret) { + if (_ret != -ENODEV) + return _ret; + } else + ret = 0; + } + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_scan_array_of); + +#endif /* CONFIG_OF */ + +void rtdm_gpiochip_remove_by_type(int type) +{ + struct rtdm_gpio_chip *rgc, *n; + + mutex_lock(&chip_lock); + + list_for_each_entry_safe(rgc, n, &rtdm_gpio_chips, next) { + if (rgc->driver.profile_info.subclass_id == type) { + mutex_unlock(&chip_lock); + rtdm_gpiochip_remove(rgc); + kfree(rgc); + mutex_lock(&chip_lock); + } + } + + mutex_unlock(&chip_lock); +} +EXPORT_SYMBOL_GPL(rtdm_gpiochip_remove_by_type); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c new file mode 100644 index 0000000..ccc41da --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c @@ -0,0 +1,42 @@ +/** + * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/module.h> +#include <rtdm/gpio.h> + +#define RTDM_SUBCLASS_MXC 2 + +static const char *compat_array[] = { + "fsl,imx6q-gpio", + "fsl,imx7d-gpio", +}; + +static int __init mxc_gpio_init(void) +{ + return rtdm_gpiochip_scan_array_of(NULL, compat_array, + ARRAY_SIZE(compat_array), + RTDM_SUBCLASS_MXC); +} +module_init(mxc_gpio_init); + +static void __exit mxc_gpio_exit(void) +{ + rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_MXC); +} +module_exit(mxc_gpio_exit); + +MODULE_LICENSE("GPL"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c new file mode 100644 index 0000000..ea213a3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c @@ -0,0 +1,43 @@ +/** + * @note Copyright (C) 2020 Greg Gallagher <greg@embeddedgreg.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <rtdm/gpio.h> + +#define RTDM_SUBCLASS_OMAP 6 + +static const char *compat_array[] = { + "ti,omap4-gpio", + "ti,omap3-gpio", + "ti,omap2-gpio", +}; + +static int __init omap_gpio_init(void) +{ + return rtdm_gpiochip_scan_array_of(NULL, compat_array, + ARRAY_SIZE(compat_array), + RTDM_SUBCLASS_OMAP); +} +module_init(omap_gpio_init); + +static void __exit omap_gpio_exit(void) +{ + rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_OMAP); +} +module_exit(omap_gpio_exit); + +MODULE_LICENSE("GPL"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c new file mode 100644 index 0000000..55059ef --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c @@ -0,0 +1,43 @@ +/** + * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/module.h> +#include <rtdm/gpio.h> + +#define RTDM_SUBCLASS_H3 3 + +static int __init h3_gpio_init(void) +{ + int ret; + + ret = rtdm_gpiochip_scan_of(NULL, "allwinner,sun8i-h3-pinctrl", + RTDM_SUBCLASS_H3); + if (ret) + return ret; + + return rtdm_gpiochip_scan_of(NULL, "allwinner,sun8i-h3-r-pinctrl", + RTDM_SUBCLASS_H3); +} +module_init(h3_gpio_init); + +static void __exit h3_gpio_exit(void) +{ + rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_H3); +} +module_exit(h3_gpio_exit); + +MODULE_LICENSE("GPL"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c new file mode 100644 index 0000000..e9ae3b1 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c @@ -0,0 +1,40 @@ +/** + * @note Copyright (C) 2017 Greg Gallagher <greg@embeddedgreg.com> + * + * This driver controls the gpio that can be located on the PL + * of the Zynq SOC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <rtdm/gpio.h> + +#define RTDM_SUBCLASS_XILINX 5 + +static int __init xilinx_gpio_init(void) +{ + return rtdm_gpiochip_scan_of(NULL, "xlnx,xps-gpio-1.00.a", + RTDM_SUBCLASS_XILINX); +} +module_init(xilinx_gpio_init); + +static void __exit xilinx_gpio_exit(void) +{ + rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_XILINX); +} +module_exit(xilinx_gpio_exit); + +MODULE_LICENSE("GPL"); + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c new file mode 100644 index 0000000..9997a74 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c @@ -0,0 +1,40 @@ +/** + * @note Copyright (C) 2017 Greg Gallagher <greg@embeddedgreg.com> + * + * This driver is inspired by: + * gpio-bcm2835.c, please see original file for copyright information + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include <linux/module.h> +#include <rtdm/gpio.h> + +#define RTDM_SUBCLASS_ZYNQ7000 4 + +static int __init zynq7000_gpio_init(void) +{ + return rtdm_gpiochip_scan_of(NULL, "xlnx,zynq-gpio-1.0", + RTDM_SUBCLASS_ZYNQ7000); +} +module_init(zynq7000_gpio_init); + +static void __exit zynq7000_gpio_exit(void) +{ + rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_ZYNQ7000); +} +module_exit(zynq7000_gpio_exit); + +MODULE_LICENSE("GPL"); + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig new file mode 100644 index 0000000..532742a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig @@ -0,0 +1,9 @@ +menu "GPIOPWM support" + +config XENO_DRIVERS_GPIOPWM + tristate "GPIOPWM driver" + help + + An RTDM-based GPIO PWM generator driver + +endmenu diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile new file mode 100644 index 0000000..8c9d5be --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile @@ -0,0 +1,5 @@ +ccflags-y += -I$(srctree)/kernel -I$(srctree)/include/xenomai/ + +obj-$(CONFIG_XENO_DRIVERS_GPIOPWM) += xeno_gpiopwm.o + +xeno_gpiopwm-y := gpiopwm.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c new file mode 100644 index 0000000..ed42e08 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c @@ -0,0 +1,298 @@ +/* + * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>. + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/slab.h> +#include <linux/gpio.h> +#include <linux/module.h> +#include <rtdm/driver.h> +#include <rtdm/gpiopwm.h> + +MODULE_AUTHOR("Jorge Ramirez <jro@xenomai.org>"); +MODULE_DESCRIPTION("PWM driver"); +MODULE_VERSION("0.0.1"); +MODULE_LICENSE("GPL"); + +#define MAX_DUTY_CYCLE 100 +#define MAX_SAMPLES (MAX_DUTY_CYCLE + 1) + +struct gpiopwm_base_signal { + unsigned long period; +}; + +struct gpiopwm_duty_signal { + unsigned int range_min; + unsigned int range_max; + unsigned long period; + unsigned int cycle; +}; + +struct gpiopwm_control { + struct gpiopwm_duty_signal duty; + unsigned int configured; + unsigned int update; +}; + +struct gpiopwm_priv { + struct gpiopwm_base_signal base; + struct gpiopwm_duty_signal duty; + struct gpiopwm_control ctrl; + + rtdm_timer_t base_timer; + rtdm_timer_t duty_timer; + + int gpio; +}; + +static inline int div100(long long dividend) +{ + const long long divisor = 0x28f5c29; + return ((divisor * dividend) >> 32) & 0xffffffff; +} + +static inline unsigned long duty_period(struct gpiopwm_duty_signal *p) +{ + unsigned long period; + + period = p->range_min + div100((p->range_max - p->range_min) * p->cycle); + return period * 1000; +} + +static void gpiopwm_handle_base_timer(rtdm_timer_t *timer) +{ + struct gpiopwm_priv *ctx = container_of(timer, struct gpiopwm_priv, + base_timer); + gpio_set_value(ctx->gpio, 1); + + /* one shot timer to avoid carrying over errors */ + rtdm_timer_start_in_handler(&ctx->duty_timer, ctx->duty.period, 0, + RTDM_TIMERMODE_RELATIVE); + + if (ctx->ctrl.update) { + ctx->duty.period = ctx->ctrl.duty.period; + ctx->duty.cycle = ctx->ctrl.duty.cycle; + ctx->ctrl.update = 0; + } +} + +static void gpiopwm_handle_duty_timer(rtdm_timer_t *timer) +{ + struct gpiopwm_priv *ctx = container_of(timer, struct gpiopwm_priv, + duty_timer); + gpio_set_value(ctx->gpio, 0); +} + +static inline int gpiopwm_config(struct rtdm_fd *fd, struct gpiopwm *conf) +{ + struct rtdm_dev_context *dev_ctx = rtdm_fd_to_context(fd); + struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd); + int ret; + + if (ctx->ctrl.configured) + return -EINVAL; + + if (conf->duty_cycle > MAX_DUTY_CYCLE) + return -EINVAL; + + ret = gpio_request(conf->gpio, dev_ctx->device->name); + if (ret < 0) { + ctx->gpio = -1; + return ret; + } + + ret = gpio_direction_output(conf->gpio, 0); + if (ret < 0) + return ret; + + gpio_set_value(conf->gpio, 0); + + ctx->duty.range_min = ctx->ctrl.duty.range_min = conf->range_min; + ctx->duty.range_max = ctx->ctrl.duty.range_max = conf->range_max; + ctx->duty.cycle = conf->duty_cycle; + ctx->base.period = conf->period; + ctx->gpio = conf->gpio; + ctx->duty.period = duty_period(&ctx->duty); + + rtdm_timer_init(&ctx->base_timer, gpiopwm_handle_base_timer, "base_timer"); + rtdm_timer_init(&ctx->duty_timer, gpiopwm_handle_duty_timer, "duty_timer"); + + ctx->ctrl.configured = 1; + + return 0; +} + +static inline int gpiopwm_change_duty_cycle(struct gpiopwm_priv *ctx, unsigned int cycle) +{ + if (cycle > MAX_DUTY_CYCLE) + return -EINVAL; + + /* prepare the new data on the calling thread */ + ctx->ctrl.duty.cycle = cycle; + ctx->ctrl.duty.period = duty_period(&ctx->ctrl.duty); + + /* update data on the next base signal timeout */ + ctx->ctrl.update = 1; + + return 0; +} + +static inline int gpiopwm_stop(struct rtdm_fd *fd) +{ + struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd); + + if (!ctx->ctrl.configured) + return -EINVAL; + + gpio_set_value(ctx->gpio, 0); + + rtdm_timer_stop(&ctx->base_timer); + rtdm_timer_stop(&ctx->duty_timer); + + 
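+	/*
+	 * With the line driven low and both timers idle, no further
+	 * edges are generated until GPIOPWM_RTIOC_START is issued
+	 * again.
+	 */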
return 0; +} + +static inline int gpiopwm_start(struct rtdm_fd *fd) +{ + struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd); + + if (!ctx->ctrl.configured) + return -EINVAL; + + /* update duty cycle on next timeout */ + ctx->ctrl.update = 1; + + /* start the base signal tick */ + rtdm_timer_start(&ctx->base_timer, ctx->base.period, ctx->base.period, + RTDM_TIMERMODE_RELATIVE); + + return 0; +} + +static int gpiopwm_ioctl_rt(struct rtdm_fd *fd, unsigned int request, void __user *arg) +{ + struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd); + + switch (request) { + case GPIOPWM_RTIOC_SET_CONFIG: + return -ENOSYS; + case GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE: + return gpiopwm_change_duty_cycle(ctx, (unsigned long) arg); + case GPIOPWM_RTIOC_START: + return gpiopwm_start(fd); + case GPIOPWM_RTIOC_STOP: + return gpiopwm_stop(fd); + default: + return -EINVAL; + } + + return 0; +} + +static int gpiopwm_ioctl_nrt(struct rtdm_fd *fd, unsigned int request, void __user *arg) +{ + struct gpiopwm conf; + + switch (request) { + case GPIOPWM_RTIOC_SET_CONFIG: + if (!rtdm_rw_user_ok(fd, arg, sizeof(conf))) + return -EFAULT; + + rtdm_copy_from_user(fd, &conf, arg, sizeof(conf)); + return gpiopwm_config(fd, &conf); + case GPIOPWM_RTIOC_GET_CONFIG: + default: + return -EINVAL; + } + + return 0; +} + +static int gpiopwm_open(struct rtdm_fd *fd, int oflags) +{ + struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd); + + ctx->ctrl.configured = 0; + ctx->gpio = -1; + + return 0; +} + +static void gpiopwm_close(struct rtdm_fd *fd) +{ + struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd); + + if (ctx->gpio >= 0) + gpio_free(ctx->gpio); + + if (!ctx->ctrl.configured) + return; + + rtdm_timer_destroy(&ctx->base_timer); + rtdm_timer_destroy(&ctx->duty_timer); +} + +static struct rtdm_driver gpiopwm_driver = { + .profile_info = RTDM_PROFILE_INFO(gpiopwm, + RTDM_CLASS_PWM, + RTDM_SUBCLASS_GENERIC, + RTPWM_PROFILE_VER), + .device_flags = RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE, + .device_count = 8, + .context_size = sizeof(struct gpiopwm_priv), + .ops = { + .open = gpiopwm_open, + .close = gpiopwm_close, + .ioctl_rt = gpiopwm_ioctl_rt, + .ioctl_nrt = gpiopwm_ioctl_nrt, + }, +}; + +static struct rtdm_device device[8] = { + [0 ... 7] = { + .driver = &gpiopwm_driver, + .label = "gpiopwm%d", + } +}; + +static int __init __gpiopwm_init(void) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(device); i++) { + ret = rtdm_dev_register(device + i); + if (ret) + goto fail; + } + + return 0; +fail: + while (i-- > 0) + rtdm_dev_unregister(device + i); + + return ret; +} + +static void __exit __gpiopwm_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(device); i++) + rtdm_dev_unregister(device + i); +} + +module_init(__gpiopwm_init); +module_exit(__gpiopwm_exit); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig new file mode 100644 index 0000000..104413a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig @@ -0,0 +1,81 @@ +menu "Real-time IPC drivers" + +config XENO_DRIVERS_RTIPC + tristate "RTIPC protocol family" + help + + This driver provides the real-time IPC protocol family + (PF_RTIPC) over RTDM. + +config XENO_DRIVERS_RTIPC_XDDP + depends on XENO_DRIVERS_RTIPC + select XENO_OPT_PIPE + default y + bool "XDDP cross-domain datagram protocol" + help + + Xenomai's XDDP protocol enables threads to exchange datagrams + across the Xenomai/Linux domain boundary, using "message + pipes". 
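+
+	  As a minimal sketch, a real-time thread could reach the
+	  regular Linux domain along these lines (port 7 is an
+	  arbitrary free port; the non real-time peer would then
+	  read from /dev/rtp7, and writing without an explicit
+	  destination assumes the default-peer-at-bind convention
+	  also used by IDDP and BUFP below):
+
+	    int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+	    struct sockaddr_ipc sa = {
+	            .sipc_family = AF_RTIPC,
+	            .sipc_port = 7,
+	    };
+	    bind(s, (struct sockaddr *)&sa, sizeof(sa));
+	    write(s, "hello", 5);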
+ + Message pipes are bi-directional FIFO communication channels + allowing data exchange between real-time Xenomai threads and + regular (i.e. non real-time) user-space processes. Message + pipes are datagram-based and thus natively preserve message + boundaries, but they can also be used in byte stream mode when + sending from the real-time to the non real-time domain. + + The maximum number of communication ports available in the + system can be configured using the XENO_OPT_PIPE_NRDEV option + from the Nucleus menu. + +config XENO_DRIVERS_RTIPC_IDDP + depends on XENO_DRIVERS_RTIPC + select XENO_OPT_MAP + default y + bool "IDDP intra-domain datagram protocol" + help + + Xenomai's IDDP protocol enables real-time threads to exchange + datagrams within the Xenomai domain. + +config XENO_OPT_IDDP_NRPORT + depends on XENO_DRIVERS_RTIPC_IDDP + int "Number of IDDP communication ports" + default 32 + help + + This parameter defines the number of IDDP ports available in + the system for creating receiver endpoints. Port numbers range + from 0 to CONFIG_XENO_OPT_IDDP_NRPORT - 1. + +config XENO_DRIVERS_RTIPC_BUFP + depends on XENO_DRIVERS_RTIPC + select XENO_OPT_MAP + default y + bool "Buffer protocol" + help + + The buffer protocol implements a byte-oriented, one-way + Producer-Consumer data path, which makes it a bit faster than + datagram-oriented protocols. All messages written are buffered + into a single memory area in strict FIFO order, until read by + the consumer. + + This protocol prevents short writes, and only allows short + reads when a potential deadlock situation arises (i.e. readers + and writers waiting for each other indefinitely), which + usually means that the buffer size does not match the way + peer threads use the protocol. + +config XENO_OPT_BUFP_NRPORT + depends on XENO_DRIVERS_RTIPC_BUFP + int "Number of BUFP communication ports" + default 32 + help + + This parameter defines the number of BUFP ports available in + the system for creating receiver endpoints. Port numbers range + from 0 to CONFIG_XENO_OPT_BUFP_NRPORT - 1. + +endmenu diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile new file mode 100644 index 0000000..75fb27c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile @@ -0,0 +1,8 @@ + +obj-$(CONFIG_XENO_DRIVERS_RTIPC) += xeno_rtipc.o + +xeno_rtipc-y := rtipc.o + +xeno_rtipc-$(CONFIG_XENO_DRIVERS_RTIPC_XDDP) += xddp.o +xeno_rtipc-$(CONFIG_XENO_DRIVERS_RTIPC_IDDP) += iddp.o +xeno_rtipc-$(CONFIG_XENO_DRIVERS_RTIPC_BUFP) += bufp.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c new file mode 100644 index 0000000..fd533db --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c @@ -0,0 +1,1104 @@ +/** + * This file is part of the Xenomai project. + * + * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/poll.h> +#include <linux/time.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/map.h> +#include <cobalt/kernel/bufd.h> +#include <rtdm/ipc.h> +#include "internal.h" + +#define BUFP_SOCKET_MAGIC 0xa61a61a6 + +struct bufp_socket { + int magic; + struct sockaddr_ipc name; + struct sockaddr_ipc peer; + + void *bufmem; + size_t bufsz; + u_long status; + xnhandle_t handle; + char label[XNOBJECT_NAME_LEN]; + + off_t rdoff; + off_t rdrsvd; + int rdsem; + off_t wroff; + off_t wrrsvd; + int wrsem; + size_t fillsz; + rtdm_event_t i_event; + rtdm_event_t o_event; + + nanosecs_rel_t rx_timeout; + nanosecs_rel_t tx_timeout; + + struct rtipc_private *priv; +}; + +struct bufp_wait_context { + struct rtipc_wait_context wc; + size_t len; + struct bufp_socket *sk; +}; + +static struct sockaddr_ipc nullsa = { + .sipc_family = AF_RTIPC, + .sipc_port = -1 +}; + +static struct xnmap *portmap; + +#define _BUFP_BINDING 0 +#define _BUFP_BOUND 1 +#define _BUFP_CONNECTED 2 + +#ifdef CONFIG_XENO_OPT_VFILE + +static char *__bufp_link_target(void *obj) +{ + struct bufp_socket *sk = obj; + + return kasformat("%d", sk->name.sipc_port); +} + +extern struct xnptree rtipc_ptree; + +static struct xnpnode_link __bufp_pnode = { + .node = { + .dirname = "bufp", + .root = &rtipc_ptree, + .ops = &xnregistry_vlink_ops, + }, + .target = __bufp_link_target, +}; + +#else /* !CONFIG_XENO_OPT_VFILE */ + +static struct xnpnode_link __bufp_pnode = { + .node = { + .dirname = "bufp", + }, +}; + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +static int bufp_socket(struct rtdm_fd *fd) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct bufp_socket *sk = priv->state; + + sk->magic = BUFP_SOCKET_MAGIC; + sk->name = nullsa; /* Unbound */ + sk->peer = nullsa; + sk->bufmem = NULL; + sk->bufsz = 0; + sk->rdoff = 0; + sk->wroff = 0; + sk->fillsz = 0; + sk->rdrsvd = 0; + sk->wrrsvd = 0; + sk->rdsem = 0; + sk->wrsem = 0; + sk->status = 0; + sk->handle = 0; + sk->rx_timeout = RTDM_TIMEOUT_INFINITE; + sk->tx_timeout = RTDM_TIMEOUT_INFINITE; + *sk->label = 0; + rtdm_event_init(&sk->i_event, 0); + rtdm_event_init(&sk->o_event, 0); + sk->priv = priv; + + return 0; +} + +static void bufp_close(struct rtdm_fd *fd) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct bufp_socket *sk = priv->state; + rtdm_lockctx_t s; + + rtdm_event_destroy(&sk->i_event); + rtdm_event_destroy(&sk->o_event); + + if (test_bit(_BUFP_BOUND, &sk->status)) { + if (sk->name.sipc_port > -1) { + cobalt_atomic_enter(s); + xnmap_remove(portmap, sk->name.sipc_port); + cobalt_atomic_leave(s); + } + + if (sk->handle) + xnregistry_remove(sk->handle); + + if (sk->bufmem) + xnheap_vfree(sk->bufmem); + } + + kfree(sk); +} + +static ssize_t __bufp_readbuf(struct bufp_socket *sk, + struct xnbufd *bufd, + int flags) +{ + struct bufp_wait_context wait, *bufwc; + struct rtipc_wait_context *wc; + struct xnthread *waiter; + size_t rbytes, n, avail; + ssize_t len, ret, xret; + rtdm_toseq_t toseq; + rtdm_lockctx_t s; + off_t rdoff; + int resched; + + len = bufd->b_len; + + rtdm_toseq_init(&toseq, sk->rx_timeout); + + cobalt_atomic_enter(s); +redo: + for (;;) { + /* + * We should be able to read a complete message of the + * requested 
length, or block. + */ + avail = sk->fillsz - sk->rdrsvd; + if (avail < len) + goto wait; + + /* Reserve a read slot into the circular buffer. */ + rdoff = sk->rdoff; + sk->rdoff = (rdoff + len) % sk->bufsz; + sk->rdrsvd += len; + sk->rdsem++; + rbytes = ret = len; + + do { + if (rdoff + rbytes > sk->bufsz) + n = sk->bufsz - rdoff; + else + n = rbytes; + /* + * Drop the lock before copying data to + * user. The read slot is consumed in any + * case: the non-copied portion of the message + * is lost on bad write. + */ + cobalt_atomic_leave(s); + xret = xnbufd_copy_from_kmem(bufd, sk->bufmem + rdoff, n); + cobalt_atomic_enter(s); + if (xret < 0) { + ret = -EFAULT; + break; + } + + rbytes -= n; + rdoff = (rdoff + n) % sk->bufsz; + } while (rbytes > 0); + + if (--sk->rdsem > 0) + goto out; + + resched = 0; + if (sk->fillsz == sk->bufsz) /* -> becomes writable */ + resched |= xnselect_signal(&sk->priv->send_block, POLLOUT); + + sk->fillsz -= sk->rdrsvd; + sk->rdrsvd = 0; + + if (sk->fillsz == 0) /* -> becomes non-readable */ + resched |= xnselect_signal(&sk->priv->recv_block, 0); + + /* + * Wake up all threads pending on the output wait + * queue, if we freed enough room for the leading one + * to post its message. + */ + waiter = rtipc_peek_wait_head(&sk->o_event); + if (waiter == NULL) + goto out; + + wc = rtipc_get_wait_context(waiter); + XENO_BUG_ON(COBALT, wc == NULL); + bufwc = container_of(wc, struct bufp_wait_context, wc); + if (bufwc->len + sk->fillsz <= sk->bufsz) + /* This call rescheds internally. */ + rtdm_event_pulse(&sk->o_event); + else if (resched) + xnsched_run(); + /* + * We cannot fail anymore once some data has been + * copied via the buffer descriptor, so no need to + * check for any reason to invalidate the latter. + */ + goto out; + + wait: + if (flags & MSG_DONTWAIT) { + ret = -EWOULDBLOCK; + break; + } + + /* + * Check whether writers are already waiting for + * sending data, while we are about to wait for + * receiving some. In such a case, we have a + * pathological use of the buffer. We must allow for a + * short read to prevent a deadlock. + */ + if (sk->fillsz > 0 && rtipc_peek_wait_head(&sk->o_event)) { + len = sk->fillsz; + goto redo; + } + + wait.len = len; + wait.sk = sk; + rtipc_prepare_wait(&wait.wc); + /* + * Keep the nucleus lock across the wait call, so that + * we don't miss a pulse. + */ + ret = rtdm_event_timedwait(&sk->i_event, + sk->rx_timeout, &toseq); + if (unlikely(ret)) + break; + } +out: + cobalt_atomic_leave(s); + + return ret; +} + +static ssize_t __bufp_recvmsg(struct rtdm_fd *fd, + struct iovec *iov, int iovlen, int flags, + struct sockaddr_ipc *saddr) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct bufp_socket *sk = priv->state; + ssize_t len, wrlen, vlen, ret; + struct xnbufd bufd; + int nvec; + + if (!test_bit(_BUFP_BOUND, &sk->status)) + return -EAGAIN; + + len = rtdm_get_iov_flatlen(iov, iovlen); + if (len == 0) + return 0; + /* + * We may only return complete messages to readers, so there + * is no point in waiting for messages which are larger than + * what the buffer can hold. + */ + if (len > sk->bufsz) + return -EINVAL; + + /* + * Write "len" bytes from the buffer to the vector cells. Each + * cell is handled as a separate message. + */ + for (nvec = 0, wrlen = len; nvec < iovlen && wrlen > 0; nvec++) { + if (iov[nvec].iov_len == 0) + continue; + vlen = wrlen >= iov[nvec].iov_len ? 
iov[nvec].iov_len : wrlen; + if (rtdm_fd_is_user(fd)) { + xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen); + ret = __bufp_readbuf(sk, &bufd, flags); + xnbufd_unmap_uread(&bufd); + } else { + xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen); + ret = __bufp_readbuf(sk, &bufd, flags); + xnbufd_unmap_kread(&bufd); + } + if (ret < 0) + return ret; + iov[nvec].iov_base += vlen; + iov[nvec].iov_len -= vlen; + wrlen -= vlen; + if (ret < vlen) + /* Short reads may happen in rare cases. */ + break; + } + + /* + * There is no way to determine who the sender was since we + * process data in byte-oriented mode, so we just copy our own + * sockaddr to send back a valid address. + */ + if (saddr) + *saddr = sk->name; + + return len - wrlen; +} + +static ssize_t bufp_recvmsg(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags) +{ + struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov; + struct sockaddr_ipc saddr; + ssize_t ret; + + if (flags & ~MSG_DONTWAIT) + return -EINVAL; + + if (msg->msg_name) { + if (msg->msg_namelen < sizeof(struct sockaddr_ipc)) + return -EINVAL; + } else if (msg->msg_namelen != 0) + return -EINVAL; + + if (msg->msg_iovlen >= UIO_MAXIOV) + return -EINVAL; + + /* Copy I/O vector in */ + ret = rtdm_get_iovec(fd, &iov, msg, iov_fast); + if (ret) + return ret; + + ret = __bufp_recvmsg(fd, iov, msg->msg_iovlen, flags, &saddr); + if (ret <= 0) { + rtdm_drop_iovec(iov, iov_fast); + return ret; + } + + /* Copy the updated I/O vector back */ + if (rtdm_put_iovec(fd, iov, msg, iov_fast)) + return -EFAULT; + + /* Copy the source address if required. */ + if (msg->msg_name) { + if (rtipc_put_arg(fd, msg->msg_name, + &saddr, sizeof(saddr))) + return -EFAULT; + msg->msg_namelen = sizeof(struct sockaddr_ipc); + } + + return ret; +} + +static ssize_t bufp_read(struct rtdm_fd *fd, void *buf, size_t len) +{ + struct iovec iov = { .iov_base = buf, .iov_len = len }; + + return __bufp_recvmsg(fd, &iov, 1, 0, NULL); +} + +static ssize_t __bufp_writebuf(struct bufp_socket *rsk, + struct bufp_socket *sk, + struct xnbufd *bufd, + int flags) +{ + struct bufp_wait_context wait, *bufwc; + struct rtipc_wait_context *wc; + struct xnthread *waiter; + size_t wbytes, n, avail; + ssize_t len, ret, xret; + rtdm_toseq_t toseq; + rtdm_lockctx_t s; + off_t wroff; + int resched; + + len = bufd->b_len; + + rtdm_toseq_init(&toseq, sk->tx_timeout); + + cobalt_atomic_enter(s); + + for (;;) { + /* + * No short or scattered writes: we should write the + * entire message atomically or block. + */ + avail = rsk->fillsz + rsk->wrrsvd; + if (avail + len > rsk->bufsz) + goto wait; + + /* Reserve a write slot into the circular buffer. */ + wroff = rsk->wroff; + rsk->wroff = (wroff + len) % rsk->bufsz; + rsk->wrrsvd += len; + rsk->wrsem++; + wbytes = ret = len; + + do { + if (wroff + wbytes > rsk->bufsz) + n = rsk->bufsz - wroff; + else + n = wbytes; + /* + * We have to drop the lock while reading in + * data, but we can't rollback on bad read + * from user because some other thread might + * have populated the memory ahead of our + * write slot already: bluntly clear the + * unavailable bytes on copy error. 
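+			 * The reserved slot is still published to
+			 * readers in that case, which is why the
+			 * zero-fill below matters: it keeps them from
+			 * reading stale buffer contents.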
+ */ + cobalt_atomic_leave(s); + xret = xnbufd_copy_to_kmem(rsk->bufmem + wroff, bufd, n); + cobalt_atomic_enter(s); + if (xret < 0) { + memset(rsk->bufmem + wroff, 0, n); + ret = -EFAULT; + break; + } + + wbytes -= n; + wroff = (wroff + n) % rsk->bufsz; + } while (wbytes > 0); + + if (--rsk->wrsem > 0) + goto out; + + resched = 0; + if (rsk->fillsz == 0) /* -> becomes readable */ + resched |= xnselect_signal(&rsk->priv->recv_block, POLLIN); + + rsk->fillsz += rsk->wrrsvd; + rsk->wrrsvd = 0; + + if (rsk->fillsz == rsk->bufsz) /* becomes non-writable */ + resched |= xnselect_signal(&rsk->priv->send_block, 0); + /* + * Wake up all threads pending on the input wait + * queue, if we accumulated enough data to feed the + * leading one. + */ + waiter = rtipc_peek_wait_head(&rsk->i_event); + if (waiter == NULL) + goto out; + + wc = rtipc_get_wait_context(waiter); + XENO_BUG_ON(COBALT, wc == NULL); + bufwc = container_of(wc, struct bufp_wait_context, wc); + if (bufwc->len <= rsk->fillsz) + rtdm_event_pulse(&rsk->i_event); + else if (resched) + xnsched_run(); + /* + * We cannot fail anymore once some data has been + * copied via the buffer descriptor, so no need to + * check for any reason to invalidate the latter. + */ + goto out; + wait: + if (flags & MSG_DONTWAIT) { + ret = -EWOULDBLOCK; + break; + } + + wait.len = len; + wait.sk = rsk; + rtipc_prepare_wait(&wait.wc); + /* + * Keep the nucleus lock across the wait call, so that + * we don't miss a pulse. + */ + ret = rtdm_event_timedwait(&rsk->o_event, + sk->tx_timeout, &toseq); + if (unlikely(ret)) + break; + } +out: + cobalt_atomic_leave(s); + + return ret; +} + +static ssize_t __bufp_sendmsg(struct rtdm_fd *fd, + struct iovec *iov, int iovlen, int flags, + const struct sockaddr_ipc *daddr) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct bufp_socket *sk = priv->state, *rsk; + ssize_t len, rdlen, vlen, ret = 0; + struct rtdm_fd *rfd; + struct xnbufd bufd; + rtdm_lockctx_t s; + int nvec; + + len = rtdm_get_iov_flatlen(iov, iovlen); + if (len == 0) + return 0; + + cobalt_atomic_enter(s); + rfd = xnmap_fetch_nocheck(portmap, daddr->sipc_port); + if (rfd && rtdm_fd_lock(rfd) < 0) + rfd = NULL; + cobalt_atomic_leave(s); + if (rfd == NULL) + return -ECONNRESET; + + rsk = rtipc_fd_to_state(rfd); + if (!test_bit(_BUFP_BOUND, &rsk->status)) { + rtdm_fd_unlock(rfd); + return -ECONNREFUSED; + } + + /* + * We may only send complete messages, so there is no point in + * accepting messages which are larger than what the buffer + * can hold. + */ + if (len > rsk->bufsz) { + ret = -EINVAL; + goto fail; + } + + /* + * Read "len" bytes to the buffer from the vector cells. Each + * cell is handled as a separate message. + */ + for (nvec = 0, rdlen = len; nvec < iovlen && rdlen > 0; nvec++) { + if (iov[nvec].iov_len == 0) + continue; + vlen = rdlen >= iov[nvec].iov_len ? 
iov[nvec].iov_len : rdlen; + if (rtdm_fd_is_user(fd)) { + xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen); + ret = __bufp_writebuf(rsk, sk, &bufd, flags); + xnbufd_unmap_uread(&bufd); + } else { + xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen); + ret = __bufp_writebuf(rsk, sk, &bufd, flags); + xnbufd_unmap_kread(&bufd); + } + if (ret < 0) + goto fail; + iov[nvec].iov_base += vlen; + iov[nvec].iov_len -= vlen; + rdlen -= vlen; + } + + rtdm_fd_unlock(rfd); + + return len - rdlen; +fail: + rtdm_fd_unlock(rfd); + + return ret; +} + +static ssize_t bufp_sendmsg(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov; + struct bufp_socket *sk = priv->state; + struct sockaddr_ipc daddr; + ssize_t ret; + + if (flags & ~MSG_DONTWAIT) + return -EINVAL; + + if (msg->msg_name) { + if (msg->msg_namelen != sizeof(struct sockaddr_ipc)) + return -EINVAL; + + /* Fetch the destination address to send to. */ + if (rtipc_get_arg(fd, &daddr, msg->msg_name, sizeof(daddr))) + return -EFAULT; + + if (daddr.sipc_port < 0 || + daddr.sipc_port >= CONFIG_XENO_OPT_BUFP_NRPORT) + return -EINVAL; + } else { + if (msg->msg_namelen != 0) + return -EINVAL; + daddr = sk->peer; + if (daddr.sipc_port < 0) + return -EDESTADDRREQ; + } + + if (msg->msg_iovlen >= UIO_MAXIOV) + return -EINVAL; + + /* Copy I/O vector in */ + ret = rtdm_get_iovec(fd, &iov, msg, iov_fast); + if (ret) + return ret; + + ret = __bufp_sendmsg(fd, iov, msg->msg_iovlen, flags, &daddr); + if (ret <= 0) { + rtdm_drop_iovec(iov, iov_fast); + return ret; + } + + /* Copy updated I/O vector back */ + return rtdm_put_iovec(fd, iov, msg, iov_fast) ?: ret; +} + +static ssize_t bufp_write(struct rtdm_fd *fd, + const void *buf, size_t len) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iovec iov = { .iov_base = (void *)buf, .iov_len = len }; + struct bufp_socket *sk = priv->state; + + if (sk->peer.sipc_port < 0) + return -EDESTADDRREQ; + + return __bufp_sendmsg(fd, &iov, 1, 0, &sk->peer); +} + +static int __bufp_bind_socket(struct rtipc_private *priv, + struct sockaddr_ipc *sa) +{ + struct bufp_socket *sk = priv->state; + int ret = 0, port; + struct rtdm_fd *fd; + rtdm_lockctx_t s; + + if (sa->sipc_family != AF_RTIPC) + return -EINVAL; + + if (sa->sipc_port < -1 || + sa->sipc_port >= CONFIG_XENO_OPT_BUFP_NRPORT) + return -EINVAL; + + cobalt_atomic_enter(s); + if (test_bit(_BUFP_BOUND, &sk->status) || + __test_and_set_bit(_BUFP_BINDING, &sk->status)) + ret = -EADDRINUSE; + cobalt_atomic_leave(s); + + if (ret) + return ret; + + /* Will auto-select a free port number if unspec (-1). */ + port = sa->sipc_port; + fd = rtdm_private_to_fd(priv); + cobalt_atomic_enter(s); + port = xnmap_enter(portmap, port, fd); + cobalt_atomic_leave(s); + if (port < 0) + return port == -EEXIST ? -EADDRINUSE : -ENOMEM; + + sa->sipc_port = port; + + /* + * The caller must have told us how much memory is needed for + * buffer space via setsockopt(), before we got there. + */ + if (sk->bufsz == 0) + return -ENOBUFS; + + sk->bufmem = xnheap_vmalloc(sk->bufsz); + if (sk->bufmem == NULL) { + ret = -ENOMEM; + goto fail; + } + + sk->name = *sa; + /* Set default destination if unset at binding time. 
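+	 * As a result, a socket bound with no prior connect() can be
+	 * used as a plain self-contained FIFO through write()/read().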
*/ + if (sk->peer.sipc_port < 0) + sk->peer = *sa; + + if (*sk->label) { + ret = xnregistry_enter(sk->label, sk, + &sk->handle, &__bufp_pnode.node); + if (ret) { + xnheap_vfree(sk->bufmem); + goto fail; + } + } + + cobalt_atomic_enter(s); + __clear_bit(_BUFP_BINDING, &sk->status); + __set_bit(_BUFP_BOUND, &sk->status); + if (xnselect_signal(&priv->send_block, POLLOUT)) + xnsched_run(); + cobalt_atomic_leave(s); + + return 0; +fail: + xnmap_remove(portmap, port); + clear_bit(_BUFP_BINDING, &sk->status); + + return ret; +} + +static int __bufp_connect_socket(struct bufp_socket *sk, + struct sockaddr_ipc *sa) +{ + struct sockaddr_ipc _sa; + struct bufp_socket *rsk; + int ret, resched = 0; + rtdm_lockctx_t s; + xnhandle_t h; + + if (sa == NULL) { + _sa = nullsa; + sa = &_sa; + goto set_assoc; + } + + if (sa->sipc_family != AF_RTIPC) + return -EINVAL; + + if (sa->sipc_port < -1 || + sa->sipc_port >= CONFIG_XENO_OPT_BUFP_NRPORT) + return -EINVAL; + /* + * - If a valid sipc_port is passed in the [0..NRPORT-1] range, + * it is used verbatim and the connection succeeds + * immediately, regardless of whether the destination is + * bound at the time of the call. + * + * - If sipc_port is -1 and a label was set via BUFP_LABEL, + * connect() blocks for the requested amount of time (see + * SO_RCVTIMEO) until a socket is bound to the same label. + * + * - If sipc_port is -1 and no label is given, the default + * destination address is cleared, meaning that any subsequent + * write() to the socket will return -EDESTADDRREQ, until a + * valid destination address is set via connect() or bind(). + * + * - In all other cases, -EINVAL is returned. + */ + if (sa->sipc_port < 0 && *sk->label) { + ret = xnregistry_bind(sk->label, + sk->rx_timeout, XN_RELATIVE, &h); + if (ret) + return ret; + + cobalt_atomic_enter(s); + rsk = xnregistry_lookup(h, NULL); + if (rsk == NULL || rsk->magic != BUFP_SOCKET_MAGIC) + ret = -EINVAL; + else { + /* Fetch labeled port number. */ + sa->sipc_port = rsk->name.sipc_port; + resched = xnselect_signal(&sk->priv->send_block, POLLOUT); + } + cobalt_atomic_leave(s); + if (ret) + return ret; + } else if (sa->sipc_port < 0) + sa = &nullsa; +set_assoc: + cobalt_atomic_enter(s); + if (!test_bit(_BUFP_BOUND, &sk->status)) + /* Set default name. */ + sk->name = *sa; + /* Set default destination. 
*/ + sk->peer = *sa; + if (sa->sipc_port < 0) + __clear_bit(_BUFP_CONNECTED, &sk->status); + else + __set_bit(_BUFP_CONNECTED, &sk->status); + if (resched) + xnsched_run(); + cobalt_atomic_leave(s); + + return 0; +} + +static int __bufp_setsockopt(struct bufp_socket *sk, + struct rtdm_fd *fd, + void *arg) +{ + struct _rtdm_setsockopt_args sopt; + struct rtipc_port_label plabel; + struct __kernel_old_timeval tv; + rtdm_lockctx_t s; + size_t len; + int ret; + + ret = rtipc_get_sockoptin(fd, &sopt, arg); + if (ret) + return ret; + + if (sopt.level == SOL_SOCKET) { + switch (sopt.optname) { + + case SO_RCVTIMEO_OLD: + ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen); + if (ret) + return ret; + sk->rx_timeout = rtipc_timeval_to_ns(&tv); + break; + + case SO_SNDTIMEO_OLD: + ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen); + if (ret) + return ret; + sk->tx_timeout = rtipc_timeval_to_ns(&tv); + break; + + default: + ret = -EINVAL; + } + + return ret; + } + + if (sopt.level != SOL_BUFP) + return -ENOPROTOOPT; + + switch (sopt.optname) { + + case BUFP_BUFSZ: + ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen); + if (ret) + return ret; + if (len == 0) + return -EINVAL; + cobalt_atomic_enter(s); + /* + * We may not do this more than once, and we have to + * do this before the first binding. + */ + if (test_bit(_BUFP_BOUND, &sk->status) || + test_bit(_BUFP_BINDING, &sk->status)) + ret = -EALREADY; + else + sk->bufsz = len; + cobalt_atomic_leave(s); + break; + + case BUFP_LABEL: + if (sopt.optlen < sizeof(plabel)) + return -EINVAL; + if (rtipc_get_arg(fd, &plabel, sopt.optval, sizeof(plabel))) + return -EFAULT; + cobalt_atomic_enter(s); + /* + * We may attach a label to a client socket which was + * previously bound in BUFP. + */ + if (test_bit(_BUFP_BINDING, &sk->status)) + ret = -EALREADY; + else { + strcpy(sk->label, plabel.label); + sk->label[XNOBJECT_NAME_LEN-1] = 0; + } + cobalt_atomic_leave(s); + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static int __bufp_getsockopt(struct bufp_socket *sk, + struct rtdm_fd *fd, + void *arg) +{ + struct _rtdm_getsockopt_args sopt; + struct rtipc_port_label plabel; + struct __kernel_old_timeval tv; + rtdm_lockctx_t s; + socklen_t len; + int ret; + + ret = rtipc_get_sockoptout(fd, &sopt, arg); + if (ret) + return ret; + + if (rtipc_get_arg(fd, &len, sopt.optlen, sizeof(len))) + return -EFAULT; + + if (sopt.level == SOL_SOCKET) { + switch (sopt.optname) { + + case SO_RCVTIMEO_OLD: + rtipc_ns_to_timeval(&tv, sk->rx_timeout); + ret = rtipc_put_timeval(fd, sopt.optval, &tv, len); + if (ret) + return ret; + break; + + case SO_SNDTIMEO_OLD: + rtipc_ns_to_timeval(&tv, sk->tx_timeout); + ret = rtipc_put_timeval(fd, sopt.optval, &tv, len); + if (ret) + return ret; + break; + + default: + ret = -EINVAL; + } + + return ret; + } + + if (sopt.level != SOL_BUFP) + return -ENOPROTOOPT; + + switch (sopt.optname) { + + case BUFP_LABEL: + if (len < sizeof(plabel)) + return -EINVAL; + cobalt_atomic_enter(s); + strcpy(plabel.label, sk->label); + cobalt_atomic_leave(s); + if (rtipc_put_arg(fd, sopt.optval, &plabel, sizeof(plabel))) + return -EFAULT; + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static int __bufp_ioctl(struct rtdm_fd *fd, + unsigned int request, void *arg) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct sockaddr_ipc saddr, *saddrp = &saddr; + struct bufp_socket *sk = priv->state; + int ret = 0; + + switch (request) { + + COMPAT_CASE(_RTIOC_CONNECT): + ret = rtipc_get_sockaddr(fd, 
&saddrp, arg); + if (ret) + return ret; + ret = __bufp_connect_socket(sk, saddrp); + break; + + COMPAT_CASE(_RTIOC_BIND): + ret = rtipc_get_sockaddr(fd, &saddrp, arg); + if (ret) + return ret; + if (saddrp == NULL) + return -EFAULT; + ret = __bufp_bind_socket(priv, saddrp); + break; + + COMPAT_CASE(_RTIOC_GETSOCKNAME): + ret = rtipc_put_sockaddr(fd, arg, &sk->name); + break; + + COMPAT_CASE(_RTIOC_GETPEERNAME): + ret = rtipc_put_sockaddr(fd, arg, &sk->peer); + break; + + COMPAT_CASE(_RTIOC_SETSOCKOPT): + ret = __bufp_setsockopt(sk, fd, arg); + break; + + COMPAT_CASE(_RTIOC_GETSOCKOPT): + ret = __bufp_getsockopt(sk, fd, arg); + break; + + case _RTIOC_LISTEN: + COMPAT_CASE(_RTIOC_ACCEPT): + ret = -EOPNOTSUPP; + break; + + case _RTIOC_SHUTDOWN: + ret = -ENOTCONN; + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static int bufp_ioctl(struct rtdm_fd *fd, + unsigned int request, void *arg) +{ + int ret; + + switch (request) { + COMPAT_CASE(_RTIOC_BIND): + if (rtdm_in_rt_context()) + return -ENOSYS; /* Try downgrading to NRT */ + fallthrough; + default: + ret = __bufp_ioctl(fd, request, arg); + } + + return ret; +} + +static unsigned int bufp_pollstate(struct rtdm_fd *fd) /* atomic */ +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct bufp_socket *sk = priv->state, *rsk; + unsigned int mask = 0; + struct rtdm_fd *rfd; + + if (test_bit(_BUFP_BOUND, &sk->status) && sk->fillsz > 0) + mask |= POLLIN; + + /* + * If the socket is connected, POLLOUT means that the peer + * exists, is bound and can receive data. Otherwise POLLOUT is + * always set, assuming the client is likely to use explicit + * addressing in send operations. + */ + if (test_bit(_BUFP_CONNECTED, &sk->status)) { + rfd = xnmap_fetch_nocheck(portmap, sk->peer.sipc_port); + if (rfd) { + rsk = rtipc_fd_to_state(rfd); + if (rsk->fillsz < rsk->bufsz) + mask |= POLLOUT; + } + } else + mask |= POLLOUT; + + return mask; +} + +static int bufp_init(void) +{ + portmap = xnmap_create(CONFIG_XENO_OPT_BUFP_NRPORT, 0, 0); + if (portmap == NULL) + return -ENOMEM; + + return 0; +} + +static void bufp_exit(void) +{ + xnmap_delete(portmap); +} + +struct rtipc_protocol bufp_proto_driver = { + .proto_name = "bufp", + .proto_statesz = sizeof(struct bufp_socket), + .proto_init = bufp_init, + .proto_exit = bufp_exit, + .proto_ops = { + .socket = bufp_socket, + .close = bufp_close, + .recvmsg = bufp_recvmsg, + .sendmsg = bufp_sendmsg, + .read = bufp_read, + .write = bufp_write, + .ioctl = bufp_ioctl, + .pollstate = bufp_pollstate, + } +}; diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c new file mode 100644 index 0000000..a553902 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c @@ -0,0 +1,990 @@ +/** + * This file is part of the Xenomai project. + * + * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <linux/list.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/poll.h> +#include <linux/time.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/bufd.h> +#include <cobalt/kernel/map.h> +#include <rtdm/ipc.h> +#include "internal.h" + +#define IDDP_SOCKET_MAGIC 0xa37a37a8 + +struct iddp_message { + struct list_head next; + int from; + size_t rdoff; + size_t len; + char data[]; +}; + +struct iddp_socket { + int magic; + struct sockaddr_ipc name; + struct sockaddr_ipc peer; + struct xnheap *bufpool; + struct xnheap privpool; + rtdm_waitqueue_t *poolwaitq; + rtdm_waitqueue_t privwaitq; + size_t poolsz; + rtdm_sem_t insem; + struct list_head inq; + u_long status; + xnhandle_t handle; + char label[XNOBJECT_NAME_LEN]; + nanosecs_rel_t rx_timeout; + nanosecs_rel_t tx_timeout; + unsigned long stalls; /* Buffer stall counter. */ + struct rtipc_private *priv; +}; + +static struct sockaddr_ipc nullsa = { + .sipc_family = AF_RTIPC, + .sipc_port = -1 +}; + +static struct xnmap *portmap; + +static rtdm_waitqueue_t poolwaitq; + +#define _IDDP_BINDING 0 +#define _IDDP_BOUND 1 +#define _IDDP_CONNECTED 2 + +#ifdef CONFIG_XENO_OPT_VFILE + +static char *__iddp_link_target(void *obj) +{ + struct iddp_socket *sk = obj; + + return kasformat("%d", sk->name.sipc_port); +} + +extern struct xnptree rtipc_ptree; + +static struct xnpnode_link __iddp_pnode = { + .node = { + .dirname = "iddp", + .root = &rtipc_ptree, + .ops = &xnregistry_vlink_ops, + }, + .target = __iddp_link_target, +}; + +#else /* !CONFIG_XENO_OPT_VFILE */ + +static struct xnpnode_link __iddp_pnode = { + .node = { + .dirname = "iddp", + }, +}; + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +static inline void __iddp_init_mbuf(struct iddp_message *mbuf, size_t len) +{ + mbuf->rdoff = 0; + mbuf->len = len; + INIT_LIST_HEAD(&mbuf->next); +} + +static struct iddp_message * +__iddp_alloc_mbuf(struct iddp_socket *sk, size_t len, + nanosecs_rel_t timeout, int flags, int *pret) +{ + struct iddp_message *mbuf = NULL; + rtdm_toseq_t timeout_seq; + rtdm_lockctx_t s; + int ret = 0; + + rtdm_toseq_init(&timeout_seq, timeout); + + for (;;) { + mbuf = xnheap_alloc(sk->bufpool, len + sizeof(*mbuf)); + if (mbuf) { + __iddp_init_mbuf(mbuf, len); + break; + } + if (flags & MSG_DONTWAIT) { + ret = -EAGAIN; + break; + } + /* + * No luck, no buffer free. Wait for a buffer to be + * released and retry. Admittedly, we might create a + * thundering herd effect if many waiters put a lot of + * memory pressure on the pool, but in this case, the + * pool size should be adjusted. 
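+	 * As a rule of thumb, a pool meant to hold N in-flight
+	 * datagrams of up to S bytes each needs roughly
+	 * N * (S + sizeof(struct iddp_message)) bytes, configured
+	 * through the IDDP_POOLSZ socket option before binding.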
+ */ + rtdm_waitqueue_lock(sk->poolwaitq, s); + ++sk->stalls; + ret = rtdm_timedwait_locked(sk->poolwaitq, timeout, &timeout_seq); + rtdm_waitqueue_unlock(sk->poolwaitq, s); + if (unlikely(ret == -EIDRM)) + ret = -ECONNRESET; + if (ret) + break; + } + + *pret = ret; + + return mbuf; +} + +static void __iddp_free_mbuf(struct iddp_socket *sk, + struct iddp_message *mbuf) +{ + xnheap_free(sk->bufpool, mbuf); + rtdm_waitqueue_broadcast(sk->poolwaitq); +} + +static int iddp_socket(struct rtdm_fd *fd) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iddp_socket *sk = priv->state; + + sk->magic = IDDP_SOCKET_MAGIC; + sk->name = nullsa; /* Unbound */ + sk->peer = nullsa; + sk->bufpool = &cobalt_heap; + sk->poolwaitq = &poolwaitq; + sk->poolsz = 0; + sk->status = 0; + sk->handle = 0; + sk->rx_timeout = RTDM_TIMEOUT_INFINITE; + sk->tx_timeout = RTDM_TIMEOUT_INFINITE; + sk->stalls = 0; + *sk->label = 0; + INIT_LIST_HEAD(&sk->inq); + rtdm_sem_init(&sk->insem, 0); + rtdm_waitqueue_init(&sk->privwaitq); + sk->priv = priv; + + return 0; +} + +static void iddp_close(struct rtdm_fd *fd) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iddp_socket *sk = priv->state; + struct iddp_message *mbuf; + rtdm_lockctx_t s; + void *poolmem; + u32 poolsz; + + rtdm_sem_destroy(&sk->insem); + rtdm_waitqueue_destroy(&sk->privwaitq); + + if (test_bit(_IDDP_BOUND, &sk->status)) { + if (sk->handle) + xnregistry_remove(sk->handle); + if (sk->name.sipc_port > -1) { + cobalt_atomic_enter(s); + xnmap_remove(portmap, sk->name.sipc_port); + cobalt_atomic_leave(s); + } + if (sk->bufpool != &cobalt_heap) { + poolmem = xnheap_get_membase(&sk->privpool); + poolsz = xnheap_get_size(&sk->privpool); + xnheap_destroy(&sk->privpool); + xnheap_vfree(poolmem); + return; + } + } + + /* Send unread datagrams back to the system heap. */ + while (!list_empty(&sk->inq)) { + mbuf = list_entry(sk->inq.next, struct iddp_message, next); + list_del(&mbuf->next); + xnheap_free(&cobalt_heap, mbuf); + } + + kfree(sk); + + return; +} + +static ssize_t __iddp_recvmsg(struct rtdm_fd *fd, + struct iovec *iov, int iovlen, int flags, + struct sockaddr_ipc *saddr) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iddp_socket *sk = priv->state; + ssize_t maxlen, len, wrlen, vlen; + rtdm_toseq_t timeout_seq, *toseq; + int nvec, rdoff, ret, dofree; + struct iddp_message *mbuf; + nanosecs_rel_t timeout; + struct xnbufd bufd; + rtdm_lockctx_t s; + + if (!test_bit(_IDDP_BOUND, &sk->status)) + return -EAGAIN; + + maxlen = rtdm_get_iov_flatlen(iov, iovlen); + if (maxlen == 0) + return 0; + + if (flags & MSG_DONTWAIT) { + timeout = RTDM_TIMEOUT_NONE; + toseq = NULL; + } else { + timeout = sk->rx_timeout; + toseq = &timeout_seq; + } + + /* We want to pick one buffer from the queue. */ + + for (;;) { + ret = rtdm_sem_timeddown(&sk->insem, timeout, toseq); + if (unlikely(ret)) { + if (ret == -EIDRM) + return -ECONNRESET; + return ret; + } + /* We may have spurious wakeups. */ + cobalt_atomic_enter(s); + if (!list_empty(&sk->inq)) + break; + cobalt_atomic_leave(s); + } + + /* Pull heading message from input queue. */ + mbuf = list_entry(sk->inq.next, struct iddp_message, next); + rdoff = mbuf->rdoff; + len = mbuf->len - rdoff; + if (saddr) { + saddr->sipc_family = AF_RTIPC; + saddr->sipc_port = mbuf->from; + } + if (maxlen >= len) { + list_del(&mbuf->next); + dofree = 1; + if (list_empty(&sk->inq)) /* -> non-readable */ + xnselect_signal(&priv->recv_block, 0); + + } else { + /* Buffer is only partially read: repost. 
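+		 * The datagram stays at the head of the input queue
+		 * and its unread tail is handed out by subsequent
+		 * reads, so a short buffer truncates the current read
+		 * but never drops data.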
*/ + mbuf->rdoff += maxlen; + len = maxlen; + dofree = 0; + } + + if (!dofree) + rtdm_sem_up(&sk->insem); + + cobalt_atomic_leave(s); + + /* Now, write "len" bytes from mbuf->data to the vector cells */ + for (nvec = 0, wrlen = len; nvec < iovlen && wrlen > 0; nvec++) { + if (iov[nvec].iov_len == 0) + continue; + vlen = wrlen >= iov[nvec].iov_len ? iov[nvec].iov_len : wrlen; + if (rtdm_fd_is_user(fd)) { + xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen); + ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen); + xnbufd_unmap_uread(&bufd); + } else { + xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen); + ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen); + xnbufd_unmap_kread(&bufd); + } + if (ret < 0) + break; + iov[nvec].iov_base += vlen; + iov[nvec].iov_len -= vlen; + wrlen -= vlen; + rdoff += vlen; + } + + if (dofree) + __iddp_free_mbuf(sk, mbuf); + + return ret ?: len; +} + +static ssize_t iddp_recvmsg(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags) +{ + struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov; + struct sockaddr_ipc saddr; + ssize_t ret; + + if (flags & ~MSG_DONTWAIT) + return -EINVAL; + + if (msg->msg_name) { + if (msg->msg_namelen < sizeof(struct sockaddr_ipc)) + return -EINVAL; + } else if (msg->msg_namelen != 0) + return -EINVAL; + + if (msg->msg_iovlen >= UIO_MAXIOV) + return -EINVAL; + + /* Copy I/O vector in */ + ret = rtdm_get_iovec(fd, &iov, msg, iov_fast); + if (ret) + return ret; + + ret = __iddp_recvmsg(fd, iov, msg->msg_iovlen, flags, &saddr); + if (ret <= 0) { + rtdm_drop_iovec(iov, iov_fast); + return ret; + } + + /* Copy the updated I/O vector back */ + if (rtdm_put_iovec(fd, iov, msg, iov_fast)) + return -EFAULT; + + /* Copy the source address if required. */ + if (msg->msg_name) { + if (rtipc_put_arg(fd, msg->msg_name, &saddr, sizeof(saddr))) + return -EFAULT; + msg->msg_namelen = sizeof(struct sockaddr_ipc); + } + + return ret; +} + +static ssize_t iddp_read(struct rtdm_fd *fd, void *buf, size_t len) +{ + struct iovec iov = { .iov_base = buf, .iov_len = len }; + + return __iddp_recvmsg(fd, &iov, 1, 0, NULL); +} + +static ssize_t __iddp_sendmsg(struct rtdm_fd *fd, + struct iovec *iov, int iovlen, int flags, + const struct sockaddr_ipc *daddr) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iddp_socket *sk = priv->state, *rsk; + struct iddp_message *mbuf; + ssize_t len, rdlen, vlen; + int nvec, wroff, ret; + struct rtdm_fd *rfd; + struct xnbufd bufd; + rtdm_lockctx_t s; + + len = rtdm_get_iov_flatlen(iov, iovlen); + if (len == 0) + return 0; + + cobalt_atomic_enter(s); + rfd = xnmap_fetch_nocheck(portmap, daddr->sipc_port); + if (rfd && rtdm_fd_lock(rfd) < 0) + rfd = NULL; + cobalt_atomic_leave(s); + if (rfd == NULL) + return -ECONNRESET; + + rsk = rtipc_fd_to_state(rfd); + if (!test_bit(_IDDP_BOUND, &rsk->status)) { + rtdm_fd_unlock(rfd); + return -ECONNREFUSED; + } + + mbuf = __iddp_alloc_mbuf(rsk, len, sk->tx_timeout, flags, &ret); + if (unlikely(ret)) { + rtdm_fd_unlock(rfd); + return ret; + } + + /* Now, move "len" bytes to mbuf->data from the vector cells */ + for (nvec = 0, rdlen = len, wroff = 0; + nvec < iovlen && rdlen > 0; nvec++) { + if (iov[nvec].iov_len == 0) + continue; + vlen = rdlen >= iov[nvec].iov_len ? 
iov[nvec].iov_len : rdlen; + if (rtdm_fd_is_user(fd)) { + xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen); + ret = xnbufd_copy_to_kmem(mbuf->data + wroff, &bufd, vlen); + xnbufd_unmap_uread(&bufd); + } else { + xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen); + ret = xnbufd_copy_to_kmem(mbuf->data + wroff, &bufd, vlen); + xnbufd_unmap_kread(&bufd); + } + if (ret < 0) + goto fail; + iov[nvec].iov_base += vlen; + iov[nvec].iov_len -= vlen; + rdlen -= vlen; + wroff += vlen; + } + + cobalt_atomic_enter(s); + + /* + * CAUTION: we must remain atomic from the moment we signal + * POLLIN, until sem_up has happened. + */ + if (list_empty(&rsk->inq)) /* -> readable */ + xnselect_signal(&rsk->priv->recv_block, POLLIN); + + mbuf->from = sk->name.sipc_port; + + if (flags & MSG_OOB) + list_add(&mbuf->next, &rsk->inq); + else + list_add_tail(&mbuf->next, &rsk->inq); + + rtdm_sem_up(&rsk->insem); /* Will resched. */ + + cobalt_atomic_leave(s); + + rtdm_fd_unlock(rfd); + + return len; + +fail: + __iddp_free_mbuf(rsk, mbuf); + + rtdm_fd_unlock(rfd); + + return ret; +} + +static ssize_t iddp_sendmsg(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov; + struct iddp_socket *sk = priv->state; + struct sockaddr_ipc daddr; + ssize_t ret; + + if (flags & ~(MSG_OOB | MSG_DONTWAIT)) + return -EINVAL; + + if (msg->msg_name) { + if (msg->msg_namelen != sizeof(struct sockaddr_ipc)) + return -EINVAL; + + /* Fetch the destination address to send to. */ + if (rtipc_get_arg(fd, &daddr, msg->msg_name, sizeof(daddr))) + return -EFAULT; + + if (daddr.sipc_port < 0 || + daddr.sipc_port >= CONFIG_XENO_OPT_IDDP_NRPORT) + return -EINVAL; + } else { + if (msg->msg_namelen != 0) + return -EINVAL; + daddr = sk->peer; + if (daddr.sipc_port < 0) + return -EDESTADDRREQ; + } + + if (msg->msg_iovlen >= UIO_MAXIOV) + return -EINVAL; + + /* Copy I/O vector in */ + ret = rtdm_get_iovec(fd, &iov, msg, iov_fast); + if (ret) + return ret; + + ret = __iddp_sendmsg(fd, iov, msg->msg_iovlen, flags, &daddr); + if (ret <= 0) { + rtdm_drop_iovec(iov, iov_fast); + return ret; + } + + /* Copy updated I/O vector back */ + return rtdm_put_iovec(fd, iov, msg, iov_fast) ?: ret; +} + +static ssize_t iddp_write(struct rtdm_fd *fd, + const void *buf, size_t len) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iovec iov = { .iov_base = (void *)buf, .iov_len = len }; + struct iddp_socket *sk = priv->state; + + if (sk->peer.sipc_port < 0) + return -EDESTADDRREQ; + + return __iddp_sendmsg(fd, &iov, 1, 0, &sk->peer); +} + +static int __iddp_bind_socket(struct rtdm_fd *fd, + struct sockaddr_ipc *sa) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iddp_socket *sk = priv->state; + int ret = 0, port; + rtdm_lockctx_t s; + void *poolmem; + size_t poolsz; + + if (sa->sipc_family != AF_RTIPC) + return -EINVAL; + + if (sa->sipc_port < -1 || + sa->sipc_port >= CONFIG_XENO_OPT_IDDP_NRPORT) + return -EINVAL; + + cobalt_atomic_enter(s); + if (test_bit(_IDDP_BOUND, &sk->status) || + __test_and_set_bit(_IDDP_BINDING, &sk->status)) + ret = -EADDRINUSE; + cobalt_atomic_leave(s); + if (ret) + return ret; + + /* Will auto-select a free port number if unspec (-1). */ + port = sa->sipc_port; + cobalt_atomic_enter(s); + port = xnmap_enter(portmap, port, fd); + cobalt_atomic_leave(s); + if (port < 0) + return port == -EEXIST ? 
-EADDRINUSE : -ENOMEM; + + sa->sipc_port = port; + + /* + * Allocate a local buffer pool if we were told to do so via + * setsockopt() before we got there. + */ + poolsz = sk->poolsz; + if (poolsz > 0) { + poolsz = PAGE_ALIGN(poolsz); + poolmem = xnheap_vmalloc(poolsz); + if (poolmem == NULL) { + ret = -ENOMEM; + goto fail; + } + + ret = xnheap_init(&sk->privpool, poolmem, poolsz); + if (ret) { + xnheap_vfree(poolmem); + goto fail; + } + xnheap_set_name(&sk->privpool, "iddp-pool@%d", port); + sk->poolwaitq = &sk->privwaitq; + sk->bufpool = &sk->privpool; + } + + sk->name = *sa; + /* Set default destination if unset at binding time. */ + if (sk->peer.sipc_port < 0) + sk->peer = *sa; + + if (*sk->label) { + ret = xnregistry_enter(sk->label, sk, + &sk->handle, &__iddp_pnode.node); + if (ret) { + if (poolsz > 0) { + xnheap_destroy(&sk->privpool); + xnheap_vfree(poolmem); + } + goto fail; + } + } + + cobalt_atomic_enter(s); + __clear_bit(_IDDP_BINDING, &sk->status); + __set_bit(_IDDP_BOUND, &sk->status); + if (xnselect_signal(&priv->send_block, POLLOUT)) + xnsched_run(); + cobalt_atomic_leave(s); + + return 0; +fail: + xnmap_remove(portmap, port); + clear_bit(_IDDP_BINDING, &sk->status); + + return ret; +} + +static int __iddp_connect_socket(struct iddp_socket *sk, + struct sockaddr_ipc *sa) +{ + struct sockaddr_ipc _sa; + struct iddp_socket *rsk; + int ret, resched = 0; + rtdm_lockctx_t s; + xnhandle_t h; + + if (sa == NULL) { + _sa = nullsa; + sa = &_sa; + goto set_assoc; + } + + if (sa->sipc_family != AF_RTIPC) + return -EINVAL; + + if (sa->sipc_port < -1 || + sa->sipc_port >= CONFIG_XENO_OPT_IDDP_NRPORT) + return -EINVAL; + /* + * - If a valid sipc_port is passed in the [0..NRPORT-1] range, + * it is used verbatim and the connection succeeds + * immediately, regardless of whether the destination is + * bound at the time of the call. + * + * - If sipc_port is -1 and a label was set via IDDP_LABEL, + * connect() blocks for the requested amount of time (see + * SO_RCVTIMEO) until a socket is bound to the same label. + * + * - If sipc_port is -1 and no label is given, the default + * destination address is cleared, meaning that any subsequent + * write() to the socket will return -EDESTADDRREQ, until a + * valid destination address is set via connect() or bind(). + * + * - In all other cases, -EINVAL is returned. + */ + if (sa->sipc_port < 0 && *sk->label) { + ret = xnregistry_bind(sk->label, + sk->rx_timeout, XN_RELATIVE, &h); + if (ret) + return ret; + + cobalt_atomic_enter(s); + rsk = xnregistry_lookup(h, NULL); + if (rsk == NULL || rsk->magic != IDDP_SOCKET_MAGIC) + ret = -EINVAL; + else { + /* Fetch labeled port number. */ + sa->sipc_port = rsk->name.sipc_port; + resched = xnselect_signal(&sk->priv->send_block, POLLOUT); + } + cobalt_atomic_leave(s); + if (ret) + return ret; + } else if (sa->sipc_port < 0) + sa = &nullsa; +set_assoc: + cobalt_atomic_enter(s); + if (!test_bit(_IDDP_BOUND, &sk->status)) + /* Set default name. */ + sk->name = *sa; + /* Set default destination. 
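+	 * As with BUFP, a negative port clears the association below,
+	 * so later unaddressed write() calls fail with -EDESTADDRREQ
+	 * until a valid destination is set again.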
*/ + sk->peer = *sa; + if (sa->sipc_port < 0) + __clear_bit(_IDDP_CONNECTED, &sk->status); + else + __set_bit(_IDDP_CONNECTED, &sk->status); + if (resched) + xnsched_run(); + cobalt_atomic_leave(s); + + return 0; +} + +static int __iddp_setsockopt(struct iddp_socket *sk, + struct rtdm_fd *fd, + void *arg) +{ + struct _rtdm_setsockopt_args sopt; + struct rtipc_port_label plabel; + struct __kernel_old_timeval tv; + rtdm_lockctx_t s; + size_t len; + int ret; + + ret = rtipc_get_sockoptin(fd, &sopt, arg); + if (ret) + return ret; + + if (sopt.level == SOL_SOCKET) { + switch (sopt.optname) { + + case SO_RCVTIMEO_OLD: + ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen); + if (ret) + return ret; + sk->rx_timeout = rtipc_timeval_to_ns(&tv); + break; + + case SO_SNDTIMEO_OLD: + ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen); + if (ret) + return ret; + sk->tx_timeout = rtipc_timeval_to_ns(&tv); + break; + + default: + ret = -EINVAL; + } + + return ret; + } + + if (sopt.level != SOL_IDDP) + return -ENOPROTOOPT; + + switch (sopt.optname) { + + case IDDP_POOLSZ: + ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen); + if (ret) + return ret; + if (len == 0) + return -EINVAL; + cobalt_atomic_enter(s); + /* + * We may not do this more than once, and we have to + * do this before the first binding. + */ + if (test_bit(_IDDP_BOUND, &sk->status) || + test_bit(_IDDP_BINDING, &sk->status)) + ret = -EALREADY; + else + sk->poolsz = len; + cobalt_atomic_leave(s); + break; + + case IDDP_LABEL: + if (sopt.optlen < sizeof(plabel)) + return -EINVAL; + if (rtipc_get_arg(fd, &plabel, sopt.optval, sizeof(plabel))) + return -EFAULT; + cobalt_atomic_enter(s); + /* + * We may attach a label to a client socket which was + * previously bound in IDDP. + */ + if (test_bit(_IDDP_BINDING, &sk->status)) + ret = -EALREADY; + else { + strcpy(sk->label, plabel.label); + sk->label[XNOBJECT_NAME_LEN-1] = 0; + } + cobalt_atomic_leave(s); + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static int __iddp_getsockopt(struct iddp_socket *sk, + struct rtdm_fd *fd, + void *arg) +{ + struct _rtdm_getsockopt_args sopt; + struct rtipc_port_label plabel; + struct __kernel_old_timeval tv; + rtdm_lockctx_t s; + socklen_t len; + int ret; + + ret = rtipc_get_sockoptout(fd, &sopt, arg); + if (ret) + return ret; + + ret = rtipc_get_arg(fd, &len, sopt.optlen, sizeof(len)); + if (ret) + return ret; + + if (sopt.level == SOL_SOCKET) { + switch (sopt.optname) { + + case SO_RCVTIMEO_OLD: + rtipc_ns_to_timeval(&tv, sk->rx_timeout); + ret = rtipc_put_timeval(fd, sopt.optval, &tv, len); + if (ret) + return ret; + break; + + case SO_SNDTIMEO_OLD: + rtipc_ns_to_timeval(&tv, sk->tx_timeout); + ret = rtipc_put_timeval(fd, sopt.optval, &tv, len); + if (ret) + return ret; + break; + + default: + ret = -EINVAL; + } + + return ret; + } + + if (sopt.level != SOL_IDDP) + return -ENOPROTOOPT; + + switch (sopt.optname) { + + case IDDP_LABEL: + if (len < sizeof(plabel)) + return -EINVAL; + cobalt_atomic_enter(s); + strcpy(plabel.label, sk->label); + cobalt_atomic_leave(s); + if (rtipc_put_arg(fd, sopt.optval, &plabel, sizeof(plabel))) + return -EFAULT; + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static int __iddp_ioctl(struct rtdm_fd *fd, + unsigned int request, void *arg) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct sockaddr_ipc saddr, *saddrp = &saddr; + struct iddp_socket *sk = priv->state; + int ret = 0; + + switch (request) { + + COMPAT_CASE(_RTIOC_CONNECT): + ret = 
rtipc_get_sockaddr(fd, &saddrp, arg); + if (ret) + return ret; + ret = __iddp_connect_socket(sk, saddrp); + break; + + COMPAT_CASE(_RTIOC_BIND): + ret = rtipc_get_sockaddr(fd, &saddrp, arg); + if (ret) + return ret; + if (saddrp == NULL) + return -EFAULT; + ret = __iddp_bind_socket(fd, saddrp); + break; + + COMPAT_CASE(_RTIOC_GETSOCKNAME): + ret = rtipc_put_sockaddr(fd, arg, &sk->name); + break; + + COMPAT_CASE(_RTIOC_GETPEERNAME): + ret = rtipc_put_sockaddr(fd, arg, &sk->peer); + break; + + COMPAT_CASE(_RTIOC_SETSOCKOPT): + ret = __iddp_setsockopt(sk, fd, arg); + break; + + COMPAT_CASE(_RTIOC_GETSOCKOPT): + ret = __iddp_getsockopt(sk, fd, arg); + break; + + case _RTIOC_LISTEN: + COMPAT_CASE(_RTIOC_ACCEPT): + ret = -EOPNOTSUPP; + break; + + case _RTIOC_SHUTDOWN: + ret = -ENOTCONN; + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static int iddp_ioctl(struct rtdm_fd *fd, + unsigned int request, void *arg) +{ + int ret; + + switch (request) { + COMPAT_CASE(_RTIOC_BIND): + if (rtdm_in_rt_context()) + return -ENOSYS; /* Try downgrading to NRT */ + fallthrough; + default: + ret = __iddp_ioctl(fd, request, arg); + } + + return ret; +} + +static int iddp_init(void) +{ + portmap = xnmap_create(CONFIG_XENO_OPT_IDDP_NRPORT, 0, 0); + if (portmap == NULL) + return -ENOMEM; + + rtdm_waitqueue_init(&poolwaitq); + + return 0; +} + +static void iddp_exit(void) +{ + rtdm_waitqueue_destroy(&poolwaitq); + xnmap_delete(portmap); +} + +static unsigned int iddp_pollstate(struct rtdm_fd *fd) /* atomic */ +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iddp_socket *sk = priv->state; + unsigned int mask = 0; + struct rtdm_fd *rfd; + + if (test_bit(_IDDP_BOUND, &sk->status) && !list_empty(&sk->inq)) + mask |= POLLIN; + + /* + * If the socket is connected, POLLOUT means that the peer + * exists. Otherwise POLLOUT is always set, assuming the + * client is likely to use explicit addressing in send + * operations. + * + * If the peer exists, we still can't really know whether + * writing to the socket would block as it depends on the + * message size and other highly dynamic factors, so pretend + * it would not. + */ + if (test_bit(_IDDP_CONNECTED, &sk->status)) { + rfd = xnmap_fetch_nocheck(portmap, sk->peer.sipc_port); + if (rfd) + mask |= POLLOUT; + } else + mask |= POLLOUT; + + return mask; +} + +struct rtipc_protocol iddp_proto_driver = { + .proto_name = "iddp", + .proto_statesz = sizeof(struct iddp_socket), + .proto_init = iddp_init, + .proto_exit = iddp_exit, + .proto_ops = { + .socket = iddp_socket, + .close = iddp_close, + .recvmsg = iddp_recvmsg, + .sendmsg = iddp_sendmsg, + .read = iddp_read, + .write = iddp_write, + .ioctl = iddp_ioctl, + .pollstate = iddp_pollstate, + } +}; diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h new file mode 100644 index 0000000..919a5d9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h @@ -0,0 +1,135 @@ +/** + * This file is part of the Xenomai project. + * + * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _RTIPC_INTERNAL_H +#define _RTIPC_INTERNAL_H + +#include <linux/uio.h> +#include <linux/time.h> +#include <cobalt/kernel/registry.h> +#include <cobalt/kernel/clock.h> +#include <cobalt/kernel/select.h> +#include <rtdm/rtdm.h> +#include <rtdm/compat.h> +#include <rtdm/driver.h> + +struct rtipc_protocol; + +struct rtipc_private { + struct rtipc_protocol *proto; + DECLARE_XNSELECT(send_block); + DECLARE_XNSELECT(recv_block); + void *state; +}; + +struct rtipc_protocol { + const char *proto_name; + int proto_statesz; + int (*proto_init)(void); + void (*proto_exit)(void); + struct { + int (*socket)(struct rtdm_fd *fd); + void (*close)(struct rtdm_fd *fd); + ssize_t (*recvmsg)(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags); + ssize_t (*sendmsg)(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags); + ssize_t (*read)(struct rtdm_fd *fd, + void *buf, size_t len); + ssize_t (*write)(struct rtdm_fd *fd, + const void *buf, size_t len); + int (*ioctl)(struct rtdm_fd *fd, + unsigned int request, void *arg); + unsigned int (*pollstate)(struct rtdm_fd *fd); + } proto_ops; +}; + +static inline void *rtipc_fd_to_state(struct rtdm_fd *fd) +{ + struct rtipc_private *p = rtdm_fd_to_private(fd); + return p->state; +} + +static inline nanosecs_rel_t rtipc_timeval_to_ns(const struct __kernel_old_timeval *tv) +{ + nanosecs_rel_t ns = tv->tv_usec * 1000; + + if (tv->tv_sec) + ns += (nanosecs_rel_t)tv->tv_sec * 1000000000UL; + + return ns; +} + +static inline void rtipc_ns_to_timeval(struct __kernel_old_timeval *tv, nanosecs_rel_t ns) +{ + unsigned long nsecs; + + tv->tv_sec = xnclock_divrem_billion(ns, &nsecs); + tv->tv_usec = nsecs / 1000; +} + +int rtipc_get_sockaddr(struct rtdm_fd *fd, + struct sockaddr_ipc **saddrp, + const void *arg); + +int rtipc_put_sockaddr(struct rtdm_fd *fd, void *arg, + const struct sockaddr_ipc *saddr); + +int rtipc_get_sockoptout(struct rtdm_fd *fd, + struct _rtdm_getsockopt_args *sopt, + const void *arg); + +int rtipc_put_sockoptout(struct rtdm_fd *fd, void *arg, + const struct _rtdm_getsockopt_args *sopt); + +int rtipc_get_sockoptin(struct rtdm_fd *fd, + struct _rtdm_setsockopt_args *sopt, + const void *arg); + +int rtipc_get_timeval(struct rtdm_fd *fd, struct __kernel_old_timeval *tv, + const void *arg, size_t arglen); + +int rtipc_put_timeval(struct rtdm_fd *fd, void *arg, + const struct __kernel_old_timeval *tv, size_t arglen); + +int rtipc_get_length(struct rtdm_fd *fd, size_t *lenp, + const void *arg, size_t arglen); + +int rtipc_get_arg(struct rtdm_fd *fd, void *dst, const void *src, + size_t len); + +int rtipc_put_arg(struct rtdm_fd *fd, void *dst, const void *src, + size_t len); + +extern struct rtipc_protocol xddp_proto_driver; + +extern struct rtipc_protocol iddp_proto_driver; + +extern struct rtipc_protocol bufp_proto_driver; + +extern struct xnptree rtipc_ptree; + +#define rtipc_wait_context xnthread_wait_context +#define rtipc_prepare_wait xnthread_prepare_wait +#define rtipc_get_wait_context xnthread_get_wait_context +#define rtipc_peek_wait_head(obj) 
xnsynch_peek_pendq(&(obj)->synch_base) + +#define COMPAT_CASE(__op) case __op __COMPAT_CASE(__op ## _COMPAT) + +#endif /* !_RTIPC_INTERNAL_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c new file mode 100644 index 0000000..abb7681 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c @@ -0,0 +1,524 @@ +/** + * This file is part of the Xenomai project. + * + * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/poll.h> +#include <linux/time.h> +#include <rtdm/ipc.h> +#include <rtdm/compat.h> +#include "internal.h" + +MODULE_DESCRIPTION("Real-time IPC interface"); +MODULE_AUTHOR("Philippe Gerum <rpm@xenomai.org>"); +MODULE_LICENSE("GPL"); + +static struct rtipc_protocol *protocols[IPCPROTO_MAX] = { +#ifdef CONFIG_XENO_DRIVERS_RTIPC_XDDP + [IPCPROTO_XDDP - 1] = &xddp_proto_driver, +#endif +#ifdef CONFIG_XENO_DRIVERS_RTIPC_IDDP + [IPCPROTO_IDDP - 1] = &iddp_proto_driver, +#endif +#ifdef CONFIG_XENO_DRIVERS_RTIPC_BUFP + [IPCPROTO_BUFP - 1] = &bufp_proto_driver, +#endif +}; + +DEFINE_XNPTREE(rtipc_ptree, "rtipc"); + +int rtipc_get_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len) +{ + if (!rtdm_fd_is_user(fd)) { + memcpy(dst, src, len); + return 0; + } + + return rtdm_copy_from_user(fd, dst, src, len); +} + +int rtipc_put_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len) +{ + if (!rtdm_fd_is_user(fd)) { + memcpy(dst, src, len); + return 0; + } + + return rtdm_copy_to_user(fd, dst, src, len); +} + +int rtipc_get_sockaddr(struct rtdm_fd *fd, struct sockaddr_ipc **saddrp, + const void *arg) +{ + const struct _rtdm_setsockaddr_args *p; + struct _rtdm_setsockaddr_args sreq; + int ret; + + if (!rtdm_fd_is_user(fd)) { + p = arg; + if (p->addrlen > 0) { + if (p->addrlen != sizeof(**saddrp)) + return -EINVAL; + memcpy(*saddrp, p->addr, sizeof(**saddrp)); + } else { + if (p->addr) + return -EINVAL; + *saddrp = NULL; + } + return 0; + } + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + struct compat_rtdm_setsockaddr_args csreq; + ret = rtdm_safe_copy_from_user(fd, &csreq, arg, sizeof(csreq)); + if (ret) + return ret; + if (csreq.addrlen > 0) { + if (csreq.addrlen != sizeof(**saddrp)) + return -EINVAL; + return rtdm_safe_copy_from_user(fd, *saddrp, + compat_ptr(csreq.addr), + sizeof(**saddrp)); + } + if (csreq.addr) + return -EINVAL; + + *saddrp = NULL; + + return 0; + } +#endif + + ret = rtdm_safe_copy_from_user(fd, &sreq, arg, sizeof(sreq)); + if (ret) + return ret; + if (sreq.addrlen > 0) { + if (sreq.addrlen != sizeof(**saddrp)) + return -EINVAL; + return rtdm_safe_copy_from_user(fd, *saddrp, + sreq.addr, sizeof(**saddrp)); + } + if (sreq.addr) + return 
-EINVAL; + + *saddrp = NULL; + + return 0; +} + +int rtipc_put_sockaddr(struct rtdm_fd *fd, void *arg, + const struct sockaddr_ipc *saddr) +{ + const struct _rtdm_getsockaddr_args *p; + struct _rtdm_getsockaddr_args sreq; + socklen_t len; + int ret; + + if (!rtdm_fd_is_user(fd)) { + p = arg; + if (*p->addrlen < sizeof(*saddr)) + return -EINVAL; + memcpy(p->addr, saddr, sizeof(*saddr)); + *p->addrlen = sizeof(*saddr); + return 0; + } + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + struct compat_rtdm_getsockaddr_args csreq; + ret = rtdm_safe_copy_from_user(fd, &csreq, arg, sizeof(csreq)); + if (ret) + return ret; + + ret = rtdm_safe_copy_from_user(fd, &len, + compat_ptr(csreq.addrlen), + sizeof(len)); + if (ret) + return ret; + + if (len < sizeof(*saddr)) + return -EINVAL; + + ret = rtdm_safe_copy_to_user(fd, compat_ptr(csreq.addr), + saddr, sizeof(*saddr)); + if (ret) + return ret; + + len = sizeof(*saddr); + return rtdm_safe_copy_to_user(fd, compat_ptr(csreq.addrlen), + &len, sizeof(len)); + } +#endif + + sreq.addr = NULL; + sreq.addrlen = NULL; + ret = rtdm_safe_copy_from_user(fd, &sreq, arg, sizeof(sreq)); + if (ret) + return ret; + + ret = rtdm_safe_copy_from_user(fd, &len, sreq.addrlen, sizeof(len)); + if (ret) + return ret; + + if (len < sizeof(*saddr)) + return -EINVAL; + + ret = rtdm_safe_copy_to_user(fd, sreq.addr, saddr, sizeof(*saddr)); + if (ret) + return ret; + + len = sizeof(*saddr); + + return rtdm_safe_copy_to_user(fd, sreq.addrlen, &len, sizeof(len)); +} + +int rtipc_get_sockoptout(struct rtdm_fd *fd, struct _rtdm_getsockopt_args *sopt, + const void *arg) +{ + if (!rtdm_fd_is_user(fd)) { + *sopt = *(struct _rtdm_getsockopt_args *)arg; + return 0; + } + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + struct compat_rtdm_getsockopt_args csopt; + int ret; + ret = rtdm_safe_copy_from_user(fd, &csopt, arg, sizeof(csopt)); + if (ret) + return ret; + sopt->level = csopt.level; + sopt->optname = csopt.optname; + sopt->optval = compat_ptr(csopt.optval); + sopt->optlen = compat_ptr(csopt.optlen); + return 0; + } +#endif + + return rtdm_safe_copy_from_user(fd, sopt, arg, sizeof(*sopt)); +} + +int rtipc_put_sockoptout(struct rtdm_fd *fd, void *arg, + const struct _rtdm_getsockopt_args *sopt) +{ + if (!rtdm_fd_is_user(fd)) { + *(struct _rtdm_getsockopt_args *)arg = *sopt; + return 0; + } + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + struct compat_rtdm_getsockopt_args csopt; + int ret; + csopt.level = sopt->level; + csopt.optname = sopt->optname; + csopt.optval = ptr_to_compat(sopt->optval); + csopt.optlen = ptr_to_compat(sopt->optlen); + ret = rtdm_safe_copy_to_user(fd, arg, &csopt, sizeof(csopt)); + if (ret) + return ret; + return 0; + } +#endif + + return rtdm_safe_copy_to_user(fd, arg, sopt, sizeof(*sopt)); +} + +int rtipc_get_sockoptin(struct rtdm_fd *fd, struct _rtdm_setsockopt_args *sopt, + const void *arg) +{ + if (!rtdm_fd_is_user(fd)) { + *sopt = *(struct _rtdm_setsockopt_args *)arg; + return 0; + } + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + struct compat_rtdm_setsockopt_args csopt; + int ret; + ret = rtdm_safe_copy_from_user(fd, &csopt, arg, sizeof(csopt)); + if (ret) + return ret; + sopt->level = csopt.level; + sopt->optname = csopt.optname; + sopt->optval = compat_ptr(csopt.optval); + sopt->optlen = csopt.optlen; + return 0; + } +#endif + + return rtdm_safe_copy_from_user(fd, sopt, arg, sizeof(*sopt)); +} + +int rtipc_get_timeval(struct rtdm_fd *fd, struct __kernel_old_timeval *tv, + const 
void *arg, size_t arglen) +{ +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + if (arglen != sizeof(struct old_timeval32)) + return -EINVAL; + return sys32_get_timeval(tv, arg); + } +#endif + + if (arglen != sizeof(*tv)) + return -EINVAL; + + if (!rtdm_fd_is_user(fd)) { + *tv = *(struct __kernel_old_timeval *)arg; + return 0; + } + + return rtdm_safe_copy_from_user(fd, tv, arg, sizeof(*tv)); +} + +int rtipc_put_timeval(struct rtdm_fd *fd, void *arg, + const struct __kernel_old_timeval *tv, size_t arglen) +{ +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + if (arglen != sizeof(struct old_timeval32)) + return -EINVAL; + return sys32_put_timeval(arg, tv); + } +#endif + + if (arglen != sizeof(*tv)) + return -EINVAL; + + if (!rtdm_fd_is_user(fd)) { + *(struct __kernel_old_timeval *)arg = *tv; + return 0; + } + + return rtdm_safe_copy_to_user(fd, arg, tv, sizeof(*tv)); +} + +int rtipc_get_length(struct rtdm_fd *fd, size_t *lenp, + const void *arg, size_t arglen) +{ +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) { + const compat_size_t *csz; + if (arglen != sizeof(*csz)) + return -EINVAL; + csz = arg; + return csz == NULL || + !access_rok(csz, sizeof(*csz)) || + __xn_get_user(*lenp, csz) ? -EFAULT : 0; + } +#endif + + if (arglen != sizeof(size_t)) + return -EINVAL; + + if (!rtdm_fd_is_user(fd)) { + *lenp = *(size_t *)arg; + return 0; + } + + return rtdm_safe_copy_from_user(fd, lenp, arg, sizeof(*lenp)); +} + +static int rtipc_socket(struct rtdm_fd *fd, int protocol) +{ + struct rtipc_protocol *proto; + struct rtipc_private *priv; + int ret; + + if (protocol < 0 || protocol >= IPCPROTO_MAX) + return -EPROTONOSUPPORT; + + if (protocol == IPCPROTO_IPC) + /* Default protocol is IDDP */ + protocol = IPCPROTO_IDDP; + + proto = protocols[protocol - 1]; + if (proto == NULL) /* Not compiled in? */ + return -ENOPROTOOPT; + + priv = rtdm_fd_to_private(fd); + priv->proto = proto; + priv->state = kmalloc(proto->proto_statesz, GFP_KERNEL); + if (priv->state == NULL) + return -ENOMEM; + + xnselect_init(&priv->send_block); + xnselect_init(&priv->recv_block); + + ret = proto->proto_ops.socket(fd); + if (ret) + kfree(priv->state); + + return ret; +} + +static void rtipc_close(struct rtdm_fd *fd) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + /* + * CAUTION: priv->state shall be released by the + * proto_ops.close() handler when appropriate (which may be + * done asynchronously later, see XDDP). 
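+ * + * As an illustrative sketch only (not part of this driver), a + * protocol honoring this contract may free its state inline + * from the close() handler: + * + * static void proto_close(struct rtdm_fd *fd) + * { + * struct rtipc_private *p = rtdm_fd_to_private(fd); + * kfree(p->state); + * } + * + * or keep the state alive and release it from a deferred + * handler once all in-flight messages are drained, as XDDP + * does through its release callback.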
+ */ + priv->proto->proto_ops.close(fd); + xnselect_destroy(&priv->recv_block); + xnselect_destroy(&priv->send_block); +} + +static ssize_t rtipc_recvmsg(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + return priv->proto->proto_ops.recvmsg(fd, msg, flags); +} + +static ssize_t rtipc_sendmsg(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + return priv->proto->proto_ops.sendmsg(fd, msg, flags); +} + +static ssize_t rtipc_read(struct rtdm_fd *fd, + void *buf, size_t len) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + return priv->proto->proto_ops.read(fd, buf, len); +} + +static ssize_t rtipc_write(struct rtdm_fd *fd, + const void *buf, size_t len) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + return priv->proto->proto_ops.write(fd, buf, len); +} + +static int rtipc_ioctl(struct rtdm_fd *fd, + unsigned int request, void *arg) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + return priv->proto->proto_ops.ioctl(fd, request, arg); +} + +static int rtipc_select(struct rtdm_fd *fd, struct xnselector *selector, + unsigned int type, unsigned int index) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct xnselect_binding *binding; + unsigned int pollstate, mask; + struct xnselect *block; + spl_t s; + int ret; + + if (type != XNSELECT_READ && type != XNSELECT_WRITE) + return -EINVAL; + + binding = xnmalloc(sizeof(*binding)); + if (binding == NULL) + return -ENOMEM; + + cobalt_atomic_enter(s); + + pollstate = priv->proto->proto_ops.pollstate(fd); + + if (type == XNSELECT_READ) { + mask = pollstate & POLLIN; + block = &priv->recv_block; + } else { + mask = pollstate & POLLOUT; + block = &priv->send_block; + } + + ret = xnselect_bind(block, binding, selector, type, index, mask); + + cobalt_atomic_leave(s); + + if (ret) + xnfree(binding); + + return ret; +} + +static struct rtdm_driver rtipc_driver = { + .profile_info = RTDM_PROFILE_INFO(rtipc, + RTDM_CLASS_RTIPC, + RTDM_SUBCLASS_GENERIC, + 1), + .device_flags = RTDM_PROTOCOL_DEVICE, + .device_count = 1, + .context_size = sizeof(struct rtipc_private), + .protocol_family = PF_RTIPC, + .socket_type = SOCK_DGRAM, + .ops = { + .socket = rtipc_socket, + .close = rtipc_close, + .recvmsg_rt = rtipc_recvmsg, + .recvmsg_nrt = NULL, + .sendmsg_rt = rtipc_sendmsg, + .sendmsg_nrt = NULL, + .ioctl_rt = rtipc_ioctl, + .ioctl_nrt = rtipc_ioctl, + .read_rt = rtipc_read, + .read_nrt = NULL, + .write_rt = rtipc_write, + .write_nrt = NULL, + .select = rtipc_select, + }, +}; + +static struct rtdm_device device = { + .driver = &rtipc_driver, + .label = "rtipc", +}; + +int __init __rtipc_init(void) +{ + int ret, n; + + if (!rtdm_available()) + return -ENOSYS; + + for (n = 0; n < IPCPROTO_MAX; n++) { + if (protocols[n] && protocols[n]->proto_init) { + ret = protocols[n]->proto_init(); + if (ret) + return ret; + } + } + + return rtdm_dev_register(&device); +} + +void __exit __rtipc_exit(void) +{ + int n; + + rtdm_dev_unregister(&device); + + for (n = 0; n < IPCPROTO_MAX; n++) { + if (protocols[n] && protocols[n]->proto_exit) + protocols[n]->proto_exit(); + } +} + +module_init(__rtipc_init); +module_exit(__rtipc_exit); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c new file mode 100644 index 0000000..ae5b720 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c @@ -0,0 +1,1132 @@ +/** + * This file is part of 
the Xenomai project. + * + * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <linux/string.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/time.h> +#include <cobalt/kernel/heap.h> +#include <cobalt/kernel/bufd.h> +#include <cobalt/kernel/pipe.h> +#include <rtdm/ipc.h> +#include "internal.h" + +#define XDDP_SOCKET_MAGIC 0xa21a21a2 + +struct xddp_message { + struct xnpipe_mh mh; + char data[]; +}; + +struct xddp_socket { + int magic; + struct sockaddr_ipc name; + struct sockaddr_ipc peer; + + int minor; + size_t poolsz; + xnhandle_t handle; + char label[XNOBJECT_NAME_LEN]; + struct rtdm_fd *fd; /* i.e. RTDM socket fd */ + + struct xddp_message *buffer; + int buffer_port; + struct xnheap *bufpool; + struct xnheap privpool; + size_t fillsz; + size_t curbufsz; /* Current streaming buffer size */ + u_long status; + rtdm_lock_t lock; + + nanosecs_rel_t timeout; /* connect()/recvmsg() timeout */ + size_t reqbufsz; /* Requested streaming buffer size */ + + int (*monitor)(struct rtdm_fd *fd, int event, long arg); + struct rtipc_private *priv; +}; + +static struct sockaddr_ipc nullsa = { + .sipc_family = AF_RTIPC, + .sipc_port = -1 +}; + +static struct rtdm_fd *portmap[CONFIG_XENO_OPT_PIPE_NRDEV]; /* indexes RTDM fildes */ + +#define _XDDP_SYNCWAIT 0 +#define _XDDP_ATOMIC 1 +#define _XDDP_BINDING 2 +#define _XDDP_BOUND 3 +#define _XDDP_CONNECTED 4 + +#ifdef CONFIG_XENO_OPT_VFILE + +static char *__xddp_link_target(void *obj) +{ + struct xddp_socket *sk = obj; + + return kasformat("/dev/rtp%d", sk->minor); +} + +extern struct xnptree rtipc_ptree; + +static struct xnpnode_link __xddp_pnode = { + .node = { + .dirname = "xddp", + .root = &rtipc_ptree, + .ops = &xnregistry_vlink_ops, + }, + .target = __xddp_link_target, +}; + +#else /* !CONFIG_XENO_OPT_VFILE */ + +static struct xnpnode_link __xddp_pnode = { + .node = { + .dirname = "xddp", + }, +}; + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +static void *__xddp_alloc_handler(size_t size, void *skarg) /* nklock free */ +{ + struct xddp_socket *sk = skarg; + void *buf; + + /* Try to allocate memory for the incoming message. */ + buf = xnheap_alloc(sk->bufpool, size); + if (unlikely(buf == NULL)) { + if (sk->monitor) + sk->monitor(sk->fd, XDDP_EVTNOBUF, size); + if (size > xnheap_get_size(sk->bufpool)) + buf = (void *)-1; /* Will never succeed. 
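+ * A request larger than the whole pool can never be + * satisfied, even after every buffer is freed, so + * (void *)-1 is handed back instead of NULL to report + * a permanent rather than transient shortage + * (explanatory note on the alloc_ibuf contract).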
*/ + } + + return buf; +} + +static int __xddp_resize_streambuf(struct xddp_socket *sk) /* sk->lock held */ +{ + if (sk->buffer) + xnheap_free(sk->bufpool, sk->buffer); + + if (sk->reqbufsz == 0) { + sk->buffer = NULL; + sk->curbufsz = 0; + return 0; + } + + sk->buffer = xnheap_alloc(sk->bufpool, sk->reqbufsz); + if (sk->buffer == NULL) { + sk->curbufsz = 0; + return -ENOMEM; + } + + sk->curbufsz = sk->reqbufsz; + + return 0; +} + +static void __xddp_free_handler(void *buf, void *skarg) /* nklock free */ +{ + struct xddp_socket *sk = skarg; + rtdm_lockctx_t s; + + if (buf != sk->buffer) { + xnheap_free(sk->bufpool, buf); + return; + } + + /* Reset the streaming buffer. */ + + rtdm_lock_get_irqsave(&sk->lock, s); + + sk->fillsz = 0; + sk->buffer_port = -1; + __clear_bit(_XDDP_SYNCWAIT, &sk->status); + __clear_bit(_XDDP_ATOMIC, &sk->status); + + /* + * If a XDDP_BUFSZ request is pending, resize the streaming + * buffer on-the-fly. + */ + if (unlikely(sk->curbufsz != sk->reqbufsz)) + __xddp_resize_streambuf(sk); + + rtdm_lock_put_irqrestore(&sk->lock, s); +} + +static void __xddp_output_handler(struct xnpipe_mh *mh, void *skarg) /* nklock held */ +{ + struct xddp_socket *sk = skarg; + + if (sk->monitor) + sk->monitor(sk->fd, XDDP_EVTOUT, xnpipe_m_size(mh)); +} + +static int __xddp_input_handler(struct xnpipe_mh *mh, int retval, void *skarg) /* nklock held */ +{ + struct xddp_socket *sk = skarg; + + if (sk->monitor) { + if (retval == 0) + /* Callee may alter the return value passed to userland. */ + retval = sk->monitor(sk->fd, XDDP_EVTIN, xnpipe_m_size(mh)); + else if (retval == -EPIPE && mh == NULL) + sk->monitor(sk->fd, XDDP_EVTDOWN, 0); + } + + if (retval == 0 && + (__xnpipe_pollstate(sk->minor) & POLLIN) != 0 && + xnselect_signal(&sk->priv->recv_block, POLLIN)) + xnsched_run(); + + return retval; +} + +static void __xddp_release_handler(void *skarg) /* nklock free */ +{ + struct xddp_socket *sk = skarg; + void *poolmem; + u32 poolsz; + + if (sk->bufpool == &sk->privpool) { + poolmem = xnheap_get_membase(&sk->privpool); + poolsz = xnheap_get_size(&sk->privpool); + xnheap_destroy(&sk->privpool); + xnheap_vfree(poolmem); + } else if (sk->buffer) + xnfree(sk->buffer); + + kfree(sk); +} + +static int xddp_socket(struct rtdm_fd *fd) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct xddp_socket *sk = priv->state; + + sk->magic = XDDP_SOCKET_MAGIC; + sk->name = nullsa; /* Unbound */ + sk->peer = nullsa; + sk->minor = -1; + sk->handle = 0; + *sk->label = 0; + sk->poolsz = 0; + sk->buffer = NULL; + sk->buffer_port = -1; + sk->bufpool = NULL; + sk->fillsz = 0; + sk->status = 0; + sk->timeout = RTDM_TIMEOUT_INFINITE; + sk->curbufsz = 0; + sk->reqbufsz = 0; + sk->monitor = NULL; + rtdm_lock_init(&sk->lock); + sk->priv = priv; + + return 0; +} + +static void xddp_close(struct rtdm_fd *fd) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct xddp_socket *sk = priv->state; + rtdm_lockctx_t s; + + sk->monitor = NULL; + + if (!test_bit(_XDDP_BOUND, &sk->status)) + return; + + cobalt_atomic_enter(s); + portmap[sk->name.sipc_port] = NULL; + cobalt_atomic_leave(s); + + if (sk->handle) + xnregistry_remove(sk->handle); + + xnpipe_disconnect(sk->minor); +} + +static ssize_t __xddp_recvmsg(struct rtdm_fd *fd, + struct iovec *iov, int iovlen, int flags, + struct sockaddr_ipc *saddr) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct xddp_message *mbuf = NULL; /* Fake GCC */ + struct xddp_socket *sk = priv->state; + ssize_t maxlen, len, wrlen, vlen; + nanosecs_rel_t 
timeout; + struct xnpipe_mh *mh; + int nvec, rdoff, ret; + struct xnbufd bufd; + spl_t s; + + if (!test_bit(_XDDP_BOUND, &sk->status)) + return -EAGAIN; + + maxlen = rtdm_get_iov_flatlen(iov, iovlen); + if (maxlen == 0) + return 0; + + timeout = (flags & MSG_DONTWAIT) ? RTDM_TIMEOUT_NONE : sk->timeout; + /* Pull heading message from the input queue. */ + len = xnpipe_recv(sk->minor, &mh, timeout); + if (len < 0) + return len == -EIDRM ? 0 : len; + if (len > maxlen) { + ret = -ENOBUFS; + goto out; + } + + mbuf = container_of(mh, struct xddp_message, mh); + + if (saddr) + *saddr = sk->name; + + /* Write "len" bytes from mbuf->data to the vector cells */ + for (ret = 0, nvec = 0, rdoff = 0, wrlen = len; + nvec < iovlen && wrlen > 0; nvec++) { + if (iov[nvec].iov_len == 0) + continue; + vlen = wrlen >= iov[nvec].iov_len ? iov[nvec].iov_len : wrlen; + if (rtdm_fd_is_user(fd)) { + xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen); + ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen); + xnbufd_unmap_uread(&bufd); + } else { + xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen); + ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen); + xnbufd_unmap_kread(&bufd); + } + if (ret < 0) + goto out; + iov[nvec].iov_base += vlen; + iov[nvec].iov_len -= vlen; + wrlen -= vlen; + rdoff += vlen; + } +out: + xnheap_free(sk->bufpool, mbuf); + cobalt_atomic_enter(s); + if ((__xnpipe_pollstate(sk->minor) & POLLIN) == 0 && + xnselect_signal(&priv->recv_block, 0)) + xnsched_run(); + cobalt_atomic_leave(s); + + return ret ?: len; +} + +static ssize_t xddp_recvmsg(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags) +{ + struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov; + struct sockaddr_ipc saddr; + ssize_t ret; + + if (flags & ~MSG_DONTWAIT) + return -EINVAL; + + if (msg->msg_name) { + if (msg->msg_namelen < sizeof(struct sockaddr_ipc)) + return -EINVAL; + } else if (msg->msg_namelen != 0) + return -EINVAL; + + if (msg->msg_iovlen >= UIO_MAXIOV) + return -EINVAL; + + /* Copy I/O vector in */ + ret = rtdm_get_iovec(fd, &iov, msg, iov_fast); + if (ret) + return ret; + + ret = __xddp_recvmsg(fd, iov, msg->msg_iovlen, flags, &saddr); + if (ret <= 0) { + rtdm_drop_iovec(iov, iov_fast); + return ret; + } + + /* Copy the updated I/O vector back */ + if (rtdm_put_iovec(fd, iov, msg, iov_fast)) + return -EFAULT; + + /* Copy the source address if required. */ + if (msg->msg_name) { + if (rtipc_put_arg(fd, msg->msg_name, &saddr, sizeof(saddr))) + return -EFAULT; + msg->msg_namelen = sizeof(struct sockaddr_ipc); + } + + return ret; +} + +static ssize_t xddp_read(struct rtdm_fd *fd, void *buf, size_t len) +{ + struct iovec iov = { .iov_base = buf, .iov_len = len }; + + return __xddp_recvmsg(fd, &iov, 1, 0, NULL); +} + +static ssize_t __xddp_stream(struct xddp_socket *sk, + int from, struct xnbufd *bufd) +{ + struct xddp_message *mbuf; + size_t fillptr, rembytes; + rtdm_lockctx_t s; + ssize_t outbytes; + int ret; + + /* + * xnpipe_msend() and xnpipe_mfixup() routines will only grab + * the nklock directly or indirectly, so holding our socket + * lock across those calls is fine. + */ + rtdm_lock_get_irqsave(&sk->lock, s); + + /* + * There are two cases in which we must remove the cork + * unconditionally and send the incoming data as a standalone + * datagram: the destination port does not support streaming, + * or its streaming buffer is already filled with data issued + * from another port. 
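+ * + * Seen from user space, the cork is driven by MSG_MORE + * (illustrative sketch, assuming a peer socket bound with a + * nonzero XDDP_BUFSZ; error checking omitted): + * + * send(s, frag1, n1, MSG_MORE); coalesced into the buffer + * send(s, frag2, n2, MSG_MORE); appended to the same buffer + * send(s, dgram, n3, 0); sent as a standalone datagram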
+ */ + if (sk->curbufsz == 0 || + (sk->buffer_port >= 0 && sk->buffer_port != from)) { + /* This will end up into a standalone datagram. */ + outbytes = 0; + goto out; + } + + mbuf = sk->buffer; + rembytes = sk->curbufsz - sizeof(*mbuf) - sk->fillsz; + outbytes = bufd->b_len > rembytes ? rembytes : bufd->b_len; + if (likely(outbytes > 0)) { + repeat: + /* Mark the beginning of a should-be-atomic section. */ + __set_bit(_XDDP_ATOMIC, &sk->status); + fillptr = sk->fillsz; + sk->fillsz += outbytes; + + rtdm_lock_put_irqrestore(&sk->lock, s); + ret = xnbufd_copy_to_kmem(mbuf->data + fillptr, + bufd, outbytes); + rtdm_lock_get_irqsave(&sk->lock, s); + + if (ret < 0) { + outbytes = ret; + __clear_bit(_XDDP_ATOMIC, &sk->status); + goto out; + } + + /* We haven't been atomic, let's try again. */ + if (!__test_and_clear_bit(_XDDP_ATOMIC, &sk->status)) + goto repeat; + + if (__test_and_set_bit(_XDDP_SYNCWAIT, &sk->status)) + outbytes = xnpipe_mfixup(sk->minor, + &mbuf->mh, outbytes); + else { + sk->buffer_port = from; + outbytes = xnpipe_send(sk->minor, &mbuf->mh, + outbytes + sizeof(*mbuf), + XNPIPE_NORMAL); + if (outbytes > 0) + outbytes -= sizeof(*mbuf); + } + } + +out: + rtdm_lock_put_irqrestore(&sk->lock, s); + + return outbytes; +} + +static ssize_t __xddp_sendmsg(struct rtdm_fd *fd, + struct iovec *iov, int iovlen, int flags, + const struct sockaddr_ipc *daddr) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + ssize_t len, rdlen, wrlen, vlen, ret, sublen; + struct xddp_socket *sk = priv->state; + struct xddp_message *mbuf; + struct xddp_socket *rsk; + struct rtdm_fd *rfd; + int nvec, to, from; + struct xnbufd bufd; + rtdm_lockctx_t s; + + len = rtdm_get_iov_flatlen(iov, iovlen); + if (len == 0) + return 0; + + from = sk->name.sipc_port; + to = daddr->sipc_port; + + cobalt_atomic_enter(s); + rfd = portmap[to]; + if (rfd && rtdm_fd_lock(rfd) < 0) + rfd = NULL; + cobalt_atomic_leave(s); + + if (rfd == NULL) + return -ECONNRESET; + + rsk = rtipc_fd_to_state(rfd); + if (!test_bit(_XDDP_BOUND, &rsk->status)) { + rtdm_fd_unlock(rfd); + return -ECONNREFUSED; + } + + sublen = len; + nvec = 0; + + /* + * If active, the streaming buffer is already pending on the + * output queue, so we basically have nothing to do during a + * MSG_MORE -> MSG_NONE transition. Therefore, we only have to + * take care of filling that buffer when MSG_MORE is + * given. Yummie. + */ + if (flags & MSG_MORE) { + for (rdlen = sublen, wrlen = 0; + nvec < iovlen && rdlen > 0; nvec++) { + if (iov[nvec].iov_len == 0) + continue; + vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen; + if (rtdm_fd_is_user(fd)) { + xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen); + ret = __xddp_stream(rsk, from, &bufd); + xnbufd_unmap_uread(&bufd); + } else { + xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen); + ret = __xddp_stream(rsk, from, &bufd); + xnbufd_unmap_kread(&bufd); + } + if (ret < 0) + goto fail_unlock; + wrlen += ret; + rdlen -= ret; + iov[nvec].iov_base += ret; + iov[nvec].iov_len -= ret; + /* + * In case of a short write to the streaming + * buffer, send the unsent part as a + * standalone datagram. 
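+ * For instance (illustrative numbers), with 100 bytes + * left in the streaming buffer, a 300-byte MSG_MORE + * write streams the first 100 bytes and lets the + * remaining 200 fall through to the standalone-datagram + * path below.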
+ */ + if (ret < vlen) { + sublen = rdlen; + goto nostream; + } + } + len = wrlen; + goto done; + } + +nostream: + mbuf = xnheap_alloc(rsk->bufpool, sublen + sizeof(*mbuf)); + if (unlikely(mbuf == NULL)) { + ret = -ENOMEM; + goto fail_unlock; + } + + /* + * Move "sublen" bytes to mbuf->data from the vector cells + */ + for (rdlen = sublen, wrlen = 0; nvec < iovlen && rdlen > 0; nvec++) { + if (iov[nvec].iov_len == 0) + continue; + vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen; + if (rtdm_fd_is_user(fd)) { + xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen); + ret = xnbufd_copy_to_kmem(mbuf->data + wrlen, &bufd, vlen); + xnbufd_unmap_uread(&bufd); + } else { + xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen); + ret = xnbufd_copy_to_kmem(mbuf->data + wrlen, &bufd, vlen); + xnbufd_unmap_kread(&bufd); + } + if (ret < 0) + goto fail_freebuf; + iov[nvec].iov_base += vlen; + iov[nvec].iov_len -= vlen; + rdlen -= vlen; + wrlen += vlen; + } + + ret = xnpipe_send(rsk->minor, &mbuf->mh, + sublen + sizeof(*mbuf), + (flags & MSG_OOB) ? + XNPIPE_URGENT : XNPIPE_NORMAL); + + if (unlikely(ret < 0)) { + fail_freebuf: + xnheap_free(rsk->bufpool, mbuf); + fail_unlock: + rtdm_fd_unlock(rfd); + return ret; + } +done: + rtdm_fd_unlock(rfd); + + return len; +} + +static ssize_t xddp_sendmsg(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov; + struct xddp_socket *sk = priv->state; + struct sockaddr_ipc daddr; + ssize_t ret; + + /* + * We accept MSG_DONTWAIT, but do not care about it, since + * writing to the real-time endpoint of a message pipe must be + * a non-blocking operation. + */ + if (flags & ~(MSG_MORE | MSG_OOB | MSG_DONTWAIT)) + return -EINVAL; + + /* + * MSG_MORE and MSG_OOB are mutually exclusive in our + * implementation. + */ + if ((flags & (MSG_MORE | MSG_OOB)) == (MSG_MORE | MSG_OOB)) + return -EINVAL; + + if (msg->msg_name) { + if (msg->msg_namelen != sizeof(struct sockaddr_ipc)) + return -EINVAL; + + /* Fetch the destination address to send to. */ + if (rtipc_get_arg(fd, &daddr, msg->msg_name, sizeof(daddr))) + return -EFAULT; + + if (daddr.sipc_port < 0 || + daddr.sipc_port >= CONFIG_XENO_OPT_PIPE_NRDEV) + return -EINVAL; + } else { + if (msg->msg_namelen != 0) + return -EINVAL; + daddr = sk->peer; + if (daddr.sipc_port < 0) + return -EDESTADDRREQ; + } + + if (msg->msg_iovlen >= UIO_MAXIOV) + return -EINVAL; + + /* Copy I/O vector in */ + ret = rtdm_get_iovec(fd, &iov, msg, iov_fast); + if (ret) + return ret; + + ret = __xddp_sendmsg(fd, iov, msg->msg_iovlen, flags, &daddr); + if (ret <= 0) { + rtdm_drop_iovec(iov, iov_fast); + return ret; + } + + /* Copy updated I/O vector back */ + return rtdm_put_iovec(fd, iov, msg, iov_fast) ?: ret; +} + +static ssize_t xddp_write(struct rtdm_fd *fd, + const void *buf, size_t len) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct iovec iov = { .iov_base = (void *)buf, .iov_len = len }; + struct xddp_socket *sk = priv->state; + + if (sk->peer.sipc_port < 0) + return -EDESTADDRREQ; + + return __xddp_sendmsg(fd, &iov, 1, 0, &sk->peer); +} + +static int __xddp_bind_socket(struct rtipc_private *priv, + struct sockaddr_ipc *sa) +{ + struct xddp_socket *sk = priv->state; + struct xnpipe_operations ops; + rtdm_lockctx_t s; + size_t poolsz; + void *poolmem; + int ret = 0; + + if (sa->sipc_family != AF_RTIPC) + return -EINVAL; + + /* Allow special port -1 for auto-selection. 
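+ * (illustrative note: binding with sipc_port == -1 makes + * xnpipe_connect() pick a free minor below, which is then + * copied back into the socket name and can be read back via + * _RTIOC_GETSOCKNAME)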
*/ + if (sa->sipc_port < -1 || + sa->sipc_port >= CONFIG_XENO_OPT_PIPE_NRDEV) + return -EINVAL; + + cobalt_atomic_enter(s); + if (test_bit(_XDDP_BOUND, &sk->status) || + __test_and_set_bit(_XDDP_BINDING, &sk->status)) + ret = -EADDRINUSE; + cobalt_atomic_leave(s); + if (ret) + return ret; + + poolsz = sk->poolsz; + if (poolsz > 0) { + poolsz = PAGE_ALIGN(poolsz); + poolsz += PAGE_ALIGN(sk->reqbufsz); + poolmem = xnheap_vmalloc(poolsz); + if (poolmem == NULL) { + ret = -ENOMEM; + goto fail; + } + + ret = xnheap_init(&sk->privpool, poolmem, poolsz); + if (ret) { + xnheap_vfree(poolmem); + goto fail; + } + + sk->bufpool = &sk->privpool; + } else + sk->bufpool = &cobalt_heap; + + if (sk->reqbufsz > 0) { + sk->buffer = xnheap_alloc(sk->bufpool, sk->reqbufsz); + if (sk->buffer == NULL) { + ret = -ENOMEM; + goto fail_freeheap; + } + sk->curbufsz = sk->reqbufsz; + } + + sk->fd = rtdm_private_to_fd(priv); + + ops.output = &__xddp_output_handler; + ops.input = &__xddp_input_handler; + ops.alloc_ibuf = &__xddp_alloc_handler; + ops.free_ibuf = &__xddp_free_handler; + ops.free_obuf = &__xddp_free_handler; + ops.release = &__xddp_release_handler; + + ret = xnpipe_connect(sa->sipc_port, &ops, sk); + if (ret < 0) { + if (ret == -EBUSY) + ret = -EADDRINUSE; + fail_freeheap: + if (poolsz > 0) { + xnheap_destroy(&sk->privpool); + xnheap_vfree(poolmem); + } + fail: + clear_bit(_XDDP_BINDING, &sk->status); + return ret; + } + + sk->minor = ret; + sa->sipc_port = ret; + sk->name = *sa; + /* Set default destination if unset at binding time. */ + if (sk->peer.sipc_port < 0) + sk->peer = *sa; + + if (poolsz > 0) + xnheap_set_name(sk->bufpool, "xddp-pool@%d", sa->sipc_port); + + if (*sk->label) { + ret = xnregistry_enter(sk->label, sk, &sk->handle, + &__xddp_pnode.node); + if (ret) { + /* The release handler will cleanup the pool for us. */ + xnpipe_disconnect(sk->minor); + return ret; + } + } + + cobalt_atomic_enter(s); + portmap[sk->minor] = rtdm_private_to_fd(priv); + __clear_bit(_XDDP_BINDING, &sk->status); + __set_bit(_XDDP_BOUND, &sk->status); + if (xnselect_signal(&priv->send_block, POLLOUT)) + xnsched_run(); + cobalt_atomic_leave(s); + + return 0; +} + +static int __xddp_connect_socket(struct xddp_socket *sk, + struct sockaddr_ipc *sa) +{ + struct sockaddr_ipc _sa; + struct xddp_socket *rsk; + int ret, resched = 0; + rtdm_lockctx_t s; + xnhandle_t h; + + if (sa == NULL) { + _sa = nullsa; + sa = &_sa; + goto set_assoc; + } + + if (sa->sipc_family != AF_RTIPC) + return -EINVAL; + + if (sa->sipc_port < -1 || + sa->sipc_port >= CONFIG_XENO_OPT_PIPE_NRDEV) + return -EINVAL; + /* + * - If a valid sipc_port is passed in the [0..NRDEV-1] range, + * it is used verbatim and the connection succeeds + * immediately, regardless of whether the destination is + * bound at the time of the call. + * + * - If sipc_port is -1 and a label was set via XDDP_LABEL, + * connect() blocks for the requested amount of time (see + * SO_RCVTIMEO) until a socket is bound to the same label. + * + * - If sipc_port is -1 and no label is given, the default + * destination address is cleared, meaning that any subsequent + * write() to the socket will return -EDESTADDRREQ, until a + * valid destination address is set via connect() or bind(). + * + * - In all other cases, -EINVAL is returned. 
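+ * + * A hypothetical user-space sequence for the label-based case + * (sketch only; "mylabel" and the omitted error handling are + * illustrative): + * + * struct rtipc_port_label pl = { .label = "mylabel" }; + * struct sockaddr_ipc sa = { + * .sipc_family = AF_RTIPC, + * .sipc_port = -1, + * }; + * setsockopt(s, SOL_XDDP, XDDP_LABEL, &pl, sizeof(pl)); + * connect(s, (struct sockaddr *)&sa, sizeof(sa)); + * + * connect() then waits, up to SO_RCVTIMEO, until some peer + * binds a socket to "mylabel".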
+ */ + if (sa->sipc_port < 0 && *sk->label) { + ret = xnregistry_bind(sk->label, + sk->timeout, XN_RELATIVE, &h); + if (ret) + return ret; + + cobalt_atomic_enter(s); + rsk = xnregistry_lookup(h, NULL); + if (rsk == NULL || rsk->magic != XDDP_SOCKET_MAGIC) + ret = -EINVAL; + else { + /* Fetch labeled port number. */ + sa->sipc_port = rsk->minor; + resched = xnselect_signal(&sk->priv->send_block, POLLOUT); + } + cobalt_atomic_leave(s); + if (ret) + return ret; + } else if (sa->sipc_port < 0) + sa = &nullsa; +set_assoc: + cobalt_atomic_enter(s); + if (!test_bit(_XDDP_BOUND, &sk->status)) + /* Set default name. */ + sk->name = *sa; + /* Set default destination. */ + sk->peer = *sa; + if (sa->sipc_port < 0) + __clear_bit(_XDDP_CONNECTED, &sk->status); + else + __set_bit(_XDDP_CONNECTED, &sk->status); + if (resched) + xnsched_run(); + cobalt_atomic_leave(s); + + return 0; +} + +static int __xddp_setsockopt(struct xddp_socket *sk, + struct rtdm_fd *fd, + void *arg) +{ + int (*monitor)(struct rtdm_fd *fd, int event, long arg); + struct _rtdm_setsockopt_args sopt; + struct rtipc_port_label plabel; + struct __kernel_old_timeval tv; + rtdm_lockctx_t s; + size_t len; + int ret; + + ret = rtipc_get_sockoptin(fd, &sopt, arg); + if (ret) + return ret; + + if (sopt.level == SOL_SOCKET) { + switch (sopt.optname) { + + case SO_RCVTIMEO_OLD: + ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen); + if (ret) + return ret; + sk->timeout = rtipc_timeval_to_ns(&tv); + break; + + default: + ret = -EINVAL; + } + + return ret; + } + + if (sopt.level != SOL_XDDP) + return -ENOPROTOOPT; + + switch (sopt.optname) { + + case XDDP_BUFSZ: + ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen); + if (ret) + return ret; + if (len > 0) { + len += sizeof(struct xddp_message); + if (sk->bufpool && + len > xnheap_get_size(sk->bufpool)) { + return -EINVAL; + } + } + rtdm_lock_get_irqsave(&sk->lock, s); + sk->reqbufsz = len; + if (len != sk->curbufsz && + !test_bit(_XDDP_SYNCWAIT, &sk->status) && + test_bit(_XDDP_BOUND, &sk->status)) + ret = __xddp_resize_streambuf(sk); + rtdm_lock_put_irqrestore(&sk->lock, s); + break; + + case XDDP_POOLSZ: + ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen); + if (ret) + return ret; + if (len == 0) + return -EINVAL; + cobalt_atomic_enter(s); + if (test_bit(_XDDP_BOUND, &sk->status) || + test_bit(_XDDP_BINDING, &sk->status)) + ret = -EALREADY; + else + sk->poolsz = len; + cobalt_atomic_leave(s); + break; + + case XDDP_MONITOR: + /* Monitoring is available from kernel-space only. 
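+ * For illustration, a companion kernel driver could install + * int mon(struct rtdm_fd *fd, int event, long arg); + * through this option (hypothetical callback name) and would + * then be notified of XDDP_EVTIN, XDDP_EVTOUT, XDDP_EVTDOWN + * and XDDP_EVTNOBUF events; the option merely stores the + * function pointer in sk->monitor.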
*/ + if (rtdm_fd_is_user(fd)) + return -EPERM; + if (sopt.optlen != sizeof(monitor)) + return -EINVAL; + if (rtipc_get_arg(NULL, &monitor, sopt.optval, sizeof(monitor))) + return -EFAULT; + sk->monitor = monitor; + break; + + case XDDP_LABEL: + if (sopt.optlen < sizeof(plabel)) + return -EINVAL; + if (rtipc_get_arg(fd, &plabel, sopt.optval, sizeof(plabel))) + return -EFAULT; + cobalt_atomic_enter(s); + if (test_bit(_XDDP_BOUND, &sk->status) || + test_bit(_XDDP_BINDING, &sk->status)) + ret = -EALREADY; + else { + strcpy(sk->label, plabel.label); + sk->label[XNOBJECT_NAME_LEN-1] = 0; + } + cobalt_atomic_leave(s); + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static int __xddp_getsockopt(struct xddp_socket *sk, + struct rtdm_fd *fd, + void *arg) +{ + struct _rtdm_getsockopt_args sopt; + struct rtipc_port_label plabel; + struct __kernel_old_timeval tv; + rtdm_lockctx_t s; + socklen_t len; + int ret; + + ret = rtipc_get_sockoptout(fd, &sopt, arg); + if (ret) + return ret; + + if (rtipc_get_arg(fd, &len, sopt.optlen, sizeof(len))) + return -EFAULT; + + if (sopt.level == SOL_SOCKET) { + switch (sopt.optname) { + + case SO_RCVTIMEO_OLD: + rtipc_ns_to_timeval(&tv, sk->timeout); + ret = rtipc_put_timeval(fd, sopt.optval, &tv, len); + if (ret) + return ret; + break; + + default: + ret = -EINVAL; + } + + return ret; + } + + if (sopt.level != SOL_XDDP) + return -ENOPROTOOPT; + + switch (sopt.optname) { + + case XDDP_LABEL: + if (len < sizeof(plabel)) + return -EINVAL; + cobalt_atomic_enter(s); + strcpy(plabel.label, sk->label); + cobalt_atomic_leave(s); + if (rtipc_put_arg(fd, sopt.optval, &plabel, sizeof(plabel))) + return -EFAULT; + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static int __xddp_ioctl(struct rtdm_fd *fd, + unsigned int request, void *arg) +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct sockaddr_ipc saddr, *saddrp = &saddr; + struct xddp_socket *sk = priv->state; + int ret = 0; + + switch (request) { + + COMPAT_CASE(_RTIOC_CONNECT): + ret = rtipc_get_sockaddr(fd, &saddrp, arg); + if (ret == 0) + ret = __xddp_connect_socket(sk, saddrp); + break; + + COMPAT_CASE(_RTIOC_BIND): + ret = rtipc_get_sockaddr(fd, &saddrp, arg); + if (ret) + return ret; + if (saddrp == NULL) + return -EFAULT; + ret = __xddp_bind_socket(priv, saddrp); + break; + + COMPAT_CASE(_RTIOC_GETSOCKNAME): + ret = rtipc_put_sockaddr(fd, arg, &sk->name); + break; + + COMPAT_CASE(_RTIOC_GETPEERNAME): + ret = rtipc_put_sockaddr(fd, arg, &sk->peer); + break; + + COMPAT_CASE(_RTIOC_SETSOCKOPT): + ret = __xddp_setsockopt(sk, fd, arg); + break; + + COMPAT_CASE(_RTIOC_GETSOCKOPT): + ret = __xddp_getsockopt(sk, fd, arg); + break; + + case _RTIOC_LISTEN: + COMPAT_CASE(_RTIOC_ACCEPT): + ret = -EOPNOTSUPP; + break; + + case _RTIOC_SHUTDOWN: + ret = -ENOTCONN; + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static int xddp_ioctl(struct rtdm_fd *fd, + unsigned int request, void *arg) +{ + int ret; + + switch (request) { + COMPAT_CASE(_RTIOC_BIND): + if (rtdm_in_rt_context()) + return -ENOSYS; /* Try downgrading to NRT */ + fallthrough; + default: + ret = __xddp_ioctl(fd, request, arg); + } + + return ret; +} + +static unsigned int xddp_pollstate(struct rtdm_fd *fd) /* atomic */ +{ + struct rtipc_private *priv = rtdm_fd_to_private(fd); + struct xddp_socket *sk = priv->state, *rsk; + unsigned int mask = 0, pollstate; + struct rtdm_fd *rfd; + + pollstate = __xnpipe_pollstate(sk->minor); + if (test_bit(_XDDP_BOUND, &sk->status)) + mask |= (pollstate & POLLIN); + + /* + 
* If the socket is connected, POLLOUT means that the peer + * exists, is bound and can receive data. Otherwise POLLOUT is + * always set, assuming the client is likely to use explicit + * addressing in send operations. + */ + if (test_bit(_XDDP_CONNECTED, &sk->status)) { + rfd = portmap[sk->peer.sipc_port]; + if (rfd) { + rsk = rtipc_fd_to_state(rfd); + mask |= (pollstate & POLLOUT); + } + } else + mask |= POLLOUT; + + return mask; +} + +struct rtipc_protocol xddp_proto_driver = { + .proto_name = "xddp", + .proto_statesz = sizeof(struct xddp_socket), + .proto_ops = { + .socket = xddp_socket, + .close = xddp_close, + .recvmsg = xddp_recvmsg, + .sendmsg = xddp_sendmsg, + .read = xddp_read, + .write = xddp_write, + .ioctl = xddp_ioctl, + .pollstate = xddp_pollstate, + } +}; diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig new file mode 100644 index 0000000..2e80324 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig @@ -0,0 +1,25 @@ +menu "RTnet" + +config XENO_DRIVERS_NET + depends on m + select NET + tristate "RTnet, TCP/IP socket interface" + +if XENO_DRIVERS_NET + +config XENO_DRIVERS_RTNET_CHECKED + bool "Internal Bug Checks" + default n + help + Switch on if you face crashes when RTnet is running or if you suspect + any other RTnet-related issues. This feature will add a few sanity + checks at critical points that will produce warnings on the kernel + console in case certain internal bugs are detected. + +source "drivers/xenomai/net/stack/Kconfig" +source "drivers/xenomai/net/drivers/Kconfig" +source "drivers/xenomai/net/addons/Kconfig" + +endif + +endmenu diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile new file mode 100644 index 0000000..94525b4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_XENO_DRIVERS_NET) += stack/ drivers/ addons/ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig new file mode 100644 index 0000000..e92f6d8 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig @@ -0,0 +1,44 @@ +menu "Add-Ons" + depends on XENO_DRIVERS_NET + +config XENO_DRIVERS_NET_ADDON_RTCAP + depends on XENO_DRIVERS_NET && m + select ETHERNET + tristate "Real-Time Capturing Support" + default n + help + This feature allows capturing real-time packets traversing the RTnet + stack. It can both be used to sniff passively on a network (in this + case you may want to enable the promisc mode of your real-time NIC via + rtifconfig) and to log the traffic the node receives and transmits + during normal operation. RTcap consists of additional hooks in the + RTnet stack and a separate module as an interface to standard network + analysis tools like Ethereal. + + For further information see Documentation/README.rtcap. + +config XENO_DRIVERS_NET_ADDON_PROXY + depends on XENO_DRIVERS_NET_RTIPV4 && m + select ETHERNET + tristate "IP protocol proxy for Linux" + default n + help + Enables a forward-to-Linux module for all IP protocols that are not + handled by the IPv4 implementation of RTnet (TCP, UDP, etc.). Only use + when you know what you are doing - it can easily break your real-time + requirements! + + See Documentation/README.rtnetproxy for further information. 
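+ + A typical bring-up, for illustration (the address is an + example only; see the header comment of proxy.c for details): + + insmod rtnetproxy.ko + ifconfig rtproxy up 192.168.0.10 netmask 255.255.255.0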
+ +config XENO_DRIVERS_NET_ADDON_PROXY_ARP + depends on XENO_DRIVERS_NET_ADDON_PROXY + bool "Enable ARP handling via protocol proxy" + default n + help + Enables ARP support for the IP protocol proxy. Incoming ARP replies + are then delivered to both the RTnet and the Linux network stacks, + but only answered by Linux. The IP protocol proxy gets attached to + the RTnet device specified by the module parameter "rtdev_attach", + rteth0 by default. + +endmenu diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile new file mode 100644 index 0000000..1f3939b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile @@ -0,0 +1,9 @@ +ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include + +obj-$(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP) += rtcap.o + +rtcap-y := cap.o + +obj-$(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY) += rtnetproxy.o + +rtnetproxy-y := proxy.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c new file mode 100644 index 0000000..3784b65 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c @@ -0,0 +1,502 @@ +/*** + * + * rtcap/rtcap.c + * + * Real-Time Capturing Interface + * + * Copyright (C) 2004, 2005 Jan Kiszka <jan.kiszka@web.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/sched.h> + +#include <rtdev.h> +#include <rtnet_chrdev.h> +#include <rtnet_port.h> /* for netdev_priv() */ + +MODULE_LICENSE("GPL"); + +static unsigned int rtcap_rtskbs = 128; +module_param(rtcap_rtskbs, uint, 0444); +MODULE_PARM_DESC(rtcap_rtskbs, "Number of real-time socket buffers per " + "real-time device"); + +#define TAP_DEV 1 +#define RTMAC_TAP_DEV 2 +#define XMIT_HOOK 4 + +static rtdm_nrtsig_t cap_signal; +static struct rtskb_queue cap_queue; +static struct rtskb_pool cap_pool; + +static struct tap_device_t { + struct net_device *tap_dev; + struct net_device *rtmac_tap_dev; + struct net_device_stats tap_dev_stats; + int present; + int (*orig_xmit)(struct rtskb *skb, struct rtnet_device *dev); +} tap_device[MAX_RT_DEVICES]; + +void rtcap_rx_hook(struct rtskb *rtskb) +{ + bool trigger = false; + + if ((rtskb->cap_comp_skb = rtskb_pool_dequeue(&cap_pool)) == 0) { + tap_device[rtskb->rtdev->ifindex].tap_dev_stats.rx_dropped++; + return; + } + + if (cap_queue.first == NULL) { + cap_queue.first = rtskb; + trigger = true; + } else + cap_queue.last->cap_next = rtskb; + cap_queue.last = rtskb; + rtskb->cap_next = NULL; + + rtskb->cap_flags |= RTSKB_CAP_SHARED; + + if (trigger) + rtdm_nrtsig_pend(&cap_signal); +} + +int rtcap_xmit_hook(struct rtskb *rtskb, struct rtnet_device *rtdev) +{ + struct tap_device_t *tap_dev = &tap_device[rtskb->rtdev->ifindex]; + rtdm_lockctx_t context; + bool trigger = false; + + if ((rtskb->cap_comp_skb = rtskb_pool_dequeue(&cap_pool)) == 0) { + tap_dev->tap_dev_stats.rx_dropped++; + return tap_dev->orig_xmit(rtskb, rtdev); + } + + rtskb->cap_next = NULL; + rtskb->cap_start = rtskb->data; + rtskb->cap_len = rtskb->len; + rtskb->cap_flags |= RTSKB_CAP_SHARED; + + rtskb->time_stamp = rtdm_clock_read(); + + rtdm_lock_get_irqsave(&rtcap_lock, context); + + if (cap_queue.first == NULL) { + cap_queue.first = rtskb; + trigger = true; + } else + cap_queue.last->cap_next = rtskb; + cap_queue.last = rtskb; + + rtdm_lock_put_irqrestore(&rtcap_lock, context); + + if (trigger) + rtdm_nrtsig_pend(&cap_signal); + + return tap_dev->orig_xmit(rtskb, rtdev); +} + +int rtcap_loopback_xmit_hook(struct rtskb *rtskb, struct rtnet_device *rtdev) +{ + struct tap_device_t *tap_dev = &tap_device[rtskb->rtdev->ifindex]; + + rtskb->time_stamp = rtdm_clock_read(); + + return tap_dev->orig_xmit(rtskb, rtdev); +} + +void rtcap_kfree_rtskb(struct rtskb *rtskb) +{ + rtdm_lockctx_t context; + struct rtskb *comp_skb; + + rtdm_lock_get_irqsave(&rtcap_lock, context); + + if (rtskb->cap_flags & RTSKB_CAP_SHARED) { + rtskb->cap_flags &= ~RTSKB_CAP_SHARED; + + comp_skb = rtskb->cap_comp_skb; + + rtdm_lock_put_irqrestore(&rtcap_lock, context); + + rtskb_pool_queue_tail(comp_skb->pool, comp_skb); + + return; + } + + rtdm_lock_put_irqrestore(&rtcap_lock, context); + + rtskb->chain_end = rtskb; + rtskb_pool_queue_tail(rtskb->pool, rtskb); +} + +static void convert_timestamp(nanosecs_abs_t timestamp, struct sk_buff *skb) +{ +#ifdef CONFIG_KTIME_SCALAR + skb->tstamp.tv64 = timestamp; +#else /* !CONFIG_KTIME_SCALAR */ + unsigned long rem; + + rem = do_div(timestamp, NSEC_PER_SEC); + skb->tstamp = ktime_set((long)timestamp, rem); +#endif /* !CONFIG_KTIME_SCALAR */ +} + +static void rtcap_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg) +{ + struct rtskb *rtskb; + struct sk_buff *skb; + struct sk_buff *rtmac_skb; + struct net_device_stats *stats; + int ifindex; + 
int active; + rtdm_lockctx_t context; + + while (1) { + rtdm_lock_get_irqsave(&rtcap_lock, context); + + if ((rtskb = cap_queue.first) == NULL) { + rtdm_lock_put_irqrestore(&rtcap_lock, context); + break; + } + + cap_queue.first = rtskb->cap_next; + + rtdm_lock_put_irqrestore(&rtcap_lock, context); + + ifindex = rtskb->rtdev->ifindex; + active = tap_device[ifindex].present; + + if (active) { + if ((tap_device[ifindex].tap_dev->flags & IFF_UP) == 0) + active &= ~TAP_DEV; + if (active & RTMAC_TAP_DEV && + !(tap_device[ifindex].rtmac_tap_dev->flags & + IFF_UP)) + active &= ~RTMAC_TAP_DEV; + } + + if (active == 0) { + tap_device[ifindex].tap_dev_stats.rx_dropped++; + rtcap_kfree_rtskb(rtskb); + continue; + } + + skb = dev_alloc_skb(rtskb->cap_len); + if (skb) { + memcpy(skb_put(skb, rtskb->cap_len), rtskb->cap_start, + rtskb->cap_len); + + if (active & TAP_DEV) { + skb->dev = tap_device[ifindex].tap_dev; + skb->protocol = eth_type_trans(skb, skb->dev); + convert_timestamp(rtskb->time_stamp, skb); + + rtmac_skb = NULL; + if ((rtskb->cap_flags & + RTSKB_CAP_RTMAC_STAMP) && + (active & RTMAC_TAP_DEV)) { + rtmac_skb = skb_clone(skb, GFP_ATOMIC); + if (rtmac_skb != NULL) + convert_timestamp( + rtskb->cap_rtmac_stamp, + rtmac_skb); + } + + rtcap_kfree_rtskb(rtskb); + + stats = &tap_device[ifindex].tap_dev_stats; + stats->rx_packets++; + stats->rx_bytes += skb->len; + + if (rtmac_skb != NULL) { + rtmac_skb->dev = tap_device[ifindex] + .rtmac_tap_dev; + netif_rx(rtmac_skb); + } + netif_rx(skb); + } else if (rtskb->cap_flags & RTSKB_CAP_RTMAC_STAMP) { + skb->dev = tap_device[ifindex].rtmac_tap_dev; + skb->protocol = eth_type_trans(skb, skb->dev); + convert_timestamp(rtskb->cap_rtmac_stamp, skb); + + rtcap_kfree_rtskb(rtskb); + + stats = &tap_device[ifindex].tap_dev_stats; + stats->rx_packets++; + stats->rx_bytes += skb->len; + + netif_rx(skb); + } else { + dev_kfree_skb(skb); + rtcap_kfree_rtskb(rtskb); + } + } else { + printk("RTcap: unable to allocate linux skb\n"); + rtcap_kfree_rtskb(rtskb); + } + } +} + +static int tap_dev_open(struct net_device *dev) +{ + int err; + + err = try_module_get(THIS_MODULE); + if (err == 0) + return -EIDRM; + + dev_addr_set(dev, + (*(struct rtnet_device **)netdev_priv(dev))->dev_addr); + + return 0; +} + +static int tap_dev_stop(struct net_device *dev) +{ + module_put(THIS_MODULE); + return 0; +} + +static int tap_dev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + netif_stop_queue(dev); + return 1; +} + +static struct net_device_stats *tap_dev_get_stats(struct net_device *dev) +{ + struct rtnet_device *rtdev = *(struct rtnet_device **)netdev_priv(dev); + + return &tap_device[rtdev->ifindex].tap_dev_stats; +} + +static int tap_dev_change_mtu(struct net_device *dev, int new_mtu) +{ + return -EINVAL; +} + +static const struct net_device_ops tap_netdev_ops = { + .ndo_open = tap_dev_open, + .ndo_stop = tap_dev_stop, + .ndo_start_xmit = tap_dev_xmit, + .ndo_get_stats = tap_dev_get_stats, + .ndo_change_mtu = tap_dev_change_mtu, +}; + +static void tap_dev_setup(struct net_device *dev) +{ + ether_setup(dev); + + dev->netdev_ops = &tap_netdev_ops; + dev->mtu = 1500; + dev->flags &= ~IFF_MULTICAST; +} + +void cleanup_tap_devices(void) +{ + int i; + struct rtnet_device *rtdev; + + for (i = 0; i < MAX_RT_DEVICES; i++) + if ((tap_device[i].present & TAP_DEV) != 0) { + if ((tap_device[i].present & XMIT_HOOK) != 0) { + rtdev = *(struct rtnet_device **)netdev_priv( + tap_device[i].tap_dev); + + mutex_lock(&rtdev->nrt_lock); + rtdev->hard_start_xmit = + tap_device[i].orig_xmit; 
+ if (rtdev->features & NETIF_F_LLTX) + rtdev->start_xmit = + tap_device[i].orig_xmit; + mutex_unlock(&rtdev->nrt_lock); + + rtdev_dereference(rtdev); + } + + if ((tap_device[i].present & RTMAC_TAP_DEV) != 0) { + unregister_netdev(tap_device[i].rtmac_tap_dev); + free_netdev(tap_device[i].rtmac_tap_dev); + } + + unregister_netdev(tap_device[i].tap_dev); + free_netdev(tap_device[i].tap_dev); + } +} + +int __init rtcap_init(void) +{ + struct rtnet_device *rtdev; + struct net_device *dev; + int ret; + int devices = 0; + int i; + + printk("RTcap: real-time capturing interface\n"); + + rtskb_queue_init(&cap_queue); + + rtdm_nrtsig_init(&cap_signal, rtcap_signal_handler, NULL); + + for (i = 0; i < MAX_RT_DEVICES; i++) { + tap_device[i].present = 0; + + rtdev = rtdev_get_by_index(i); + if (rtdev != NULL) { + mutex_lock(&rtdev->nrt_lock); + + if (test_bit(PRIV_FLAG_UP, &rtdev->priv_flags)) { + mutex_unlock(&rtdev->nrt_lock); + printk("RTcap: %s busy, skipping device!\n", + rtdev->name); + rtdev_dereference(rtdev); + continue; + } + + if (rtdev->mac_priv != NULL) { + mutex_unlock(&rtdev->nrt_lock); + + printk("RTcap: RTmac discipline already active on device %s. " + "Load RTcap before RTmac!\n", + rtdev->name); + + rtdev_dereference(rtdev); + continue; + } + + memset(&tap_device[i].tap_dev_stats, 0, + sizeof(struct net_device_stats)); + + dev = alloc_netdev(sizeof(struct rtnet_device *), + rtdev->name, NET_NAME_UNKNOWN, + tap_dev_setup); + if (!dev) { + ret = -ENOMEM; + goto error3; + } + + tap_device[i].tap_dev = dev; + *(struct rtnet_device **)netdev_priv(dev) = rtdev; + + ret = register_netdev(dev); + if (ret < 0) + goto error3; + + tap_device[i].present = TAP_DEV; + + tap_device[i].orig_xmit = rtdev->hard_start_xmit; + + if ((rtdev->flags & IFF_LOOPBACK) == 0) { + dev = alloc_netdev( + sizeof(struct rtnet_device *), + rtdev->name, NET_NAME_UNKNOWN, + tap_dev_setup); + if (!dev) { + ret = -ENOMEM; + goto error3; + } + + tap_device[i].rtmac_tap_dev = dev; + *(struct rtnet_device **)netdev_priv(dev) = + rtdev; + strncat(dev->name, "-mac", + IFNAMSIZ - strlen(dev->name)); + + ret = register_netdev(dev); + if (ret < 0) + goto error3; + + tap_device[i].present |= RTMAC_TAP_DEV; + + rtdev->hard_start_xmit = rtcap_xmit_hook; + } else + rtdev->hard_start_xmit = + rtcap_loopback_xmit_hook; + + /* If the device requires no xmit_lock, start_xmit points equals + * hard_start_xmit => we have to update this as well + */ + if (rtdev->features & NETIF_F_LLTX) + rtdev->start_xmit = rtdev->hard_start_xmit; + + tap_device[i].present |= XMIT_HOOK; + + mutex_unlock(&rtdev->nrt_lock); + + devices++; + } + } + + if (devices == 0) { + printk("RTcap: no real-time devices found!\n"); + ret = -ENODEV; + goto error2; + } + + if (rtskb_module_pool_init(&cap_pool, rtcap_rtskbs * devices) < + rtcap_rtskbs * devices) { + rtskb_pool_release(&cap_pool); + ret = -ENOMEM; + goto error2; + } + + /* register capturing handlers with RTnet core + * (adding the handler need no locking) */ + rtcap_handler = rtcap_rx_hook; + + return 0; + +error3: + mutex_unlock(&rtdev->nrt_lock); + rtdev_dereference(rtdev); + printk("RTcap: unable to register %s!\n", dev->name); + +error2: + cleanup_tap_devices(); + rtdm_nrtsig_destroy(&cap_signal); + + return ret; +} + +void rtcap_cleanup(void) +{ + rtdm_lockctx_t context; + + rtdm_nrtsig_destroy(&cap_signal); + + /* unregister capturing handlers + * (take lock to avoid any unloading code before handler was left) */ + rtdm_lock_get_irqsave(&rtcap_lock, context); + rtcap_handler = NULL; + 
rtdm_lock_put_irqrestore(&rtcap_lock, context); + + /* empty queue (should be already empty) */ + rtcap_signal_handler(0, NULL /* we ignore them anyway */); + + cleanup_tap_devices(); + + rtskb_pool_release(&cap_pool); + + printk("RTcap: unloaded\n"); +} + +module_init(rtcap_init); +module_exit(rtcap_cleanup); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c new file mode 100644 index 0000000..f61794a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c @@ -0,0 +1,442 @@ +/* rtnetproxy.c: a Linux network driver that uses the RTnet driver to + * transport IP data from/to Linux kernel mode. + * This allows the use of TCP/IP from Linux space via the RTnet + * network adapter. + * + * + * Usage: + * + * insmod rtnetproxy.o (only after having rtnet up and running) + * + * ifconfig rtproxy up IP_ADDRESS netmask NETMASK + * + * Use it like any other network device from linux. + * + * Restrictions: + * Only IPv4-based protocols are supported; UDP and ICMP can be sent out + * but not received - as these are handled directly by rtnet! + * + * + * + * Based on the linux net driver dummy.c by Nick Holloway + * + * + * Changelog: + * + * 08-Nov-2002 Mathias Koehrer - Clear separation between rtai context and + * standard linux driver context. + * Data exchange via ringbuffers. + * A RTAI thread is used for rtnet transmission. + * + * 05-Nov-2002 Mathias Koehrer - Initial version! + * Development based on rtnet 0.2.6, + * rtai-24.1.10, kernel 2.4.19 + * + * + * Mathias Koehrer - mathias_koehrer@yahoo.de +*/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/init.h> +#include <linux/inet.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <net/sock.h> +#include <net/ip.h> + +#include <linux/if_ether.h> /* For the statistics structure.
*/ +#include <linux/if_arp.h> /* For ARPHRD_ETHER */ + +#include <rtdev.h> +#include <rtskb.h> +#include <rtdm/driver.h> +#include <ipv4/ip_input.h> +#include <ipv4/route.h> +#include <rtnet_port.h> + +static struct net_device *dev_rtnetproxy; + +/* ************************************************************************** + * SKB pool management (JK): + * ************************************************************************ */ +#define DEFAULT_PROXY_RTSKBS 32 + +static unsigned int proxy_rtskbs = DEFAULT_PROXY_RTSKBS; +module_param(proxy_rtskbs, uint, 0444); +MODULE_PARM_DESC(proxy_rtskbs, + "Number of realtime socket buffers in proxy pool"); + +static struct rtskb_pool rtskb_pool; + +static struct rtskb_queue tx_queue; +static struct rtskb_queue rx_queue; + +/* handle for non-real-time signal */ +static rtdm_nrtsig_t rtnetproxy_rx_signal; + +/* Thread for transmission */ +static rtdm_task_t rtnetproxy_tx_task; + +static rtdm_event_t rtnetproxy_tx_event; + +#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP +static char *rtdev_attach = "rteth0"; +module_param(rtdev_attach, charp, 0444); +MODULE_PARM_DESC(rtdev_attach, "Attach to the specified RTnet device"); + +struct rtnet_device *rtnetproxy_rtdev; +#endif + +/* ************************************************************************ + * ************************************************************************ + * T R A N S M I T + * ************************************************************************ + * ************************************************************************ */ + +static void rtnetproxy_tx_loop(void *arg) +{ + struct rtnet_device *rtdev; + struct rtskb *rtskb; + + while (!rtdm_task_should_stop()) { + if (rtdm_event_wait(&rtnetproxy_tx_event) < 0) + break; + + while ((rtskb = rtskb_dequeue(&tx_queue)) != NULL) { + rtdev = rtskb->rtdev; + rtdev_xmit_proxy(rtskb); + rtdev_dereference(rtdev); + } + } +} + +/* ************************************************************************ + * hard_xmit + * + * This function runs in linux kernel context and is executed whenever + * there is a frame to be sent out. 
+ * ************************************************************************ */ +static int rtnetproxy_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ethhdr *eth = (struct ethhdr *)skb->data; + struct rtskb *rtskb; + int len = skb->len; +#ifndef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP + struct dest_route rt; + struct iphdr *iph; + u32 saddr, daddr; +#endif + + switch (ntohs(eth->h_proto)) { + case ETH_P_IP: + if (len < sizeof(struct ethhdr) + sizeof(struct iphdr)) + goto drop1; +#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP + case ETH_P_ARP: +#endif + break; + default: + drop1: + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + rtskb = alloc_rtskb(len, &rtskb_pool); + if (!rtskb) + return NETDEV_TX_BUSY; + + memcpy(rtskb_put(rtskb, len), skb->data, len); + +#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP + dev_kfree_skb(skb); + + rtskb->rtdev = rtnetproxy_rtdev; + if (rtdev_reference(rtnetproxy_rtdev) == 0) { + dev->stats.tx_dropped++; + kfree_rtskb(rtskb); + return NETDEV_TX_BUSY; + } + +#else /* !CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */ + iph = (struct iphdr *)(skb->data + sizeof(struct ethhdr)); + saddr = iph->saddr; + daddr = iph->daddr; + + dev_kfree_skb(skb); + + if (rt_ip_route_output(&rt, daddr, INADDR_ANY) < 0) { + drop2: + dev->stats.tx_dropped++; + kfree_rtskb(rtskb); + return NETDEV_TX_OK; + } + if (rt.rtdev->local_ip != saddr) { + rtdev_dereference(rt.rtdev); + goto drop2; + } + + eth = (struct ethhdr *)rtskb->data; + memcpy(eth->h_source, rt.rtdev->dev_addr, rt.rtdev->addr_len); + memcpy(eth->h_dest, rt.dev_addr, rt.rtdev->addr_len); + + rtskb->rtdev = rt.rtdev; +#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */ + + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; + + rtskb_queue_tail(&tx_queue, rtskb); + rtdm_event_signal(&rtnetproxy_tx_event); + + return NETDEV_TX_OK; +} + +/* ************************************************************************ + * ************************************************************************ + * R E C E I V E + * ************************************************************************ + * ************************************************************************ */ + +/* ************************************************************************ + * This function runs in real-time context. + * + * It is called from inside rtnet whenever a packet has been received that + * has to be processed by rtnetproxy. + * ************************************************************************ */ +static void rtnetproxy_recv(struct rtskb *rtskb) +{ + /* Acquire rtskb (JK) */ + if (rtskb_acquire(rtskb, &rtskb_pool) != 0) { + dev_rtnetproxy->stats.rx_dropped++; + rtdm_printk("rtnetproxy_recv: No free rtskb in pool\n"); + kfree_rtskb(rtskb); + return; + } + + if (rtskb_queue_tail_check(&rx_queue, rtskb)) + rtdm_nrtsig_pend(&rtnetproxy_rx_signal); +} + +/* ************************************************************************ + * This function runs in kernel mode. + * It is activated from rtnetproxy_signal_handler whenever rtnet received a + * frame to be processed by rtnetproxy. 
+ * ************************************************************************ */ +static inline void rtnetproxy_kernel_recv(struct rtskb *rtskb) +{ + struct sk_buff *skb; + struct net_device *dev = dev_rtnetproxy; + + int header_len = rtskb->rtdev->hard_header_len; + int len = rtskb->len + header_len; + + /* Copy the realtime skb (rtskb) to the standard skb: */ + skb = dev_alloc_skb(len + 2); + if (!skb) { + /* dev_alloc_skb() may fail; account and drop instead of oopsing */ + dev->stats.rx_dropped++; + return; + } + skb_reserve(skb, 2); + + memcpy(skb_put(skb, len), rtskb->data - header_len, len); + + /* Set some relevant entries in the skb: */ + skb->protocol = eth_type_trans(skb, dev); + skb->dev = dev; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_HOST; /* Extremely important! Why?!? */ + + /* the rtskb stamp is useless (different clock), get new one */ + __net_timestamp(skb); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) + dev->last_rx = jiffies; +#endif + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + + netif_rx(skb); /* hand it over to the Linux network stack */ +} + +/* ************************************************************************ + * This function runs in kernel mode. + * It is activated from rtnetproxy_recv whenever rtnet received a frame to + * be processed by rtnetproxy. + * ************************************************************************ */ +static void rtnetproxy_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg) +{ + struct rtskb *rtskb; + + while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) { + rtnetproxy_kernel_recv(rtskb); + kfree_rtskb(rtskb); + } +} + +/* ************************************************************************ + * ************************************************************************ + * G E N E R A L + * ************************************************************************ + * ************************************************************************ */ + +static void fake_multicast_support(struct net_device *dev) +{ +} + +#ifdef CONFIG_NET_FASTROUTE +static int rtnetproxy_accept_fastpath(struct net_device *dev, + struct dst_entry *dst) +{ + return -1; +} +#endif + +static int rtnetproxy_open(struct net_device *dev) +{ + int err = try_module_get(THIS_MODULE); + if (err == 0) + return -EIDRM; + + return 0; +} + +static int rtnetproxy_stop(struct net_device *dev) +{ + module_put(THIS_MODULE); + return 0; +} + +static const struct net_device_ops rtnetproxy_netdev_ops = { + .ndo_open = rtnetproxy_open, + .ndo_stop = rtnetproxy_stop, + .ndo_start_xmit = rtnetproxy_xmit, + .ndo_set_rx_mode = fake_multicast_support, +}; + +/* ************************************************************************ + * device init + * ************************************************************************ */ +static void __init rtnetproxy_init(struct net_device *dev) +{ + /* Fill in device structure with ethernet-generic values.
*/ + ether_setup(dev); + + dev->tx_queue_len = 0; +#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP + dev_addr_set(dev, rtnetproxy_rtdev->dev_addr); +#else + dev->flags |= IFF_NOARP; +#endif + dev->flags &= ~IFF_MULTICAST; + + dev->netdev_ops = &rtnetproxy_netdev_ops; +} + +/* ************************************************************************ + * ************************************************************************ + * I N I T + * ************************************************************************ + * ************************************************************************ */ +static int __init rtnetproxy_init_module(void) +{ + int err; + +#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP + if ((rtnetproxy_rtdev = rtdev_get_by_name(rtdev_attach)) == NULL) { + printk("Couldn't attach to %s\n", rtdev_attach); + return -EINVAL; + } + printk("RTproxy attached to %s\n", rtdev_attach); +#endif + + /* Initialize the proxy's rtskb pool (JK) */ + if (rtskb_module_pool_init(&rtskb_pool, proxy_rtskbs) < proxy_rtskbs) { + err = -ENOMEM; + goto err1; + } + + dev_rtnetproxy = + alloc_netdev(0, "rtproxy", NET_NAME_UNKNOWN, rtnetproxy_init); + if (!dev_rtnetproxy) { + err = -ENOMEM; + goto err1; + } + + rtdm_nrtsig_init(&rtnetproxy_rx_signal, rtnetproxy_signal_handler, + NULL); + + rtskb_queue_init(&tx_queue); + rtskb_queue_init(&rx_queue); + + err = register_netdev(dev_rtnetproxy); + if (err < 0) + goto err3; + + /* Init the task for transmission */ + rtdm_event_init(&rtnetproxy_tx_event, 0); + err = rtdm_task_init(&rtnetproxy_tx_task, "rtnetproxy", + rtnetproxy_tx_loop, 0, RTDM_TASK_LOWEST_PRIORITY, + 0); + if (err) + goto err4; + + /* Register with RTnet */ + rt_ip_fallback_handler = rtnetproxy_recv; + + printk("rtnetproxy installed as \"%s\"\n", dev_rtnetproxy->name); + + return 0; + +err4: + unregister_netdev(dev_rtnetproxy); + +err3: + rtdm_nrtsig_destroy(&rtnetproxy_rx_signal); + + free_netdev(dev_rtnetproxy); + +err1: + rtskb_pool_release(&rtskb_pool); +#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP + rtdev_dereference(rtnetproxy_rtdev); +#endif + return err; +} + +static void __exit rtnetproxy_cleanup_module(void) +{ + struct rtskb *rtskb; + + /* Unregister the fallback at rtnet */ + rt_ip_fallback_handler = NULL; + + /* Unregister the net device: */ + unregister_netdev(dev_rtnetproxy); + free_netdev(dev_rtnetproxy); + + rtdm_event_destroy(&rtnetproxy_tx_event); + rtdm_task_destroy(&rtnetproxy_tx_task); + + /* free the non-real-time signal */ + rtdm_nrtsig_destroy(&rtnetproxy_rx_signal); + + while ((rtskb = rtskb_dequeue(&tx_queue)) != NULL) { + rtdev_dereference(rtskb->rtdev); + kfree_rtskb(rtskb); + } + + while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) { + kfree_rtskb(rtskb); + } + + rtskb_pool_release(&rtskb_pool); + +#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP + rtdev_dereference(rtnetproxy_rtdev); +#endif +} + +module_init(rtnetproxy_init_module); +module_exit(rtnetproxy_cleanup_module); +MODULE_LICENSE("GPL"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.drvporting b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.drvporting new file mode 100644 index 0000000..0380971 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.drvporting @@ -0,0 +1,251 @@ +This list was created when porting the pcnet32 driver to RTnet and was +extended and revised afterwards. It is absolutely unsorted. Some points may +not apply to every driver, some may have to be added for others. 
It is +recommended to take a look at pcnet32-rt.c or other existing drivers if some +steps remain unclear. + +IMPORTANT: Check if the critical paths of the driver (xmit function, interrupt +handler) are free of any unbounded or unacceptably long delays, e.g. caused by +waiting on hardware events. + + +1. Add to beginning of file (also add a #define for MAX_UNITS if it is missing + so far): + + #include <rtnet_port.h> + + static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 }; + compat_module_int_param_array(cards, MAX_UNITS); + MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)"); + + +2. disable any copybreak mechanism (rtskbs are all equally sized) + + +3. add the following fields to private data: + + struct rtskb_queue skb_pool; + rtdm_irq_t irq_handle; + + +4. initialize skb pool in probe or init function: + + if (rtskb_pool_init(&<priv>->skb_pool, RX_RING_SIZE*2) < RX_RING_SIZE*2) { + rtskb_pool_release(&<priv>->skb_pool); + <cleanup>... + return -ENOMEM; + } + + +5. free skb pool in cleanup function + + +6. replace unregister_netdev with rt_unregister_rtnetdev + + +7. call rt_rtdev_disconnect in cleanup function (and on error cleanups!) + + +8. cleanup device structure with rtdev_free + + +9. replace netif_stop_queue with rtnetif_stop_queue + + +10. add to the close function replacing the free_irq call: + + if ( (i=rtdm_irq_free(&<priv>->irq_handle))<0 ) + return i; + + rt_stack_disconnect(dev); + + +11. replace struct sk_buff with struct rtskb + + +12. replace skb_XXX calls with rtskb_XXX + + +13. replace eth_type_trans with rt_eth_type_trans + + +14. replace netif_rx with rtnetif_rx + + +15. replace struct net_device with struct rtnet_device + + +16. replace netif_start_queue with rtnetif_start_queue + + +17. revise the xmit routine + +17.1. add new locking scheme replacing any standard spin lock calls: + + rtdm_lockctx_t context; + ... + rtdm_lock_get_irqsave(&<priv>->lock, context); + ... + rtdm_lock_put_irqrestore(&<priv>->lock, context); + + /* ONLY IN EXCEPTIONAL CASES, e.g. if the operation can take more than a + * few tens of microseconds: */ + + rtdm_irq_disable(&<priv>->irq_handle); + rtdm_lock_get(&<priv>->lock); + ... + rtdm_lock_put(&<priv>->lock); + rtdm_irq_enable(&<priv>->irq_handle); + + /* Note that the latter scheme does not work if the IRQ line is shared + * with other devices. Also, rtdm_irq_disable/enable can be costly + * themselves on certain architectures. */ + +17.2. add the following code right before the code which triggers the physical + transmission (take care if data has to be transferred manually, i.e. + without DMA): + + /* get and patch time stamp just before the transmission */ + if (skb->xmit_stamp) + *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); + +17.3. make the code above and the transmission triggering atomic by switching + off all interrupts: + + rtdm_lockctx_t context; + ... + rtdm_lock_irqsave(context); + <patch time stamp> + <trigger transmission> + rtdm_lock_irqrestore(context); + + /* or combined with the spinlock: */ + + rtdm_lock_get_irqsave(&<priv>->lock, context); + <prepare transmission> + <patch time stamp> + <trigger transmission> + rtdm_lock_put_irqrestore(&<priv>->lock, context); + + NOTE: Some hardware may require the driver to calculate the frame + checksum, thus making a patching of the frame effectively impossible. In + this case use the following strategy: switch off the interrupts only if + there is actually a time stamp to patch.
Normally, frames using this + feature are rather short and will not cause long irq locks. Take a look + at 8139too-rt or via-rhine-rt to find some examples. + + +18. modify interrupt handler: + + static int XXX_interrupt(rtdm_irq_t *irq_handle) + { + struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); + ... + + Also adapt the prototype of the interrupt handler accordingly if provided. + + +19. replace spin_lock/spin_unlock with rtdm_lock_get/rtdm_lock_put within the + interrupt handler + + +20. replace printk in xmit function, interrupt handler, and any function called + within this context with rtdm_printk. Where it can be avoided, disable + output in critical functions (i.e. when interrupts are off) completely. + + +21. replace dev_kfree_skb[_XXX] with dev_kfree_rtskb + + +22. replace alloc_etherdev with the following lines: + + dev = rt_alloc_etherdev(sizeof(struct XXX_private) /* or 0 */); + if (dev == NULL) + return -ENOMEM; + rtdev_alloc_name(dev, "rteth%d"); + rt_rtdev_connect(dev, &RTDEV_manager); + RTNET_SET_MODULE_OWNER(dev); + dev->vers = RTDEV_VERS_2_0; + + +23. replace request_irq in open function with the following lines: + + rt_stack_connect(dev, &STACK_manager); + retval = rtdm_irq_request(&<priv>->irq_handle, dev->irq, XXX_interrupt, + RTDM_IRQTYPE_SHARED, NULL /* or driver name */, dev); + if (retval) + return retval; + + +24. replace netif_queue_stopped with rtnetif_queue_stopped + + +25. replace netif_wake_queue with rtnetif_wake_queue + + +26. add to the beginning of the probe or card-init function: + + static int cards_found = -1; + + cards_found++; + if (cards[cards_found] == 0) + return -ENODEV; + + +27. call rtdm_clock_read within receive interrupt and set time_stamp field of skb accordingly + + +28. initialize new unsigned int old_packet_cnt with <priv>->stats.rx_packets at + the beginning of the interrupt handler + + +29. add to the end of the interrupt handler: + + rtdm_lock_put(&<priv>->lock); /* if locking is not done in interrupt main function */ + if (old_packet_cnt != <priv>->stats.rx_packets) + rt_mark_stack_mgr(dev); + + +30. disable any timer setup and delete calls + + +31. comment out not required(!) MII-related assignments and functions + + +32. comment out any other unused functions + + +33. replace register_netdev with rt_register_rtnetdev + + +34. replace netif_carrier_{on|off} with rtnetif_carrier_{on|off} + + +35. replace dev_alloc_skb(size) with dev_alloc_rtskb(size, &<priv>->skb_pool) + + +36. reduce RX_RING_SIZE to 8 + + +37. replace MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT with RTNET_MOD_INC_USE_COUNT/RTNET_MOD_DEC_USE_COUNT + and check if they are used appropriately + + +38. rename type of lock field in private data from spinlock_t to rtdm_lock_t + + +39. replace spin_lock_init(&<priv>->lock) with rtdm_lock_init(&<priv>->lock) + + +40. rtskb structure does not contain a data_len field => set any occurrence to zero + + +41. return from interrupt handler only by providing RTDM_IRQ_HANDLED or RTDM_IRQ_NONE as + return values, depending on whether the IRQ was handled or not + +42. fill rtdev field in every received rtskb object properly + + skb->rtdev = rtdev
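+As a consolidated illustration of the receive-path steps above (18, 19, 27-29,
+41, 42), this is roughly what a converted interrupt handler looks like. It is
+a sketch only: XXX_private, its lock/stats fields and XXX_fetch_rx_frame() are
+hypothetical placeholders for the driver's real private data and RX ring
+handling.
+
+    static int XXX_interrupt(rtdm_irq_t *irq_handle)
+    {
+        struct rtnet_device *dev =
+            rtdm_irq_get_arg(irq_handle, struct rtnet_device); /* step 18 */
+        struct XXX_private *priv = dev->priv;
+        unsigned int old_packet_cnt = priv->stats.rx_packets;  /* step 28 */
+        struct rtskb *skb;
+
+        rtdm_lock_get(&priv->lock);                            /* step 19 */
+
+        while ((skb = XXX_fetch_rx_frame(dev)) != NULL) {
+            skb->time_stamp = rtdm_clock_read();               /* step 27 */
+            skb->rtdev = dev;                                  /* step 42 */
+            skb->protocol = rt_eth_type_trans(skb, dev);       /* step 13 */
+            rtnetif_rx(skb);                                   /* step 14 */
+            priv->stats.rx_packets++;
+        }
+
+        rtdm_lock_put(&priv->lock);
+        if (old_packet_cnt != priv->stats.rx_packets)          /* step 29 */
+            rt_mark_stack_mgr(dev);
+
+        return RTDM_IRQ_HANDLED;                               /* step 41 */
+    }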
+ +XX. check the critical paths in xmit function and interrupt handler for delays + or hardware wait loops, disable or avoid them diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.eth1394 b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.eth1394 new file mode 100644 index 0000000..5ae5f49 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.eth1394 @@ -0,0 +1,65 @@ +HOWTO for using RTnet over FireWire (ETH1394) +============================================= +To use RTnet over FireWire, one needs another package, i.e. RT-FireWire, which +can be checked out via "svn checkout svn://svn.berlios.de/rtfirewire/trunk". +The RT-FireWire package is developed by the RT-FireWire project team; see the +project homepage for more information (http://rtfirewire.berlios.de). + +It is recommended to compile and test the RT-FireWire package first. +RT-FireWire only compiles with fusion. At the time of writing, it is the CVS +version of fusion which will become release 0.9. Use --with-rtai=XXX to +specify the installation location of fusion on your system. + +To compile RTnet's Eth1394 driver with RT-FireWire, one needs to do two things +in configuration: +1. add --with-rtfw=XXX to specify the source location of RT-FireWire +2. add --enable-eth1394 to enable the compilation of eth1394 +Of course, don't forget --with-rtai=XXX for RTnet. + +RT-FireWire comes with some basic testing tools, one of which is similar to +"rtping" on Ethernet. See the Readme of RT-FireWire for how to play around +with basic FireWire testing. + +Currently, Eth1394 appears exactly the same as a normal Ethernet device. So +from the application point of view, no difference in medium can be seen, which +means applications written for Ethernet can be directly moved to FireWire +without any porting effort. + +So, play around with your new medium, i.e. FireWire, with exactly the same +tools as on Ethernet. :-) + + +Modification to RFC2734 +======================= +Each IP-capable node must have its own unique hardware address in the network. +The original IPover1394 spec (RFC2734) employs the 64-bit GUID of each +FireWire adapter chip as the hardware address. That way, the hardware address +can be guaranteed to be unique even on a world scale, but the address +resolution process is not efficient, see below: + +IP address --(ARP resolution)--> 48-bit MAC (64-bit GUID) --(Eth1394 internal resolution)--> 16-bit FireWire nodeid + +The modified ARP on IPover1394 directly uses the FireWire node id as the +hardware address of each Eth1394 node. That way, the mapping between IP +address and hardware address (FireWire node id) only needs a single resolution +step, which is more efficient than the original scheme. Note that here we +assume static allocation of 1394 address space to IPover1394, i.e. on each +node, the address space for Eth1394 would be exactly the same, see +"eth1394.h". So, 16 bits are enough to represent the hardware address. Now +the address resolution process is more efficient, as below: + +IP address --(ARP resolution)--> 48-bit MAC (FireWire nodeid) + +To give exactly the same look as normal Ethernet devices, the MAC address of +Eth1394 is extended to 6 bytes by filling zeros after the 2-byte FireWire node +id. This way all the high-level stuff which is already working on Ethernet, +like RTnet's TDMA, RTcfg, can be directly moved to Eth1394.
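+As a concrete illustration of the mapping described above, the 6-byte
+pseudo-MAC could be built like this (a sketch only; the function name and the
+byte order of the node id are assumptions, not taken from eth1394.h):
+
+    #include <linux/if_ether.h> /* ETH_ALEN */
+    #include <linux/string.h>
+    #include <linux/types.h>
+
+    /* derive the Eth1394 pseudo-MAC from a 16-bit FireWire node id */
+    static void eth1394_nodeid_to_mac(u16 nodeid, u8 mac[ETH_ALEN])
+    {
+        mac[0] = nodeid >> 8;             /* 2-byte node id first... */
+        mac[1] = nodeid & 0xff;
+        memset(&mac[2], 0, ETH_ALEN - 2); /* ...then zero padding */
+    }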
+ +Good Luck! + +2005-08-02 Zhang Yuchen <yuchen623-at-gmail.com> diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.ipfragmentation b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.ipfragmentation new file mode 100644 index 0000000..f552bbc --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.ipfragmentation @@ -0,0 +1,49 @@ +README.ipfragmentation +======================= + +19-May-2003 - Mathias Koehrer (mathias_koehrer@yahoo.de) (original version) +21-Oct-2003 - Jan Kiszka (jan.kiszka@web.de) + + +This file documents the restrictions and pitfalls when using fragmented IP +packets with RTnet. + + +Introduction: +------------- +Ethernet provides 1500 bytes of payload within each packet. Subtracting the IP +header (20 bytes without options) and the UDP header (8 bytes), this leaves +1472 bytes of data for the (UDP) user. When sending larger packets, the RTnet +implementation of IP fragments the packet and sends it in multiple chunks over +the network. When an RTnet station receives a sequence of fragmented IP packets, +it reassembles them and passes the whole packet to the next layer (UDP) +afterwards. + + +Restrictions: +------------- +Incoming IP fragments are collected by the IP layer. The collector mechanism is +a global resource; when all collector slots are used, unassignable fragmented +packets are dropped! In order to guarantee bounded execution time of the +collector lookup mechanism, it is not possible to provide an unlimited number +of collectors (currently 10 are supported, see ipv4/ip_fragment.c). Therefore, +be careful how many fragmented packets all of your stations are producing and +whether one receiver might be overwhelmed with fragments! + +Fragmented IP packets are generated AND received at the expense of the socket +rtskb pool. Adjust the pool size appropriately to provide sufficient rtskbs +(see also examples/frap_ip). + +To identify the destination socket and to simplify the defragmentation, all IP +fragments must arrive in a strictly ascending order. Unordered packets are +dropped; if they can be assigned to an existing collector, the already +collected fragments are cleaned up as well. However, for the typically isolated +real-time networks, this requirement can easily be fulfilled. + + +Known Issues: +------------- +When sending fragmented IP packets over a NIC without RTmac being installed, +the NIC's transmission queue may easily overflow (take a look at the driver +source for the exact limit - typically TX_RING_SIZE). This is due to the still +lacking flow control for packet transmission. Will hopefully be fixed soon... diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.pools b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.pools new file mode 100644 index 0000000..b932a1d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.pools @@ -0,0 +1,117 @@ + Buffer Pool Management + ====================== + +RTnet holds packets or packet fragments internally in so-called real-time socket +buffers (rtskbs, comparable to Linux skbs). These buffers are used to store +incoming data while it is processed by the stack and before it is copied to the +user buffer. They are also used for setting up outgoing packets and passing +them to the NIC driver. + +Unlike buffers in a normal network stack, rtskbs have to be allocatable in a +strictly deterministic way. For this reason, rtskbs are kept preallocated in +multiple pools, one for each producer or consumer of packets.
When a filled +buffer is passed from a producer to a consumer, the consumer has to return an +empty rtskb in exchange. This prevents a failing component from exhausting +global resources like the buffers and locking up the whole RTnet system. + +This is an overview of the rtskb pools in RTnet, how large they are by default, +and how they can be extended or shrunk. + + +1. Socket Pools +--------------- + +Default Size: 16 +Resizable: module parameter "socket_rtskbs" +Runtime Resize: [rt_dev_]setsockopt() +Initialization: real-time / non real-time (see text) + +Every socket gets its own rtskb pool upon creation. This pool is used for +compensation when an incoming packet needs to be stored until the user fetches +it and when a packet is prepared for transmission. The initial pool size can be +set with "socket_rtskbs". + +During runtime the pool can be extended (RT_SO_EXTPOOL) or shrunk +(RT_SO_SHRPOOL) using the [rt_dev_]setsockopt() function. When a socket is to +be created within a real-time context (e.g. a kernel RT-task), the buffers are +allocated from the real-time rtskb cache (see below) instead of using a Linux +system call. When a real-time-created socket is closed again, the buffers +return to that cache. Note that a [rt_dev_]close() call can fail if not all +buffers have yet returned to the socket pool. In this case, be patient and +retry later. :) + + +2. Global Pool +-------------- + +Default Size: 0 + 16 * number of registered NICs +Resizable: module parameter "global_rtskbs" (base value) + module parameter "device_rtskbs" (increment per NIC) +Runtime Resize: by adding or removing NIC drivers +Initialization: non real-time + +The global pool is used by the ARP protocol (transmission only) and by the +real-time protocol part of RTmac. + + +3. ICMP Pool +------------ + +Default Size: 8 +Resizable: - +Runtime Resize: - +Initialization: non real-time + +For technical reasons, the ICMP pool which is used for replying to incoming +requests is separated from the global pool. + + +4. NIC Receiver Pool +-------------------- + +Default Size: 16 (typically RX_RING_SIZE*2) +Resizable: module parameter "rx_pool_size" (8139too-rt.o only) +Runtime Resize: - +Initialization: non real-time + +The receiver pools are used by the NICs to store incoming packets. Their size +is typically fixed and can only be changed by recompiling the driver. + + +5. VNIC Pool +------------ + +Default Size: 32 +Resizable: module parameter "vnic_rtskbs" (rtmac.o) +Runtime Resize: - +Initialization: non real-time + +The VNIC pool is used for compensating incoming non-real-time packets when +they are queued for being processed by Linux. The pool is also used for +creating outgoing VNIC packets. + + +6. rtnetproxy Pool +------------------ + +Default Size: 32 +Resizable: module parameter "proxy_rtskbs" (rtnetproxy.o) +Runtime Resize: - +Initialization: non real-time + +This pool is used the same way as the VNIC pool. + + +All module parameters at a glance: + + Module | Parameter | Default Value + ----------------------------------------------- + rtnet | socket_rtskbs | 16 + rtnet | global_rtskbs | 0 + rtnet | device_rtskbs | 16 + rtmac | vnic_rtskbs | 32 + rtnetproxy | proxy_rtskbs | 32 + rt_8139too | rx_pool_size | 16 + +Statistics of the currently allocated pools are available through the /proc +interface of RTnet (/proc/rtnet/rtskb).
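+To make the exchange discipline above concrete, here is a minimal consumer
+sketch modeled on rtnetproxy_recv() from this patch (my_pool and my_consume
+are hypothetical names). rtskb_acquire() trades an empty buffer from the
+consumer's own pool for the filled one, so the producer's pool never shrinks:
+
+    static struct rtskb_pool my_pool; /* filled via rtskb_pool_init() */
+
+    static void my_consume(struct rtskb *rtskb)
+    {
+        /* swap an empty rtskb from my_pool for the filled one */
+        if (rtskb_acquire(rtskb, &my_pool) != 0) {
+            /* own pool exhausted: hand the buffer back unused */
+            kfree_rtskb(rtskb);
+            return;
+        }
+        /* ...process the data, then return the buffer to my_pool */
+        kfree_rtskb(rtskb);
+    }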
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.routing b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.routing new file mode 100644 index 0000000..f59c3f3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.routing @@ -0,0 +1,117 @@ + IP Routing Subsystem + ==================== + +The IPv4 implementation of RTnet comes with a real-time routing subsystem which +has some differences compared to normal IP stacks. Basically, all dynamic +elements of the routing and device address resolution (ARP) process have been +converted into statically configurable mechanisms. This allows an easy analysis +of the routing and address resolution complexity for known real-time networks. + + +1. Concept +---------- + +The routing system is based on two tables. The so-called host routing table +contains all destination IPs which can be reached directly over local network +segments. These IPs include local loopback addresses and network broadcasts. + +The optional network routing table provides the addresses of gateways +to distant real-time networks, thus allowing more complex network structures. +In order to use the network routing feature, RTnet has to be compiled with +--enable-net-routing (see configure script). + +When preparing the transmission of an IP packet, RTnet first tries to find the +destination address in the host routing table. If this fails and network +routing is available, the network routing table is queried. On success, the +host routing table is consulted again, this time using the gateway IP. + +Incoming IP packets are no longer checked against any routing table on standard +RTnet nodes. Only if RTnet was compiled as a router by passing --enable-router +to the configure script is the destination IP checked for a non-local address. +In case the destination address does not equal the unicast +or broadcast IP of the receiving device and if the input channel is not a +loopback device, the RTnet router will try to find the next hop by performing +the output routing procedure described above and, on success, will forward the +packet. Note that, just like with non-real-time networks, any RTnet router can +become a bottleneck for real-time messages if the traffic is not planned +thoroughly (packets of the RTmac VNICs do not interfere with the real-time +routing). + + +2. Host Routing Table +--------------------- + +The content of the host routing table is comparable to ARP tables of standard +IP stacks: destination IP address, the respective device address, and a +reference to the output device. While a normal ARP table lookup is not +performed before the routing decision is made, RTnet uses this table +already for the first and usually sole routing step, regardless of the +device type, thus also for loopback IPs. + +All entries of the host routing table are stored according to a hash mechanism. +The hash key is calculated using the least significant bits of the destination +IP. The size of the hash table, i.e. the number of relevant destination bits, is +statically configured (default: 64, see ipv4/route.c). Also the number of +available host routing entries is statically limited (default: 32) and can be +set by recompiling RTnet with modified values. + + +Example (hash table size 64): + +192.168.2.35 & 0.0.0.63 = 35, the host hash key + + +Host routes are either added or updated manually via the rtroute tool or +automatically when an ARP request or reply arrives.
Note that ARP messages are +only triggered by explicit user commands (rtroute solicit). Moreover, the +entries in the host routing table will not expire until they are manually +removed, e.g. by shutting down the respective output device. + +The easiest way to create and maintain the host routing table is to use RTcfg, +see README.rtcfg for further information. + + +3. Network Routing Table +------------------------ + +The entries of the network routing table contain the destination IP address, a +mask defining the relevant bits of the destination IP, and the IP of the +gateway to reach the destination network (or host). To simplify updates of host +routes, i.e. foremost changes of the destination device address, gateway IPs +have to be resolved through the host routing table. + +Network routes are either stored using a hash key derived from the destination +IP or without any hashing mechanism. The size of the hash table and thus the +number of considered IP bits for generating the key is defined in the source +code (default: 32). The start of the bit range is specified by a module +parameter of rtnet.o called net_hash_key_shift (default: 8). + + +Example (hash table size 32, net_hash_key_shift 8): + +(192.168.2.35 >> 8) & 0.0.0.31 = 0.192.168.2 & 0.0.0.31 = 2, the network hash key + + +A new network route is only assigned to a hash key if the network mask of the +route completely covers the hash mask. + + +Examples (hash table size is 32, net_hash_key_shift is 8): + +rtroute add 192.168.2.0 netmask 255.255.255.0 gw 192.168.0.1 +hashmask = 0.0.0.31 << 8 = 0.0.31.0 +netmask & hashmask = 255.255.255.0 & 0.0.31.0 = 0.0.31.0 = hashmask => use key! + +rtroute add 10.0.0.0 netmask 255.0.0.0 gw 192.168.0.250 +netmask & hashmask = 255.0.0.0 & 0.0.31.0 = 0.0.0.0 != hashmask => no hash key! + + +In the latter case, RTnet adds the new route to the list of key-less network +routes. This list is queried only if a network route lookup in the hash table +fails. Thus, the network routing process effectively consists of two stages: +the hash-key-based lookup and a potential query of the key-less list of routes. + +RTnet provides by default a pool of 16 network routes. This number can be +modified in the source code (see ipv4/route.c). Network routes are only +added or removed manually via rtroute. diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcap b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcap new file mode 100644 index 0000000..52b2ced --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcap @@ -0,0 +1,39 @@ +Real-Time Ethernet Capturing (RTcap) +------------------------------------ + +RTnet can capture incoming and outgoing Ethernet packets with a very low time +stamp jitter, typically below 10 us (depending on the hardware). + +When it is configured and compiled with --enable-rtcap, some extensions will be +added to the RTnet stack and an additional module rtcap.o will be created. This +module has to be loaded *after* all NIC drivers are inserted and *before* any +device is started or an RTmac discipline is attached to it. It will create two +read-only Linux shadow network devices for every NIC: + + <rtdevX> (e.g. rteth0) and + <rtdevX>-mac (exception: loopback device will only be mirrored to "rtlo"). + +The first capturing device mirrors any incoming packet the hardware reports to +the stack and any outgoing packet sent on the local station using RTnet. The +second one captures only packets which have been delayed by an active RTmac +discipline.
As the capturing time is dictated by the parent shadow device, +packet lists may not be in chronological order, but this provides a deeper look +into the influence of RTmac on the packet transmission process. + +After these shadow devices are started up using ifconfig, any capturing tool +like tcpdump or Ethereal can be used for the actual analysis work. In order to +get hold of any packet on the network, the real-time NIC should furthermore +be switched to promiscuous mode when it is configured: + + rtifconfig <rtdevX> up <IP> promisc + +If you notice any potential packet losses while capturing, you can try to +increase the number of real-time buffers used for storing packets before they +can be processed by Linux. The module parameter rtcap_rtskbs controls this +number; it is set to 128 by default. Generally you should also tell RTcap to +switch on the RTAI timer (module parameter: start_timer=1) and prevent any +other module or program from doing so as well. + +The capturing support adds a slight overhead to both paths of packets; +therefore the compilation parameter should only be switched on when the service +is actually required. diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcfg b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcfg new file mode 100644 index 0000000..a0d7ff2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcfg @@ -0,0 +1,135 @@ + RTcfg + ===== + +The Real-Time Configuration Service (RTcfg) provides a mechanism to start up +RTnet nodes synchronously. It implements a rendezvous during the RTnet start-up +process, exchanges MAC addresses and optionally IP routes, and distributes +configuration data to all stations. + +RTcfg consists of a kernel module which can be configured to run either as a +server or as a client. The server takes a list of all expected stations in the +network and waits for them to come up while broadcasting invitation messages to +the clients. The clients wait for the invitation, then exchange the +configuration with the server, and wait for all other clients to start up. +After all configuration steps are performed, the stations can use a further +rendezvous mechanism before starting the user application. + + +Usage +----- + +The RTcfg server and client functionality is controlled by the command line +tool rtcfg. Note: Some features may not be implemented yet, so the respective +options may have no effect. + + +Server Commands +--------------- + +rtcfg <dev> server [-p period] [-b burstrate] [-h <heartbeat>] + [-t <threshold>] [-r] + +Starts an RTcfg server for the specified device <dev>. The server then sends +stage 1 configuration frames to new clients every 1000 ms. <period> (in +milliseconds) can be used to override the interval value. The number of +clients invited within one period is controlled by <burstrate> (default: 4). +This value also defines the number of stage 2 configuration fragments the +server should send as far as the client supports it (see also "announce"). +<heartbeat> specifies the Heartbeat period of the clients in milliseconds +(default: 1000 ms), the value 0 turns the heartbeat mechanism off. <threshold> +sets the number of missing heartbeats after which a client shall be considered +dead (default: 2). If -r is given, the server automatically reports itself as +ready within its stage 1 configuration frame, thus relieving it from issuing +an explicit "ready" command.
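+For illustration (device name and values made up), the following starts a
+server on rteth0 that invites clients every 500 ms, two per period, and
+reports itself as ready right away:
+
+    rtcfg rteth0 server -p 500 -b 2 -r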
+ +rtcfg <dev> add <address> [-hw <hw_address>] [-stage1 <stage1_file>] + [-stage2 <stage2_file>] [-t <timeout>] + +Adds a client to the server's list of potential participants of the network +connected to the specified device <dev>. <address> can be either an IP address +(A.B.C.D) or a physical address (AA:BB:CC:DD:EE:FF). If a physical address is +explicitly assigned using <hw_address>, the <address> parameter must define +the client's IP address. Optionally, files can be specified which will be +passed during the different configuration stages. If <stage1_file> is "-", +rtcfg will read the stage 1 data from standard input. <timeout> (in +milliseconds) defines the internal timeout after which a half-finished client +configuration is reset to its initial state again. By default this reset is +never performed. + +rtcfg <dev> del <address> + +Removes a client from the list of network participants. See above for details +about the address format. + +rtcfg <dev> wait [-t <timeout>] + +Waits until both configuration stages for all clients in the server's list are +completed. If <timeout> (in milliseconds) is given, rtcfg will return an error +code when the configuration cannot be completed within the specified time. The +default timeout is infinite. + +rtcfg <dev> ready [-t <timeout>] + +Reports that the server has completed its setup, generally including the RTmac +startup phase, and waits until all other stations are reporting to be ready as +well. If <timeout> (in milliseconds) is given, rtcfg will return an error code +when the synchronisation cannot be completed within the specified time. The +default timeout is infinite. + +rtcfg <dev> detach + +Stops the RTcfg server on the specified device <dev>. Afterwards, the device +can be re-configured to act as server or client. + + +Client Commands +--------------- + +rtcfg <dev> client [-t <timeout>] [-c|-f <stage1_file>] [-m maxstations] + +Waits until the first configuration stage is completed for the device <dev>. +If <timeout> (in milliseconds) is given, rtcfg will return an error code when +the configuration cannot be completed within the specified time. The default +timeout is infinite. The incoming configuration data is either sent to the +standard output if -c is given or to <stage1_file> if specified. By default +clients can synchronise with up to 32 other stations (including the server). +This limit can be modified using the <maxstations> parameter. + +rtcfg <dev> announce [-t <timeout>] [-c|-f <stage2_file>] [-b burstrate] [-r] + +Sends a New Announcement frame over the device <dev> and waits until this +second configuration stage is completed. If <timeout> (in milliseconds) is +given, rtcfg will return an error code when the configuration cannot be +completed within the specified time. The default timeout is infinite. If -c or +-f is given, stage 2 configuration data is requested and either sent to the +standard output or to <stage2_file>. <burstrate> controls the number of stage 2 +configuration fragments the client should accept (default: 4). The actual +amount is negotiated according to both the client's and the server's capability +(see also "server"). If -r is given, the client automatically reports itself as +ready within its announcement frame, thus relieving it from issuing an +explicit "ready" command.
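+For illustration (device and file names made up), a client that stores the
+received stage 1 data in a file and then requests stage 2 data with a higher
+burstrate could be set up as follows:
+
+    rtcfg rteth0 client -f stage1.conf
+    rtcfg rteth0 announce -f stage2.conf -b 8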
+ +rtcfg <dev> ready [-t <timeout>] + +Reports that the client has completed its setup and waits until all other +stations are reporting to be ready as well. If <timeout> (in milliseconds) is +given, rtcfg will return an error code when the synchronisation cannot be +completed within the specified time. The default timeout is infinite. + +rtcfg <dev> detach + +Stops the RTcfg client on the specified device <dev>. Afterwards, the device +can be re-configured to act as server or client. + + +Module Parameters +----------------- + +start_timer Set to zero if RTAI timer is already running. By default the + rtcfg module starts the timer when it is loaded. + +num_rtskbs Number of realtime socket buffers used by the rtcfg module. You + may have to increase the default value of 32 when you are + working with multiple interfaces. + + +2003-2005, Jan Kiszka <jan.kiszka-at-web.de> diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtmac b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtmac new file mode 100644 index 0000000..008385b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtmac @@ -0,0 +1,341 @@ + Real-Time Media Access Control (RTmac) + ====================================== + +RTmac is a module designed to be used with RTnet. It provides a media access +control (MAC) infrastructure for RTnet. The actual access control mechanism is +implemented by so-called discipline modules. The current version comes with a +time division multiple access (TDMA) discipline. Because of RTmac's modular +design, you can also easily attach your own MAC discipline optimised for the +specific application. + + +RTmac Layer +=========== + +Without RTmac: + + +---------------+ + |RT applications| + +-------v-------+ + | + +--------v---------+ + | RT UDP/IP stack | + +------------------+ + |RT ethernet driver| + +--------v---------+ + | + +----v---+ + | NIC | + +--------+ + +With RTmac inserted: + + +---------------+ +-------------------+ + |RT applications| | Linux network | + +-------v-------+ |stack (TCP/IP etc.)| + | +---------v---------+ + +--------v---------+ | + | RT UDP/IP stack | +--v--+ + +------------------+ |VNIC | + | RTmac | +--v--+ + | Layer | | + | .--------------. <------------+ + | |MAC algorithm | | + | `--------------´ | + +------------------+ + |RT ethernet driver| + +--------v---------+ + | + +----v---+ + | NIC | + +--------+ + +RTmac, if loaded, has exclusive control over transmission of the network +driver. Every outgoing packet is passed to RTmac which forwards it to the MAC +discipline. The discipline then decides when the packets can be sent to the +hardware driver. + + + +TDMA - Time Division Multiple Access +==================================== + +The TDMA media access control discipline is based on a master/slave hierarchy. +A network master periodically publishes so-called Synchronisation frames, +forming elementary cycles. Network participants, including the master, have +exclusively assigned access windows (time slots) within these cycles, defined +relative to the Synchronisation frames. In order to catch potential breakdowns +of the central master, additional backup masters can be set up which will take +over sending Synchronisation frames in case the primary master fails +to do so. + +A time slot can be used to transmit a single packet of up to a specified maximum +size. This discipline revision supports flexible assignment of time slots to +real-time network participants. It is possible to use multiple slots per cycle. +Furthermore, a slot can be shared between participants by occupying it only +every Nth cycle.
Besides at least one payload slot per participant, slots have +to be reserved for the Synchronisation frame and, optionally, for one or more +backup Synchronisation frames. The concrete timing strongly depends on the +capability of all network participants. Therefore, timing requirements like +worst case jitters or minimum slot gaps are not statically specified; they can +be defined individually for every project. + +In contrast to earlier TDMA discipline revisions, the slave configuration is +no longer distributed by the TDMA master. This means that the slaves have to +be aware of their slot setup before sending any data to a TDMA-managed +network. Therefore, the required settings either have to be stored on the +slaves or, if a centralised management is desired, the RTnet configuration +service RTcfg has to be used (see related documentation for further details). + + +Slot Identification and Selection +--------------------------------- + +Time slots carry an internal ID number, unique per participant. These numbers +are used when determining the slot in which an outgoing packet shall be +transmitted. The TDMA discipline contains no automatic scheduling mechanism. +Instead, the sender, i.e. a user or a service, either explicitly provides a +desired slot ID or a default slot is used. + + Slot ID | Description + ---------+----------------------------------------------------------------- + 0 | default slot for RT; also default NRT slot if slot 1 is missing + 1 | non-RT slot; if missing, slot 0 is used + 2 | user slots, used for explicitly scheduled packets + : | + + +Configuration Files +------------------- + +To ease the setup of TDMA-based networks, the rtnet start script is provided +with the RTnet distribution. It is controlled by a configuration file which is +typically named rtnet.conf and stored in /etc. By setting TDMA_MODE in this +file, the role of the station is set to either "master" or "slave". + +Beyond this common parameter, the start script supports two configuration modes +for TDMA. In the simple mode, only the IPs of all slaves have to be listed in +TDMA_SLAVES, the cycle period has to be provided in TDMA_CYCLE, and the slot +offset difference must be specified in TDMA_OFFSET. Every station is then +assigned a single time slot with the ID 0, starting with offset 0 for the +master node, i.e. the master's payload frame will directly follow the +Synchronisation frame. Further offsets are calculated by incrementing the +previous value by TDMA_OFFSET for each further station. + +In contrast, the extended mode allows a detailed configuration of every node. +To enable this mode, a TDMA configuration file (typically /etc/tdma.conf) is +required. The path of this file has to be provided to rtnet.conf in the +variable TDMA_CONFIG, while TDMA_SLAVES, TDMA_CYCLE, and TDMA_OFFSET have to +be disabled, e.g. by commenting them out. Besides TDMA-related parameters, +individual stage-2 files can also be set for every slave node, overriding the +common STAGE_2_SRC variable in rtnet.conf (see RTcfg documentation for details +about the configuration concept). The format of the TDMA configuration file is +defined as follows: + +# Note: every station needs at least one time slot +master: +[ip 1.2.3.4] +cycle <cycle_in_us> +slot <id> <offset_in_us> [<phasing>/<period> [<size>]] +[slot ...] + +# Slave with known MAC address, IP is assigned by the RTcfg server +slave: +ip 1.2.3.4 +mac AA:BB:CC:DD:EE:FF +[stage2 <file>] +slot ...
+ +# Slave with unknown MAC address, it is aware of its IP when starting +slave: +ip 1.2.3.4 +[stage2 <file>] +slot ... + +# Slave with known MAC address without IP support +slave: +mac AA:BB:CC:DD:EE:FF +[stage2 <file>] +slot ... + +# Note: +# - multiple backup masters can be set up; the one with the smallest +# backup-slot value will always take over in case of a failure +# - the cycle period is already defined with the primary master +backup-master: +ip 1.2.3.4 (or IP+MAC or only MAC, see slave scenarios) +backup-slot <offset_in_us> +[stage2 <file>] +slot ... + + +Configuration Example +--------------------- + +An exemplary configuration consisting of two masters, one serving as backup, +and three slaves is shown below. The slot period is expressed in the form +<phasing>/<period>. For instance, 1/3 means that this slot will be used in +every first of three cycles, while 3/3 means in every third of three. + + +------+ +----------+ +---------+ +---------+ +----------+ + | | | Master 2 | | Slave A | | Slave B | | Master 1 | + | Sync | | Backup | | Slot 0 | | Slot 0 | | Slot 0 | + | | | Sync | | RT/NRT | | RT | | RT/NRT | + | 1/1 | | 1/1 | | 1/1 | | 1/1 | | 1/1 | +--+------+--+----------+--+---------+--+---------+--+----------+--... + + +----------+ + | Slave C | + | Slot 3 | + | RT | + | 3/3 | + +---------+ +----------+ + | Slave C | | Master 2 | + | Slot 0 | | Slot 0 | + | RT/NRT | | RT/NRT | + | 2/2 | | 2/3 | + +---------+ +---------+ +----------+ +------+ + | Slave B | | Slave C | | Slave A | | | + | Slot 1 | | Slot 2 | | Slot 2 | | Sync | + | NRT | | NRT | | RT | | | + | 1/2 | | 1/4 | | 1/3 | | 1/1 | +...--+---------+--------+---------+--+----------+-------------+------+--> + +A tdma.conf file describing this scenario is shown below (all time values are +exemplary, only expressing relative relations): + +# Master 1 +master: +ip 10.0.0.1 +cycle 5000 +slot 0 800 + +# Master 2 +backup-master: +ip 10.0.0.2 +backup-slot 200 +slot 0 1500 2/3 + +# Slave A +slave: +ip 10.0.0.3 +slot 0 400 +slot 2 1500 1/3 + +# Slave B +slave: +ip 10.0.0.4 +slot 0 600 +slot 1 1000 1/2 + +# Slave C +slave: +ip 10.0.0.5 +slot 0 1000 2/2 +slot 2 1300 1/4 +slot 3 1500 3/3 + + +Management Interface +-------------------- + +The TDMA discipline is managed by the command line tool tdmacfg. In the +following, the usage of this tool is described. For typical setups, the rtnet +start script manages the execution of tdmacfg. + +tdmacfg <dev> master <cycle_period> [-b <backup_offset>] + [-c calibration_rounds] [-i max_slot_id] [-m max_calibration_requests] + +Starts a TDMA master on the specified device <dev>. The cycle period length is +given in microseconds using the <cycle_period> parameter. If <backup_offset> +is provided, the master becomes a backup system. In case the main master +fails, the backup master with the smallest <backup_offset> will start sending +Synchronisation frames with the specified offset in microseconds relative to +the scheduled cycle start. <calibration_rounds> specifies the number of clock +calibration requests the master will send to any other potentially already +active master during startup. By default, 100 rounds are performed. The +calibration will be performed when the first slot is added. By default, a +master can handle up to 64 calibration requests at the same time. This value +can be adapted by specifying the <max_calibration_requests> parameter. The +largest used slot ID is tunable by providing <max_slot_id> or will be limited +to 7 if this parameter is omitted.
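+For illustration (device made up, values taken from the example above), a
+primary master with a 5 ms cycle would be started like this, its payload slot
+being added with the "slot" command described below:
+
+    tdmacfg rteth0 master 5000
+    tdmacfg rteth0 slot 0 800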
+ +tdmacfg <dev> slave [-c calibration_rounds] [-i max_slot_id] + +Starts a TDMA slave on the specified device <dev>. <calibration_rounds> +specifies the number of clock calibration requests the slave sends to the +active master during startup. By default, 100 rounds are performed. The +calibration will be performed when the first slot is added. The largest used +slot ID is tunable by providing <max_slot_id> or will be limited to 7 if this +parameter is omitted. + +tdmacfg <dev> slot <id> [<offset> [-p <phasing>/<period>] [-s <size>] + [-j joint_slot] [-l calibration_log_file] [-t calibration_timeout]] + +Adds, reconfigures, or removes a time slot for outgoing data on a started TDMA +master or slave. <id> is used to distinguish between multiple slots. See the +slot ID table above for predefined values. If <offset> is given, the time slot +is added or modified to send data with the specified offset in microseconds +relative to the scheduled cycle start; if omitted, the slot is removed from +the station's configuration. + +By default, a slot will be used in every cycle. When providing <phasing> and +<period>, the slot will only be occupied in every <phasing>-th of <period> +cycles. By assigning e.g. 1/2 to one and 2/2 to another slot, the usage of the +physical time slot will alternate between both slot owners. The <size> +parameter limits the maximum payload size in bytes which can be transmitted +within this slot. If no <size> parameter is provided, the maximum size the +hardware supports is applied. To share the same output queue among several +slots, secondary slots can be attached to a primary <joint_slot>. The slot +sizes must match for this purpose. + +The addition of the station's first slot will trigger the clock calibration +process. To store the results of each calibration handshake, a +<calibration_log_file> can be provided. By default, this command will not +terminate until the calibration is completed. The <calibration_timeout> +parameter can be used to specify an upper time limit. + +NOTE: Reconfiguring an existing slot during runtime can cause packet drops on +the involved output channel. You should stop all applications using this slot +before reconfiguring it. + +tdmacfg <dev> detach + +Detaches a master or slave from the given device <dev>. After this command, +write access to the device is uncoordinated again and may interfere with the +remaining real-time network participants. + + + +NoMAC - Void Media Access Control +================================= + +Foremost as a skeleton for new MAC implementations, the NoMAC discipline module +is provided. It simply forwards every outgoing packet to the driver as soon as +the stack passes it over. NoMAC is configured using the command line tool +nomaccfg. To attach NoMAC to a real-time network adapter, call + +nomaccfg <dev> attach + +To detach it again, use + +nomaccfg <dev> detach + + + +VNIC configuration +================== + +As soon as an RTmac discipline is loaded and appropriately configured for a +real-time network adapter, a virtual network interface controller (VNIC) is +provided to standard Linux. It is named "vnic<n>", where <n> is the number of +the associated rteth device (e.g. rteth1 --> vnic1). You just have to configure +the VNIC as a normal network device using ifconfig. You are even free to assign +a different IP than the real-time interface uses.
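+For example (addresses made up), the VNIC belonging to rteth0 is brought up
+like any other Linux interface:
+
+    ifconfig vnic0 up 192.168.10.10 netmask 255.255.255.0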
+
+
+
+References
+==========
+
+ - Real-Time Media Access Control Framework (RTmac), revision 2.0
+ - TDMA Media Access Control Discipline, revision 2.1a
+ - RTnet Configuration Service (RTcfg), revision 1.7
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtnetproxy b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtnetproxy
new file mode 100644
index 0000000..647cdb6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtnetproxy
@@ -0,0 +1,74 @@
+README.rtnetproxy
+=================
+08-Nov-2002, Mathias Koehrer <mathias_koehrer@yahoo.de>
+02-May-2008, Wolfgang Grandegger <wg@grandegger.com>
+
+
+RTnetproxy can be used to share a single network adapter for both realtime
+and non-realtime Ethernet traffic. TCP/IP, UDP and ARP can be used via RTnet
+(of course not in realtime!)
+
+RTnetproxy represents a network device to standard Linux and can be used
+like any other Linux network device (ifconfig for configuration); the name
+of the network device is "rtproxy".
+
+Setup:
+------
+Get your RTnet working first! All IP addresses you are interested in have
+to be set via "rtifconfig ethX route solicit IP_ADDRESS"!
+
+    insmod rtnetproxy.o
+
+Now, you have a network device "rtproxy" ready to be used with Linux.
+Configure this network device using "ifconfig":
+
+Example:
+
+    ifconfig rtproxy up 192.168.10.10 netmask 255.255.255.0
+
+That's it!
+
+Configuration options:
+----------------------
+--enable-proxy: this enables RTnetproxy support, which is by default
+    restricted to IP-based protocols (TCP/IP!!!). Incoming ICMP frames
+    are interpreted directly by RTnet and are not forwarded to the
+    RTnetproxy. UDP packets are forwarded if they are not requested by
+    an RTnet application.
+
+--enable-proxy-arp: this option enables ARP support for the rtproxy Linux
+    network device. Incoming ARP replies are delivered to both the RTnet
+    and the Linux network stacks. The rtproxy then gets attached to the
+    corresponding RTnet device, rteth0 by default.
+
+--disable-icmp: this option disables the RTnet IPv4 ICMP support. ICMP
+    will then be handled by the Linux network stack via the rtproxy Linux
+    network device.
+
+Important note:
+---------------
+It is highly recommended to strictly separate realtime LAN traffic and
+non-realtime LAN traffic. For a configuration/setup phase, TCP/IP is
+sometimes very useful, but for realtime data exchange the LAN should be
+reserved for the realtime traffic using UDP!
+
+
+How it works internally:
+------------------------
+RTnetproxy works on top of RTnet.
+All data to be sent out or received is actually copied between RTnet and
+RTnetproxy => the performance is not as good as with the standard Linux
+network drivers.
+All incoming IPv4 frames with an IP protocol ID that is not handled by
+RTnet are passed to RTnetproxy.
+Incoming frames that are passed to RTnetproxy (TCP frames) slow down the
+realtime traffic a little, as all this is done in realtime mode context!
+
+
+Possible enhancements:
+----------------------
+Pass incoming frames to RTnetproxy not only by checking the protocol ID but
+by actually checking whether a certain frame has been processed by RTnet or
+not. This leads to a couple of changes in the RTnet implementation...
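+
+For reference, the protocol-ID based dispatching described above can be
+sketched as follows (illustrative C; all names are placeholders, not the
+actual RTnet code):
+
+  /* demultiplex an incoming IPv4 frame between RTnet and RTnetproxy */
+  void dispatch_ipv4_frame(struct frame *f)
+  {
+          if (rtnet_handles_protocol(f->ip_protocol))
+                  deliver_to_rtnet(f);        /* e.g. UDP: realtime path */
+          else
+                  forward_to_rtnetproxy(f);   /* e.g. TCP: Linux path */
+  }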
+
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.tcp b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.tcp
new file mode 100644
index 0000000..73ccd8d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.tcp
@@ -0,0 +1,52 @@
+TCP implementation for RTnet
+----------------------------
+ The goal of implementing the TCP support for RTnet is to allow talking
+ to non-RTnet devices that only expose their features via a TCP interface.
+ TCP remains tricky to use under real-time constraints, so it may not be
+ the first choice when designing both sides from scratch.
+
+ TCP has been described in some 130 RFCs (about 30 of them already
+ outdated) over the last 30 years, which by itself complicates an already
+ complex stack.
+
+ To keep things simple, some features are missing, and some of them
+ could be improved. Below is a short list of misfeatures and wishlist
+ items.
+
+ *) PSH and URG packet flags are ignored and do not influence stack
+    or application behaviour.
+ *) TCP packet options like MSS or window scaling are neither parsed
+    in input packets nor generated.
+ *) The TCP stack suffers from the so-called silly window syndrome
+    (see RFC 813 for details). In short, SWS is a degeneration of the
+    throughput which develops over time during a long data transfer.
+    Removing this misfeature would not be a challenging task, but for
+    now it is present in the implementation. If your application uses
+    short TCP transfers, you will not notice any discomfort, but if you
+    would like to develop an FTP or HTTP server over RTnet TCP, keep
+    this warning in mind.
+ *) The server part of the stack is in an embryonic phase: it is
+    possible to create one server connection in a non-POSIX compliant
+    way, but not more. The server connection socket descriptor is the
+    one that you _pass_ to accept(), not one _returned_ by the call.
+    As a consequence, the listen() connection queue is not implemented
+    because it would be of no further use.
+ *) Half-closed connections, i.e. those entered via shutdown() calls,
+    are not implemented.
+ *) The sendmsg() and recvmsg() functions accept only one-element I/O
+    vectors.
+ *) Looking at the BSD code, one can find up to seven timers related to
+    every connection. The RTnet implementation exploits the idea of a
+    timerwheel data structure to manage only one timer per connection -
+    the packet retransmission timer. Timerwheels could be used to
+    implement other kinds of timers at the price of one additional
+    thread in the stack per timer kind. To simplify the stack logic,
+    there are no timers for RTO, connection establishment (the
+    retransmission timer is reused), delayed ACK, persist, keepalive
+    (half-implemented), FIN_WAIT_2 and TIME_WAIT.
+ *) Compared with Berkeley sockets, lots of socket options are not
+    implemented. For now only SO_SNDTIMEO is implemented, and
+    SO_KEEPALIVE is half-implemented.
+ *) TCP congestion avoidance is not covered at all.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTcfg.spec b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTcfg.spec
new file mode 100644
index 0000000..0745aab
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTcfg.spec
@@ -0,0 +1,469 @@
+                 RTnet Configuration Service (RTcfg)
+                 ===================================
+
+                           Revision: 1.8
+
+
+RTcfg is a configuration service for setting up an RTnet network and
+distributing additional user-defined configuration data.
+This document describes the protocol and the user interface of RTcfg.
+
+
+
+Sequence Diagram
+================
+
+Normal Startup
+--------------
+
+Configuration                                              Existing
+    Server                             New Client               Client
+      |                                     |                      |
+      | Client Config, Stage 1              |                      |
+      | (unicast/broadcast, single frame)   |   (if broadcasted)   |
+      |------------------------------------>|--------------------->|
+      |                                    +-+                     |
+      |                                    | |                     |
+      |                                    | | Set                 |
+      |                                    | | Config 1            |
+      |                                    | |                     |
+      |                                    +-+                     |
+      .                                     .                      .
+      .                                     .                      .
+      | Announce (broadcast)                |                      |
+      |<------------------------------------|--------------------->|
+     +-+                                    |                     +-+
+     | |                                    |                     | |
+     | |                                    |                     | | Update
+     | |                                    |                     | | Tables
+     | |                                    |                     | |
+     | |                                    | Announce (unicast)  +-+
+     | | Update                             |<---------------------|
+     | | Tables                            +-+                     |
+     | |                                   | |                     |
+     | |                                   | | Update              |
+     | |                                   | | Tables              |
+     | |                                   | |                     |
+     | | Client Config, Stage 2            +-+                     |
+     +-+ (unicast, multiple frames)         |                      |
+      |------------------------------------>|                      |
+      |                                    +-+                     |
+      |                                    | |                     |
+      |                                    | | Receive             |
+      |                                    | | Config 2            |
+      |                                    | |                     |
+      | Acknowledge Config (unicast)       +-+                     |
+      |<------------------------------------|                      |
+      |                                     |                      |
+      |                                    +-+                     |
+      |                                    | |                     |
+      |                                    | | Process             |
+      |                                    | | Config 2            |
+      |                                    | |                     |
+      |                                    +-+                     |
+      .                                     .                      .
+      .                                     .                      .
+      | Ready (broadcast)                   |                      |
+      |<------------------------------------|--------------------->|
+      .                                     .                      .
+      .                                     .                      .
+      | Ready (broadcast)                   |                      |
+      |------------------------------------>|--------------------->|
+      |                                     |                      |
+
+
+
+Normal Operation
+----------------
+
+Configuration
+    Server                              Client A               Client B
+      | Heartbeat (unicast)                 |                      |
+      |<------------------------------------|                      |
+      |                                     |                      |
+      .                                     .                      .
+      .                                     .                      .
+      | Heartbeat (unicast)                 |                      |
+      |<-----------------------------------------------------------|
+      |                                     |                      |
+
+
+
+Failing Client
+--------------
+
+Configuration
+    Server                              Client A               Client B
+      |                                     |                      |
+     +-+                                    |                      |
+     | |                                    |                      |
+     | | Missing                            |                      |
+     | | Heartbeat                          |                      |
+     | | Detection                          |                      |
+     | |                                    |                      |
+     +-+ Dead Station (broadcast)           |                      |
+      |------------------------------------>|--------------------->|
+     +-+                                   +-+                    +-+
+     | |                                   | |                    | |
+     | | Update                            | | Update             | | Update
+     | | Tables                            | | Tables             | | Tables
+     | |                                   | |                    | |
+     +-+                                   +-+                    +-+
+      |                                     |                      |
+
+
+
+Server Restart
+--------------
+
+Configuration                           Running                Running
+    Server                              Client A               Client B
+      |                                     |                      |
+      | Client Config, Stage 1              |                      |
+      | (unicast/broadcast, single frame)   |   (if broadcasted)   |
+      |------------------------------------>|--------------------->|
+      |                                    +-+                     |
+      |                                    | |                     |
+      |                                    | | Receive             |
+      |                                    | | Config 1            |
+      |                                    | |                     |
+      | Announce (unicast)                 +-+                     |
+      |<------------------------------------|                      |
+     +-+                                   +-+                     |
+     | |                                   | |                     |
+     | | Update                            | | Update              |
+     | | Client Status                     | | Server Address      |
+     | | and Tables                        | | and Tables          |
+     | |                                   | |                     |
+     +-+                                   +-+                     |
+      |                                     |                      |
+
+Note: The configuration of a restarted or replaced server must not differ
+      from the configuration the currently running clients originally
+      received. The only exceptions are the server's physical and logical
+      addresses.
+
+
+
+Frame Formats
+=============
+
+RTcfg frames are identified by the hexadecimal Ethernet type 0x9022. All
+frame fields are encoded in network order (big endian). The first field
+consists of an identification byte as illustrated below. Currently, the
+version bits are zero in all frames, but they must be ignored in order to
+remain compatible with possible future extensions.
+
+ +---------------+------------------------+
+ |  Bits 7 - 5   |       Bits 4 - 0       |
+ | Frame Version |    Frame Identifier    |
+ +---------------+------------------------+
+
+When using RTmac, the lowest real-time priority is applied to RTcfg frames.
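+
+For illustration, decoding the identification byte reduces to two mask/shift
+operations, as in this minimal C sketch (the names are illustrative and not
+taken from the RTnet sources):
+
+  #include <stdint.h>
+
+  #define RTCFG_ETH_TYPE  0x9022   /* RTcfg Ethernet type */
+
+  /* bits 7 - 5: frame version (currently zero, to be ignored on receive) */
+  static inline uint8_t rtcfg_frame_version(uint8_t id_byte)
+  {
+          return id_byte >> 5;
+  }
+
+  /* bits 4 - 0: frame identifier (see the frame descriptions below) */
+  static inline uint8_t rtcfg_frame_id(uint8_t id_byte)
+  {
+          return id_byte & 0x1f;
+  }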
+
+
+
+Stage 1 Configuration Frame
+---------------------------
+
+ +----------+----------------+----------------+----------------+ - -
+ |   ID: 0  | Client Address | Client Address | Server Address |
+ | (1 byte) | Type (1 byte)  |   (variable)   |   (variable)   |
+ +----------+----------------+----------------+----------------+ - -
+
+ - - +---------------+------------------+-----------------+
+     | Stage 2 Burst |  Configuration   |  Configuration  |
+     | Rate (1 Byte) | Length (2 bytes) | Data (variable) |
+ - - +---------------+------------------+-----------------+
+
+The overall frame length must not be greater than the MTU of the network
+interface (typical: 1500 bytes). It might be limited by the installed RTmac
+discipline.
+
+Valid address types are:
+
+  Symbolic Name   | Value | Address Length [Bytes per Field]
+ -----------------+-------+----------------------------------
+  RTCFG_ADDR_MAC  |   0   |                0
+  RTCFG_ADDR_IP   |   1   |                4
+  <extensible>    |  ...  |               ...
+
+Stage 1 Configuration frames are sent as unicast when either only physical
+client addresses are used (RTCFG_ADDR_MAC), or when the linkage of physical
+and logical (e.g. RTCFG_ADDR_IP) addresses is known. In any other case the
+frames are broadcast to all stations.
+
+The Stage 2 Burst Rate field specifies the number of stage 2 configuration
+frames the server is able to send without receiving an Acknowledge
+Configuration frame. See below for the handshake mechanism to determine the
+actual burst rate.
+
+The configuration data of the first stage typically consists of parameters (or
+even shell commands) which are required for the new client to become part of
+an RTmac-managed network. If no data is available for this stage (e.g. when
+RTmac is not used), the server sets the Configuration Length field to zero.
+
+
+
+Announcement Frames
+-------------------
+
+New Announcement Frame:
+ +----------+----------------+----------------+----------+---------------+
+ |   ID: 1  | Client Address | Client Address |  Flags   | Stage 2 Burst |
+ | (1 byte) | Type (1 byte)  |   (variable)   | (1 byte) | Rate (1 byte) |
+ +----------+----------------+----------------+----------+---------------+
+
+Reply Announcement Frame:
+ +----------+----------------+----------------+----------+---------------+
+ |   ID: 2  | Client Address | Client Address |  Flags   | Padding Field |
+ | (1 byte) | Type (1 byte)  |   (variable)   | (1 byte) |   (1 byte)    |
+ +----------+----------------+----------------+----------+---------------+
+
+See "Stage 1 Configuration Frame" for valid address types and lengths.
+
+New Announcement frames are sent as broadcast so that every other client can
+update its ARP and routing tables appropriately. In contrast, the Reply
+Announcement frame is sent directly to the new client. A Reply Announcement
+frame is also sent to the server if a client received a Stage 1 Configuration
+frame while already being in operation mode. This occurs when the server is
+restarted or replaced after a failure.
+
+Flags are encoded as follows:
+
+  Bit Number | Interpretation if set
+ ------------+--------------------------------------------------------------
+      0      | requests available stage 2 configuration data from the server
+      1      | client is ready (i.e. will not send an explicit Ready frame)
+     2-7     | <reserved>
+
+Furthermore, the client reports its own Stage 2 Burst Rate back to the server.
+The minimum of the server and the client value is selected as the actual burst
+rate.
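+For example, if the server announces a burst rate of 4 and the client reports
+2, bursts of at most 2 frames are used.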
+After the server has sent the corresponding number of Stage 2 Configuration
+frames, it will wait for an Acknowledge Configuration frame from the client.
+
+
+
+Stage 2 Configuration Frames
+----------------------------
+
+Initial Frame:
+ +----------+----------+-----------------+------------------+ - -
+ |   ID: 3  |  Flags   | Active Stations | Heartbeat Period |
+ | (1 byte) | (1 byte) |    (4 bytes)    |    (2 bytes)     |
+ +----------+----------+-----------------+------------------+ - -
+
+ - - +----------------------+--------------------+
+     | Configuration Length | Configuration Data |
+     |      (4 bytes)       |     (variable)     |
+ - - +----------------------+--------------------+
+
+Subsequent Fragments:
+ +----------+-----------------+--------------------+
+ |   ID: 4  | Fragment Offset | Configuration Data |
+ | (1 byte) |    (4 bytes)    |     (variable)     |
+ +----------+-----------------+--------------------+
+
+The maximum length of a fragment is determined by the available MTU.
+
+Stage 2 Configuration frames are always sent as unicast.
+
+The Active Stations field contains the number of currently running stations,
+including the server, but excluding the new client. This number is used by the
+client to detect when all other clients have sent their Reply Announcement
+frames, and when all stations have reported to be ready.
+
+If the heartbeat mechanism shall be enabled on the new client, the Heartbeat
+Period field contains the client's period in milliseconds for sending
+Heartbeat frames. Otherwise it is set to zero.
+
+Flags are encoded as follows:
+
+  Bit Number | Interpretation if set
+ ------------+--------------------------------------------------------------
+      0      | <reserved>
+      1      | server is ready (i.e. will not send an explicit Ready frame)
+     2-7     | <reserved>
+
+The second configuration stage can be used to distribute user-defined
+configurations, applications, etc. (e.g. by sending a tar archive). If no
+data is available for this stage, the server sets the Configuration Length
+field to zero.
+
+
+
+Acknowledge Configuration Frames
+--------------------------------
+
+ +----------+--------------------+
+ |   ID: 5  | Acknowledge Length |
+ | (1 byte) |     (4 bytes)      |
+ +----------+--------------------+
+
+An Acknowledge Configuration frame is sent by a new client after it has either
+received the number of Stage 2 Configuration frames specified by the
+negotiated burst rate (see above), or the last expected Stage 2 Configuration
+frame has arrived.
+
+The Acknowledge Length field is set to the number of bytes successfully
+received so far. If the client has detected an inconsistent fragment, this
+number only reflects the amount of data which was correctly received. The
+server will then continue the Stage 2 Configuration frame transmission
+according to the specified offset.
+
+
+
+Ready Frame
+-----------
+
+ +----------+
+ |   ID: 6  |
+ | (1 byte) |
+ +----------+
+
+After a station has finished its setup procedures, it signals this state to
+all other stations by sending a Ready frame as broadcast. This allows the
+server and the clients to synchronise the completion of their configuration
+phase. The frame is not sent if the client has already set the Ready Bit in
+its New Announcement frame.
+
+
+
+Heartbeat Frame
+---------------
+
+ +----------+
+ |   ID: 7  |
+ | (1 byte) |
+ +----------+
+
+Every client has to send Heartbeat frames within the period specified in the
+Stage 2 Configuration frame as unicast to the server.
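+
+On the server side, missing-heartbeat detection then reduces to a per-client
+deadline check, as in this sketch (illustrative C; the names are placeholders,
+not the actual RTnet code):
+
+  #include <stdint.h>
+
+  /* dead if more than <threshold> heartbeat periods passed silently */
+  int client_is_dead(uint64_t now_ms, uint64_t last_heartbeat_ms,
+                     unsigned threshold, unsigned heartbeat_period_ms)
+  {
+          return now_ms - last_heartbeat_ms >
+                 (uint64_t)threshold * heartbeat_period_ms;
+  }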
+
+
+
+Dead Station Frame
+------------------
+
+ +----------+----------------+--------------------+--------------------+
+ |   ID: 8  | Client Address |   Logical Client   |  Physical Client   |
+ | (1 byte) | Type (1 byte)  | Address (variable) | Address (32 bytes) |
+ +----------+----------------+--------------------+--------------------+
+
+See "Stage 1 Configuration Frame" for valid address types and lengths.
+
+When the server detects that a client failed to send a Heartbeat frame within
+the specified maximum period, it broadcasts a Dead Station frame to all other
+clients. Every station will then remove the corresponding entries from its
+ARP and routing tables.
+
+
+
+Management Tool
+===============
+
+NOTE: The following specifications are OPTIONAL. They describe the internal
+      realisation of RTcfg as applied to the implementation in RTnet.
+
+The RTcfg server and client functionality is controlled by the command line
+tool rtcfg.
+
+
+
+Server Commands
+---------------
+
+rtcfg <dev> server [-p <period>] [-b <burstrate>] [-h <heartbeat>]
+      [-t <threshold>] [-r]
+
+Starts an RTcfg server for the specified device <dev>. The server then sends
+stage 1 configuration frames to new clients every 1000 ms. <period> (in
+milliseconds) can be used to override the interval value. The number of
+clients invited within one period is controlled by <burstrate> (default: 4).
+This value also defines the number of stage 2 configuration fragments the
+server should send as far as the client supports it (see also "announce").
+<heartbeat> specifies the Heartbeat period of the clients in milliseconds
+(default: 1000 ms); the value 0 turns the heartbeat mechanism off.
+<threshold> sets the number of missing heartbeats after which a client shall
+be considered dead (default: 2). If -r is given, the server automatically
+reports to be ready within its stage 1 configuration frame, relieving it from
+issuing an explicit "ready" command.
+
+rtcfg <dev> add <address> [-hw <hw_address>] [-stage1 <stage1_file>]
+      [-stage2 <stage2_file>] [-t <timeout>]
+
+Adds a client to the server's list of potential participants of the network
+connected to the specified device <dev>. <address> can be either an IP address
+(A.B.C.D) or a physical address (AA:BB:CC:DD:EE:FF). If a physical address is
+explicitly assigned using <hw_address>, the <address> parameter must define
+the client's IP address. Optionally, files can be specified which will be
+passed during the different configuration stages. If <stage1_file> is "-",
+rtcfg will read the stage 1 data from standard input. <timeout> (in
+milliseconds) defines the internal timeout after which a half-finished client
+configuration is reset to its initial state again. By default this reset is
+never performed.
+
+rtcfg <dev> del <address>
+
+Removes a client from the list of network participants. See above for details
+about the address format.
+
+rtcfg <dev> wait [-t <timeout>]
+
+Waits until both configuration stages for all clients in the server's list are
+completed. If <timeout> (in milliseconds) is given, rtcfg will return an error
+code when the configuration cannot be completed within the specified time. The
+default timeout is infinite.
+
+rtcfg <dev> ready [-t <timeout>]
+
+Reports that the server has completed its setup, generally including the RTmac
+startup phase, and waits until all other stations have reported to be ready as
+well.
+If <timeout> (in milliseconds) is given, rtcfg will return an error code when
+the synchronisation cannot be completed within the specified time. The
+default timeout is infinite.
+
+rtcfg <dev> detach
+
+Stops the RTcfg server on the specified device <dev>. Afterwards, the device
+can be re-configured to act as server or client.
+
+
+
+Client Commands
+---------------
+
+rtcfg <dev> client [-t <timeout>] [-c|-f <stage1_file>] [-m <maxstations>]
+
+Waits until the first configuration stage is completed for the device <dev>.
+If <timeout> (in milliseconds) is given, rtcfg will return an error code when
+the configuration cannot be completed within the specified time. The default
+timeout is infinite. The incoming configuration data is either sent to the
+standard output if -c is given or to <stage1_file> if specified. By default
+clients can synchronise with up to 32 other stations (including the server).
+This limit can be modified using the <maxstations> parameter.
+
+rtcfg <dev> announce [-t <timeout>] [-c|-f <stage2_file>] [-b <burstrate>]
+      [-r]
+
+Sends a New Announcement frame over the device <dev> and waits until this
+second configuration stage is completed. If <timeout> (in milliseconds) is
+given, rtcfg will return an error code when the configuration cannot be
+completed within the specified time. The default timeout is infinite. If -c or
+-f is given, stage 2 configuration data is requested and either sent to the
+standard output or to <stage2_file>. <burstrate> controls the number of stage
+2 configuration fragments the client should accept (default: 4). The actual
+amount is negotiated according to both the client's and the server's
+capability (see also "server"). If -r is given, the client automatically
+reports to be ready within its announcement frame, relieving it from issuing
+an explicit "ready" command.
+
+rtcfg <dev> ready [-t <timeout>]
+
+Reports that the client has completed its setup and waits until all other
+stations have reported to be ready as well. If <timeout> (in milliseconds) is
+given, rtcfg will return an error code when the synchronisation cannot be
+completed within the specified time. The default timeout is infinite.
+
+rtcfg <dev> detach
+
+Stops the RTcfg client on the specified device <dev>. Afterwards, the device
+can be re-configured to act as server or client.
+
+
+2003-2005, Jan Kiszka <jan.kiszka-at-web.de>
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTmac.spec b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTmac.spec
new file mode 100644
index 0000000..4e22547
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTmac.spec
@@ -0,0 +1,44 @@
+          Real-Time Media Access Control Framework (RTmac)
+          ================================================
+
+                         Revision: 2.0
+
+
+This document describes the protocol header of the real-time media access
+control framework for RTnet. The actual control protocol is implemented by
+so-called disciplines, see the related specifications.
+
+
+
+Frame Format
+============
+
+RTmac frames are identified by the hexadecimal Ethernet type 0x9021. All frame
+fields are encoded in network order (big endian). The version identifier of
+the RTmac header shall only be changed if the format becomes incompatible with
+the previous revision. Currently, this version field contains the hexadecimal
+value 0x02.
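+
+Expressed as a C structure, the four-byte header described in the next
+section could be sketched as follows (field names are illustrative, not
+taken from the RTnet sources):
+
+  #include <stdint.h>
+
+  #define RTMAC_ETH_TYPE  0x9021   /* RTmac Ethernet type */
+  #define RTMAC_VERSION   0x02
+
+  struct rtmac_hdr {
+          uint16_t type;      /* discipline ID or tunnelled Ethernet type */
+          uint8_t  version;   /* currently 0x02 */
+          uint8_t  flags;     /* bit 0: tunnelling frame */
+  } __attribute__((packed));  /* all fields in network order */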
+
+
+
+RTmac Frame Header
+------------------
+
+ +----------------------+---------------+---------------+
+ |         Type         | Version: 0x02 |     Flags     |
+ |      (2 bytes)       |   (1 byte)    |   (1 byte)    |
+ +----------------------+---------------+---------------+
+
+Depending on the tunnelling flag, the type field either contains the
+identifier of the succeeding discipline frame or the Ethernet type of a
+tunnelled non-real-time packet introduced by this header.
+
+Flags are encoded as follows:
+
+  Bit Number | Interpretation if set
+ ------------+--------------------------------------------------------------
+      0      | tunnelling frame if set, otherwise discipline frame
+     1-7     | <reserved>
+
+
+2004, Jan Kiszka <jan.kiszka-at-web.de>
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTnet.oxy b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTnet.oxy
new file mode 100644
index 0000000..7db150f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTnet.oxy
@@ -0,0 +1,1150 @@
+# If you run "doxygen RTnet.oxy", a subdirectory RTnet.oxy.html will
+# be generated, where you can find an overview of RTnet.
+#
+# This overview is not meant as reference documentation for users but
+# as an overview of the data structures of RTnet for developers.
+#
+# Doxygen requires the package graphviz to generate the graphics.
+
+# Doxyfile 1.3.7
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = RTnet
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER         =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 2 levels of 10 sub-directories under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS         = yes
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are: +# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, +# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en +# (Japanese with English messages), Korean, Korean-en, Norwegian, Polish, Portuguese, +# Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, and Ukrainian. + +OUTPUT_LANGUAGE = English + +# This tag can be used to specify the encoding used in the generated output. +# The encoding is not always determined by the language that is chosen, +# but also whether or not the output is meant for Windows or non-Windows users. +# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES +# forces the Windows encoding (this is the default for the Windows binary), +# whereas setting the tag to NO uses a Unix-style encoding (the default for +# all platforms other than Windows). + +USE_WINDOWS_ENCODING = NO + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is used +# as the annotated text. Otherwise, the brief description is used as-is. If left +# blank, the following values are used ("$name" is automatically replaced with the +# name of the entity): "The $name class" "The $name widget" "The $name file" +# "is" "provides" "specifies" "contains" "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited +# members of a class in the documentation of that class as if those members were +# ordinary class members. Constructors, destructors and assignment operators of +# the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = NO + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. 
+ +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like the Qt-style comments (thus requiring an +# explicit @brief command for a brief description. + +JAVADOC_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the DETAILS_AT_TOP tag is set to YES then Doxygen +# will output the detailed description near the top, like JavaDoc. +# If set to NO, the detailed description appears after the member +# documentation. + +DETAILS_AT_TOP = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = yes + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources +# only. Doxygen will then generate output that is more tailored for Java. +# For instance, namespaces will be presented as packages, qualified scopes +# will look different, etc. 
+ +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = yes + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# users are advised to set this option to NO. 
+ +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. 
+ +SHOW_USED_FILES = YES + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = .. + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp +# *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or directories +# that are symbolic links (a Unix filesystem feature) are excluded from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. + +EXCLUDE_PATTERNS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). 
+ +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command <filter> <input-file>, where <filter> +# is the value of the INPUT_FILTER tag, and <input-file> is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. + +INPUT_FILTER = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = yes + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = yes + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES (the default) +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES (the default) +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = YES + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. 
+ +ALPHABETICAL_INDEX = yes + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = RTnet.oxy.html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. + +HTML_ALIGN_MEMBERS = YES + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). 
+ +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be +# generated containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are +# probably better off using the HTML help feature. + +GENERATE_TREEVIEW = NO + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). 
The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = NO + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = NO + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). 
These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. This is useful +# if you want to understand what is going on. On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. 
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_PREDEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted, =1 is assumed.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse the
+# parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base or
+# super classes. Setting the tag to NO turns the diagrams off. Note that this
+# option is superseded by the HAVE_DOT option below. This is only a fallback. It is
+# recommended to install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default).
+
+HAVE_DOT = YES
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found on the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes that
+# lie further from the root node will be omitted. Note that setting this option to
+# 1 or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that a graph may be further truncated if the graph's image dimensions are
+# not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH and MAX_DOT_GRAPH_HEIGHT).
+# If 0 is used for the depth value (the default), the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+ +SEARCHENGINE = NO diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/TDMA.spec b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/TDMA.spec new file mode 100644 index 0000000..d9e189b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/TDMA.spec @@ -0,0 +1,597 @@ + TDMA Media Access Control Discipline + ==================================== + + Revision: 2.1a + + +This document describes the second generation of a TDMA-based (Time Division +Multiple Access) real-time media access control discipline for RTnet. Clock +synchronisation is managed by a participant acting as a master. Additional +backup masters are supported in order to compensate a failing master. Slave +participants can be added in arbitrary order without influence on existing +real-time communication. In the following, the TDMA protocol and its +management interface are specified. + + + +Sequence Diagram +================ + +Normal Startup +-------------- + + Master Slave A Slave B + | | | + +-+ | | + | | Detect | | + | | Other Master | | INIT + | | (3 x Cycle Period) | | PHASE + . . | | + . . | | + | | | | + +-+ Synchronisation (broadcast) | | +- - | --------------------------------> | -------------------> | - - - - - - - + | +-+ +-+ + | | | Start | | Start + | | | Slot Timer | | Slot Timer + | +-+ +-+ + | | | + . . . + . . . + | | | + | +-+ | + | | | Slot | + | | | Timeout | + | Calibration Request (unicast) +-+ | + | <-------------------------------- | | + +-+ | | + | | Queue | | + | | Reply | | + +-+ | | + | | | + . . . + . . . + | | | + | | +-+ + | | | | Slot + | | | | Timeout + | Calibration Request (unicast) | +-+ + | <---------------------------------|--------------------- | + +-+ | | + | | Queue | | + | | Reply | | + +-+ | | + | | | + . . . + . . . + | | | + +-+ | | + | | Cycle Timeout | | + | | | | + +-+ Synchronisation (broadcast) | | CALIBRATION + | --------------------------------->|--------------------->| PHASE + | | | + . . . + . . . + | | | + +-+ | | + | | Slot Timeout | | + | | | | + +-+ Calibration Reply (unicast) | | + | --------------------------------> | | + | +-+ | + | | | Calculate | + | | | Transmission | + | | | Delay | + | +-+ | + | | | + . . . + . . . + | | | + +-+ | | + | | Slot Timeout | | + | | | | + +-+ Calibration Reply (unicast) | | + | ----------------------------------|--------------------> | + | | +-+ + | | | | Calculate + | | | | Transmission + | | | | Delay + | | +-+ + | | | + . . . + . . . + | | | + +-+ | | + | | Cycle Timeout | | + | | | | + +-+ Synchronisation (broadcast) | | +- - | --------------------------------> | -------------------> | - - - - - - - + | | | + +Note: The calibration phase is repeated several times in order to estimate the + average transmission delay. The number of repetitions depends on the + expected variance of the measurings and has to be chosen appropriately. + + + +Failing Master +-------------- + + Master Backup Master Slave + | | | + +-+ | | + | | Cycle Timeout | | + | | | | + +-+ Synchronisation (broadcast) | | + | --------------------------------> | -------------------> | + | +-+ +-+ + | | | Sync With | | Start + | | | Alive Master | | Slot Timer + | +-+ +-+ + | | | + . . . + . . . + | | | + | +-+ | + | | | Backup Cycle | + | | | Timeout | + | | | (ignore) | + | +-+ | + | | | + . . . + . . . + | | | + | | +-+ + | | | | Slot + | | | | Timeout + | | Payload +-+ + | | <------------ | + . . . + . . . + | | | + X Failure | | + . . + . . 
+ | | + +-+ | + | | Backup | + | | Cycle | + | | Timeout | + Synchronisation (broadcast) +-+ | + <--------------------------------- | -------------------> | + | +-+ + | | | Start + | | | Slot Timer + | +-+ + | | + . . + . . + | | + | +-+ + | | | Slot + | | | Timeout + | Payload +-+ + | <------------ | + | | + + + +Master Restart +-------------- + + Master Backup Master Slave + | | | | + | | +-+ | + | | Detect | | Backup | INIT + | | Other Master | | Cycle | PHASE + | | | | Timeout | + +-+ Synchronisation (broadcast) +-+ | +- - | <-------------------------------- | -------------------> | - - - - - - - + +-+ | +-+ + | | Start | | | Start + | | Slot Timer | | | Slot Timer + +-+ | +-+ + | | | + . . . + . . . + | | | + +-+ | | + | | Slot Timeout | | + | | | | + +-+ Calibration Request (unicast) | | CALIBRATION + | --------------------------------> | | PHASE + | +-+ | + | | | Queue | + | | | Reply | + | +-+ | + | | | + . . . + . <continue calibration as described above> . + . . . + | | | + +-+ | | + | | Cycle Timeout | | + | | | | + +-+ Synchronisation (broadcast) | | +- - | --------------------------------> | -------------------> | - - - - - - - + | +-+ +-+ + | | | Sync With | | Start + | | | Alive Master | | Slot Timer + | +-+ +-+ + | | | + . . . + . . . + | | | + | +-+ | + | | | Backup Cycle | + | | | Timeout | + | | | (ignore) | + | +-+ | + | | | + + + +Frame Formats +============= + +TDMA frames are introduced by the generic RTmac discipline header as described +in the related document. The hexadecimal RTmac type identifier is 0x0001. All +frame fields are encoded in network byte order (big endian). Version +identifiers of TDMA frames shall only be changed if the format becomes +incompatible to the previous revision. Currently, all frames carry the +hexadecimal value 0x0201. + + + +Synchronisation Frame +--------------------- + + +------------------+------------------+----------------------+ - - + | Version: 0x0201 | Frame ID: 0x0000 | Cycle Number | + | (2 bytes) | (2 bytes) | (4 bytes) | + +------------------+------------------+----------------------+ - - + - - +-----------------------------+-----------------------------+ + | Transmission Time Stamp | Scheduled Transmission Time | + | (8 bytes) | (8 bytes) | + - - +-----------------------------+-----------------------------+ + +Synchronisation frames are sent as broadcast by the currently active master. +They signal the beginning of a new elementary cycle and distribute the value +of the reference clock. + +The Cycle Number field is incremented by one for every new cycle, and it is +reset to zero on overflow. The Transmission Time Stamp contains the value of +the reference clock, typically located on the master, in nanoseconds. It shall +be acquired with minimum jitter relative to the physical packet transmission +time. The Scheduled Transmission Time, also in nanoseconds, contains the +reference time when the transmission was intended to be performed. + +By comparing the Transmission Time Stamp and the Scheduled Transmission Time, +receivers of Synchronisation frames are able to reduce the deviation between +claimed and actual transmission time on the master station. This helps to +improve global time synchronisation. Furthermore, backup masters use the main +master's Scheduled Transmission Time value when submitting their replacement +Synchronisation frames, although these frames are scheduled for a different +time slot. 
As a result, the slave will automatically compensate the time shift +of Synchronisation frames sent by backup masters. + + + +Calibration Frames +------------------ + +Request Calibration Frame: + +------------------+------------------+-----------------------------+ - - + | Version: 0x0201 | Frame ID: 0x0010 | Transmission Time Stamp | + | (2 bytes) | (2 bytes) | (8 bytes) | + +------------------+------------------+-----------------------------+ - - + - - +----------------------+-----------------------------+ + | Reply Cycle | Reply Slot Offset | + | Number (4 bytes) | (8 bytes) | + - - +----------------------+-----------------------------+ + +Reply Calibration Frame: + +------------------+------------------+-----------------------------+ - - + | Version: 0x0201 | Frame ID: 0x0011 | Request Transmission Time | + | (2 bytes) | (2 bytes) | (8 bytes) | + +------------------+------------------+-----------------------------+ - - + - - +-----------------------------+-----------------------------+ + | Reception Time Stamp | Transmission Time Stamp | + | (8 bytes) | (8 bytes) | + - - +-----------------------------+-----------------------------+ + +Calibration frames are sent as unicast to the respective receiver. They are +used to estimate the average delay between the transmission of Synchronisation +frames by a master and their reception on the slave side. Request Calibration +frames are sent by participants to the currently active master. The master +returns one Reply Calibration frame for every request frame in a time slot +specified by the sender. + +The Transmission Time Stamp fields in both frame types contain the value of +the sender's local clock in nanoseconds. It shall be acquired with minimum +jitter relative to the physical packet transmission time. The slave determines +in which cycle (Reply Cycle Number) and with which offset relative to the +cycle's Synchronisation frame (Reply Slot Offset) the master shall send the +reply. Only time slots actually owned by the slave can be specified here, and +the slave must not use these released slots for own transmissions in the +following. + +The Transmission Time Stamp field of the Request Calibration frame is copied +into the Request Transmission Time field of the Reply Calibration frame. On +reception of a request frame, a local time stamp is acquired and stored in the +Reception Time Stamp field of the corresponding reply frame. The acquisition +shall be performed with minimum jitter relative to the physical packet +reception. All times are in nanoseconds. + + + +Time Arithmetics +================ + +Synchronisation on Global Clock +------------------------------- + + Master Slave + | | + T_sched -|- - - - - - -|- T'_sched + | | + | | + T_xmit -|- Synchronisation | + /|\ | \ Frame | + | | \ ----> | + t_trans | \_________________ | + | | \ | + | | \ | + \|/ | \ | + T_recv -|- - - - - - -|- T'_recv + | | + . . + . . + | | + T -|- - - - - - -|- T' + | | + +Calculate the clock offset: + t_offs = T_recv - T'_recv = + = T_xmit + t_trans - T'_recv + +Calculate a global time: + T = T' + t_offs + +Calculate a time relative to a Synchronisation frame: + T' = T'_sched + t = + = T_sched - t_offs + t + +Symbols: + T_sched Scheduled transmission time (global clock) of the + Synchronisation frame. It is distributed in the + Scheduled Transmission Time field of the + Synchronisation frame. + T'_sched T_sched in units of the slave's local clock + T_xmit Actual transmission time (global clock) of the + Synchronisation frame. 
It is distributed in the + Transmission Time Stamp field of the + Synchronisation frame. + t_trans Average time between transmission of a frame by the + master and its reception by the slave. This value is + acquired during the calibration phase. + T_recv Reception time of the Synchronisation frame in units + of the global clock. + T'_recv Reception time of the Synchronisation frame in units + of the slave's local clock. + T, T' An arbitrary time in global and local clock units. + t_offs Offset between local and global clock. + t An arbitrary offset relative to a Synchronisation + frame + + + +Calibration of the Transmission Delay +------------------------------------- + + Master Slave + | | + | Calibration -|- T'_xmit_req + | Request Frame / | + | <---- / | + | _________________/ | + | / | + | / | + | / | + T_recv_req -|- | + | | + . . + . . + | | + T_xmit_rpl -|- Calibration | + | \ Reply Frame | + | \ ----> | + | \_________________ | + | \ | + | \ | + | \ | + | -|- T'_recv_rpl + | | + +Calculate the transmission delay: + t_trans = 1/2 * ((T'_recv_rpl - T'_xmit_req) - + (T_xmit_rpl - T_recv_req)) + +The overall transmission delay shall be averaged over several calibration +rounds. As the measuring is only performed against the main master, backup +masters should be selected so that they show similar timing characteristics. + +Symbols: + T'_xmit_req Time stamp taken on the transmission of a Calibration + Request frame in units of the slave's local clock. + This value is stored in the Transmission Time Stamp + field of the request frame and later copied to the + Request Transmission Time field of the corresponding + reply frame. + T_recv_req Time stamp taken on the reception of a Calibration + Request frame in units of the master's local clock. + This value is stored in the Reception Time Stamp field + of the Calibration Reply frame. + T_xmit_rpl Time stamp taken on the transmission of a Calibration + Reply frame in units of the master's local clock. This + value is stored in the Transmission Time Stamp field + of the Calibration Reply frame. + T'_recv_rpl Time stamp taken on the reception of a Calibration + Reply frame in units of the slave's local clock. + + + +Time Slots +========== + +A time slot can be used to transmit a single packet of up to a specified maximum +size. This TDMA discipline revision supports flexible assignment of time slots +to real-time network participants. It is now possible to use multiple slots per +cycle. Furthermore, a slot can be shared between participants by occupying it +only every Nth cycle. Besides at least one payload slot per participant, slots +have to be reserved for the Synchronisation frame and, optionally, for one or +more backup Synchronisation frames. The concrete timing strongly depends on the +capability of all network participants. Therefore, timing requirements like +worst case jitters or minimum slot gaps are not specified here. + +In contrast to earlier TDMA discipline revisions, the slave configuration is +no longer distributed by the TDMA master. This means that the slaves have to +be aware of their slot setup before sending any data to a TDMA-managed +network. Therefore, the required settings either have to be stored on the +slaves or, if a centralised management is desired, the RTnet configuration +service RTcfg has to be used (see related specification for further details). + + + +Slot Identification and Selection +--------------------------------- + +NOTE: The following specifications are OPTIONAL. 
They describe the internal
+      realisation of this TDMA discipline as applied to the first
+      implementation in RTnet.
+
+Time slots carry an internal ID number, unique per participant. These numbers
+are used when determining the slot in which an outgoing packet shall be
+transmitted. The TDMA discipline contains no automatic scheduling mechanism.
+Instead, the sender, i.e. a user or a service, either explicitly provides a
+desired slot ID or a default slot is used.
+
+ Slot ID | Description
+---------+-----------------------------------------------------------------
+    0    | default slot for RT; also default NRT slot if slot 1 is missing
+    1    | non-RT slot; if missing, slot 0 is used
+    2    | user slots, used for explicitly scheduled packets
+    :    |
+
+
+
+Configuration Example
+---------------------
+
+An example configuration consisting of two masters, one serving as backup,
+and three slaves is shown below. The slot period is expressed in the form
+<phasing>/<period>. For instance, 1/3 means that this slot will be used in
+every first of three cycles, while 3/3 means in every third of three.
+
+  +------+  +----------+  +---------+  +---------+  +----------+
+  |      |  | Master 2 |  | Slave A |  | Slave B |  | Master 1 |
+  | Sync |  | Backup   |  | Slot 0  |  | Slot 0  |  | Slot 0   |
+  |      |  | Sync     |  | RT/NRT  |  | RT      |  | RT/NRT   |
+  | 1/1  |  | 1/1      |  | 1/1     |  | 1/1     |  | 1/1      |
+--+------+--+----------+--+---------+--+---------+--+----------+--...
+
+                                     +----------+
+                                     | Slave C  |
+                                     | Slot 3   |
+                                     | RT       |
+                                     | 3/3      |
+     +---------+                     +----------+
+     | Slave C |                     | Master 2 |
+     | Slot 0  |                     | Slot 0   |
+     | RT/NRT  |                     | RT/NRT   |
+     | 2/2     |                     | 2/3      |
+     +---------+        +---------+  +----------+             +------+
+     | Slave B |        | Slave C |  | Slave A  |             |      |
+     | Slot 1  |        | Slot 2  |  | Slot 2   |             | Sync |
+     | NRT     |        | NRT     |  | RT       |             |      |
+     | 1/2     |        | 1/4     |  | 1/3      |             | 1/1  |
+...--+---------+--------+---------+--+----------+-------------+------+-->
+
+
+
+Management Interface
+====================
+
+NOTE: The following specifications are OPTIONAL. They describe the internal
+      realisation of this TDMA discipline as applied to the first
+      implementation in RTnet.
+
+The TDMA discipline is managed by the command line tool tdmacfg. In the
+following, the usage of this tool is described.
+
+
+
+Commands
+--------
+
+tdmacfg <dev> master <cycle_period> [-b <backup_offset>]
+    [-c calibration_rounds] [-i max_slot_id] [-m max_calibration_requests]
+
+Starts a TDMA master on the specified device <dev>. The cycle period length is
+given in microseconds using the <cycle_period> parameter. If <backup_offset>
+is provided, the master becomes a backup system. In case the main master
+fails, the backup master with the smallest <backup_offset> will start sending
+Synchronisation frames with the specified offset in microseconds relative to
+the scheduled cycle start. <calibration_rounds> specifies the number of clock
+calibration requests the master will send to any other potentially already
+active master during startup. By default, 100 rounds are performed. The
+calibration will be performed when the first slot is added. By default, a
+master can handle up to 64 calibration requests at the same time. This value
+can be adapted by specifying the <max_calibration_requests> parameter. The
+largest used slot ID is tunable by providing <max_slot_id> or will be limited
+to 7 if this parameter is omitted.
+
+tdmacfg <dev> slave [-c calibration_rounds] [-i max_slot_id]
+
+Starts a TDMA slave on the specified device <dev>.
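+
+For illustration only (the device name and option values here are examples,
+not mandated by this specification), a slave relying on the defaults could
+be started with:
+
+    tdmacfg rteth0 slave -c 100 -i 7
+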
+<calibration_rounds> specifies the number of clock calibration requests the
+slave sends to the active master during startup. By default, 100 rounds are
+performed. The calibration will be performed when the first slot is added.
+The largest used slot ID is tunable by providing <max_slot_id> or will be
+limited to 7 if this parameter is omitted.
+
+tdmacfg <dev> slot <id> [<offset> [-p <phasing>/<period>] [-s <size>]
+    [-j joint_slot] [-l calibration_log_file] [-t calibration_timeout]]
+
+Adds, reconfigures, or removes a time slot for outgoing data on a started TDMA
+master or slave. <id> is used to distinguish between multiple slots. See the
+slot ID table above for predefined values. If <offset> is given, the time slot
+is added or modified to send data with the specified offset in microseconds
+relative to the scheduled cycle start; if omitted, the slot is removed from
+the station's configuration.
+
+By default, a slot will be used in every cycle. When providing <phasing> and
+<period>, the slot will only be occupied in every <phasing>-th of <period>
+cycles. By assigning e.g. 1/2 to one and 2/2 to another slot, the usage of the
+physical time slot will alternate between both slot owners. The <size>
+parameter limits the maximum payload size in bytes which can be transmitted
+within this slot. If no <size> parameter is provided, the maximum size the
+hardware supports is applied. To share the same output queue among several
+slots, secondary slots can be attached to a primary <joint_slot>. The slot
+sizes must match for this purpose.
+
+The addition of the station's first slot will trigger the clock calibration
+process. To store the results of each calibration handshake, a
+<calibration_log_file> can be provided. By default, this command will not
+terminate until the calibration is completed. The <calibration_timeout>
+parameter can be used to specify an upper time limit.
+
+tdmacfg <dev> detach
+
+Detaches a master or slave from the given device <dev>. After this command,
+write access to the device is uncoordinated again and may interfere with
+remaining real-time network participants.
+
+
+2004, 2005, Jan Kiszka <jan.kiszka-at-web.de>
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c
new file mode 100644
index 0000000..c335b30
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c
@@ -0,0 +1,1733 @@
+/***
+ * rt_8139too.c - Realtime driver for RealTek RTL-8139 Fast Ethernet adapters;
+ * for more information, look at the end of this file or at '8139too.c'
+ *
+ * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */ + + /* + * This Version was modified by Fabian Koch + * It includes a different implementation of the 'cards' module parameter + * we are using an array of integers to determine which cards to use + * for RTnet (e.g. cards=0,1,0) + * + * Thanks to Jan Kiszka for this idea + */ + +#define DRV_NAME "rt_8139too" +#define DRV_VERSION "0.9.24-rt0.7" + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if.h> +#include <linux/ethtool.h> +#include <linux/rtnetlink.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/completion.h> +#include <linux/crc32.h> +#include <linux/uaccess.h> +#include <asm/io.h> + +/* *** RTnet *** */ +#include <rtnet_port.h> + +#define MAX_UNITS 8 +#define DEFAULT_RX_POOL_SIZE 16 + +static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 }; +static int media[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = -1 }; +static unsigned int rx_pool_size = DEFAULT_RX_POOL_SIZE; +module_param_array(cards, int, NULL, 0444); +module_param_array(media, int, NULL, 0444); +module_param(rx_pool_size, uint, 0444); +MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)"); +MODULE_PARM_DESC(media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps"); +MODULE_PARM_DESC(rx_pool_size, "number of receive buffers"); + +/* *** RTnet *** */ + + +#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION +#define PFX DRV_NAME ": " + +/* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */ +/* *** RTnet *** +#ifdef CONFIG_8139TOO_PIO +#define USE_IO_OPS 1 +#endif + *** RTnet *** */ + +/* Size of the in-memory receive ring. */ +#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */ +#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX) +#define RX_BUF_PAD 16 +#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */ +#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD) + +/* Number of Tx descriptor registers. */ +#define NUM_TX_DESC 4 + +/* max supported ethernet frame size -- must be at least (rtdev->mtu+14+4).*/ +#define MAX_ETH_FRAME_SIZE 1536 + +/* Size of the Tx bounce buffers -- must be at least (rtdev->mtu+14+4). */ +#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE +#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) + +/* PCI Tuning Parameters + Threshold is bytes transferred to chip before transmission starts. */ +#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */ + +/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ +#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */ +#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ +#define TX_RETRY 8 /* 0-15. retries = 16 + (TX_RETRY * 16) */ + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. 
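+   (6*HZ jiffies correspond to six seconds of real time, independent of the
+   kernel's HZ setting.)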
*/ +#define TX_TIMEOUT (6*HZ) + + +enum { + HAS_MII_XCVR = 0x010000, + HAS_CHIP_XCVR = 0x020000, + HAS_LNK_CHNG = 0x040000, +}; + +#define RTL_MIN_IO_SIZE 0x80 +#define RTL8139B_IO_SIZE 256 + +#define RTL8129_CAPS HAS_MII_XCVR +#define RTL8139_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG + +typedef enum { + RTL8139 = 0, + RTL8139_CB, + SMC1211TX, + /*MPX5030,*/ + DELTA8139, + ADDTRON8139, + DFE538TX, + DFE690TXD, + FE2000VX, + ALLIED8139, + RTL8129, +} board_t; + + +/* indexed by board_t, above */ +static struct { + const char *name; + u32 hw_flags; +} board_info[] = { + { "RealTek RTL8139", RTL8139_CAPS }, + { "RealTek RTL8129", RTL8129_CAPS }, +}; + + +static struct pci_device_id rtl8139_pci_tbl[] = { + {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + +#ifdef CONFIG_SH_SECUREEDGE5410 + /* Bogus 8139 silicon reports 8129 without external PROM :-( */ + {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, +#endif +#ifdef CONFIG_8139TOO_8129 + {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 }, +#endif + + /* some crazy cards report invalid vendor ids like + * 0x0001 here. The other ids are valid and constant, + * so we simply don't match on the main vendor id. + */ + {PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 }, + {PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 }, + {PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 }, + + {0,} +}; +MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl); + +/* The rest of these values should never change. */ + +/* Symbolic offsets to registers. */ +enum RTL8139_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAR0 = 8, /* Multicast filter. */ + TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */ + TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ + RxBuf = 0x30, + ChipCmd = 0x37, + RxBufPtr = 0x38, + RxBufAddr = 0x3A, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + ChipVersion = 0x43, + RxConfig = 0x44, + Timer = 0x48, /* A general-purpose counter. */ + RxMissed = 0x4C, /* 24 bits valid, write clears. */ + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + FlashReg = 0x54, + MediaStatus = 0x58, + Config3 = 0x59, + Config4 = 0x5A, /* absent on RTL-8139A */ + HltClk = 0x5B, + MultiIntr = 0x5C, + TxSummary = 0x60, + BasicModeCtrl = 0x62, + BasicModeStatus = 0x64, + NWayAdvert = 0x66, + NWayLPAR = 0x68, + NWayExpansion = 0x6A, + /* Undocumented registers, but required for proper operation. */ + FIFOTMS = 0x70, /* FIFO Control and test. 
*/ + CSCR = 0x74, /* Chip Status and Configuration Register. */ + PARA78 = 0x78, + PARA7c = 0x7c, /* Magic transceiver parameter register. */ + Config5 = 0xD8, /* absent on RTL-8139A */ +}; + +enum ClearBitMasks { + MultiIntrClear = 0xF000, + ChipCmdClear = 0xE2, + Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), +}; + +enum ChipCmdBits { + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, +}; + +/* Interrupt register bits, using my own meaningful names. */ +enum IntrStatusBits { + PCIErr = 0x8000, + PCSTimeout = 0x4000, + RxFIFOOver = 0x40, + RxUnderrun = 0x20, + RxOverflow = 0x10, + TxErr = 0x08, + TxOK = 0x04, + RxErr = 0x02, + RxOK = 0x01, + + RxAckBits = RxFIFOOver | RxOverflow | RxOK, +}; + +enum TxStatusBits { + TxHostOwns = 0x2000, + TxUnderrun = 0x4000, + TxStatOK = 0x8000, + TxOutOfWindow = 0x20000000, + TxAborted = 0x40000000, + TxCarrierLost = 0x80000000, +}; +enum RxStatusBits { + RxMulticast = 0x8000, + RxPhysical = 0x4000, + RxBroadcast = 0x2000, + RxBadSymbol = 0x0020, + RxRunt = 0x0010, + RxTooLong = 0x0008, + RxCRCErr = 0x0004, + RxBadAlign = 0x0002, + RxStatusOK = 0x0001, +}; + +/* Bits in RxConfig. */ +enum rx_mode_bits { + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +}; + +/* Bits in TxConfig. */ +enum tx_config_bits { + + /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */ + TxIFGShift = 24, + TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ + TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ + TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ + TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ + + TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ + TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */ + TxClearAbt = (1 << 0), /* Clear abort (WO) */ + TxDMAShift = 8, /* DMA burst value (0-7) is shifted this many bits */ + TxRetryShift = 4, /* TXRR value (0-15) is shifted this many bits */ + + TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ +}; + +/* Bits in Config1 */ +enum Config1Bits { + Cfg1_PM_Enable = 0x01, + Cfg1_VPD_Enable = 0x02, + Cfg1_PIO = 0x04, + Cfg1_MMIO = 0x08, + LWAKE = 0x10, /* not on 8139, 8139A */ + Cfg1_Driver_Load = 0x20, + Cfg1_LED0 = 0x40, + Cfg1_LED1 = 0x80, + SLEEP = (1 << 1), /* only on 8139, 8139A */ + PWRDN = (1 << 0), /* only on 8139, 8139A */ +}; + +/* Bits in Config3 */ +enum Config3Bits { + Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ + Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ + Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ + Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ + Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ + Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ + Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ + Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ +}; + +/* Bits in Config4 */ +enum Config4Bits { + LWPTN = (1 << 2), /* not on 8139, 8139A */ +}; + +/* Bits in Config5 */ +enum Config5Bits { + Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ + Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ + Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ + Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */ + Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ + Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ + 
Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */ +}; + +enum RxConfigBits { + /* rx fifo threshold */ + RxCfgFIFOShift = 13, + RxCfgFIFONone = (7 << RxCfgFIFOShift), + + /* Max DMA burst */ + RxCfgDMAShift = 8, + RxCfgDMAUnlimited = (7 << RxCfgDMAShift), + + /* rx ring buffer length */ + RxCfgRcv8K = 0, + RxCfgRcv16K = (1 << 11), + RxCfgRcv32K = (1 << 12), + RxCfgRcv64K = (1 << 11) | (1 << 12), + + /* Disable packet wrap at end of Rx buffer */ + RxNoWrap = (1 << 7), +}; + + +/* Twister tuning parameters from RealTek. + Completely undocumented, but required to tune bad links. */ +enum CSCRBits { + CSCR_LinkOKBit = 0x0400, + CSCR_LinkChangeBit = 0x0800, + CSCR_LinkStatusBits = 0x0f000, + CSCR_LinkDownOffCmd = 0x003c0, + CSCR_LinkDownCmd = 0x0f3c0, +}; + + +enum Cfg9346Bits { + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xC0, +}; + + +#define PARA78_default 0x78fa8388 +#define PARA7c_default 0xcb38de43 /* param[0][3] */ +#define PARA7c_xxx 0xcb38de43 +/*static const unsigned long param[4][4] = { + {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43}, + {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, + {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, + {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83} +};*/ + +typedef enum { + CH_8139 = 0, + CH_8139_K, + CH_8139A, + CH_8139B, + CH_8130, + CH_8139C, +} chip_t; + +enum chip_flags { + HasHltClk = (1 << 0), + HasLWake = (1 << 1), +}; + + +/* directly indexed by chip_t, above */ +const static struct { + const char *name; + u8 version; /* from RTL8139C docs */ + u32 flags; +} rtl_chip_info[] = { + { "RTL-8139", + 0x40, + HasHltClk, + }, + + { "RTL-8139 rev K", + 0x60, + HasHltClk, + }, + + { "RTL-8139A", + 0x70, + HasHltClk, /* XXX undocumented? */ + }, + + { "RTL-8139A rev G", + 0x72, + HasHltClk, /* XXX undocumented? */ + }, + + { "RTL-8139B", + 0x78, + HasLWake, + }, + + { "RTL-8130", + 0x7C, + HasLWake, + }, + + { "RTL-8139C", + 0x74, + HasLWake, + }, + + { "RTL-8100", + 0x7A, + HasLWake, + }, + + { "RTL-8100B/8139D", + 0x75, + HasHltClk /* XXX undocumented? */ + | HasLWake, + }, + + { "RTL-8101", + 0x77, + HasLWake, + }, +}; + +struct rtl_extra_stats { + unsigned long early_rx; + unsigned long tx_buf_mapped; + unsigned long tx_timeouts; + unsigned long rx_lost_in_ring; +}; + +struct rtl8139_private { + void *mmio_addr; + int drv_flags; + struct pci_dev *pci_dev; + struct net_device_stats stats; + unsigned char *rx_ring; + unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ + unsigned int tx_flag; + unsigned long cur_tx; + unsigned long dirty_tx; + unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ + unsigned char *tx_bufs; /* Tx bounce buffer region. */ + dma_addr_t rx_ring_dma; + dma_addr_t tx_bufs_dma; + signed char phys[4]; /* MII device addresses. */ + char twistie, twist_row, twist_col; /* Twister tune state. */ + unsigned int default_port:4; /* Last rtdev->if_port value. */ + unsigned int medialock:1; /* Don't sense media type. 
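+   (rtl8139_init_one() sets this flag when the media[] module parameter
+   forces a media type.)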
*/ + rtdm_lock_t lock; + chip_t chipset; + pid_t thr_pid; + u32 rx_config; + struct rtl_extra_stats xstats; + int time_to_die; + struct mii_if_info mii; + rtdm_irq_t irq_handle; +}; + +MODULE_AUTHOR ("Jeff Garzik <jgarzik@mandrakesoft.com>"); +MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver"); +MODULE_LICENSE("GPL"); + +static int read_eeprom (void *ioaddr, int location, int addr_len); +static int mdio_read (struct rtnet_device *rtdev, int phy_id, int location); +static void mdio_write (struct rtnet_device *rtdev, int phy_id, int location, int val); + + +static int rtl8139_open (struct rtnet_device *rtdev); +static int rtl8139_close (struct rtnet_device *rtdev); +static int rtl8139_interrupt (rtdm_irq_t *irq_handle); +static int rtl8139_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev); + +static int rtl8139_ioctl(struct rtnet_device *, struct ifreq *rq, int cmd); +static struct net_device_stats *rtl8139_get_stats(struct rtnet_device*rtdev); + +static void rtl8139_init_ring (struct rtnet_device *rtdev); +static void rtl8139_set_rx_mode (struct rtnet_device *rtdev); +static void __set_rx_mode (struct rtnet_device *rtdev); +static void rtl8139_hw_start (struct rtnet_device *rtdev); + +#ifdef USE_IO_OPS + +#define RTL_R8(reg) inb (((unsigned long)ioaddr) + (reg)) +#define RTL_R16(reg) inw (((unsigned long)ioaddr) + (reg)) +#define RTL_R32(reg) inl (((unsigned long)ioaddr) + (reg)) +#define RTL_W8(reg, val8) outb ((val8), ((unsigned long)ioaddr) + (reg)) +#define RTL_W16(reg, val16) outw ((val16), ((unsigned long)ioaddr) + (reg)) +#define RTL_W32(reg, val32) outl ((val32), ((unsigned long)ioaddr) + (reg)) +#define RTL_W8_F RTL_W8 +#define RTL_W16_F RTL_W16 +#define RTL_W32_F RTL_W32 +#undef readb +#undef readw +#undef readl +#undef writeb +#undef writew +#undef writel +#define readb(addr) inb((unsigned long)(addr)) +#define readw(addr) inw((unsigned long)(addr)) +#define readl(addr) inl((unsigned long)(addr)) +#define writeb(val,addr) outb((val),(unsigned long)(addr)) +#define writew(val,addr) outw((val),(unsigned long)(addr)) +#define writel(val,addr) outl((val),(unsigned long)(addr)) + +#else + +/* write MMIO register, with flush */ +/* Flush avoids rtl8139 bug w/ posted MMIO writes */ +#define RTL_W8_F(reg, val8) do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0) +#define RTL_W16_F(reg, val16) do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0) +#define RTL_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0) + + +#define MMIO_FLUSH_AUDIT_COMPLETE 1 +#if MMIO_FLUSH_AUDIT_COMPLETE + +/* write MMIO register */ +#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) +#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) +#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) + +#else + +/* write MMIO register, then flush */ +#define RTL_W8 RTL_W8_F +#define RTL_W16 RTL_W16_F +#define RTL_W32 RTL_W32_F + +#endif /* MMIO_FLUSH_AUDIT_COMPLETE */ + +/* read MMIO register */ +#define RTL_R8(reg) readb (ioaddr + (reg)) +#define RTL_R16(reg) readw (ioaddr + (reg)) +#define RTL_R32(reg) readl (ioaddr + (reg)) + +#endif /* USE_IO_OPS */ + + +static const u16 rtl8139_intr_mask = + PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver | + TxErr | TxOK | RxErr | RxOK; + +static const unsigned int rtl8139_rx_config = + RxCfgRcv32K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); + +static const unsigned int rtl8139_tx_config = + TxIFG96 | 
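+	/* i.e. a 9.6us/960ns interframe gap, up to 1024-byte Tx DMA bursts
+	 * and 16 + 8 * 16 = 144 transmit retries (see the TX_* macros above) */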
(TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift); + + + + +static void rtl8139_chip_reset (void *ioaddr) +{ + int i; + + /* Soft reset the chip. */ + RTL_W8 (ChipCmd, CmdReset); + + /* Check that the chip has finished the reset. */ + for (i = 1000; i > 0; i--) { + barrier(); + if ((RTL_R8 (ChipCmd) & CmdReset) == 0) + break; + udelay (10); + } +} + + +static int rtl8139_init_board (struct pci_dev *pdev, + struct rtnet_device **dev_out) +{ + void *ioaddr; + struct rtnet_device *rtdev; + struct rtl8139_private *tp; + u8 tmp8; + int rc; + unsigned int i; +#ifdef USE_IO_OPS + u32 pio_start, pio_end, pio_flags, pio_len; +#endif + unsigned long mmio_start, mmio_flags, mmio_len; + u32 tmp; + + + *dev_out = NULL; + + /* dev and rtdev->priv zeroed in alloc_etherdev */ + rtdev=rt_alloc_etherdev(sizeof (struct rtl8139_private), + rx_pool_size + NUM_TX_DESC); + if (rtdev==NULL) { + rtdm_printk (KERN_ERR PFX "%s: Unable to alloc new net device\n", pci_name(pdev)); + return -ENOMEM; + } + rtdev_alloc_name(rtdev, "rteth%d"); + + rt_rtdev_connect(rtdev, &RTDEV_manager); + + rtdev->vers = RTDEV_VERS_2_0; + rtdev->sysbind = &pdev->dev; + tp = rtdev->priv; + tp->pci_dev = pdev; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device (pdev); + if (rc) + goto err_out; + + rc = pci_request_regions (pdev, "rtnet8139too"); + if (rc) + goto err_out; + + /* enable PCI bus-mastering */ + pci_set_master (pdev); + + mmio_start = pci_resource_start (pdev, 1); + mmio_flags = pci_resource_flags (pdev, 1); + mmio_len = pci_resource_len (pdev, 1); + + /* set this immediately, we need to know before + * we talk to the chip directly */ +#ifdef USE_IO_OPS + pio_start = pci_resource_start (pdev, 0); + pio_end = pci_resource_end (pdev, 0); + pio_flags = pci_resource_flags (pdev, 0); + pio_len = pci_resource_len (pdev, 0); + + /* make sure PCI base addr 0 is PIO */ + if (!(pio_flags & IORESOURCE_IO)) { + rtdm_printk (KERN_ERR PFX "%s: region #0 not a PIO resource, aborting\n", pci_name(pdev)); + rc = -ENODEV; + goto err_out; + } + /* check for weird/broken PCI region reporting */ + if (pio_len < RTL_MIN_IO_SIZE) { + rtdm_printk (KERN_ERR PFX "%s: Invalid PCI I/O region size(s), aborting\n", pci_name(pdev)); + rc = -ENODEV; + goto err_out; + } +#else + /* make sure PCI base addr 1 is MMIO */ + if (!(mmio_flags & IORESOURCE_MEM)) { + rtdm_printk(KERN_ERR PFX "%s: region #1 not an MMIO resource, aborting\n", pci_name(pdev)); + rc = -ENODEV; + goto err_out; + } + if (mmio_len < RTL_MIN_IO_SIZE) { + rtdm_printk(KERN_ERR PFX "%s: Invalid PCI mem region size(s), aborting\n", pci_name(pdev)); + rc = -ENODEV; + goto err_out; + } +#endif + +#ifdef USE_IO_OPS + ioaddr = (void *) pio_start; + rtdev->base_addr = pio_start; + tp->mmio_addr = ioaddr; +#else + /* ioremap MMIO region */ + ioaddr = ioremap (mmio_start, mmio_len); + if (ioaddr == NULL) { + rtdm_printk(KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pci_name(pdev)); + rc = -EIO; + goto err_out; + } + rtdev->base_addr = (long) ioaddr; + tp->mmio_addr = ioaddr; +#endif /* USE_IO_OPS */ + + /* Bring old chips out of low-power mode. 
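+   Writing 'R' to the HltClk register lets the chip clock run again;
+   rtl8139_init_one() writes 'H' at the end of probing to halt it once more.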
*/ + RTL_W8 (HltClk, 'R'); + + /* check for missing/broken hardware */ + if (RTL_R32 (TxConfig) == 0xFFFFFFFF) { + rtdm_printk(KERN_ERR PFX "%s: Chip not responding, ignoring board\n", pci_name(pdev)); + rc = -EIO; + goto err_out; + } + + /* identify chip attached to board */ + tmp = RTL_R8 (ChipVersion); + for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++) + if (tmp == rtl_chip_info[i].version) { + tp->chipset = i; + goto match; + } + + rtdm_printk("rt8139too: unknown chip version, assuming RTL-8139\n"); + rtdm_printk("rt8139too: TxConfig = 0x%08x\n", RTL_R32 (TxConfig)); + + tp->chipset = 0; + +match: + if (tp->chipset >= CH_8139B) { + u8 new_tmp8 = tmp8 = RTL_R8 (Config1); + if ((rtl_chip_info[tp->chipset].flags & HasLWake) && + (tmp8 & LWAKE)) + new_tmp8 &= ~LWAKE; + new_tmp8 |= Cfg1_PM_Enable; + if (new_tmp8 != tmp8) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config1, tmp8); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } + if (rtl_chip_info[tp->chipset].flags & HasLWake) { + tmp8 = RTL_R8 (Config4); + if (tmp8 & LWPTN) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config4, tmp8 & ~LWPTN); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } + } + } else { + tmp8 = RTL_R8 (Config1); + tmp8 &= ~(SLEEP | PWRDN); + RTL_W8 (Config1, tmp8); + } + + rtl8139_chip_reset (ioaddr); + + *dev_out = rtdev; + return 0; + +err_out: +#ifndef USE_IO_OPS + if (tp->mmio_addr) iounmap (tp->mmio_addr); +#endif /* !USE_IO_OPS */ + /* it's ok to call this even if we have no regions to free */ + pci_release_regions (pdev); + rtdev_free(rtdev); + pci_set_drvdata (pdev, NULL); + + return rc; +} + + + + +static int rtl8139_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct rtnet_device *rtdev = NULL; + struct rtl8139_private *tp; + int i, addr_len; + int option; + void *ioaddr; + static int board_idx = -1; + + board_idx++; + + if( cards[board_idx] == 0) + return -ENODEV; + + /* when we're built into the kernel, the driver version message + * is only printed if at least one 8139 board has been found + */ +#ifndef MODULE + { + static int printed_version; + if (!printed_version++) + rtdm_printk (KERN_INFO RTL8139_DRIVER_NAME "\n"); + } +#endif + + if ((i=rtl8139_init_board (pdev, &rtdev)) < 0) + return i; + + + tp = rtdev->priv; + ioaddr = tp->mmio_addr; + + addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6; + for (i = 0; i < 3; i++) + ((u16 *) (rtdev->dev_addr))[i] = + le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len)); + + /* The Rtl8139-specific entries in the device structure. */ + rtdev->open = rtl8139_open; + rtdev->stop = rtl8139_close; + rtdev->hard_header = &rt_eth_header; + rtdev->hard_start_xmit = rtl8139_start_xmit; + rtdev->do_ioctl = rtl8139_ioctl; + rtdev->get_stats = rtl8139_get_stats; + + /*rtdev->set_multicast_list = rtl8139_set_rx_mode; */ + rtdev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; + + rtdev->irq = pdev->irq; + + /* rtdev->priv/tp zeroed and aligned in init_etherdev */ + tp = rtdev->priv; + + /* note: tp->chipset set in rtl8139_init_board */ + tp->drv_flags = board_info[ent->driver_data].hw_flags; + tp->mmio_addr = ioaddr; + rtdm_lock_init (&tp->lock); + + if ( (i=rt_register_rtnetdev(rtdev)) ) + goto err_out; + + pci_set_drvdata (pdev, rtdev); + + tp->phys[0] = 32; + + /* The lower four bits are the media type. */ + option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx]; + if (option > 0) { + tp->mii.full_duplex = (option & 0x210) ? 
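+		/* media[] bits 4 and 9 (mask 0x210) force full duplex,
+		 * cf. MODULE_PARM_DESC(media, ...) above */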
1 : 0;
+ tp->default_port = option & 0xFF;
+ if (tp->default_port)
+ tp->medialock = 1;
+ }
+ if (tp->default_port) {
+ rtdm_printk(KERN_INFO " Forcing %dMbps %s-duplex operation.\n",
+ (option & 0x20 ? 100 : 10),
+ (option & 0x10 ? "full" : "half"));
+ mdio_write(rtdev, tp->phys[0], 0,
+ ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */
+ ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
+ }
+
+
+ /* Put the chip into low-power mode. */
+ if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+ RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
+
+ return 0;
+
+
+err_out:
+#ifndef USE_IO_OPS
+ if (tp->mmio_addr) iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+ /* it's ok to call this even if we have no regions to free */
+ pci_release_regions (pdev);
+ rtdev_free(rtdev);
+ pci_set_drvdata (pdev, NULL);
+
+ return i;
+}
+
+
+static void rtl8139_remove_one (struct pci_dev *pdev)
+{
+ struct rtnet_device *rtdev = pci_get_drvdata(pdev);
+
+#ifndef USE_IO_OPS
+ struct rtl8139_private *tp = rtdev->priv;
+
+ if (tp->mmio_addr)
+ iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+
+ /* it's ok to call this even if we have no regions to free */
+ rt_unregister_rtnetdev(rtdev);
+ rt_rtdev_disconnect(rtdev);
+
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ rtdev_free(rtdev);
+}
+
+
+/* Serial EEPROM section. */
+
+/* EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
+#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
+#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
+#define EE_ENB (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+ No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
+ */
+
+#define eeprom_delay() readl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD (5)
+#define EE_READ_CMD (6)
+#define EE_ERASE_CMD (7)
+
+static int read_eeprom (void *ioaddr, int location, int addr_len)
+{
+ int i;
+ unsigned retval = 0;
+ void *ee_addr = ioaddr + Cfg9346;
+ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ writeb (EE_ENB & ~EE_CS, ee_addr);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+
+ /* Shift the read command bits out. */
+ for (i = 4 + addr_len; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ writeb (EE_ENB | dataval, ee_addr);
+ eeprom_delay ();
+ writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+ }
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+
+ for (i = 16; i > 0; i--) {
+ writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay ();
+ retval =
+ (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
+ 0);
+ writeb (EE_ENB, ee_addr);
+ eeprom_delay ();
+ }
+
+ /* Terminate the EEPROM access. */
+ writeb (~EE_CS, ee_addr);
+ eeprom_delay ();
+
+ return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+ serial MDIO protocol.
+ The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+ met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+ "overclocking" issues.
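+ A read transaction is framed as: a preamble of ones (mdio_sync), start (01)
+ and read-opcode (10) bits, the 5-bit PHY address and the 5-bit register
+ address, then a turnaround and 16 data bits; the command half of this
+ layout is what mii_cmd encodes below.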
*/
+#define MDIO_DIR 0x80
+#define MDIO_DATA_OUT 0x04
+#define MDIO_DATA_IN 0x02
+#define MDIO_CLK 0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+#define mdio_delay(mdio_addr) readb(mdio_addr)
+
+
+
+static char mii_2_8139_map[8] = {
+ BasicModeCtrl,
+ BasicModeStatus,
+ 0,
+ 0,
+ NWayAdvert,
+ NWayLPAR,
+ NWayExpansion,
+ 0
+};
+
+#ifdef CONFIG_8139TOO_8129
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_sync (void *mdio_addr)
+{
+ int i;
+
+ for (i = 32; i >= 0; i--) {
+ writeb (MDIO_WRITE1, mdio_addr);
+ mdio_delay (mdio_addr);
+ writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+ mdio_delay (mdio_addr);
+ }
+}
+#endif
+
+
+static int mdio_read (struct rtnet_device *rtdev, int phy_id, int location)
+{
+ struct rtl8139_private *tp = rtdev->priv;
+ int retval = 0;
+#ifdef CONFIG_8139TOO_8129
+ void *mdio_addr = tp->mmio_addr + Config4;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i;
+#endif
+
+ if (phy_id > 31) { /* Really an 8139. Use internal registers. */
+ return location < 8 && mii_2_8139_map[location] ?
+ readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
+ }
+
+#ifdef CONFIG_8139TOO_8129
+ mdio_sync (mdio_addr);
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+ writeb (MDIO_DIR | dataval, mdio_addr);
+ mdio_delay (mdio_addr);
+ writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+ mdio_delay (mdio_addr);
+ }
+
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ writeb (0, mdio_addr);
+ mdio_delay (mdio_addr);
+ retval = (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+ writeb (MDIO_CLK, mdio_addr);
+ mdio_delay (mdio_addr);
+ }
+#endif
+
+ return (retval >> 1) & 0xffff;
+}
+
+
+static void mdio_write (struct rtnet_device *rtdev, int phy_id, int location,
+ int value)
+{
+ struct rtl8139_private *tp = rtdev->priv;
+#ifdef CONFIG_8139TOO_8129
+ void *mdio_addr = tp->mmio_addr + Config4;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+ int i;
+#endif
+
+ if (phy_id > 31) { /* Really an 8139. Use internal registers. */
+ void *ioaddr = tp->mmio_addr;
+ if (location == 0) {
+ RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W16 (BasicModeCtrl, value);
+ RTL_W8 (Cfg9346, Cfg9346_Lock);
+ } else if (location < 8 && mii_2_8139_map[location])
+ RTL_W16 (mii_2_8139_map[location], value);
+ return;
+ }
+
+#ifdef CONFIG_8139TOO_8129
+ mdio_sync (mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval =
+ (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+ writeb (dataval, mdio_addr);
+ mdio_delay (mdio_addr);
+ writeb (dataval | MDIO_CLK, mdio_addr);
+ mdio_delay (mdio_addr);
+ }
+ /* Clear out extra bits.
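+ These are two trailing clock cycles that let the interface return to idle
+ after the 32 command bits.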
*/ + for (i = 2; i > 0; i--) { + writeb (0, mdio_addr); + mdio_delay (mdio_addr); + writeb (MDIO_CLK, mdio_addr); + mdio_delay (mdio_addr); + } +#endif +} + +static int rtl8139_open (struct rtnet_device *rtdev) +{ + struct rtl8139_private *tp = rtdev->priv; + int retval; + + rt_stack_connect(rtdev, &STACK_manager); + + retval = rtdm_irq_request(&tp->irq_handle, rtdev->irq, + rtl8139_interrupt, RTDM_IRQTYPE_SHARED, + rtdev->name, rtdev); + if (retval) + return retval; + + tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + &tp->tx_bufs_dma, GFP_ATOMIC); + tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + &tp->rx_ring_dma, GFP_ATOMIC); + + if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { + rtdm_irq_free(&tp->irq_handle); + if (tp->tx_bufs) + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + tp->tx_bufs, tp->tx_bufs_dma); + if (tp->rx_ring) + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + tp->rx_ring, tp->rx_ring_dma); + + return -ENOMEM; + } + /* FIXME: create wrapper for duplex_lock vs. force_media + tp->mii.full_duplex = tp->mii.duplex_lock; */ + tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; + tp->twistie = 1; + tp->time_to_die = 0; + + rtl8139_init_ring (rtdev); + rtl8139_hw_start (rtdev); + + return 0; +} + + +static void rtl_check_media (struct rtnet_device *rtdev) +{ + struct rtl8139_private *tp = rtdev->priv; + u16 mii_lpa; + + if (tp->phys[0] < 0) + return; + + mii_lpa = mdio_read(rtdev, tp->phys[0], MII_LPA); + if (mii_lpa == 0xffff) + return; + + tp->mii.full_duplex = (mii_lpa & LPA_100FULL) == LPA_100FULL || + (mii_lpa & 0x00C0) == LPA_10FULL; +} + + +/* Start the hardware at open or resume. */ +static void rtl8139_hw_start (struct rtnet_device *rtdev) +{ + struct rtl8139_private *tp = rtdev->priv; + void *ioaddr = tp->mmio_addr; + u32 i; + u8 tmp; + + /* Bring old chips out of low-power mode. */ + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'R'); + + rtl8139_chip_reset(ioaddr); + + /* unlock Config[01234] and BMCR register writes */ + RTL_W8_F (Cfg9346, Cfg9346_Unlock); + /* Restore our idea of the MAC address. */ + RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (rtdev->dev_addr + 0))); + RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (rtdev->dev_addr + 4))); + + tp->cur_rx = 0; + + /* init Rx ring buffer DMA address */ + RTL_W32_F (RxBuf, tp->rx_ring_dma); + + /* Must enable Tx/Rx before setting transfer thresholds! */ + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys; + RTL_W32 (RxConfig, tp->rx_config); + + /* Check this value: the documentation for IFG contradicts ifself. */ + RTL_W32 (TxConfig, rtl8139_tx_config); + + rtl_check_media (rtdev); + + if (tp->chipset >= CH_8139B) { + /* Disable magic packet scanning, which is enabled + * when PM is enabled in Config1. It can be reenabled + * via ETHTOOL_SWOL if desired. 
*/ + RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic); + } + + /* Lock Config[01234] and BMCR register writes */ + RTL_W8 (Cfg9346, Cfg9346_Lock); + + /* init Tx buffer DMA addresses */ + for (i = 0; i < NUM_TX_DESC; i++) + RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs)); + + RTL_W32 (RxMissed, 0); + + rtl8139_set_rx_mode (rtdev); + + /* no early-rx interrupts */ + RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear); + + /* make sure RxTx has started */ + tmp = RTL_R8 (ChipCmd); + if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb))) + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + /* Enable all known interrupts by setting the interrupt mask. */ + RTL_W16 (IntrMask, rtl8139_intr_mask); + + rtnetif_start_queue (rtdev); +} + + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void rtl8139_init_ring (struct rtnet_device *rtdev) +{ + struct rtl8139_private *tp = rtdev->priv; + int i; + + tp->cur_rx = 0; + tp->cur_tx = 0; + tp->dirty_tx = 0; + + for (i = 0; i < NUM_TX_DESC; i++) + tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE]; +} + + +static void rtl8139_tx_clear (struct rtl8139_private *tp) +{ + tp->cur_tx = 0; + tp->dirty_tx = 0; + + /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ +} + + + +static int rtl8139_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev) +{ + struct rtl8139_private *tp = rtdev->priv; + + void *ioaddr = tp->mmio_addr; + unsigned int entry; + unsigned int len = skb->len; + rtdm_lockctx_t context; + + /* Calculate the next Tx descriptor entry. */ + entry = tp->cur_tx % NUM_TX_DESC; + + if (likely(len < TX_BUF_SIZE)) { + if (unlikely(skb->xmit_stamp != NULL)) { + rtdm_lock_irqsave(context); + *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + + *skb->xmit_stamp); + /* typically, we are only copying a few bytes here */ + rtskb_copy_and_csum_dev(skb, tp->tx_buf[entry]); + } else { + /* copy larger packets outside the lock */ + rtskb_copy_and_csum_dev(skb, tp->tx_buf[entry]); + rtdm_lock_irqsave(context); + } + } else { + dev_kfree_rtskb(skb); + tp->stats.tx_dropped++; + return 0; + } + + + /* Note: the chip doesn't have auto-pad! 
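+ Short frames must be padded to the 60-octet minimum in software, so the
+ transmit length programmed below is max(len, ETH_ZLEN).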
*/
+ rtdm_lock_get(&tp->lock);
+ RTL_W32_F (TxStatus0 + (entry * sizeof (u32)), tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
+ tp->cur_tx++;
+ wmb();
+ if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
+ rtnetif_stop_queue (rtdev);
+ rtdm_lock_put_irqrestore(&tp->lock, context);
+
+ dev_kfree_rtskb(skb);
+
+#ifdef DEBUG
+ rtdm_printk ("%s: Queued Tx packet size %u to slot %d.\n", rtdev->name, len, entry);
+#endif
+ return 0;
+}
+
+static int rtl8139_ioctl(struct rtnet_device *rtdev, struct ifreq *ifr, int cmd)
+{
+ struct rtl8139_private *tp = rtdev->priv;
+ void *ioaddr = tp->mmio_addr;
+ int nReturn = 0;
+ struct ethtool_value *value;
+
+ switch (cmd) {
+ case SIOCETHTOOL:
+ /* TODO: user-safe parameter access, most probably one layer higher */
+ value = (struct ethtool_value *)ifr->ifr_data;
+ if (value->cmd == ETHTOOL_GLINK)
+ {
+ if (RTL_R16(CSCR) & CSCR_LinkOKBit)
+ value->data = 1;
+ else
+ value->data = 0;
+ }
+ break;
+
+ default:
+ nReturn = -EOPNOTSUPP;
+ break;
+ }
+ return nReturn;
+}
+
+static struct net_device_stats *rtl8139_get_stats(struct rtnet_device *rtdev)
+{
+ struct rtl8139_private *tp = rtdev->priv;
+ return &tp->stats;
+}
+
+static void rtl8139_tx_interrupt (struct rtnet_device *rtdev,
+ struct rtl8139_private *tp,
+ void *ioaddr)
+{
+ unsigned long dirty_tx, tx_left;
+
+ dirty_tx = tp->dirty_tx;
+ tx_left = tp->cur_tx - dirty_tx;
+
+ while (tx_left > 0) {
+ int entry = dirty_tx % NUM_TX_DESC;
+ int txstatus;
+
+ txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32)));
+
+ if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+ break; /* It still hasn't been Txed */
+
+ /* Note: TxCarrierLost is always asserted at 100 Mbps. */
+ if (txstatus & (TxOutOfWindow | TxAborted)) {
+ /* There was a major error; log it. */
+ rtdm_printk("%s: Transmit error, Tx status %8.8x.\n",
+ rtdev->name, txstatus);
+ tp->stats.tx_errors++;
+ if (txstatus & TxAborted) {
+ tp->stats.tx_aborted_errors++;
+ RTL_W32 (TxConfig, TxClearAbt);
+ RTL_W16 (IntrStatus, TxErr);
+ wmb();
+ }
+ if (txstatus & TxCarrierLost)
+ tp->stats.tx_carrier_errors++;
+ if (txstatus & TxOutOfWindow)
+ tp->stats.tx_window_errors++;
+#ifdef ETHER_STATS
+ if ((txstatus & 0x0f000000) == 0x0f000000)
+ tp->stats.collisions16++;
+#endif
+ } else {
+ if (txstatus & TxUnderrun) {
+ /* Add 64 to the Tx FIFO threshold. */
+ if (tp->tx_flag < 0x00300000)
+ tp->tx_flag += 0x00020000;
+ tp->stats.tx_fifo_errors++;
+ }
+ tp->stats.collisions += (txstatus >> 24) & 15;
+ tp->stats.tx_bytes += txstatus & 0x7ff;
+ tp->stats.tx_packets++;
+ }
+
+ dirty_tx++;
+ tx_left--;
+ }
+
+ /* only wake the queue if we did work, and the queue is stopped */
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+ mb();
+ if (rtnetif_queue_stopped (rtdev))
+ rtnetif_wake_queue (rtdev);
+ }
+}
+
+
+/* TODO: clean this up! Rx reset need not be this intensive */
+static void rtl8139_rx_err
+(u32 rx_status, struct rtnet_device *rtdev, struct rtl8139_private *tp, void *ioaddr)
+{
+/* u8 tmp8;
+#ifndef CONFIG_8139_NEW_RX_RESET
+ int tmp_work;
+#endif */
+
+ /* RTnet-TODO: We really need an error manager to handle such issues...
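+ Until then the error is only reported; the Rx engine is not reset, and the
+ caller aborts further Rx processing for this interrupt.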
*/ + rtdm_printk("%s: FATAL - Ethernet frame had errors, status %8.8x.\n", + rtdev->name, rx_status); +} + + +static void rtl8139_rx_interrupt (struct rtnet_device *rtdev, + struct rtl8139_private *tp, void *ioaddr, + nanosecs_abs_t *time_stamp) +{ + unsigned char *rx_ring; + u16 cur_rx; + + rx_ring = tp->rx_ring; + cur_rx = tp->cur_rx; + + while ((RTL_R8 (ChipCmd) & RxBufEmpty) == 0) { + int ring_offset = cur_rx % RX_BUF_LEN; + u32 rx_status; + unsigned int rx_size; + unsigned int pkt_size; + struct rtskb *skb; + + rmb(); + + /* read size+status of next frame from DMA ring buffer */ + rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset)); + rx_size = rx_status >> 16; + pkt_size = rx_size - 4; + + /* Packet copy from FIFO still in progress. + * Theoretically, this should never happen + * since EarlyRx is disabled. + */ + if (rx_size == 0xfff0) { + tp->xstats.early_rx++; + break; + } + + /* If Rx err or invalid rx_size/rx_status received + * (which happens if we get lost in the ring), + * Rx process gets reset, so we abort any further + * Rx processing. + */ + if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) || + (rx_size < 8) || + (!(rx_status & RxStatusOK))) { + rtl8139_rx_err (rx_status, rtdev, tp, ioaddr); + return; + } + + /* Malloc up new buffer, compatible with net-2e. */ + /* Omit the four octet CRC from the length. */ + + /* TODO: consider allocating skb's outside of + * interrupt context, both to speed interrupt processing, + * and also to reduce the chances of having to + * drop packets here under memory pressure. + */ + + skb = rtnetdev_alloc_rtskb(rtdev, pkt_size + 2); + if (skb) { + skb->time_stamp = *time_stamp; + rtskb_reserve (skb, 2); /* 16 byte align the IP fields. */ + + + /* eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); */ + memcpy (skb->data, &rx_ring[ring_offset + 4], pkt_size); + rtskb_put (skb, pkt_size); + skb->protocol = rt_eth_type_trans (skb, rtdev); + rtnetif_rx (skb); + tp->stats.rx_bytes += pkt_size; + tp->stats.rx_packets++; + } else { + rtdm_printk (KERN_WARNING"%s: Memory squeeze, dropping packet.\n", rtdev->name); + tp->stats.rx_dropped++; + } + + cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; + RTL_W16 (RxBufPtr, cur_rx - 16); + + if (RTL_R16 (IntrStatus) & RxAckBits) + RTL_W16_F (IntrStatus, RxAckBits); + } + + tp->cur_rx = cur_rx; +} + + +static void rtl8139_weird_interrupt (struct rtnet_device *rtdev, + struct rtl8139_private *tp, + void *ioaddr, + int status, int link_changed) +{ + rtdm_printk ("%s: Abnormal interrupt, status %8.8x.\n", + rtdev->name, status); + + /* Update the error count. */ + tp->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + if ((status & RxUnderrun) && link_changed && (tp->drv_flags & HAS_LNK_CHNG)) { + /* Really link-change on new chips. */ + status &= ~RxUnderrun; + } + + /* XXX along with rtl8139_rx_err, are we double-counting errors? */ + if (status & + (RxUnderrun | RxOverflow | RxErr | RxFIFOOver)) + tp->stats.rx_errors++; + + if (status & PCSTimeout) + tp->stats.rx_length_errors++; + + if (status & (RxUnderrun | RxFIFOOver)) + tp->stats.rx_fifo_errors++; + + if (status & PCIErr) { + u16 pci_cmd_status; + pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status); + pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status); + + rtdm_printk (KERN_ERR "%s: PCI Bus error %4.4x.\n", rtdev->name, pci_cmd_status); + } +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. 
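+ Work that must not run under tp->lock - waking the stack manager and
+ signalling TX errors - is recorded in saved_status and carried out after
+ the lock has been released.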
*/ +static int rtl8139_interrupt(rtdm_irq_t *irq_handle) +{ + nanosecs_abs_t time_stamp = rtdm_clock_read(); + struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); + struct rtl8139_private *tp = rtdev->priv; + void *ioaddr = tp->mmio_addr; + int ackstat; + int status; + int link_changed = 0; /* avoid bogus "uninit" warning */ + int saved_status = 0; + int ret = RTDM_IRQ_NONE; + + rtdm_lock_get(&tp->lock); + + status = RTL_R16(IntrStatus); + + /* h/w no longer present (hotplug?) or major error, bail */ + if (unlikely(status == 0xFFFF) || unlikely(!(status & rtl8139_intr_mask))) + goto out; + + ret = RTDM_IRQ_HANDLED; + + /* close possible race with dev_close */ + if (unlikely(!rtnetif_running(rtdev))) { + RTL_W16(IntrMask, 0); + goto out; + } + + /* Acknowledge all of the current interrupt sources ASAP, but + first get an additional status bit from CSCR. */ + if (unlikely(status & RxUnderrun)) + link_changed = RTL_R16(CSCR) & CSCR_LinkChangeBit; + + /* The chip takes special action when we clear RxAckBits, + * so we clear them later in rtl8139_rx_interrupt + */ + ackstat = status & ~(RxAckBits | TxErr); + if (ackstat) + RTL_W16(IntrStatus, ackstat); + + if (status & RxAckBits) { + saved_status |= RxAckBits; + rtl8139_rx_interrupt(rtdev, tp, ioaddr, &time_stamp); + } + + /* Check uncommon events with one test. */ + if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr))) + rtl8139_weird_interrupt(rtdev, tp, ioaddr, status, link_changed); + + if (status & (TxOK |TxErr)) { + rtl8139_tx_interrupt(rtdev, tp, ioaddr); + if (status & TxErr) { + RTL_W16(IntrStatus, TxErr); + saved_status |= TxErr; + } + } + out: + rtdm_lock_put(&tp->lock); + + if (saved_status & RxAckBits) + rt_mark_stack_mgr(rtdev); + + if (saved_status & TxErr) + rtnetif_err_tx(rtdev); + + return ret; +} + + +static int rtl8139_close (struct rtnet_device *rtdev) +{ + struct rtl8139_private *tp = rtdev->priv; + void *ioaddr = tp->mmio_addr; + rtdm_lockctx_t context; + + printk ("%s: Shutting down ethercard, status was 0x%4.4x.\n", rtdev->name, RTL_R16 (IntrStatus)); + + rtnetif_stop_queue (rtdev); + + rtdm_lock_get_irqsave (&tp->lock, context); + /* Stop the chip's Tx and Rx DMA processes. */ + RTL_W8 (ChipCmd, 0); + /* Disable interrupts by clearing the interrupt mask. */ + RTL_W16 (IntrMask, 0); + /* Update the error counts. */ + tp->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + rtdm_lock_put_irqrestore (&tp->lock, context); + + rtdm_irq_free(&tp->irq_handle); + + rt_stack_disconnect(rtdev); + + rtl8139_tx_clear (tp); + + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, tp->rx_ring, + tp->rx_ring_dma); + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, tp->tx_bufs, + tp->tx_bufs_dma); + tp->rx_ring = NULL; + tp->tx_bufs = NULL; + + /* Green! Put the chip in low-power mode. */ + RTL_W8 (Cfg9346, Cfg9346_Unlock); + + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */ + + return 0; +} + + + +/* Set or clear the multicast filter for this adaptor. + This routine is not state sensitive and need not be SMP locked. 
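+ RTnet does not manage multicast subscriptions, so the hash filter is
+ either set to all ones (promiscuous/all-multi) or left empty.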
*/ +static void __set_rx_mode (struct rtnet_device *rtdev) +{ + struct rtl8139_private *tp = rtdev->priv; + void *ioaddr = tp->mmio_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp; + +#ifdef DEBUG + rtdm_printk ("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n", + rtdev->name, rtdev->flags, RTL_R32 (RxConfig)); +#endif + + /* Note: do not reorder, GCC is clever about common statements. */ + if (rtdev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + /*printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n", rtdev->name);*/ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if (rtdev->flags & IFF_ALLMULTI) { + /* Too many to filter perfectly -- accept all multicasts. */ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + } + + /* We can safely update without stopping the chip. */ + tmp = rtl8139_rx_config | rx_mode; + if (tp->rx_config != tmp) { + RTL_W32_F (RxConfig, tmp); + tp->rx_config = tmp; + } + RTL_W32_F (MAR0 + 0, mc_filter[0]); + RTL_W32_F (MAR0 + 4, mc_filter[1]); +} + +static void rtl8139_set_rx_mode (struct rtnet_device *rtdev) +{ + rtdm_lockctx_t context; + struct rtl8139_private *tp = rtdev->priv; + + rtdm_lock_get_irqsave (&tp->lock, context); + __set_rx_mode(rtdev); + rtdm_lock_put_irqrestore (&tp->lock, context); +} + +static struct pci_driver rtl8139_pci_driver = { + name: DRV_NAME, + id_table: rtl8139_pci_tbl, + probe: rtl8139_init_one, + remove: rtl8139_remove_one, + suspend: NULL, + resume: NULL, +}; + + +static int __init rtl8139_init_module (void) +{ + /* when we're a module, we always print a version message, + * even if no 8139 board is found. + */ + +#ifdef MODULE + printk (KERN_INFO RTL8139_DRIVER_NAME "\n"); +#endif + + return pci_register_driver (&rtl8139_pci_driver); +} + + +static void __exit rtl8139_cleanup_module (void) +{ + pci_unregister_driver (&rtl8139_pci_driver); +} + + +module_init(rtl8139_init_module); +module_exit(rtl8139_cleanup_module); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig new file mode 100644 index 0000000..d71d7ec --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig @@ -0,0 +1,147 @@ +menu "Drivers" + depends on XENO_DRIVERS_NET + +comment "Common PCI Drivers" + depends on PCI + +config XENO_DRIVERS_NET_DRV_PCNET32 + depends on XENO_DRIVERS_NET && PCI + tristate "AMD PCnet32" + + +config XENO_DRIVERS_NET_DRV_TULIP + depends on XENO_DRIVERS_NET && PCI + tristate "DEC Tulip" + + +config XENO_DRIVERS_NET_DRV_EEPRO100 + depends on XENO_DRIVERS_NET && PCI + tristate "Intel EtherExpress PRO/100" + default y + +config XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT + depends on XENO_DRIVERS_NET && PCI + int "Command Timeout" + depends on XENO_DRIVERS_NET_DRV_EEPRO100 + default 20 + help + Timeout in microseconds of transmission or configuration commands that + are issued in real-time contexts. + +config XENO_DRIVERS_NET_DRV_EEPRO100_DBG + depends on XENO_DRIVERS_NET && PCI + bool "Enable debugging and instrumentation" + depends on XENO_DRIVERS_NET_DRV_EEPRO100 + help + This option switches on internal debugging code of the EEPRO/100 driver. 
+ It also enables the collection of worst-case command delays in real-time + contexts in order to reduce the command timeout (which, effectively, will + also reduce the worst-case transmission latency). + + +config XENO_DRIVERS_NET_DRV_E1000 + depends on XENO_DRIVERS_NET && PCI + tristate "Intel(R) PRO/1000 (Gigabit)" + default y + +config XENO_DRIVERS_NET_DRV_E1000E + depends on XENO_DRIVERS_NET && PCI + tristate "New Intel(R) PRO/1000 PCIe (Gigabit)" + + +config XENO_DRIVERS_NET_DRV_NATSEMI + depends on XENO_DRIVERS_NET && PCI + tristate "NatSemi" + + +config XENO_DRIVERS_NET_DRV_8139 + depends on XENO_DRIVERS_NET && PCI + tristate "Realtek 8139" + default y + + +config XENO_DRIVERS_NET_DRV_VIA_RHINE + depends on XENO_DRIVERS_NET && PCI + tristate "VIA Rhine" + + +config XENO_DRIVERS_NET_DRV_IGB + select I2C + select I2C_ALGOBIT + depends on XENO_DRIVERS_NET && PCI + tristate "Intel(R) 82575 (Gigabit)" + + +config XENO_DRIVERS_NET_DRV_R8169 + depends on XENO_DRIVERS_NET && PCI + tristate "Realtek 8169 (Gigabit)" + + +if PPC + +comment "Embedded MPC Drivers" + depends on XENO_DRIVERS_NET + +config XENO_DRIVERS_NET_DRV_FCC_ENET + depends on XENO_DRIVERS_NET + tristate "MPC8260 FCC Ethernet" + + +config XENO_DRIVERS_NET_DRV_FEC_ENET + depends on XENO_DRIVERS_NET + tristate "MPC8xx FEC Ethernet" + + +config XENO_DRIVERS_NET_DRV_SCC_ENET + depends on XENO_DRIVERS_NET + tristate "MPC8xx SCC Ethernet" + + +config XENO_DRIVERS_NET_DRV_MPC52XX_FEC + depends on XENO_DRIVERS_NET + tristate "MPC52xx FEC Ethernet" + +endif + + +comment "Misc Drivers" + +config XENO_DRIVERS_NET_DRV_LOOPBACK + depends on XENO_DRIVERS_NET + tristate "Loopback" + default y + +if ARM + +config XENO_DRIVERS_NET_DRV_AT91_ETHER + depends on XENO_DRIVERS_NET && SOC_AT91RM9200 + select XENO_DRIVERS_NET_DRV_MACB + tristate "AT91RM9200 Board Ethernet Driver" + +config XENO_DRIVERS_NET_DRV_MACB + depends on XENO_DRIVERS_NET + select AT91_PROGRAMMABLE_CLOCKS if ARCH_AT91 + tristate "Cadence MACB/GEM devices" + help + Driver for internal MAC-controller on AT91SAM926x microcontrollers. + Porting by Cristiano Mantovani and Stefano Banzi (Marposs SpA). + +endif + +if ARM64 + +config XENO_DRIVERS_NET_FEC + depends on XENO_DRIVERS_NET + tristate "Freescale FEC" + depends on ARCH_MXC || SOC_IMX28 + select PHYLIB + imply PTP_1588_CLOCK + help + For built-in 10/100 Fast ethernet controller on Freescale i.MX + processors. 
+
+endif
+
+source "drivers/xenomai/net/drivers/experimental/Kconfig"
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile
new file mode 100644
index 0000000..3c07320
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile
@@ -0,0 +1,63 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_EXP_DRIVERS) += experimental/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000) += e1000/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000E) += e1000e/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_MPC52XX_FEC) += mpc52xx_fec/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_TULIP) += tulip/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_IGB) += igb/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_FEC) += freescale/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_8139) += rt_8139too.o
+
+rt_8139too-y := 8139too.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_AT91_ETHER) += rt_at91_ether.o
+
+rt_at91_ether-y := at91_ether.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100) += rt_eepro100.o
+
+rt_eepro100-y := eepro100.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK) += rt_loopback.o
+
+rt_loopback-y := loopback.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_FCC_ENET) += rt_mpc8260_fcc_enet.o
+
+rt_mpc8260_fcc_enet-y := mpc8260_fcc_enet.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_SCC_ENET) += rt_mpc8xx_enet.o
+
+rt_mpc8xx_enet-y := mpc8xx_enet.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_FEC_ENET) += rt_mpc8xx_fec.o
+
+rt_mpc8xx_fec-y := mpc8xx_fec.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_NATSEMI) += rt_natsemi.o
+
+rt_natsemi-y := natsemi.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_PCNET32) += rt_pcnet32.o
+
+rt_pcnet32-y := pcnet32.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_MACB) += rt_macb.o
+
+rt_macb-y := macb.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_VIA_RHINE) += rt_via-rhine.o
+
+rt_via-rhine-y := via-rhine.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_R8169) += rt_r8169.o
+
+rt_r8169-y := r8169.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/README.r8169 b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/README.r8169
new file mode 100644
index 0000000..69942f9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/README.r8169
@@ -0,0 +1,42 @@
+For general information on RTnet and Gigabit Ethernet, please have a look
+at the README.gigabit file in the RTnet 'Documentation' directory.
+
+This is the RTnet driver for NICs based on the Realtek RTL8169(S) chipset.
+The following cards should have this chipset:
+
+ o Buffalo LGY-PCI-GT (8169S)
+ o Corega CG-LAPCIGT (8169S)
+ o D-Link DGE-528T (8169S)
+ o Gigabyte 7N400 Pro2 Integrated Gigabit Ethernet (8110S)
+ o LevelOne GNC-0105T (8169S)
+ o Linksys EG1032v3 (8169S)
+ o Netgear GA511 PC Card (8169)
+ o PLANEX COMMUNICATIONS Inc. GN-1200TC (8169S)
+ o Surecom EP-320G-TX1 (8169S)
+ o US Robotics USR997902 (8169S)
+ o Xterasys XN-152 10/100/1000 NIC (8169)
+
+(see <http://www.openbsd.org/cgi-bin/man.cgi?query=re&arch=macppc&sektion=4>)
+
+This driver was actually only tested with a D-Link DGE-528T; for other NICs
+you may have to extend the PCI device id table within the driver.
+
+You can set various debugging levels while loading the module:
+
+DEBUG_RX_SYNC 1 Show received TDMA synchronisation frames
+DEBUG_RX_OTHER 2 Show other received packets
+DEBUG_TX_SYNC 4 Show sent TDMA synchronisation frames
+DEBUG_TX_OTHER 8 Show other sent packets
+DEBUG_RUN 16 Show general debugging info when running...
+
+(The debugging output is a work in progress; the DEBUG_RX_* levels don't work yet).
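+
+For illustration only - a minimal sketch of how such a debug bitmask is
+usually wired up as a module parameter (the variable name and permission
+bits here are assumptions, not necessarily what rt_r8169 uses):
+
+    static int debug;        /* combined bitmask of the levels above */
+    module_param(debug, int, 0444);
+    MODULE_PARM_DESC(debug, "debug level bitmask");
+
+    /* a typical gate around a run-time trace: */
+    if (debug & DEBUG_RUN)
+        rtdm_printk("rt_r8169: up and running\n");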
+ +To see all sent packets (except TDMA sync frames) and general debugging +output, just load the driver like this: + + modprobe rt_r8169 debug=24 + (DEBUG_TX_OTHER + DEBUG_RUN = 8 + 16 = 24) + + +More documentation follows... =8-) + Klaus Keppler diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c new file mode 100644 index 0000000..f8223b0 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c @@ -0,0 +1,460 @@ +/* + * Ethernet driver for the Atmel AT91RM9200 (Thunder) + * + * Copyright (C) 2003 SAN People (Pty) Ltd + * + * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc. + * Initial version by Rick Bronson 01/11/2003 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * RTnet port: + * Copyright (C) 2014 Gilles Chanteperdrix <gch@xenomai.org> + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <linux/ethtool.h> +#include <linux/platform_data/macb.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/gfp.h> +#include <linux/phy.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_net.h> + +#include <rtdev.h> +#include <rtdm/net.h> +#include <rtnet_port.h> +#include <rtskb.h> +#include "rt_macb.h" + +/* 1518 rounded up */ +#define MAX_RBUFF_SZ 0x600 +/* max number of receive buffers */ +#define MAX_RX_DESCR 9 + +/* Initialize and start the Receiver and Transmit subsystems */ +static int at91ether_start(struct rtnet_device *dev) +{ + struct macb *lp = rtnetdev_priv(dev); + dma_addr_t addr; + u32 ctl; + int i; + + lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, + (MAX_RX_DESCR * + sizeof(struct macb_dma_desc)), + &lp->rx_ring_dma, GFP_KERNEL); + if (!lp->rx_ring) + return -ENOMEM; + + lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, + MAX_RX_DESCR * MAX_RBUFF_SZ, + &lp->rx_buffers_dma, GFP_KERNEL); + if (!lp->rx_buffers) { + dma_free_coherent(&lp->pdev->dev, + MAX_RX_DESCR * sizeof(struct macb_dma_desc), + lp->rx_ring, lp->rx_ring_dma); + lp->rx_ring = NULL; + return -ENOMEM; + } + + addr = lp->rx_buffers_dma; + for (i = 0; i < MAX_RX_DESCR; i++) { + lp->rx_ring[i].addr = addr; + lp->rx_ring[i].ctrl = 0; + addr += MAX_RBUFF_SZ; + } + + /* Set the Wrap bit on the last descriptor */ + lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); + + /* Reset buffer index */ + lp->rx_tail = 0; + + /* Program address of descriptor list in Rx Buffer Queue register */ + macb_writel(lp, RBQP, lp->rx_ring_dma); + + /* Enable Receive and Transmit */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); + + return 0; +} + +/* Open the ethernet interface */ +static int at91ether_open(struct rtnet_device *dev) +{ + struct macb *lp = rtnetdev_priv(dev); + u32 ctl; + int ret; + + rt_stack_connect(dev, &STACK_manager); + + /* Clear internal statistics */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); + + rtmacb_set_hwaddr(lp); + + ret = at91ether_start(dev); + if (ret) + return ret; + + /* Enable MAC interrupts */ + macb_writel(lp, IER, MACB_BIT(RCOMP) | + 
MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + /* schedule a link state check */ + phy_start(lp->phy_dev); + + rtnetif_start_queue(dev); + + return 0; +} + +/* Close the interface */ +static int at91ether_close(struct rtnet_device *dev) +{ + struct macb *lp = rtnetdev_priv(dev); + u32 ctl; + + /* Disable Receiver and Transmitter */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); + + /* Disable MAC interrupts */ + macb_writel(lp, IDR, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + rtnetif_stop_queue(dev); + + dma_free_coherent(&lp->pdev->dev, + MAX_RX_DESCR * sizeof(struct macb_dma_desc), + lp->rx_ring, lp->rx_ring_dma); + lp->rx_ring = NULL; + + dma_free_coherent(&lp->pdev->dev, + MAX_RX_DESCR * MAX_RBUFF_SZ, + lp->rx_buffers, lp->rx_buffers_dma); + lp->rx_buffers = NULL; + + rt_stack_disconnect(dev); + + return 0; +} + +/* Transmit packet */ +static int at91ether_start_xmit(struct rtskb *skb, struct rtnet_device *dev) +{ + struct macb *lp = rtnetdev_priv(dev); + + if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { + rtnetif_stop_queue(dev); + + /* Store packet information (to free when Tx completed) */ + lp->skb = skb; + lp->skb_length = skb->len; + lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, + DMA_TO_DEVICE); + + /* Set address of the data in the Transmit Address register */ + macb_writel(lp, TAR, lp->skb_physaddr); + /* Set length of the packet in the Transmit Control register */ + macb_writel(lp, TCR, skb->len); + + } else { + rtdev_err(dev, "%s called, but device is busy!\n", __func__); + return RTDEV_TX_BUSY; + } + + return RTDEV_TX_OK; +} + +/* Extract received frame from buffer descriptors and sent to upper layers. + * (Called from interrupt context) + */ +static bool at91ether_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp) +{ + struct macb *lp = rtnetdev_priv(dev); + unsigned char *p_recv; + struct rtskb *skb; + unsigned int pktlen; + bool ret = false; + + while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { + p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ; + pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); + skb = rtnetdev_alloc_rtskb(dev, pktlen + 2); + if (skb) { + rtskb_reserve(skb, 2); + memcpy(rtskb_put(skb, pktlen), p_recv, pktlen); + + skb->protocol = rt_eth_type_trans(skb, dev); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pktlen; + ret = true; + skb->time_stamp = *time_stamp; + rtnetif_rx(skb); + } else { + lp->stats.rx_dropped++; + } + + if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) + lp->stats.multicast++; + + /* reset ownership bit */ + lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); + + /* wrap after last buffer */ + if (lp->rx_tail == MAX_RX_DESCR - 1) + lp->rx_tail = 0; + else + lp->rx_tail++; + } + + return ret; +} + +/* MAC interrupt handler */ +static int at91ether_interrupt(rtdm_irq_t *irq_handle) +{ + void *dev_id = rtdm_irq_get_arg(irq_handle, void); + nanosecs_abs_t time_stamp = rtdm_clock_read(); + struct rtnet_device *dev = dev_id; + struct macb *lp = rtnetdev_priv(dev); + u32 intstatus, ctl; + + /* MAC Interrupt Status register indicates what interrupts are pending. + * It is automatically cleared once read. 
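+ * Every cause read here must therefore be handled in this single pass;
+ * a pending bit that is read but not acted upon is lost.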
+ */ + intstatus = macb_readl(lp, ISR); + + /* Receive complete */ + if ((intstatus & MACB_BIT(RCOMP)) && at91ether_rx(dev, &time_stamp)) + rt_mark_stack_mgr(dev); + + /* Transmit complete */ + if (intstatus & MACB_BIT(TCOMP)) { + /* The TCOM bit is set even if the transmission failed */ + if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) + lp->stats.tx_errors++; + + if (lp->skb) { + dev_kfree_rtskb(lp->skb); + lp->skb = NULL; + dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); + lp->stats.tx_packets++; + lp->stats.tx_bytes += lp->skb_length; + } + rtnetif_wake_queue(dev); + } + + /* Work-around for EMAC Errata section 41.3.1 */ + if (intstatus & MACB_BIT(RXUBR)) { + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); + macb_writel(lp, NCR, ctl | MACB_BIT(RE)); + } + + if (intstatus & MACB_BIT(ISR_ROVR)) + rtdev_err(dev, "ROVR error\n"); + + return RTDM_IRQ_HANDLED; +} + +#if defined(CONFIG_OF) +static const struct of_device_id at91ether_dt_ids[] = { + { .compatible = "cdns,at91rm9200-emac" }, + { .compatible = "cdns,emac" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, at91ether_dt_ids); +#endif + +/* Detect MAC & PHY and perform ethernet interface initialization */ +static int __init at91ether_probe(struct platform_device *pdev) +{ + struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev); + struct resource *regs; + struct rtnet_device *dev; + struct phy_device *phydev; + struct macb *lp; + int res; + u32 reg; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) + const char *mac; +#endif + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENOENT; + + dev = rt_alloc_etherdev(sizeof(struct macb), MAX_RX_DESCR * 2 + 2); + if (!dev) + return -ENOMEM; + + rtdev_alloc_name(dev, "rteth%d"); + rt_rtdev_connect(dev, &RTDEV_manager); + dev->vers = RTDEV_VERS_2_0; + dev->sysbind = &pdev->dev; + + lp = rtnetdev_priv(dev); + lp->pdev = pdev; + lp->dev = dev; + rtdm_lock_init(&lp->lock); + + /* physical base address */ + dev->base_addr = regs->start; + lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); + if (!lp->regs) { + res = -ENOMEM; + goto err_free_dev; + } + + /* Clock */ + lp->pclk = devm_clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(lp->pclk)) { + res = PTR_ERR(lp->pclk); + goto err_free_dev; + } + clk_enable(lp->pclk); + + lp->hclk = ERR_PTR(-ENOENT); + lp->tx_clk = ERR_PTR(-ENOENT); + + /* Install the interrupt handler */ + dev->irq = platform_get_irq(pdev, 0); + res = rtdm_irq_request(&lp->irq_handle, dev->irq, at91ether_interrupt, 0, dev->name, dev); + if (res) + goto err_disable_clock; + + dev->open = at91ether_open; + dev->stop = at91ether_close; + dev->hard_start_xmit = at91ether_start_xmit; + dev->do_ioctl = rtmacb_ioctl; + dev->get_stats = rtmacb_get_stats; + + platform_set_drvdata(pdev, dev); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) + res = of_get_mac_address(pdev->dev.of_node, lp->dev->dev_addr); + if (res) +#else + mac = of_get_mac_address(pdev->dev.of_node); + if (mac) + memcpy(lp->dev->dev_addr, mac, ETH_ALEN); + else +#endif + rtmacb_get_hwaddr(lp); + + res = of_get_phy_mode(pdev->dev.of_node); + if (res < 0) { + if (board_data && board_data->is_rmii) + lp->phy_interface = PHY_INTERFACE_MODE_RMII; + else + lp->phy_interface = PHY_INTERFACE_MODE_MII; + } else { + lp->phy_interface = res; + } + + macb_writel(lp, NCR, 0); + + reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); + if (lp->phy_interface == PHY_INTERFACE_MODE_RMII) + reg |= 
MACB_BIT(RM9200_RMII); + + macb_writel(lp, NCFGR, reg); + + /* Register the network interface */ + res = rt_register_rtnetdev(dev); + if (res) + goto err_irq_free; + + res = rtmacb_mii_init(lp); + if (res) + goto err_out_unregister_netdev; + + /* will be enabled in open() */ + rtnetif_carrier_off(dev); + + phydev = lp->phy_dev; + rtdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), + phydev->irq); + + /* Display ethernet banner */ + rtdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n", + dev->base_addr, dev->irq, dev->dev_addr); + + return 0; + +err_out_unregister_netdev: + rt_unregister_rtnetdev(dev); +err_irq_free: + rtdm_irq_free(&lp->irq_handle); +err_disable_clock: + clk_disable(lp->pclk); +err_free_dev: + rtdev_free(dev); + return res; +} + +static int at91ether_remove(struct platform_device *pdev) +{ + struct rtnet_device *dev = platform_get_drvdata(pdev); + struct macb *lp = rtnetdev_priv(dev); + + if (lp->phy_dev) + phy_disconnect(lp->phy_dev); + + mdiobus_unregister(lp->mii_bus); + if (lp->phy_phony_net_device) + free_netdev(lp->phy_phony_net_device); + kfree(lp->mii_bus->irq); + rt_rtdev_disconnect(dev); + rtdm_irq_free(&lp->irq_handle); + mdiobus_free(lp->mii_bus); + rt_unregister_rtnetdev(dev); + clk_disable(lp->pclk); + rtdev_free(dev); + + return 0; +} + +static struct platform_driver at91ether_driver = { + .remove = at91ether_remove, + .driver = { + .name = "at91_ether", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(at91ether_dt_ids), + }, +}; + +module_platform_driver_probe(at91ether_driver, at91ether_probe); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); +MODULE_AUTHOR("Andrew Victor"); +MODULE_ALIAS("platform:at91_ether"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile new file mode 100644 index 0000000..1c28452 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile @@ -0,0 +1,8 @@ +ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include + +obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000) += rt_e1000.o + +rt_e1000-y := \ + e1000_hw.o \ + e1000_main.o \ + e1000_param.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h new file mode 100644 index 0000000..44f1efa --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h @@ -0,0 +1,391 @@ +/******************************************************************************* + + + Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. 
+ + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include <linux/stddef.h> +#include <linux/module.h> +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/pci.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/string.h> +#include <linux/pagemap.h> +#include <linux/bitops.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <linux/capability.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <net/pkt_sched.h> +#include <linux/list.h> +#include <linux/reboot.h> +#ifdef NETIF_F_ISO +#undef NETIF_F_ISO +#endif + +#ifdef NETIF_F_TSO +#include <net/checksum.h> +#endif +#ifdef SIOCGMIIPHY +#include <linux/mii.h> +#endif +#ifdef SIOCETHTOOL +#include <linux/ethtool.h> +#endif + +#ifdef NETIF_F_HW_VLAN_TX +#undef NETIF_F_HW_VLAN_TX +#endif + +#ifdef NETIF_F_HW_VLAN_TX +#include <linux/if_vlan.h> +#endif + +// RTNET +#include <rtnet_port.h> + + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +#include "kcompat.h" +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw.h" + +#ifdef DBG +#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args) +#else +#define E1000_DBG(args...) +#endif + +#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args) + +#define PFX "e1000: " +#define DPRINTK(nlevel, klevel, fmt, args...) \ + (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ + __FUNCTION__ , ## args)) + +#define E1000_MAX_INTR 10 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 80 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 80 +#define E1000_MAX_82544_RXD 4096 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_ICH8_APME 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#ifdef NETIF_F_HW_VLAN_TX +#define E1000_MNG_VLAN_NONE -1 +#endif +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1 + +/* only works for sizes that are powers of 2 */ +#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1))) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct e1000_buffer { + struct rtskb *skb; + dma_addr_t dma; + unsigned long time_stamp; + uint16_t length; + uint16_t next_to_watch; +}; + + +struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; +struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + rtdm_lock_t tx_lock; + uint16_t tdh; + uint16_t tdt; + boolean_t last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_page; + struct e1000_ps_page_dma *ps_page_dma; + + /* cpu for rx queue */ + int cpu; + + uint16_t rdh; + uint16_t rdt; +}; + +#define E1000_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { +#ifdef NETIF_F_HW_VLAN_TX + struct vlan_group *vlgrp; + uint16_t mng_vlan_id; +#endif + uint32_t bd_number; + uint32_t rx_buffer_len; + uint32_t part_num; + uint32_t wol; + uint32_t ksp3_port_a; + uint32_t smartspeed; + uint32_t en_mng_pt; + uint16_t link_speed; + uint16_t link_duplex; +#ifdef CONFIG_E1000_NAPI + spinlock_t tx_queue_lock; +#endif + atomic_t irq_sem; + struct work_struct reset_task; + uint8_t fc_autoneg; + +#ifdef ETHTOOL_PHYS_ID + struct timer_list blink_timer; + unsigned long led_status; +#endif + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned long tx_queue_len; + uint32_t txd_cmd; + uint32_t tx_int_delay; + uint32_t tx_abs_int_delay; + uint32_t gotcl; + uint64_t gotcl_old; + uint64_t tpt_old; + uint64_t colc_old; + uint32_t tx_timeout_count; + uint32_t tx_fifo_head; + uint32_t tx_head_addr; + uint32_t tx_fifo_size; + uint8_t tx_timeout_factor; + atomic_t tx_fifo_stall; + boolean_t pcix_82544; + boolean_t detect_tx_hung; + + /* RX */ +#ifdef CONFIG_E1000_NAPI + boolean_t (*clean_rx) (struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +#else + boolean_t (*clean_rx) (struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +#endif + void (*alloc_rx_buf) (struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ +#ifdef CONFIG_E1000_NAPI + struct net_device *polling_netdev; /* One per active queue */ +#endif + int num_tx_queues; + int num_rx_queues; + + uint64_t hw_csum_err; + uint64_t hw_csum_good; + uint64_t rx_hdr_split; + uint32_t alloc_rx_buff_failed; + uint32_t rx_int_delay; + uint32_t rx_abs_int_delay; + boolean_t rx_csum; + unsigned int rx_ps_pages; + uint32_t gorcl; + uint64_t gorcl_old; + uint16_t rx_ps_bsize0; + + /* Interrupt Throttle Rate */ + uint32_t itr; + + /* OS defined structs */ + struct rtnet_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + + rtdm_irq_t irq_handle; + boolean_t data_received; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + +#ifdef ETHTOOL_TEST + uint32_t test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; +#endif + +#ifdef E1000_COUNT_ICR + uint64_t icr_txdw; + uint64_t icr_txqe; + uint64_t icr_lsc; + uint64_t icr_rxseq; + uint64_t icr_rxdmt; + uint64_t icr_rxo; + uint64_t icr_rxt; + uint64_t icr_mdac; + uint64_t icr_rxcfg; + uint64_t icr_gpi; +#endif + + uint32_t *config_space; + int msg_enable; +#ifdef CONFIG_PCI_MSI + boolean_t have_msi; +#endif + /* to not mess up cache alignment, always add to the bottom */ +#ifdef NETIF_F_TSO + boolean_t tso_force; +#endif + boolean_t smart_power_down; /* phy smart power down */ + unsigned long flags; + + struct delayed_work watchdog_task; + struct 
delayed_work fifo_stall_task; + struct delayed_work phy_info_task; +}; + +enum e1000_state_t { + __E1000_DRIVER_TESTING, + __E1000_RESETTING, +}; +#endif /* _E1000_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c new file mode 100644 index 0000000..59c55ff --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c @@ -0,0 +1,9094 @@ +/******************************************************************************* + + + Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + + +#include "e1000_hw.h" + +static int32_t e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static int32_t e1000_setup_copper_link(struct e1000_hw *hw); +static int32_t e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static int32_t e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static int32_t e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static int32_t e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, uint32_t data, + uint16_t count); +static uint16_t e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static int32_t e1000_phy_reset_dsp(struct e1000_hw *hw); +static int32_t e1000_write_eeprom_spi(struct e1000_hw *hw, uint16_t offset, + uint16_t words, uint16_t *data); +static int32_t e1000_write_eeprom_microwire(struct e1000_hw *hw, + uint16_t offset, uint16_t words, + uint16_t *data); +static int32_t e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, uint32_t *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, uint32_t *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, uint16_t data, + uint16_t count); +static int32_t e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, + uint16_t phy_data); +static int32_t e1000_read_phy_reg_ex(struct e1000_hw *hw,uint32_t reg_addr, + uint16_t *phy_data); +static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count); +static int32_t e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void 
e1000_standby_eeprom(struct e1000_hw *hw); +static int32_t e1000_set_vco_speed(struct e1000_hw *hw); +static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static int32_t e1000_set_phy_mode(struct e1000_hw *hw); +static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer); +static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length); +static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, + uint16_t duplex); +static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); + +/* IGP cable length table */ +static const +uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = + { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; + +static const +uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = + { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, + 104, 109, 114, 118, 121, 124}; + + +/****************************************************************************** + * Set the phy type member in the hw struct. 
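+ * + * Illustrative call pattern (a sketch added for readers, not code from the + * original sources): hw->phy_id must already hold the PHY identifier - it is + * normally filled in by e1000_detect_gig_phy() - before this helper runs: + * + *	if (e1000_set_phy_type(hw) != E1000_SUCCESS) + *		goto unsupported_phy;	(hypothetical error label)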
+ * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_set_phy_type(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_set_phy_type"); + + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) { + hw->phy_type = e1000_phy_igp; + break; + } + fallthrough; + case IGP03E1000_E_PHY_ID: + hw->phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + hw->phy_type = e1000_phy_ife; + break; + case GG82563_E_PHY_ID: + if (hw->mac_type == e1000_80003es2lan) { + hw->phy_type = e1000_phy_gg82563; + break; + } + fallthrough; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + + +/****************************************************************************** + * IGP phy init script - initializes the GbE PHY + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +e1000_phy_init_script(struct e1000_hw *hw) +{ + uint32_t ret_val; + uint16_t phy_saved_data; + + DEBUGFUNC("e1000_phy_init_script"); + + if (hw->phy_init_script) { + msec_delay(20); + + /* Save off the current value of register 0x2F5B to be restored at + * the end of this routine. */ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + msec_delay(20); + + e1000_write_phy_reg(hw,0x0000,0x0140); + + msec_delay(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + + msec_delay(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + uint16_t fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & 
IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused); + e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/****************************************************************************** + * Set the mac type member in the hw struct. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_set_mac_type(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_82571EB_COPPER: + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: + hw->mac_type = e1000_82571; + break; + case E1000_DEV_ID_82572EI_COPPER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82572EI_SERDES: + case E1000_DEV_ID_82572EI: + hw->mac_type = e1000_82572; + break; + case E1000_DEV_ID_82573E: + case E1000_DEV_ID_82573E_IAMT: + case E1000_DEV_ID_82573L: + hw->mac_type = e1000_82573; + break; + case E1000_DEV_ID_80003ES2LAN_COPPER_SPT: + case E1000_DEV_ID_80003ES2LAN_SERDES_SPT: + case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->mac_type = e1000_80003es2lan; + break; + case E1000_DEV_ID_ICH8_IGP_M_AMT: + case E1000_DEV_ID_ICH8_IGP_AMT: + case E1000_DEV_ID_ICH8_IGP_C: + case E1000_DEV_ID_ICH8_IFE: + case E1000_DEV_ID_ICH8_IFE_GT: + case E1000_DEV_ID_ICH8_IFE_G: + case 
E1000_DEV_ID_ICH8_IGP_M: + hw->mac_type = e1000_ich8lan; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_ich8lan: + hw->swfwhw_semaphore_present = TRUE; + hw->asf_firmware_present = TRUE; + break; + case e1000_80003es2lan: + hw->swfw_sync_present = TRUE; + fallthrough; + case e1000_82571: + case e1000_82572: + case e1000_82573: + hw->eeprom_semaphore_present = TRUE; + fallthrough; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = TRUE; + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/***************************************************************************** + * Set media type and TBI compatibility. + * + * hw - Struct containing variables accessed by shared code + * **************************************************************************/ +void +e1000_set_media_type(struct e1000_hw *hw) +{ + uint32_t status; + + DEBUGFUNC("e1000_set_media_type"); + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = FALSE; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82572EI_SERDES: + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ich8lan: + case e1000_82573: + /* The STATUS_TBIMODE bit is reserved or reused for this + * device. + */ + hw->media_type = e1000_media_type_copper; + break; + default: + status = E1000_READ_REG(hw, STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = FALSE; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/****************************************************************************** + * Reset the transmit and receive units; mask and clear all interrupts. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_reset_hw(struct e1000_hw *hw) +{ + uint32_t ctrl; + uint32_t ctrl_ext; + uint32_t icr; + uint32_t manc; + uint32_t led_ctrl; + uint32_t timeout; + uint32_t extcnf_ctrl; + int32_t ret_val; + + DEBUGFUNC("e1000_reset_hw"); + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + if (hw->bus_type == e1000_bus_type_pci_express) { + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) { + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + } + } + + /* Clear interrupt mask to stop board from generating interrupts */ + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. 
+ */ + E1000_WRITE_REG(hw, RCTL, 0); + E1000_WRITE_REG(hw, TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = FALSE; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); + msec_delay(5); + } + + /* Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. */ + if (hw->mac_type == e1000_82573) { + timeout = 10; + + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + do { + E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + else + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + msec_delay(2); + timeout--; + } while (timeout); + } + + /* Workaround for ICH8 bit corruption issue in FIFO memory */ + if (hw->mac_type == e1000_ich8lan) { + /* Set Tx and Rx buffer allocation to 8k apiece. */ + E1000_WRITE_REG(hw, PBA, E1000_PBA_8K); + /* Set Packet Buffer Size to 16k. */ + E1000_WRITE_REG(hw, PBS, E1000_PBS_16K); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not affect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + DEBUGOUT("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ich8lan: + if (!hw->phy_reset_disable && + e1000_check_phy_reset_block(hw) == E1000_SUCCESS) { + /* e1000_ich8lan PHY HW reset requires MAC CORE reset + * at the same time to make sure the interface between + * MAC and the external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + } + + e1000_get_software_flag(hw); + E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); + msec_delay(5); + break; + default: + E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings to + * device. Later controllers reload the EEPROM automatically, so just wait + * for reload to complete. 
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + /* Wait for EEPROM reload */ + msec_delay(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msec_delay(20); + break; + case e1000_82573: + if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) { + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + } + fallthrough; + case e1000_82571: + case e1000_82572: + case e1000_ich8lan: + case e1000_80003es2lan: + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + break; + default: + /* Wait for EEPROM reload (it happens automatically) */ + msec_delay(5); + break; + } + + /* Disable HW ARPs */ + manc = E1000_READ_REG(hw, MANC); + manc &= ~(E1000_MANC_ARP_EN | E1000_MANC_ARP_RES_EN); + E1000_WRITE_REG(hw, MANC, manc); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = E1000_READ_REG(hw, LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + icr = E1000_READ_REG(hw, ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + if (hw->mac_type == e1000_ich8lan) { + uint32_t kab = E1000_READ_REG(hw, KABGTXD); + kab |= E1000_KABGTXD_BGSQLBIAS; + E1000_WRITE_REG(hw, KABGTXD, kab); + } + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Performs basic configuration of the adapter. + * + * hw - Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + *****************************************************************************/ +int32_t +e1000_init_hw(struct e1000_hw *hw) +{ + uint32_t ctrl; + uint32_t i; + int32_t ret_val; + uint16_t pcix_cmd_word; + uint16_t pcix_stat_hi_word; + uint16_t cmd_mmrbc; + uint16_t stat_mmrbc; + uint32_t mta_size; + uint32_t reg_data; + uint32_t ctrl_ext; + + DEBUGFUNC("e1000_init_hw"); + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. 
*/ + DEBUGOUT("Initializing the IEEE VLAN\n"); + /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */ + if (hw->mac_type != e1000_ich8lan) { + if (hw->mac_type < e1000_82545_rev_3) + E1000_WRITE_REG(hw, VET, 0); + e1000_clear_vfta(hw); + } + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(hw); + msec_delay(5); + } + + /* Setup the receive address. This involves initializing all of the Receive + * Address Registers (RARs 0 - 15). + */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + E1000_WRITE_REG(hw, RCTL, 0); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + if (hw->mac_type == e1000_ich8lan) + mta_size = E1000_MC_TBL_SIZE_ICH8LAN; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occuring when accessing our register space */ + E1000_WRITE_FLUSH(hw); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = E1000_READ_REG(hw, CTRL); + E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ + if (hw->bus_type == e1000_bus_type_pcix) { + e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word); + e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, + &pcix_stat_hi_word); + cmd_mmrbc = (pcix_cmd_word & PCIX_COMMAND_MMRBC_MASK) >> + PCIX_COMMAND_MMRBC_SHIFT; + stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> + PCIX_STATUS_HI_MMRBC_SHIFT; + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; + if (cmd_mmrbc > stat_mmrbc) { + pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK; + pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; + e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, + &pcix_cmd_word); + } + } + break; + } + + /* More time needed for PHY to initialize */ + if (hw->mac_type == e1000_ich8lan) + msec_delay(15); + + /* Call a subroutine to configure the link and setup flow control. 
*/ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = E1000_READ_REG(hw, TXDCTL); + ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; + switch (hw->mac_type) { + default: + break; + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_ich8lan: + case e1000_80003es2lan: + ctrl |= E1000_TXDCTL_COUNT_DESC; + break; + } + E1000_WRITE_REG(hw, TXDCTL, ctrl); + } + + if (hw->mac_type == e1000_82573) { + e1000_enable_tx_pkt_filtering(hw); + } + + switch (hw->mac_type) { + default: + break; + case e1000_80003es2lan: + /* Enable retransmit on late collisions */ + reg_data = E1000_READ_REG(hw, TCTL); + reg_data |= E1000_TCTL_RTLC; + E1000_WRITE_REG(hw, TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = E1000_READ_REG(hw, TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; + E1000_WRITE_REG(hw, TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = E1000_READ_REG(hw, TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; + E1000_WRITE_REG(hw, TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data); + fallthrough; + case e1000_82571: + case e1000_82572: + case e1000_ich8lan: + ctrl = E1000_READ_REG(hw, TXDCTL1); + ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; + if (hw->mac_type >= e1000_82571) + ctrl |= E1000_TXDCTL_COUNT_DESC; + E1000_WRITE_REG(hw, TXDCTL1, ctrl); + break; + } + + + + if (hw->mac_type == e1000_82573) { + uint32_t gcr = E1000_READ_REG(hw, GCR); + gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + E1000_WRITE_REG(hw, GCR, gcr); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + /* ICH8/Nahum No-snoop bits are opposite polarity. + * Set to snoop by default after reset. */ + if (hw->mac_type == e1000_ich8lan) + e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/****************************************************************************** + * Adjust SERDES output amplitude based on EEPROM setting. + * + * hw - Struct containing variables accessed by shared code. + *****************************************************************************/ +static int32_t +e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + uint16_t eeprom_data; + int32_t ret_val; + + DEBUGFUNC("e1000_adjust_serdes_amplitude"); + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data); + if (ret_val) { + return ret_val; + } + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. 
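+ * (EEPROM_RESERVED_WORD is what an unprogrammed word reads back as - + * typically the all-ones pattern - so the check above keeps the hardware + * default amplitude whenever the SERDES amplitude word was never written.)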
*/ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Configures flow control and link settings. + * + * hw - Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the apropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. + *****************************************************************************/ +int32_t +e1000_setup_link(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + int32_t ret_val; + uint16_t eeprom_data; + + DEBUGFUNC("e1000_setup_link"); + + /* In the case of the phy reset being blocked, we already have a link. + * We do not have to set it up again. */ + if (e1000_check_phy_reset_block(hw)) + return E1000_SUCCESS; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == e1000_fc_default) { + switch (hw->mac_type) { + case e1000_ich8lan: + case e1000_82573: + hw->fc = e1000_fc_full; + break; + default: + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + DEBUGOUT("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = e1000_fc_none; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = e1000_fc_tx_pause; + else + hw->fc = e1000_fc_full; + break; + } + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~e1000_fc_tx_pause); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~e1000_fc_rx_pause); + + hw->original_fc = hw->fc; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + DEBUGOUT("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? 
+ e1000_setup_copper_link(hw) : + e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); + + /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */ + if (hw->mac_type != e1000_ich8lan) { + E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); + } + + E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames in not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & e1000_fc_tx_pause)) { + E1000_WRITE_REG(hw, FCRTL, 0); + E1000_WRITE_REG(hw, FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water marks + * as well as (optionally) enabling the transmission of XON frames. + */ + if (hw->fc_send_xon) { + E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water); + } else { + E1000_WRITE_REG(hw, FCRTL, hw->fc_low_water); + E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/****************************************************************************** + * Sets up link for a fiber based or serdes based adapter + * + * hw - Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + *****************************************************************************/ +static int32_t +e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + uint32_t ctrl; + uint32_t status; + uint32_t txcw = 0; + uint32_t i; + uint32_t signal = 0; + int32_t ret_val; + + DEBUGFUNC("e1000_setup_fiber_serdes_link"); + + /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists + * until explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. + */ + if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) + E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK); + + /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value set in + * the EEPROM. + */ + ctrl = E1000_READ_REG(hw, CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. 
If auto-negotiation is enabled, then software + * will have to set the "PAUSE" bits to the correct value in the Tranmsit + * Config Word Register (TXCW) and re-start auto-negotiation. However, if + * auto-negotiation is disabled, then software will have to manually + * configure the two flow control enable bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case e1000_fc_none: + /* Flow control is completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled and TX Flow control is disabled by a + * software over-ride. Since there really isn't a way to advertise + * that we are capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is disabled, by a + * software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the link + * will be in reset, because we previously reset the chip). This will + * restart auto-negotiation. If auto-neogtiation is successful then the + * link-up status bit will be set and the flow control enable bits (RFCE + * and TFCE) will be set according to their negotiated value. + */ + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, TXCW, txcw); + E1000_WRITE_REG(hw, CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + hw->txcw = txcw; + msec_delay(1); + + /* If we have a signal (the cable is plugged in) then poll for a "Link-Up" + * indication in the Device Status Register. Time-out if a link isn't + * seen in 500 milliseconds seconds (Auto-negotiation should complete in + * less than 500 milliseconds even if the other end is doing it in SW). + * For internal serdes, we just assume a signal is present, then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) { + DEBUGOUT("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msec_delay(10); + status = E1000_READ_REG(hw, STATUS); + if (status & E1000_STATUS_LU) break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the link up if + * we detect a signal. This will allow us to communicate with + * non-autonegotiating link partners. 
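+ * + * (Recap of the TXCW values chosen in the switch above: ANE and FD are + * always advertised; ASM_DIR alone advertises Tx-only pause, while + * PAUSE_MASK - ASM_DIR plus PAUSE - covers both the Rx-only and the full + * flow-control cases, since Rx-only pause cannot be advertised by itself.)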
+ */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + DEBUGOUT("Valid Link Found\n"); + } + } else { + DEBUGOUT("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/****************************************************************************** +* Make sure we have a valid PHY and change PHY mode before link setup. +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +static int32_t +e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + uint32_t ctrl; + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_copper_link_preconfig"); + + ctrl = E1000_READ_REG(hw, CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to what + * the PHY speed and duplex configuration is. In addition, we need to + * perform a hardware reset on the PHY to take it out of reset. + */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, CTRL, ctrl); + } else { + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + E1000_WRITE_REG(hw, CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + DEBUGOUT("Error, did not detect valid phy.\n"); + return ret_val; + } + DEBUGOUT1("Phy ID = %x \n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = FALSE; + + return E1000_SUCCESS; +} + + +/******************************************************************** +* Copper link setup for e1000_phy_igp series. 
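+* +* (hw->mdix convention used below, collected from the option comments in +* this file: 0 selects automatic crossover, 1 forces MDI, 2 forces MDI-X, +* and 3 - on PHYs that support it - means auto for 1000Base-T only.)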
+* +* hw - Struct containing variables accessed by shared code +*********************************************************************/ +static int32_t +e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + uint32_t led_ctrl; + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_copper_link_igp_setup"); + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + DEBUGOUT("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msec_delay(15); + if (hw->mac_type != e1000_ich8lan) { + /* Configure activity LED after PHY reset */ + led_ctrl = E1000_READ_REG(hw, LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, LEDCTL, led_ctrl); + } + + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, FALSE); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D3\n"); + return ret_val; + } + + /* disable lplu d0 during driver init */ + ret_val = e1000_set_d0_lplu_state(hw, FALSE); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + return ret_val; + } + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? 
+ e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/******************************************************************** +* Copper link setup for e1000_phy_gg82563 series. +* +* hw - Struct containing variables accessed by shared code +*********************************************************************/ +static int32_t +e1000_copper_link_ggp_setup(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t phy_data; + uint32_t reg_data; + + DEBUGFUNC("e1000_copper_link_ggp_setup"); + + if (!hw->phy_reset_disable) { + + /* Enable CRS on TX for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000BASE-T for Tx clock */ + phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ; + + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (hw->mdix) { + case 1: + phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (hw->disable_polarity_correction == 1) + phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); + + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + DEBUGOUT("Error Resetting the PHY\n"); + return ret_val; + } + } /* phy_reset_disable */ + + if (hw->mac_type == e1000_80003es2lan) { + /* Bypass RX and TX FIFO's */ + ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL, + E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data); + + if (ret_val) + return ret_val; + + reg_data = E1000_READ_REG(hw, CTRL_EXT); + reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); + E1000_WRITE_REG(hw, CTRL_EXT, reg_data); + + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + /* Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already initialized them. We only initialize + * them if the HW is not in IAMT mode. 
+ */ + if (e1000_check_mng_mode(hw) == FALSE) { + /* Enable Electrical Idle on the PHY */ + phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, + phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, + phy_data); + + if (ret_val) + return ret_val; + } + + /* Workaround: Disable padding in Kumeran interface in the MAC + * and in the PHY to avoid CRC errors. + */ + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL, + &phy_data); + if (ret_val) + return ret_val; + phy_data |= GG82563_ICR_DIS_PADDING; + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/******************************************************************** +* Copper link setup for e1000_phy_m88 series. +* +* hw - Struct containing variables accessed by shared code +*********************************************************************/ +static int32_t +e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_copper_link_mgp_setup"); + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
+ */ + ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + DEBUGOUT("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/******************************************************************** +* Setup auto-negotiation and flow control advertisements, +* and then perform auto-negotiation. +* +* hw - Struct containing variables accessed by shared code +*********************************************************************/ +static int32_t +e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE phy only supports 10/100 */ + if (hw->phy_type == e1000_phy_ife) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = TRUE; + + return E1000_SUCCESS; +} + +/******************************************************************** +* Copper link setup for e1000_phy_ife (Fast Ethernet PHY) series. 
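+* +* (The IFE is a 10/100-only PHY, so the routine below is intentionally a +* no-op; e1000_copper_link_autoneg() separately masks the advertisement +* down to AUTONEG_ADVERTISE_10_100_ALL for this phy_type.)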
+* +* hw - Struct containing variables accessed by shared code +*********************************************************************/ +static int32_t +e1000_copper_link_ife_setup(struct e1000_hw *hw) +{ + if (hw->phy_reset_disable) + return E1000_SUCCESS; + return E1000_SUCCESS; +} + +/****************************************************************************** +* Config the MAC and the PHY after link is up. +* 1) Set up the MAC to the current PHY speed/duplex +* if we are on 82543. If we +* are on newer silicon, we only need to configure +* collision distance in the Transmit Control Register. +* 2) Set up flow control on the MAC to that established with +* the link partner. +* 3) Config DSP to improve Gigabit link quality for some PHY revisions. +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +static int32_t +e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + int32_t ret_val; + DEBUGFUNC("e1000_copper_link_postconfig"); + + if (hw->mac_type >= e1000_82544) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + DEBUGOUT("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + DEBUGOUT("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, TRUE); + if (ret_val) { + DEBUGOUT("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/****************************************************************************** +* Detects which PHY is present and sets up the speed and duplex +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +static int32_t +e1000_setup_copper_link(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t i; + uint16_t phy_data; + uint16_t reg_data; + + DEBUGFUNC("e1000_setup_copper_link"); + + switch (hw->mac_type) { + case e1000_80003es2lan: + case e1000_ich8lan: + /* Set the mac to wait the maximum time between each + * iteration and increase the max iterations when + * polling the phy; this fixes erroneous timeouts at 10Mbps. */ + ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); + if (ret_val) + return ret_val; + default: + break; + } + + /* Check if it is a valid PHY and set PHY mode if necessary. 
*/ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + switch (hw->mac_type) { + case e1000_80003es2lan: + /* Kumeran registers are written-only */ + reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT; + reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; + ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, + reg_data); + if (ret_val) + return ret_val; + break; + default: + break; + } + + if (hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_3 || + hw->phy_type == e1000_phy_igp_2) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_gg82563) { + ret_val = e1000_copper_link_ggp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_ife) { + ret_val = e1000_copper_link_ife_setup(hw); + if (ret_val) + return ret_val; + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H, or 100F + * depending on value from forced_speed_duplex. */ + DEBUGOUT("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + DEBUGOUT("Valid link established!!!\n"); + return E1000_SUCCESS; + } + usec_delay(10); + } + + DEBUGOUT("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/****************************************************************************** +* Configure the MAC-to-PHY interface for 10/100Mbps +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +static int32_t +e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex) +{ + int32_t ret_val = E1000_SUCCESS; + uint32_t tipg; + uint16_t reg_data; + + DEBUGFUNC("e1000_configure_kmrn_for_10_100"); + + reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT; + ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = E1000_READ_REG(hw, TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; + E1000_WRITE_REG(hw, TIPG, tipg); + + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); + + if (ret_val) + return ret_val; + + if (duplex == HALF_DUPLEX) + reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; + else + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); + + return ret_val; +} + +static int32_t +e1000_configure_kmrn_for_1000(struct e1000_hw *hw) +{ + int32_t ret_val = E1000_SUCCESS; + uint16_t reg_data; + uint32_t tipg; + + DEBUGFUNC("e1000_configure_kmrn_for_1000"); + + reg_data = 
E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT; + ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = E1000_READ_REG(hw, TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; + E1000_WRITE_REG(hw, TIPG, tipg); + + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); + + if (ret_val) + return ret_val; + + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); + + return ret_val; +} + +/****************************************************************************** +* Configures PHY autoneg and flow control advertisement settings +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +int32_t +e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t mii_autoneg_adv_reg; + uint16_t mii_1000t_ctrl_reg; + + DEBUGFUNC("e1000_phy_setup_autoneg"); + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (hw->phy_type != e1000_phy_ife) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } else + mii_1000t_ctrl_reg = 0; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + if (hw->phy_type == e1000_phy_ife) { + DEBUGOUT("e1000_phy_ife is a 10/100 PHY. 
+
+/******************************************************************************
+* Force PHY speed and duplex settings to hw->forced_speed_duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+	uint32_t ctrl;
+	int32_t ret_val;
+	uint16_t mii_ctrl_reg;
+	uint16_t mii_status_reg;
+	uint16_t phy_data;
+	uint16_t i;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+	/* Turn off Flow control if we are forcing speed and duplex. */
+	hw->fc = e1000_fc_none;
+
+	DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+	/* Read the Device Control Register. */
+	ctrl = E1000_READ_REG(hw, CTRL);
+
+	/* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~(DEVICE_SPEED_MASK);
+
+	/* Clear the Auto Speed Detect Enable bit. */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Read the MII Control Register. */
+	ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+	if (ret_val)
+		return ret_val;
+
+	/* We need to disable autoneg in order to force link and duplex. */
+	mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Are we forcing Full or Half Duplex? */
+	if (hw->forced_speed_duplex == e1000_100_full ||
+	    hw->forced_speed_duplex == e1000_10_full) {
+		/* We want to force full duplex so we SET the full duplex bits in the
+		 * Device and MII Control Registers.
+		 */
+		ctrl |= E1000_CTRL_FD;
+		mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Full Duplex\n");
+	} else {
+		/* We want to force half duplex so we CLEAR the full duplex bits in
+		 * the Device and MII Control Registers.
+		 */
+		ctrl &= ~E1000_CTRL_FD;
+		mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Half Duplex\n");
+	}
+
+	/* Are we forcing 100Mbps??? */
+	if (hw->forced_speed_duplex == e1000_100_full ||
+	    hw->forced_speed_duplex == e1000_100_half) {
+		/* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+		ctrl |= E1000_CTRL_SPD_100;
+		mii_ctrl_reg |= MII_CR_SPEED_100;
+		mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+		DEBUGOUT("Forcing 100mb ");
+	} else {
+		/* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+		mii_ctrl_reg |= MII_CR_SPEED_10;
+		mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+		DEBUGOUT("Forcing 10mb ");
+	}
+
+	e1000_config_collision_dist(hw);
+
+	/* Write the configured values back to the Device Control Reg. */
+	E1000_WRITE_REG(hw, CTRL, ctrl);
+
+	if ((hw->phy_type == e1000_phy_m88) ||
+	    (hw->phy_type == e1000_phy_gg82563)) {
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+		 * forced whenever speed or duplex are forced.
+		 */
+		phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+		ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+		if (ret_val)
+			return ret_val;
+
+		DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+		/* Need to reset the PHY or these changes will be ignored */
+		mii_ctrl_reg |= MII_CR_RESET;
+	/* Disable MDI-X support for 10/100 */
+	} else if (hw->phy_type == e1000_phy_ife) {
+		ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		phy_data &= ~IFE_PMC_AUTO_MDIX;
+		phy_data &= ~IFE_PMC_FORCE_MDIX;
+
+		ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
+		if (ret_val)
+			return ret_val;
+	} else {
+		/* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+		 * forced whenever speed or duplex are forced.
+		 */
+		ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+		phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+		ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Write back the modified PHY MII control register. */
+	ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+	if (ret_val)
+		return ret_val;
+
+	usec_delay(1);
+
+	/* The wait_autoneg_complete flag may be a little misleading here.
+	 * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+	 * But we do want to delay for a period while forcing only so we
+	 * don't generate false No Link messages. So we will wait here
+	 * only if the user has set wait_autoneg_complete to 1, which is
+	 * the default.
+ */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + DEBUGOUT("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg Complete bit + * to be set. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) break; + msec_delay(100); + } + if ((i == 0) && + ((hw->phy_type == e1000_phy_m88) || + (hw->phy_type == e1000_phy_gg82563))) { + /* We didn't get link. Reset the DSP and wait again for link. */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + DEBUGOUT("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) break; + msec_delay(100); + /* Read the MII Status Register and wait for Auto-Neg Complete bit + * to be set. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This value + * defaults back to a 2.5MHz clock when the PHY is reset. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to enable CRS on + * TX. This must be set for both full and half duplex operation. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && + (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } else if (hw->phy_type == e1000_phy_gg82563) { + /* The TX_CLK of the Extended PHY Specific Control Register defaults + * to 2.5MHz on a reset. We need to re-force it back to 25MHz, if + * we're not in a forced 10/duplex configuration. */ + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if ((hw->forced_speed_duplex == e1000_10_full) || + (hw->forced_speed_duplex == e1000_10_half)) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ; + + /* Also due to the reset, we need to enable CRS on Tx. 
*/
+		phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+
+		ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+		if (ret_val)
+			return ret_val;
+	}
+	return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Sets the collision distance in the Transmit Control register
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+void
+e1000_config_collision_dist(struct e1000_hw *hw)
+{
+	uint32_t tctl, coll_dist;
+
+	DEBUGFUNC("e1000_config_collision_dist");
+
+	if (hw->mac_type < e1000_82543)
+		coll_dist = E1000_COLLISION_DISTANCE_82542;
+	else
+		coll_dist = E1000_COLLISION_DISTANCE;
+
+	tctl = E1000_READ_REG(hw, TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= coll_dist << E1000_COLD_SHIFT;
+
+	E1000_WRITE_REG(hw, TCTL, tctl);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/******************************************************************************
+* Sets MAC speed and duplex settings to reflect those in the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Link should have been established previously. Reads the negotiated speed
+* and duplex from the M88 PHY specific status register and configures the
+* Device Control register accordingly.
+******************************************************************************/
+static int32_t
+e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+	uint32_t ctrl;
+	int32_t ret_val;
+	uint16_t phy_data;
+
+	DEBUGFUNC("e1000_config_mac_to_phy");
+
+	/* 82544 or newer MAC, Auto Speed Detection takes care of
+	 * MAC speed/duplex configuration. */
+	if (hw->mac_type >= e1000_82544)
+		return E1000_SUCCESS;
+
+	/* Read the Device Control Register and set the bits to Force Speed
+	 * and Duplex.
+	 */
+	ctrl = E1000_READ_REG(hw, CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+	/* Set up duplex in the Device Control and Transmit Control
+	 * registers depending on negotiated values.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	if (phy_data & M88E1000_PSSR_DPLX)
+		ctrl |= E1000_CTRL_FD;
+	else
+		ctrl &= ~E1000_CTRL_FD;
+
+	e1000_config_collision_dist(hw);
+
+	/* Set up speed in the Device Control register depending on
+	 * negotiated values.
+	 */
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+		ctrl |= E1000_CTRL_SPD_1000;
+	else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+		ctrl |= E1000_CTRL_SPD_100;
+
+	/* Write the configured values back to the Device Control Reg. */
+	E1000_WRITE_REG(hw, CTRL, ctrl);
+	return E1000_SUCCESS;
+}
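+
+/* Editor's note: a summary sketch, not part of the original patch. The
+ * switch in e1000_force_mac_fc() below amounts to this RFCE/TFCE mapping;
+ * the helper name is hypothetical and "fc" holds an e1000_fc_* value.
+ */
+static uint32_t
+e1000_fc_to_ctrl_bits_sketch(uint32_t fc)
+{
+	uint32_t bits = 0;
+
+	if (fc == e1000_fc_rx_pause || fc == e1000_fc_full)
+		bits |= E1000_CTRL_RFCE;	/* honor received PAUSE frames */
+	if (fc == e1000_fc_tx_pause || fc == e1000_fc_full)
+		bits |= E1000_CTRL_TFCE;	/* transmit PAUSE frames */
+	return bits;
+}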
+
+/******************************************************************************
+ * Forces the MAC's flow control settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ *****************************************************************************/
+int32_t
+e1000_force_mac_fc(struct e1000_hw *hw)
+{
+	uint32_t ctrl;
+
+	DEBUGFUNC("e1000_force_mac_fc");
+
+	/* Get the current configuration of the Device Control Register */
+	ctrl = E1000_READ_REG(hw, CTRL);
+
+	/* Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disables flow control
+	 * according to the "hw->fc" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *	0: Flow control is completely disabled
+	 *	1: Rx flow control is enabled (we can receive pause
+	 *	   frames but not send pause frames).
+	 *	2: Tx flow control is enabled (we can send pause frames
+	 *	   but we do not receive pause frames).
+	 *	3: Both Rx and Tx flow control (symmetric) is enabled.
+	 *	other: No other values should be possible at this point.
+	 */
+	switch (hw->fc) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		DEBUGOUT("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	/* Disable TX Flow Control for 82542 (rev 2.0) */
+	if (hw->mac_type == e1000_82542_rev2_0)
+		ctrl &= (~E1000_CTRL_TFCE);
+
+	E1000_WRITE_REG(hw, CTRL, ctrl);
+	return E1000_SUCCESS;
+}
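+
+/* Editor's note: an illustrative sketch, not part of the original patch.
+ * The IEEE 802.3 PAUSE/ASM_DIR resolution table implemented in
+ * e1000_config_fc_after_link_up() below collapses to the checks here;
+ * the legacy-switch fallback to Rx-only pause is omitted for brevity.
+ * "requested_fc" stands for hw->original_fc; the helper name is
+ * hypothetical.
+ */
+static uint32_t
+e1000_resolve_fc_sketch(uint16_t adv, uint16_t lp_ability, uint32_t requested_fc)
+{
+	if ((adv & NWAY_AR_PAUSE) && (lp_ability & NWAY_LPAR_PAUSE))
+		/* Both ends symmetric-capable; honor an Rx-only request. */
+		return (requested_fc == e1000_fc_full) ?
+			e1000_fc_full : e1000_fc_rx_pause;
+	if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+	    (lp_ability & NWAY_LPAR_PAUSE) && (lp_ability & NWAY_LPAR_ASM_DIR))
+		return e1000_fc_tx_pause;	/* we send, partner receives */
+	if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+	    !(lp_ability & NWAY_LPAR_PAUSE) && (lp_ability & NWAY_LPAR_ASM_DIR))
+		return e1000_fc_rx_pause;	/* partner sends, we receive */
+	return e1000_fc_none;
+}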
+
+/******************************************************************************
+ * Configures flow control settings after link is established
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ *****************************************************************************/
+int32_t
+e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+	int32_t ret_val;
+	uint16_t mii_status_reg;
+	uint16_t mii_nway_adv_reg;
+	uint16_t mii_nway_lp_ability_reg;
+	uint16_t speed;
+	uint16_t duplex;
+
+	DEBUGFUNC("e1000_config_fc_after_link_up");
+
+	/* Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link. In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+	    ((hw->media_type == e1000_media_type_internal_serdes) &&
+	     (hw->autoneg_failed)) ||
+	    ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+		ret_val = e1000_force_mac_fc(hw);
+		if (ret_val) {
+			DEBUGOUT("Error forcing flow control settings\n");
+			return ret_val;
+		}
+	}
+
+	/* Check for the case where we have copper media and auto-neg is
+	 * enabled. In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner have
+	 * flow control configured.
+	 */
+	if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+		/* Read the MII Status Register and check to see if AutoNeg
+		 * has completed. We read this twice because this reg has
+		 * some "sticky" (latched) bits.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+
+		if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+			/* The AutoNeg process has completed, so we now need to
+			 * read both the Auto Negotiation Advertisement Register
+			 * (Address 4) and the Auto-Negotiation Base Page Ability
+			 * Register (Address 5) to determine how flow control was
+			 * negotiated.
+			 */
+			ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+						     &mii_nway_adv_reg);
+			if (ret_val)
+				return ret_val;
+			ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+						     &mii_nway_lp_ability_reg);
+			if (ret_val)
+				return ret_val;
+
+			/* Two bits in the Auto Negotiation Advertisement Register
+			 * (Address 4) and two bits in the Auto Negotiation Base
+			 * Page Ability Register (Address 5) determine flow control
+			 * for both the PHY and the link partner. The following
+			 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+			 * 1999, describes these PAUSE resolution bits and how flow
+			 * control is determined based upon these settings.
+			 * NOTE: DC = Don't Care
+			 *
+			 *   LOCAL DEVICE  |   LINK PARTNER
+			 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+			 *-------|---------|-------|---------|--------------------
+			 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+			 *   0   |    1    |   0   |   DC    | e1000_fc_none
+			 *   0   |    1    |   1   |    0    | e1000_fc_none
+			 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+			 *   1   |    0    |   0   |   DC    | e1000_fc_none
+			 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+			 *   1   |    1    |   0   |    0    | e1000_fc_none
+			 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+			 *
+			 */
+			/* Are both PAUSE bits set to 1? If so, this implies
+			 * Symmetric Flow Control is enabled at both ends. The
+			 * ASM_DIR bits are irrelevant per the spec.
+			 *
+			 * For Symmetric Flow Control:
+			 *
+			 *   LOCAL DEVICE  |   LINK PARTNER
+			 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+			 *-------|---------|-------|---------|--------------------
+			 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+			 *
+			 */
+			if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+			    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+				/* Now we need to check if the user selected RX ONLY
+				 * pause frames. In this case, we had to advertise
+				 * FULL flow control because we could not advertise RX
+				 * ONLY. Hence, we must now check to see if we need to
+				 * turn OFF the TRANSMISSION of PAUSE frames.
+				 */
+				if (hw->original_fc == e1000_fc_full) {
+					hw->fc = e1000_fc_full;
+					DEBUGOUT("Flow Control = FULL.\n");
+				} else {
+					hw->fc = e1000_fc_rx_pause;
+					DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+				}
+			}
+			/* For receiving PAUSE frames ONLY.
+			 *
+			 *   LOCAL DEVICE  |   LINK PARTNER
+			 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+			 *-------|---------|-------|---------|--------------------
+			 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+			 *
+			 */
+			else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+				 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+				 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+				 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+				hw->fc = e1000_fc_tx_pause;
+				DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+			}
+			/* For transmitting PAUSE frames ONLY.
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->original_fc == e1000_fc_none || + hw->original_fc == e1000_fc_tx_pause) || + hw->fc_strict_ieee) { + hw->fc = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } else { + hw->fc = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } else { + DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/****************************************************************************** + * Checks to see if the link status of the hardware has changed. + * + * hw - Struct containing variables accessed by shared code + * + * Called by any function that needs to check the link status of the adapter. + *****************************************************************************/ +int32_t +e1000_check_for_link(struct e1000_hw *hw) +{ + uint32_t rxcw = 0; + uint32_t ctrl; + uint32_t status; + uint32_t rctl; + uint32_t icr; + uint32_t signal = 0; + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_check_for_link"); + + ctrl = E1000_READ_REG(hw, CTRL); + status = E1000_READ_REG(hw, STATUS); + + /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. 
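+ *
+ * (Editor's note: the code below encodes this polarity difference as
+ * signal = (mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0, and the
+ * forced-link path later tests (ctrl & E1000_CTRL_SWDPIN1) == signal to
+ * decide whether a cable is plugged in.)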
+ */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + rxcw = E1000_READ_REG(hw, RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + if (status & E1000_STATUS_LU) + hw->get_link_status = FALSE; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. + */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = FALSE; + /* Check if there was DownShift, must be checked immediately after + * link-up */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the polarity + * reversal workaround. We disable interrupts first, and upon + * returning, place the devices interrupt state to its previous + * value except for the link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + E1000_WRITE_REG(hw, IMC, 0xffffffff); + ret_val = e1000_polarity_reversal_workaround(hw); + icr = E1000_READ_REG(hw, ICR); + E1000_WRITE_REG(hw, ICS, (icr & ~E1000_ICS_LSC)); + E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, FALSE); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, TRUE); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if (hw->mac_type >= e1000_82544) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + DEBUGOUT("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. First, we + * need to restore the desired flow control settings because we may + * have had to re-autoneg with a different link partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the link + * partner capability register. 
We use the link speed to determine if
+		 * TBI compatibility needs to be turned on or off. If the link is not
+		 * at gigabit speed, then TBI compatibility is not needed. If we are
+		 * at gigabit speed, we turn on TBI compatibility.
+		 */
+		if (hw->tbi_compatibility_en) {
+			uint16_t speed, duplex;
+			ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+			if (ret_val) {
+				DEBUGOUT("Error getting link speed and duplex\n");
+				return ret_val;
+			}
+			if (speed != SPEED_1000) {
+				/* If link speed is not set to gigabit speed, we do not need
+				 * to enable TBI compatibility.
+				 */
+				if (hw->tbi_compatibility_on) {
+					/* If we previously were in the mode, turn it off. */
+					rctl = E1000_READ_REG(hw, RCTL);
+					rctl &= ~E1000_RCTL_SBP;
+					E1000_WRITE_REG(hw, RCTL, rctl);
+					hw->tbi_compatibility_on = FALSE;
+				}
+			} else {
+				/* If TBI compatibility was previously off, turn it on. For
+				 * compatibility with a TBI link partner, we will store bad
+				 * packets. Some frames have an additional byte on the end and
+				 * will look like CRC errors to the hardware.
+				 */
+				if (!hw->tbi_compatibility_on) {
+					hw->tbi_compatibility_on = TRUE;
+					rctl = E1000_READ_REG(hw, RCTL);
+					rctl |= E1000_RCTL_SBP;
+					E1000_WRITE_REG(hw, RCTL, rctl);
+				}
+			}
+		}
+	}
+	/* If we don't have link (auto-negotiation failed or link partner cannot
+	 * auto-negotiate), the cable is plugged in (we have signal), and our
+	 * link partner is not trying to auto-negotiate with us (we are receiving
+	 * idles or data), we need to force link up. We also need to give
+	 * auto-negotiation time to complete, in case the cable was just plugged
+	 * in. The autoneg_failed flag does this.
+	 */
+	else if ((((hw->media_type == e1000_media_type_fiber) &&
+		   ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
+		  (hw->media_type == e1000_media_type_internal_serdes)) &&
+		 (!(status & E1000_STATUS_LU)) &&
+		 (!(rxcw & E1000_RXCW_C))) {
+		if (hw->autoneg_failed == 0) {
+			hw->autoneg_failed = 1;
+			return 0;
+		}
+		DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = E1000_READ_REG(hw, CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		E1000_WRITE_REG(hw, CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000_config_fc_after_link_up(hw);
+		if (ret_val) {
+			DEBUGOUT("Error configuring flow control\n");
+			return ret_val;
+		}
+	}
+	/* If we are forcing link and we are receiving /C/ ordered sets, re-enable
+	 * auto-negotiation in the TXCW register and disable forced link in the
+	 * Device Control register in an attempt to auto-negotiate with our link
+	 * partner.
+	 */
+	else if (((hw->media_type == e1000_media_type_fiber) ||
+		  (hw->media_type == e1000_media_type_internal_serdes)) &&
+		 (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		E1000_WRITE_REG(hw, TXCW, hw->txcw);
+		E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		hw->serdes_link_down = FALSE;
+	}
+	/* If we force link for non-auto-negotiation switch, check link status
+	 * based on MAC synchronization for internal serdes media type.
+	 */
+	else if ((hw->media_type == e1000_media_type_internal_serdes) &&
+		 !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+		/* SYNCH bit and IV bit are sticky.
*/ + usec_delay(10); + if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_link_down = FALSE; + DEBUGOUT("SERDES: Link is up.\n"); + } + } else { + hw->serdes_link_down = TRUE; + DEBUGOUT("SERDES: Link is down.\n"); + } + } + if ((hw->media_type == e1000_media_type_internal_serdes) && + (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) { + hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS)); + } + return E1000_SUCCESS; +} + +/****************************************************************************** + * Detects the current speed and duplex settings of the hardware. + * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + *****************************************************************************/ +int32_t +e1000_get_speed_and_duplex(struct e1000_hw *hw, + uint16_t *speed, + uint16_t *duplex) +{ + uint32_t status; + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_get_speed_and_duplex"); + + if (hw->mac_type >= e1000_82543) { + status = E1000_READ_REG(hw, STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT(" Half Duplex\n"); + } + } else { + DEBUGOUT("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade even + * if it is operating at half duplex. Here we set the duplex settings to + * match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) || + (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + if ((hw->mac_type == e1000_80003es2lan) && + (hw->media_type == e1000_media_type_copper)) { + if (*speed == SPEED_1000) + ret_val = e1000_configure_kmrn_for_1000(hw); + else + ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex); + if (ret_val) + return ret_val; + } + + if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { + ret_val = e1000_kumeran_lock_loss_workaround(hw); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/****************************************************************************** +* Blocks until autoneg completes or times out (~4.5 seconds) +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +int32_t +e1000_wait_autoneg(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t i; + uint16_t phy_data; + + DEBUGFUNC("e1000_wait_autoneg"); + DEBUGOUT("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. 
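+		 * (Editor's note: PHY_STATUS is read back-to-back throughout
+		 * this file because its link/autoneg bits are latched; the
+		 * first read returns the sticky value, the second the current
+		 * state.)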
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) { + return E1000_SUCCESS; + } + msec_delay(100); + } + return E1000_SUCCESS; +} + +/****************************************************************************** +* Raises the Management Data Clock +* +* hw - Struct containing variables accessed by shared code +* ctrl - Device control register's current value +******************************************************************************/ +static void +e1000_raise_mdi_clk(struct e1000_hw *hw, + uint32_t *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the MDC + * bit), and then delay 10 microseconds. + */ + E1000_WRITE_REG(hw, CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(hw); + usec_delay(10); +} + +/****************************************************************************** +* Lowers the Management Data Clock +* +* hw - Struct containing variables accessed by shared code +* ctrl - Device control register's current value +******************************************************************************/ +static void +e1000_lower_mdi_clk(struct e1000_hw *hw, + uint32_t *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the MDC + * bit), and then delay 10 microseconds. + */ + E1000_WRITE_REG(hw, CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(hw); + usec_delay(10); +} + +/****************************************************************************** +* Shifts data bits out to the PHY +* +* hw - Struct containing variables accessed by shared code +* data - Data to send out to the PHY +* count - Number of bits to shift out +* +* Bits are shifted out in MSB to LSB order. +******************************************************************************/ +static void +e1000_shift_out_mdi_bits(struct e1000_hw *hw, + uint32_t data, + uint16_t count) +{ + uint32_t ctrl; + uint32_t mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = E1000_READ_REG(hw, CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and + * then raising and lowering the Management Data Clock. A "0" is + * shifted out to the PHY by setting the MDIO bit to "0" and then + * raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + E1000_WRITE_REG(hw, CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + usec_delay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/****************************************************************************** +* Shifts data bits in from the PHY +* +* hw - Struct containing variables accessed by shared code +* +* Bits are shifted in in MSB to LSB order. +******************************************************************************/ +static uint16_t +e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + uint32_t ctrl; + uint16_t data = 0; + uint8_t i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. 
The first two bit (turnaround) times are used + * to avoid contention on the MDIO pin when a read operation is performed. + * These two bits are ignored by us and thrown away. Bits are "shifted in" + * by raising the input to the Management Data Clock (setting the MDC bit), + * and then reading the value of the MDIO bit. + */ + ctrl = E1000_READ_REG(hw, CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + E1000_WRITE_REG(hw, CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + /* Raise and Lower the clock before reading in the data. This accounts for + * the turnaround bits. The first clock occurred when we clocked out the + * last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = E1000_READ_REG(hw, CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + +int32_t +e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) +{ + uint32_t swfw_sync = 0; + uint32_t swmask = mask; + uint32_t fwmask = mask << 16; + int32_t timeout = 200; + + DEBUGFUNC("e1000_swfw_sync_acquire"); + + if (hw->swfwhw_semaphore_present) + return e1000_get_software_flag(hw); + + if (!hw->swfw_sync_present) + return e1000_get_hw_eeprom_semaphore(hw); + + while (timeout) { + if (e1000_get_hw_eeprom_semaphore(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) { + break; + } + + /* firmware currently using resource (fwmask) */ + /* or other software thread currently using resource (swmask) */ + e1000_put_hw_eeprom_semaphore(hw); + msec_delay_irq(5); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); + + e1000_put_hw_eeprom_semaphore(hw); + return E1000_SUCCESS; +} + +void +e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) +{ + uint32_t swfw_sync; + uint32_t swmask = mask; + + DEBUGFUNC("e1000_swfw_sync_release"); + + if (hw->swfwhw_semaphore_present) { + e1000_release_software_flag(hw); + return; + } + + if (!hw->swfw_sync_present) { + e1000_put_hw_eeprom_semaphore(hw); + return; + } + + /* if (e1000_get_hw_eeprom_semaphore(hw)) + * return -E1000_ERR_SWFW_SYNC; */ + while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS); + /* empty */ + + swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); + swfw_sync &= ~swmask; + E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); + + e1000_put_hw_eeprom_semaphore(hw); +} + +/***************************************************************************** +* Reads the value from a PHY register, if the value is on a specific non zero +* page, sets the page first. 
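+* (Editor's note: "sets the page first" refers to the IGP and GG82563
+* branches below, which write IGP01E1000_PHY_PAGE_SELECT or
+* GG82563_PHY_PAGE_SELECT(_ALT) before issuing the actual register access.)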
+* hw - Struct containing variables accessed by shared code +* reg_addr - address of the PHY register to read +******************************************************************************/ +int32_t +e1000_read_phy_reg(struct e1000_hw *hw, + uint32_t reg_addr, + uint16_t *phy_data) +{ + uint32_t ret_val; + uint16_t swfw; + + DEBUGFUNC("e1000_read_phy_reg"); + + if ((hw->mac_type == e1000_80003es2lan) && + (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { + swfw = E1000_SWFW_PHY1_SM; + } else { + swfw = E1000_SWFW_PHY0_SM; + } + if (e1000_swfw_sync_acquire(hw, swfw)) + return -E1000_ERR_SWFW_SYNC; + + if ((hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_3 || + hw->phy_type == e1000_phy_igp_2) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (uint16_t)reg_addr); + if (ret_val) { + e1000_swfw_sync_release(hw, swfw); + return ret_val; + } + } else if (hw->phy_type == e1000_phy_gg82563) { + if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) || + (hw->mac_type == e1000_80003es2lan)) { + /* Select Configuration Page */ + if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, + (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + ret_val = e1000_write_phy_reg_ex(hw, + GG82563_PHY_PAGE_SELECT_ALT, + (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); + } + + if (ret_val) { + e1000_swfw_sync_release(hw, swfw); + return ret_val; + } + } + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + + e1000_swfw_sync_release(hw, swfw); + return ret_val; +} + +int32_t +e1000_read_phy_reg_ex(struct e1000_hw *hw, + uint32_t reg_addr, + uint16_t *phy_data) +{ + uint32_t i; + uint32_t mdic = 0; + const uint32_t phy_addr = 1; + + DEBUGFUNC("e1000_read_phy_reg_ex"); + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + E1000_WRITE_REG(hw, MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < 64; i++) { + usec_delay(50); + mdic = E1000_READ_REG(hw, MDIC); + if (mdic & E1000_MDIC_READY) break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (uint16_t) mdic; + } else { + /* We must first send a preamble through the MDIO pin to signal the + * beginning of an MII instruction. This is done by sending 32 + * consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. The format of + * a MII read instruction consists of a shift out of 14 bits and is + * defined as follows: + * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> + * followed by a shift in of 18 bits. 
This first two bits shifted in + * are TurnAround bits used to avoid contention on the MDIO pin when a + * READ operation is performed. These two bits are thrown away + * followed by a shift in of 16 bits which contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we need to + * "shift in" the 16-bit value (18 total bits) of the requested PHY + * register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/****************************************************************************** +* Writes a value to a PHY register +* +* hw - Struct containing variables accessed by shared code +* reg_addr - address of the PHY register to write +* data - data to write to the PHY +******************************************************************************/ +int32_t +e1000_write_phy_reg(struct e1000_hw *hw, + uint32_t reg_addr, + uint16_t phy_data) +{ + uint32_t ret_val; + uint16_t swfw; + + DEBUGFUNC("e1000_write_phy_reg"); + + if ((hw->mac_type == e1000_80003es2lan) && + (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { + swfw = E1000_SWFW_PHY1_SM; + } else { + swfw = E1000_SWFW_PHY0_SM; + } + if (e1000_swfw_sync_acquire(hw, swfw)) + return -E1000_ERR_SWFW_SYNC; + + if ((hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_3 || + hw->phy_type == e1000_phy_igp_2) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (uint16_t)reg_addr); + if (ret_val) { + e1000_swfw_sync_release(hw, swfw); + return ret_val; + } + } else if (hw->phy_type == e1000_phy_gg82563) { + if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) || + (hw->mac_type == e1000_80003es2lan)) { + /* Select Configuration Page */ + if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, + (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + ret_val = e1000_write_phy_reg_ex(hw, + GG82563_PHY_PAGE_SELECT_ALT, + (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); + } + + if (ret_val) { + e1000_swfw_sync_release(hw, swfw); + return ret_val; + } + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + + e1000_swfw_sync_release(hw, swfw); + return ret_val; +} + +int32_t +e1000_write_phy_reg_ex(struct e1000_hw *hw, + uint32_t reg_addr, + uint16_t phy_data) +{ + uint32_t i; + uint32_t mdic = 0; + const uint32_t phy_addr = 1; + + DEBUGFUNC("e1000_write_phy_reg_ex"); + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data intended + * for the PHY register in the MDI Control register. The MAC will take + * care of interfacing with the PHY to send the desired data. 
+ */ + mdic = (((uint32_t) phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + E1000_WRITE_REG(hw, MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < 641; i++) { + usec_delay(5); + mdic = E1000_READ_REG(hw, MDIC); + if (mdic & E1000_MDIC_READY) break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + /* We'll need to use the SW defined pins to shift the write command + * out to the PHY. We first send a preamble to the PHY to signal the + * beginning of the MII instruction. This is done by sending 32 + * consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate a + * write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the command. The + * format of a MII write instruction is as follows: + * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>. + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (uint32_t) phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +int32_t +e1000_read_kmrn_reg(struct e1000_hw *hw, + uint32_t reg_addr, + uint16_t *data) +{ + uint32_t reg_val; + uint16_t swfw; + DEBUGFUNC("e1000_read_kmrn_reg"); + + if ((hw->mac_type == e1000_80003es2lan) && + (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { + swfw = E1000_SWFW_PHY1_SM; + } else { + swfw = E1000_SWFW_PHY0_SM; + } + if (e1000_swfw_sync_acquire(hw, swfw)) + return -E1000_ERR_SWFW_SYNC; + + /* Write register address */ + reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & + E1000_KUMCTRLSTA_OFFSET) | + E1000_KUMCTRLSTA_REN; + E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); + usec_delay(2); + + /* Read the data returned */ + reg_val = E1000_READ_REG(hw, KUMCTRLSTA); + *data = (uint16_t)reg_val; + + e1000_swfw_sync_release(hw, swfw); + return E1000_SUCCESS; +} + +int32_t +e1000_write_kmrn_reg(struct e1000_hw *hw, + uint32_t reg_addr, + uint16_t data) +{ + uint32_t reg_val; + uint16_t swfw; + DEBUGFUNC("e1000_write_kmrn_reg"); + + if ((hw->mac_type == e1000_80003es2lan) && + (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { + swfw = E1000_SWFW_PHY1_SM; + } else { + swfw = E1000_SWFW_PHY0_SM; + } + if (e1000_swfw_sync_acquire(hw, swfw)) + return -E1000_ERR_SWFW_SYNC; + + reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & + E1000_KUMCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); + usec_delay(2); + + e1000_swfw_sync_release(hw, swfw); + return E1000_SUCCESS; +} + +/****************************************************************************** +* Returns the PHY to the power-on reset state +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +int32_t +e1000_phy_hw_reset(struct e1000_hw *hw) +{ + uint32_t ctrl, ctrl_ext; + uint32_t led_ctrl; + int32_t ret_val; + uint16_t swfw; + + DEBUGFUNC("e1000_phy_hw_reset"); + + /* In the case of the phy reset being blocked, it's not an error, we + * simply return success without performing the reset. 
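+ * (Editor's note: e1000_check_phy_reset_block() returns non-zero when the
+ * reset is blocked, typically by manageability firmware, which is why the
+ * code below maps that case to E1000_SUCCESS without touching the PHY.)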
*/ + ret_val = e1000_check_phy_reset_block(hw); + if (ret_val) + return E1000_SUCCESS; + + DEBUGOUT("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + if ((hw->mac_type == e1000_80003es2lan) && + (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { + swfw = E1000_SWFW_PHY1_SM; + } else { + swfw = E1000_SWFW_PHY0_SM; + } + if (e1000_swfw_sync_acquire(hw, swfw)) { + e1000_release_software_semaphore(hw); + return -E1000_ERR_SWFW_SYNC; + } + /* Read the device control register and assert the E1000_CTRL_PHY_RST + * bit. Then, take it out of reset. + * For pre-e1000_82571 hardware, we delay for 10ms between the assert + * and deassert. For e1000_82571 hardware and later, we instead delay + * for 50us between and 10ms after the deassertion. + */ + ctrl = E1000_READ_REG(hw, CTRL); + E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(hw); + + if (hw->mac_type < e1000_82571) + msec_delay(10); + else + usec_delay(100); + + E1000_WRITE_REG(hw, CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + if (hw->mac_type >= e1000_82571) + msec_delay_irq(10); + e1000_swfw_sync_release(hw, swfw); + } else { + /* Read the Extended Device Control Register, assert the PHY_RESET_DIR + * bit to put the PHY into reset. Then, take it out of reset. + */ + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + msec_delay(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + } + usec_delay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = E1000_READ_REG(hw, LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + ret_val = e1000_get_phy_cfg_done(hw); + e1000_release_software_semaphore(hw); + + if ((hw->mac_type == e1000_ich8lan) && + (hw->phy_type == e1000_phy_igp_3)) { + ret_val = e1000_init_lcd_from_nvm(hw); + if (ret_val) + return ret_val; + } + return ret_val; +} + +/****************************************************************************** +* Resets the PHY +* +* hw - Struct containing variables accessed by shared code +* +* Sets bit 15 of the MII Control regiser +******************************************************************************/ +int32_t +e1000_phy_reset(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_phy_reset"); + + /* In the case of the phy reset being blocked, it's not an error, we + * simply return success without performing the reset. 
*/ + ret_val = e1000_check_phy_reset_block(hw); + if (ret_val) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82541_rev_2: + case e1000_82571: + case e1000_82572: + case e1000_ich8lan: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + usec_delay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/****************************************************************************** +* Work-around for 82566 power-down: on D3 entry- +* 1) disable gigabit link +* 2) write VR power-down enable +* 3) read it back +* if successful continue, else issue LCD reset and repeat +* +* hw - struct containing variables accessed by shared code +******************************************************************************/ +void +e1000_phy_powerdown_workaround(struct e1000_hw *hw) +{ + int32_t reg; + uint16_t phy_data; + int32_t retry = 0; + + DEBUGFUNC("e1000_phy_powerdown_workaround"); + + if (hw->phy_type != e1000_phy_igp_3) + return; + + do { + /* Disable link */ + reg = E1000_READ_REG(hw, PHY_CTRL); + E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + + /* Write VR power-down enable */ + e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); + e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data | + IGP3_VR_CTRL_MODE_SHUT); + + /* Read it back and test */ + e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); + if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry) + break; + + /* Issue PHY reset and repeat at most one more time */ + reg = E1000_READ_REG(hw, CTRL); + E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST); + retry++; + } while (retry); + + return; + +} + +/****************************************************************************** +* Work-around for 82566 Kumeran PCS lock loss: +* On link status change (i.e. PCI reset, speed change) and link is up and +* speed is gigabit- +* 0) if workaround is optionally disabled do nothing +* 1) wait 1ms for Kumeran link to come up +* 2) check Kumeran Diagnostic register PCS lock loss bit +* 3) if not set the link is locked (all is good), otherwise... +* 4) reset the PHY +* 5) repeat up to 10 times +* Note: this is only called for IGP3 copper when speed is 1gb. +* +* hw - struct containing variables accessed by shared code +******************************************************************************/ +int32_t +e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) +{ + int32_t ret_val; + int32_t reg; + int32_t cnt; + uint16_t phy_data; + + if (hw->kmrn_lock_loss_workaround_disabled) + return E1000_SUCCESS; + + /* Make sure link is up before proceeding. If not just return. 
+ * Attempting this while link is negotiating fouls up link + * stability */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + + if (phy_data & MII_SR_LINK_STATUS) { + for (cnt = 0; cnt < 10; cnt++) { + /* read once to clear */ + ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data); + if (ret_val) + return ret_val; + /* and again to get new status */ + ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data); + if (ret_val) + return ret_val; + + /* check for PCS lock */ + if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) + return E1000_SUCCESS; + + /* Issue PHY reset */ + e1000_phy_hw_reset(hw); + msec_delay_irq(5); + } + /* Disable GigE link negotiation */ + reg = E1000_READ_REG(hw, PHY_CTRL); + E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + + /* unable to acquire PCS lock */ + return E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/****************************************************************************** +* Probes the expected PHY address for known PHY IDs +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +int32_t +e1000_detect_gig_phy(struct e1000_hw *hw) +{ + int32_t phy_init_status, ret_val; + uint16_t phy_id_high, phy_id_low; + boolean_t match = FALSE; + + DEBUGFUNC("e1000_detect_gig_phy"); + + /* The 82571 firmware may still be configuring the PHY. In this + * case, we cannot access the PHY until the configuration is done. So + * we explicitly set the PHY values. */ + if (hw->mac_type == e1000_82571 || + hw->mac_type == e1000_82572) { + hw->phy_id = IGP01E1000_I_PHY_ID; + hw->phy_type = e1000_phy_igp_2; + return E1000_SUCCESS; + } + + /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work- + * around that forces PHY page 0 to be set or the reads fail. The rest of + * the code in this routine uses e1000_read_phy_reg to read the PHY ID. + * So for ESB-2 we need to have this set so our reads won't fail. If the + * attached PHY is not a e1000_phy_gg82563, the routines below will figure + * this out as well. */ + if (hw->mac_type == e1000_80003es2lan) + hw->phy_type = e1000_phy_gg82563; + + /* Read the PHY ID Registers to identify which PHY is onboard. 
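+ * (Editor's note: the 32-bit ID is assembled below as
+ * (PHY_ID1 << 16) | (PHY_ID2 & PHY_REVISION_MASK), with the masked-off
+ * low bits of PHY_ID2 kept separately as hw->phy_revision.)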
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (uint32_t) (phy_id_high << 16); + usec_delay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; + break; + case e1000_82573: + if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; + break; + case e1000_80003es2lan: + if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; + break; + case e1000_ich8lan: + if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE; + if (hw->phy_id == IFE_E_PHY_ID) match = TRUE; + if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE; + if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE; + break; + default: + DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/****************************************************************************** +* Resets the PHY's DSP +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +static int32_t +e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + int32_t ret_val; + DEBUGFUNC("e1000_phy_reset_dsp"); + + do { + if (hw->phy_type != e1000_phy_gg82563) { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) break; + } + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/****************************************************************************** +* Get PHY information from various PHY registers for igp PHY only. +* +* hw - Struct containing variables accessed by shared code +* phy_info - PHY information structure +******************************************************************************/ +int32_t +e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + int32_t ret_val; + uint16_t phy_data, polarity, min_length, max_length, average; + + DEBUGFUNC("e1000_phy_igp_get_info"); + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. */ + phy_info->downshift = (e1000_downshift)hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. 
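+     * (the extended 10Base-T distance is therefore reported as the
+     * default "normal" setting below.)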
+     */
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    /* IGP01E1000 always corrects polarity reversal */
+    phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+    /* Check polarity status */
+    ret_val = e1000_check_polarity(hw, &polarity);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (phy_data & IGP01E1000_PSSR_MDIX) >>
+                          IGP01E1000_PSSR_MDIX_SHIFT;
+
+    if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+        IGP01E1000_PSSR_SPEED_1000MBPS) {
+        /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                             SR_1000T_LOCAL_RX_STATUS_SHIFT;
+        phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                              SR_1000T_REMOTE_RX_STATUS_SHIFT;
+
+        /* Get cable length */
+        ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+        if (ret_val)
+            return ret_val;
+
+        /* Translate to old method */
+        average = (max_length + min_length) / 2;
+
+        if (average <= e1000_igp_cable_length_50)
+            phy_info->cable_length = e1000_cable_length_50;
+        else if (average <= e1000_igp_cable_length_80)
+            phy_info->cable_length = e1000_cable_length_50_80;
+        else if (average <= e1000_igp_cable_length_110)
+            phy_info->cable_length = e1000_cable_length_80_110;
+        else if (average <= e1000_igp_cable_length_140)
+            phy_info->cable_length = e1000_cable_length_110_140;
+        else
+            phy_info->cable_length = e1000_cable_length_140;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for ife PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_ife_get_info(struct e1000_hw *hw,
+                       struct e1000_phy_info *phy_info)
+{
+    int32_t ret_val;
+    uint16_t phy_data, polarity;
+
+    DEBUGFUNC("e1000_phy_ife_get_info");
+
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+    phy_info->polarity_correction =
+        (phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
+        IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT;
+
+    if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
+        ret_val = e1000_check_polarity(hw, &polarity);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* Polarity is forced. */
+        polarity = (phy_data & IFE_PSC_FORCE_POLARITY) >>
+                   IFE_PSC_FORCE_POLARITY_SHIFT;
+    }
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode =
+        (phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
+        IFE_PMC_MDIX_MODE_SHIFT;
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for m88 PHY only.
+* +* hw - Struct containing variables accessed by shared code +* phy_info - PHY information structure +******************************************************************************/ +int32_t +e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + int32_t ret_val; + uint16_t phy_data, polarity; + + DEBUGFUNC("e1000_phy_m88_get_info"); + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. */ + phy_info->downshift = (e1000_downshift)hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + (phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT; + phy_info->polarity_correction = + (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + if (hw->phy_type != e1000_phy_gg82563) { + phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + } else { + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, + &phy_data); + if (ret_val) + return ret_val; + + phy_info->cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT; + + phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT; + } + + return E1000_SUCCESS; +} + +/****************************************************************************** +* Get PHY information from various PHY registers +* +* hw - Struct containing variables accessed by shared code +* phy_info - PHY information structure +******************************************************************************/ +int32_t +e1000_phy_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_phy_get_info"); + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + DEBUGOUT("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + DEBUGOUT("PHY info is only valid if link is up\n"); + return 
-E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_3 || + hw->phy_type == e1000_phy_igp_2) + return e1000_phy_igp_get_info(hw, phy_info); + else if (hw->phy_type == e1000_phy_ife) + return e1000_phy_ife_get_info(hw, phy_info); + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +int32_t +e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_validate_mdi_settings"); + + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + DEBUGOUT("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + + +/****************************************************************************** + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. Additionally, if this is ICH8, the flash controller GbE + * registers must be mapped, or this will crash. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + uint32_t eecd = E1000_READ_REG(hw, EECD); + int32_t ret_val = E1000_SUCCESS; + uint16_t eeprom_size; + + DEBUGFUNC("e1000_init_eeprom_params"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; + break; + case e1000_82571: + case e1000_82572: + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; + break; + case e1000_82573: + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + eeprom->use_eerd = TRUE; + eeprom->use_eewr = TRUE; + if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) { + eeprom->type = e1000_eeprom_flash; + 
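+            /* Flash-backed NVM on the 82573 exposes 2048 16-bit words
+             * (4 KB) of EEPROM-like address space. */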
eeprom->word_size = 2048; + + /* Ensure that the Autonomous FLASH update bit is cleared due to + * Flash update issue on parts which use a FLASH for NVM. */ + eecd &= ~E1000_EECD_AUPDEN; + E1000_WRITE_REG(hw, EECD, eecd); + } + break; + case e1000_80003es2lan: + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + eeprom->use_eerd = TRUE; + eeprom->use_eewr = FALSE; + break; + case e1000_ich8lan: + { + int32_t i = 0; + uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG); + + eeprom->type = e1000_eeprom_ich8; + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; + eeprom->word_size = E1000_SHADOW_RAM_WORDS; + + /* Zero the shadow RAM structure. But don't load it from NVM + * so as to save time for driver init */ + if (hw->eeprom_shadow_ram != NULL) { + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { + hw->eeprom_shadow_ram[i].modified = FALSE; + hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; + } + } + + hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) * + ICH8_FLASH_SECTOR_SIZE; + + hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1; + hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK); + hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE; + hw->flash_bank_size /= 2 * sizeof(uint16_t); + + break; + } + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to + * 32KB (incremented by powers of 2). + */ + if (hw->mac_type <= e1000_82547_rev_2) { + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to 256B) + * is never the result used in the shifting logic below. */ + if (eeprom_size) + eeprom_size++; + } else { + eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + } + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/****************************************************************************** + * Raises the EEPROM's clock input. + * + * hw - Struct containing variables accessed by shared code + * eecd - EECD's current value + *****************************************************************************/ +static void +e1000_raise_ee_clk(struct e1000_hw *hw, + uint32_t *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait <delay> microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + E1000_WRITE_REG(hw, EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->eeprom.delay_usec); +} + +/****************************************************************************** + * Lowers the EEPROM's clock input. + * + * hw - Struct containing variables accessed by shared code + * eecd - EECD's current value + *****************************************************************************/ +static void +e1000_lower_ee_clk(struct e1000_hw *hw, + uint32_t *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and then + * wait 50 microseconds. 
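+     * (One e1000_raise_ee_clk()/e1000_lower_ee_clk() pair forms a single
+     * SK clock cycle; the shift helpers below issue one pair per bit.)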
+ */ + *eecd = *eecd & ~E1000_EECD_SK; + E1000_WRITE_REG(hw, EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->eeprom.delay_usec); +} + +/****************************************************************************** + * Shift data bits out to the EEPROM. + * + * hw - Struct containing variables accessed by shared code + * data - data to send to the EEPROM + * count - number of bits to shift out + *****************************************************************************/ +static void +e1000_shift_out_ee_bits(struct e1000_hw *hw, + uint16_t data, + uint16_t count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + uint32_t eecd; + uint32_t mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = E1000_READ_REG(hw, EECD); + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~E1000_EECD_DO; + } else if (eeprom->type == e1000_eeprom_spi) { + eecd |= E1000_EECD_DO; + } + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", + * and then raising and then lowering the clock (the SK bit controls + * the clock input to the EEPROM). A "0" is shifted out to the EEPROM + * by setting "DI" to "0" and then raising and then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + E1000_WRITE_REG(hw, EECD, eecd); + E1000_WRITE_FLUSH(hw); + + usec_delay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + E1000_WRITE_REG(hw, EECD, eecd); +} + +/****************************************************************************** + * Shift data bits in from the EEPROM + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static uint16_t +e1000_shift_in_ee_bits(struct e1000_hw *hw, + uint16_t count) +{ + uint32_t eecd; + uint32_t i; + uint16_t data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value of + * the "DO" bit. During this "shifting in" process the "DI" bit should + * always be clear. + */ + + eecd = E1000_READ_REG(hw, EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = E1000_READ_REG(hw, EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/****************************************************************************** + * Prepares EEPROM for access + * + * hw - Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
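+ *
+ * A typical bit-bang transaction (sketch): e1000_acquire_eeprom(), then
+ * e1000_shift_out_ee_bits() for the opcode and address,
+ * e1000_shift_in_ee_bits() for the data, and finally e1000_release_eeprom().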
+ *****************************************************************************/ +static int32_t +e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + uint32_t eecd, i=0; + + DEBUGFUNC("e1000_acquire_eeprom"); + + if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) + return -E1000_ERR_SWFW_SYNC; + eecd = E1000_READ_REG(hw, EECD); + + if (hw->mac_type != e1000_82573) { + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + E1000_WRITE_REG(hw, EECD, eecd); + eecd = E1000_READ_REG(hw, EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + usec_delay(5); + eecd = E1000_READ_REG(hw, EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, EECD, eecd); + DEBUGOUT("Could not acquire EEPROM grant\n"); + e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); + return -E1000_ERR_EEPROM; + } + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + E1000_WRITE_REG(hw, EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, EECD, eecd); + usec_delay(1); + } + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Returns EEPROM to a "standby" state + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + uint32_t eecd; + + eecd = E1000_READ_REG(hw, EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + E1000_WRITE_REG(hw, EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + E1000_WRITE_REG(hw, EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + E1000_WRITE_REG(hw, EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(eeprom->delay_usec); + } +} + +/****************************************************************************** + * Terminates a command by inverting the EEPROM's chip select pin + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +e1000_release_eeprom(struct e1000_hw *hw) +{ + uint32_t eecd; + + DEBUGFUNC("e1000_release_eeprom"); + + eecd = E1000_READ_REG(hw, EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + E1000_WRITE_REG(hw, EECD, eecd); + + usec_delay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on 
Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + E1000_WRITE_REG(hw, EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + E1000_WRITE_REG(hw, EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + E1000_WRITE_REG(hw, EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, EECD, eecd); + } + + e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); +} + +/****************************************************************************** + * Reads a 16 bit word from the EEPROM. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + uint16_t retry_count = 0; + uint8_t spi_stat_reg; + + DEBUGFUNC("e1000_spi_eeprom_ready"); + + /* Read "Status Register" repeatedly until the LSB is cleared. The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (uint8_t)e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + DEBUGOUT("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Reads a 16 bit word from the EEPROM. + * + * hw - Struct containing variables accessed by shared code + * offset - offset of word in the EEPROM to read + * data - word read from the EEPROM + * words - number of words to read + *****************************************************************************/ +int32_t +e1000_read_eeprom(struct e1000_hw *hw, + uint16_t offset, + uint16_t words, + uint16_t *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + uint32_t i = 0; + int32_t ret_val; + + DEBUGFUNC("e1000_read_eeprom"); + + /* A check for invalid values: offset too large, too many words, and not + * enough words. 
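+     * (For example, a two-word read that starts at the last word of the
+     * part fails the second check.)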
+ */ + if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || + (words == 0)) { + DEBUGOUT("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* FLASH reads without acquiring the semaphore are safe */ + if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && + hw->eeprom.use_eerd == FALSE) { + switch (hw->mac_type) { + case e1000_80003es2lan: + break; + default: + /* Prepare the EEPROM for reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + break; + } + } + + if (eeprom->use_eerd == TRUE) { + ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); + if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || + (hw->mac_type != e1000_82573)) + e1000_release_eeprom(hw); + return ret_val; + } + + if (eeprom->type == e1000_eeprom_ich8) + return e1000_read_eeprom_ich8(hw, offset, words, data); + + if (eeprom->type == e1000_eeprom_spi) { + uint16_t word_in; + uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the opcode */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (uint16_t)(offset*2), eeprom->address_bits); + + /* Read the data. The address of the eeprom internally increments with + * each byte (spi) being read, saving on the overhead of eeprom setup + * and tear-down. The address counter will roll over if reading beyond + * the size of the eeprom, thus allowing the entire memory to be read + * starting from any offset. */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (uint16_t)(offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the overhead + * of eeprom setup and tear-down. */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Reads a 16 bit word from the EEPROM using the EERD register. + * + * hw - Struct containing variables accessed by shared code + * offset - offset of word in the EEPROM to read + * data - word read from the EEPROM + * words - number of words to read + *****************************************************************************/ +int32_t +e1000_read_eeprom_eerd(struct e1000_hw *hw, + uint16_t offset, + uint16_t words, + uint16_t *data) +{ + uint32_t i, eerd = 0; + int32_t error = 0; + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + + E1000_EEPROM_RW_REG_START; + + E1000_WRITE_REG(hw, EERD, eerd); + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); + + if (error) { + break; + } + data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA); + + } + + return error; +} + +/****************************************************************************** + * Writes a 16 bit word from the EEPROM using the EEWR register. 
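+ * Each word is loaded into EEWR together with its address and the START
+ * bit; completion is detected by polling the DONE bit via
+ * e1000_poll_eerd_eewr_done().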
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of the first word in the EEPROM to write
+ * data - words to be written to the EEPROM
+ * words - number of words to write
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_eewr(struct e1000_hw *hw,
+                        uint16_t offset,
+                        uint16_t words,
+                        uint16_t *data)
+{
+    uint32_t register_value = 0;
+    uint32_t i = 0;
+    int32_t error = 0;
+
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+        return -E1000_ERR_SWFW_SYNC;
+
+    for (i = 0; i < words; i++) {
+        register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+                         ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
+                         E1000_EEPROM_RW_REG_START;
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+        if (error) {
+            break;
+        }
+
+        E1000_WRITE_REG(hw, EEWR, register_value);
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+
+        if (error) {
+            break;
+        }
+    }
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+    return error;
+}
+
+/******************************************************************************
+ * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ * read or write is done.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+{
+    uint32_t attempts = 100000;
+    uint32_t i, reg = 0;
+    int32_t done = E1000_ERR_EEPROM;
+
+    for (i = 0; i < attempts; i++) {
+        if (eerd == E1000_EEPROM_POLL_READ)
+            reg = E1000_READ_REG(hw, EERD);
+        else
+            reg = E1000_READ_REG(hw, EEWR);
+
+        if (reg & E1000_EEPROM_RW_REG_DONE) {
+            done = E1000_SUCCESS;
+            break;
+        }
+        usec_delay(5);
+    }
+
+    return done;
+}
+
+/***************************************************************************
+* Description: Determines if the onboard NVM is FLASH or EEPROM.
+*
+* hw - Struct containing variables accessed by shared code
+****************************************************************************/
+boolean_t
+e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
+{
+    uint32_t eecd = 0;
+
+    DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
+
+    if (hw->mac_type == e1000_ich8lan)
+        return FALSE;
+
+    if (hw->mac_type == e1000_82573) {
+        eecd = E1000_READ_REG(hw, EECD);
+
+        /* Isolate bits 15 & 16 */
+        eecd = ((eecd >> 15) & 0x03);
+
+        /* If both bits are set, device is Flash type */
+        if (eecd == 0x03) {
+            return FALSE;
+        }
+    }
+    return TRUE;
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *****************************************************************************/
+int32_t
+e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+    uint16_t checksum = 0;
+    uint16_t i, eeprom_data;
+
+    DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+    if ((hw->mac_type == e1000_82573) &&
+        (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) {
+        /* Check bit 4 of word 10h. If it is 0, firmware is done updating
+         * 10h-12h. Checksum may need to be fixed. */
+        e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
+        if ((eeprom_data & 0x10) == 0) {
+            /* Read 0x23 and check bit 15. This bit is a 1 when the checksum
+             * has already been fixed.
If the checksum is still wrong and this + * bit is a 1, we need to return bad checksum. Otherwise, we need + * to set this bit to a 1 and update the checksum. */ + e1000_read_eeprom(hw, 0x23, 1, &eeprom_data); + if ((eeprom_data & 0x8000) == 0) { + eeprom_data |= 0x8000; + e1000_write_eeprom(hw, 0x23, 1, &eeprom_data); + e1000_update_eeprom_checksum(hw); + } + } + } + + if (hw->mac_type == e1000_ich8lan) { + /* Drivers must allocate the shadow ram structure for the + * EEPROM checksum to be updated. Otherwise, this bit as well + * as the checksum must both be set correctly for this + * validation to pass. + */ + e1000_read_eeprom(hw, 0x19, 1, &eeprom_data); + if ((eeprom_data & 0x40) == 0) { + eeprom_data |= 0x40; + e1000_write_eeprom(hw, 0x19, 1, &eeprom_data); + e1000_update_eeprom_checksum(hw); + } + } + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + DEBUGOUT("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + + if (checksum == (uint16_t) EEPROM_SUM) + return E1000_SUCCESS; + else { + DEBUGOUT("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/****************************************************************************** + * Calculates the EEPROM checksum and writes it to the EEPROM + * + * hw - Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + *****************************************************************************/ +int32_t +e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + uint16_t checksum = 0; + uint16_t i, eeprom_data; + + DEBUGFUNC("e1000_update_eeprom_checksum"); + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + DEBUGOUT("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (uint16_t) EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + DEBUGOUT("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } else if (hw->eeprom.type == e1000_eeprom_flash) { + e1000_commit_shadow_ram(hw); + } else if (hw->eeprom.type == e1000_eeprom_ich8) { + e1000_commit_shadow_ram(hw); + /* Reload the EEPROM, or else modifications will not appear + * until after next adapter reset. */ + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + msec_delay(10); + } + return E1000_SUCCESS; +} + +/****************************************************************************** + * Parent function for writing words to the different EEPROM types. + * + * hw - Struct containing variables accessed by shared code + * offset - offset within the EEPROM to be written to + * words - number of words to write + * data - 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + *****************************************************************************/ +int32_t +e1000_write_eeprom(struct e1000_hw *hw, + uint16_t offset, + uint16_t words, + uint16_t *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + int32_t status = 0; + + DEBUGFUNC("e1000_write_eeprom"); + + /* A check for invalid values: offset too large, too many words, and not + * enough words. 
+ */ + if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || + (words == 0)) { + DEBUGOUT("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* 82573 writes only through eewr */ + if (eeprom->use_eewr == TRUE) + return e1000_write_eeprom_eewr(hw, offset, words, data); + + if (eeprom->type == e1000_eeprom_ich8) + return e1000_write_eeprom_ich8(hw, offset, words, data); + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msec_delay(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/****************************************************************************** + * Writes a 16 bit word to a given offset in an SPI EEPROM. + * + * hw - Struct containing variables accessed by shared code + * offset - offset within the EEPROM to be written to + * words - number of words to write + * data - pointer to array of 8 bit words to be written to the EEPROM + * + *****************************************************************************/ +int32_t +e1000_write_eeprom_spi(struct e1000_hw *hw, + uint16_t offset, + uint16_t words, + uint16_t *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + uint16_t widx = 0; + + DEBUGFUNC("e1000_write_eeprom_spi"); + + while (widx < words) { + uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the opcode */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (uint16_t)((offset + widx)*2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of eeprom */ + while (widx < words) { + uint16_t word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE + * operation, while the smaller eeproms are capable of an 8-byte + * PAGE WRITE operation. Break the inner loop to pass new address + */ + if ((((offset + widx)*2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Writes a 16 bit word to a given offset in a Microwire EEPROM. 
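+ * The EWEN (write enable) opcode must be sent first; each word write is
+ * then confirmed by polling the DO line, and EWDS disables writes again
+ * when done.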
+ * + * hw - Struct containing variables accessed by shared code + * offset - offset within the EEPROM to be written to + * words - number of words to write + * data - pointer to array of 16 bit words to be written to the EEPROM + * + *****************************************************************************/ +int32_t +e1000_write_eeprom_microwire(struct e1000_hw *hw, + uint16_t offset, + uint16_t words, + uint16_t *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + uint32_t eecd; + uint16_t words_written = 0; + uint16_t i = 0; + + DEBUGFUNC("e1000_write_eeprom_microwire"); + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (uint16_t)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (uint16_t)(offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to execute + * the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will + * signal that the command has been completed by raising the DO signal. + * If DO does not go high in 10 milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = E1000_READ_REG(hw, EECD); + if (eecd & E1000_EECD_DO) break; + usec_delay(50); + } + if (i == 200) { + DEBUGOUT("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (uint16_t)(eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Flushes the cached eeprom to NVM. This is done by saving the modified values + * in the eeprom cache and the non modified values in the currently active bank + * to the new bank. 
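+ * On ICH8 the flash holds two banks: the inactive bank is erased, the
+ * merged image is written there, and the signature word (0x13) is updated
+ * so that the new bank becomes the valid one.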
+ * + * hw - Struct containing variables accessed by shared code + * offset - offset of word in the EEPROM to read + * data - word read from the EEPROM + * words - number of words to read + *****************************************************************************/ +int32_t +e1000_commit_shadow_ram(struct e1000_hw *hw) +{ + uint32_t attempts = 100000; + uint32_t eecd = 0; + uint32_t flop = 0; + uint32_t i = 0; + int32_t error = E1000_SUCCESS; + uint32_t old_bank_offset = 0; + uint32_t new_bank_offset = 0; + uint32_t sector_retries = 0; + uint8_t low_byte = 0; + uint8_t high_byte = 0; + uint8_t temp_byte = 0; + boolean_t sector_write_failed = FALSE; + + if (hw->mac_type == e1000_82573) { + /* The flop register will be used to determine if flash type is STM */ + flop = E1000_READ_REG(hw, FLOP); + for (i=0; i < attempts; i++) { + eecd = E1000_READ_REG(hw, EECD); + if ((eecd & E1000_EECD_FLUPD) == 0) { + break; + } + usec_delay(5); + } + + if (i == attempts) { + return -E1000_ERR_EEPROM; + } + + /* If STM opcode located in bits 15:8 of flop, reset firmware */ + if ((flop & 0xFF00) == E1000_STM_OPCODE) { + E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); + } + + /* Perform the flash update */ + E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); + + for (i=0; i < attempts; i++) { + eecd = E1000_READ_REG(hw, EECD); + if ((eecd & E1000_EECD_FLUPD) == 0) { + break; + } + usec_delay(5); + } + + if (i == attempts) { + return -E1000_ERR_EEPROM; + } + } + + if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) { + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written */ + if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) { + new_bank_offset = hw->flash_bank_size * 2; + old_bank_offset = 0; + e1000_erase_ich8_4k_segment(hw, 1); + } else { + old_bank_offset = hw->flash_bank_size * 2; + new_bank_offset = 0; + e1000_erase_ich8_4k_segment(hw, 0); + } + + do { + sector_write_failed = FALSE; + /* Loop for every byte in the shadow RAM, + * which is in units of words. */ + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { + /* Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM */ + if (hw->eeprom_shadow_ram[i].modified == TRUE) { + low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word; + e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset, + &temp_byte); + usec_delay(100); + error = e1000_verify_write_ich8_byte(hw, + (i << 1) + new_bank_offset, + low_byte); + if (error != E1000_SUCCESS) + sector_write_failed = TRUE; + high_byte = + (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8); + e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1, + &temp_byte); + usec_delay(100); + } else { + e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset, + &low_byte); + usec_delay(100); + error = e1000_verify_write_ich8_byte(hw, + (i << 1) + new_bank_offset, low_byte); + if (error != E1000_SUCCESS) + sector_write_failed = TRUE; + e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1, + &high_byte); + } + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. 
We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress */ + if (i == E1000_ICH8_NVM_SIG_WORD) + high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte; + + error = e1000_verify_write_ich8_byte(hw, + (i << 1) + new_bank_offset + 1, high_byte); + if (error != E1000_SUCCESS) + sector_write_failed = TRUE; + + if (sector_write_failed == FALSE) { + /* Clear the now not used entry in the cache */ + hw->eeprom_shadow_ram[i].modified = FALSE; + hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; + } + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. */ + if (sector_write_failed == FALSE) { + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b */ + e1000_read_ich8_byte(hw, + E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, + &high_byte); + high_byte &= 0xBF; + error = e1000_verify_write_ich8_byte(hw, + E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, + high_byte); + if (error != E1000_SUCCESS) + sector_write_failed = TRUE; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase */ + error = e1000_verify_write_ich8_byte(hw, + E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset, + 0); + if (error != E1000_SUCCESS) + sector_write_failed = TRUE; + } + } while (++sector_retries < 10 && sector_write_failed == TRUE); + } + + return error; +} + +/****************************************************************************** + * Reads the adapter's part number from the EEPROM + * + * hw - Struct containing variables accessed by shared code + * part_num - Adapter's part number + *****************************************************************************/ +int32_t +e1000_read_part_num(struct e1000_hw *hw, + uint32_t *part_num) +{ + uint16_t offset = EEPROM_PBA_BYTE_1; + uint16_t eeprom_data; + + DEBUGFUNC("e1000_read_part_num"); + + /* Get word 0 from EEPROM */ + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + DEBUGOUT("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + /* Save word 0 in upper half of part_num */ + *part_num = (uint32_t) (eeprom_data << 16); + + /* Get word 1 from EEPROM */ + if (e1000_read_eeprom(hw, ++offset, 1, &eeprom_data) < 0) { + DEBUGOUT("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + /* Save word 1 in lower half of part_num */ + *part_num |= eeprom_data; + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_read_mac_addr(struct e1000_hw * hw) +{ + uint16_t offset; + uint16_t eeprom_data, i; + + DEBUGFUNC("e1000_read_mac_addr"); + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + DEBUGOUT("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); + hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + 
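+    /* The dual-port parts below derive the second port's MAC address by
+     * flipping the low bit of the last octet when STATUS reports
+     * function 1. */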
case e1000_82546: + case e1000_82546_rev_3: + case e1000_82571: + case e1000_80003es2lan: + if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/****************************************************************************** + * Initializes receive address filters. + * + * hw - Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive addresss registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + *****************************************************************************/ +void +e1000_init_rx_addrs(struct e1000_hw *hw) +{ + uint32_t i; + uint32_t rar_num; + + DEBUGFUNC("e1000_init_rx_addrs"); + + /* Setup the receive address. */ + DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Reserve a spot for the Locally Administered Address to work around + * an 82571 issue in which a reset on one port will reload the MAC on + * the other port. */ + if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) + rar_num -= 1; + if (hw->mac_type == e1000_ich8lan) + rar_num = E1000_RAR_ENTRIES_ICH8LAN; + + /* Zero out the other 15 receive addresses. */ + DEBUGOUT("Clearing RAR[1-15]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(hw); + } +} + +/****************************************************************************** + * Updates the MAC's list of multicast addresses. + * + * hw - Struct containing variables accessed by shared code + * mc_addr_list - the list of new multicast addresses + * mc_addr_count - number of addresses + * pad - number of bytes between addresses in the list + * rar_used_count - offset where to start adding mc addresses into the RAR's + * + * The given list replaces any existing list. Clears the last 15 receive + * address registers and the multicast table. Uses receive address registers + * for the first 15 multicast addresses, and hashes the rest into the + * multicast table. + *****************************************************************************/ +void +e1000_mc_addr_list_update(struct e1000_hw *hw, + uint8_t *mc_addr_list, + uint32_t mc_addr_count, + uint32_t pad, + uint32_t rar_used_count) +{ + uint32_t hash_value; + uint32_t i; + uint32_t num_rar_entry; + uint32_t num_mta_entry; + + DEBUGFUNC("e1000_mc_addr_list_update"); + + /* Set the new number of MC addresses that we are being requested to use. */ + hw->num_mc_addrs = mc_addr_count; + + /* Clear RAR[1-15] */ + DEBUGOUT(" Clearing RAR[1-15]\n"); + num_rar_entry = E1000_RAR_ENTRIES; + if (hw->mac_type == e1000_ich8lan) + num_rar_entry = E1000_RAR_ENTRIES_ICH8LAN; + /* Reserve a spot for the Locally Administered Address to work around + * an 82571 issue in which a reset on one port will reload the MAC on + * the other port. 
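+     * Multicast addresses that do not fit in the remaining RARs are
+     * hashed into the multicast table (MTA) further below.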
*/ + if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) + num_rar_entry -= 1; + + for (i = rar_used_count; i < num_rar_entry; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(hw); + } + + /* Clear the MTA */ + DEBUGOUT(" Clearing MTA\n"); + num_mta_entry = E1000_NUM_MTA_REGISTERS; + if (hw->mac_type == e1000_ich8lan) + num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN; + for (i = 0; i < num_mta_entry; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + E1000_WRITE_FLUSH(hw); + } + + /* Add the new addresses */ + for (i = 0; i < mc_addr_count; i++) { + DEBUGOUT(" Adding the multicast addresses:\n"); + DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i, + mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad)], + mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 1], + mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 2], + mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 3], + mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 4], + mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 5]); + + hash_value = e1000_hash_mc_addr(hw, + mc_addr_list + + (i * (ETH_LENGTH_OF_ADDRESS + pad))); + + DEBUGOUT1(" Hash value = 0x%03X\n", hash_value); + + /* Place this multicast address in the RAR if there is room, * + * else put it in the MTA + */ + if (rar_used_count < num_rar_entry) { + e1000_rar_set(hw, + mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)), + rar_used_count); + rar_used_count++; + } else { + e1000_mta_set(hw, hash_value); + } + } + DEBUGOUT("MC Update Complete\n"); +} + +/****************************************************************************** + * Hashes an address to determine its location in the multicast table + * + * hw - Struct containing variables accessed by shared code + * mc_addr - the multicast address to hash + *****************************************************************************/ +uint32_t +e1000_hash_mc_addr(struct e1000_hw *hw, + uint8_t *mc_addr) +{ + uint32_t hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + if (hw->mac_type == e1000_ich8lan) { + /* [47:38] i.e. 0x158 for above example address */ + hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2)); + } else { + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); + } + break; + case 1: + if (hw->mac_type == e1000_ich8lan) { + /* [46:37] i.e. 0x2B1 for above example address */ + hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3)); + } else { + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); + } + break; + case 2: + if (hw->mac_type == e1000_ich8lan) { + /*[45:36] i.e. 0x163 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); + } else { + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); + } + break; + case 3: + if (hw->mac_type == e1000_ich8lan) { + /* [43:34] i.e. 0x18D for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); + } else { + /* [43:32] i.e. 
0x634 for above example address */
+            hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+        }
+        break;
+    }
+
+    hash_value &= 0xFFF;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_value &= 0x3FF;
+
+    return hash_value;
+}
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+void
+e1000_mta_set(struct e1000_hw *hw,
+              uint32_t hash_value)
+{
+    uint32_t hash_bit, hash_reg;
+    uint32_t mta;
+    uint32_t temp;
+
+    /* The MTA is a register array of 128 32-bit registers.
+     * It is treated like an array of 4096 bits. We want to set
+     * bit BitArray[hash_value]. So we figure out what register
+     * the bit is in, read it, OR in the new bit, then write
+     * back the new value. The register is determined by the
+     * upper 7 bits of the hash value and the bit within that
+     * register is determined by the lower 5 bits of the value.
+     */
+    hash_reg = (hash_value >> 5) & 0x7F;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_reg &= 0x1F;
+    hash_bit = hash_value & 0x1F;
+
+    mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+    mta |= (1 << hash_bit);
+
+    /* If we are on an 82544 and we are trying to write an odd offset
+     * in the MTA, save off the previous entry before writing and
+     * restore the old value after writing.
+     */
+    if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+        temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH(hw);
+        E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+        E1000_WRITE_FLUSH(hw);
+    } else {
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH(hw);
+    }
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void
+e1000_rar_set(struct e1000_hw *hw,
+              uint8_t *addr,
+              uint32_t index)
+{
+    uint32_t rar_low, rar_high;
+
+    /* HW expects these in little endian so we reverse the byte order
+     * from network order (big endian) to little endian
+     */
+    rar_low = ((uint32_t) addr[0] |
+               ((uint32_t) addr[1] << 8) |
+               ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24));
+    rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8));
+
+    /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+     * unit hang.
+     *
+     * Description:
+     * If there are any Rx frames queued up or otherwise present in the HW
+     * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+     * hang. To work around this issue, we have to disable receives and
+     * flush out all Rx frames before we enable RSS. To do so, we redirect
+     * all Rx traffic to manageability and then reset the HW.
+     * This flushes away Rx frames, and (since the redirection to
+     * manageability persists across resets) keeps new ones from coming in
+     * while we work. Then, we clear the Address Valid AV bit for all MAC
+     * addresses and undo the re-direction to manageability.
+     * Now, frames are coming in again, but the MAC won't accept them, so
+     * far so good.
We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + if (hw->leave_av_bit_off == TRUE) + break; + fallthrough; + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(hw); +} + +/****************************************************************************** + * Writes a value to the specified offset in the VLAN filter table. + * + * hw - Struct containing variables accessed by shared code + * offset - Offset in VLAN filter table to write + * value - Value to write into VLAN filter table + *****************************************************************************/ +void +e1000_write_vfta(struct e1000_hw *hw, + uint32_t offset, + uint32_t value) +{ + uint32_t temp; + + if (hw->mac_type == e1000_ich8lan) + return; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(hw); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(hw); + } +} + +/****************************************************************************** + * Clears the VLAN filter table + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +void +e1000_clear_vfta(struct e1000_hw *hw) +{ + uint32_t offset; + uint32_t vfta_value = 0; + uint32_t vfta_offset = 0; + uint32_t vfta_bit_in_reg = 0; + + if (hw->mac_type == e1000_ich8lan) + return; + + if (hw->mac_type == e1000_82573) { + if (hw->mng_cookie.vlan_id != 0) { + /* The VFTA is a 4096-bit field, each bit identifying a single VLAN + * ID. The following operations determine which 32-bit entry + * (i.e. offset) into the array we want to set the VLAN ID + * (i.e. bit) of the manageability unit. */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of the + * manageability unit */ + vfta_value = (offset == vfta_offset) ?
vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); + E1000_WRITE_FLUSH(hw); + } +} + +int32_t +e1000_id_led_init(struct e1000_hw * hw) +{ + uint32_t ledctl; + const uint32_t ledctl_mask = 0x000000FF; + const uint32_t ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const uint32_t ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + uint16_t eeprom_data, i, temp; + const uint16_t led_mask = 0x0F; + + DEBUGFUNC("e1000_id_led_init"); + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = E1000_READ_REG(hw, LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + DEBUGOUT("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((hw->mac_type == e1000_82573) && + (eeprom_data == ID_LED_RESERVED_82573)) + eeprom_data = ID_LED_DEFAULT_82573; + else if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + if (hw->mac_type == e1000_ich8lan) + eeprom_data = ID_LED_DEFAULT_ICH8LAN; + else + eeprom_data = ID_LED_DEFAULT; + } + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/****************************************************************************** + * Prepares SW controllable LED for use and saves the current state of the LED.
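Returning to e1000_clear_vfta above, this standalone sketch works through the VFTA indexing for one VLAN ID; the 7-bit/5-bit split here is an assumption consistent with a 4096-bit table held in 128 32-bit registers, which is what the driver constants imply:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vlan_id = 100;                    /* hypothetical manageability VLAN */
	uint32_t offset = (vlan_id >> 5) & 0x7F;   /* 32-bit entry in the table */
	uint32_t mask   = 1u << (vlan_id & 0x1F);  /* bit within that entry */

	printf("VFTA[%u] |= 0x%08X\n", offset, mask);
	return 0;
}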
+ * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_setup_led(struct e1000_hw *hw) +{ + uint32_t ledctl; + int32_t ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_led"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (uint16_t)(hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = E1000_READ_REG(hw, LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_WRITE_REG(hw, LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + + +/****************************************************************************** + * Used on 82571 and later Si that has LED blink bits. + * Callers must use their own timer and should have already called + * e1000_id_led_init() + * Call e1000_cleanup_led() to stop blinking + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_blink_led_start(struct e1000_hw *hw) +{ + int16_t i; + uint32_t ledctl_blink = 0; + + DEBUGFUNC("e1000_id_led_blink_on"); + + if (hw->mac_type < e1000_82571) { + /* Nothing to do */ + return E1000_SUCCESS; + } + if (hw->media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */ + ledctl_blink = hw->ledctl_mode2; + for (i=0; i < 4; i++) + if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8)); + } + + E1000_WRITE_REG(hw, LEDCTL, ledctl_blink); + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Restores the saved state of the SW controllable LED.
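The blink loop in e1000_blink_led_start above scans four packed 8-bit LED mode fields; a standalone sketch of that scan follows. MODE_LED_ON (0x0E) comes from the comment above, while the blink-flag value is an assumed stand-in:

#include <stdint.h>
#include <stdio.h>

#define MODE_LED_ON 0x0E   /* LED-on mode value, per the comment above */
#define LED0_BLINK  0x80   /* assumed: blink flag inside each LED byte */

int main(void)
{
	uint32_t ledctl_mode2 = 0x0E0F0E0E;   /* hypothetical: LEDs 0, 1, 3 "on" */
	uint32_t ledctl_blink = ledctl_mode2;
	int i;

	/* set the blink flag in every byte whose mode field is "on" */
	for (i = 0; i < 4; i++)
		if (((ledctl_mode2 >> (i * 8)) & 0xFF) == MODE_LED_ON)
			ledctl_blink |= (uint32_t)LED0_BLINK << (i * 8);

	printf("LEDCTL = 0x%08X\n", ledctl_blink);
	return 0;
}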
+ * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_cleanup_led(struct e1000_hw *hw) +{ + int32_t ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_cleanup_led"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + fallthrough; + default: + if (hw->phy_type == e1000_phy_ife) { + e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + break; + } + /* Restore LEDCTL settings */ + E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Turns on the software controllable LED + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_led_on(struct e1000_hw *hw) +{ + uint32_t ctrl = E1000_READ_REG(hw, CTRL); + + DEBUGFUNC("e1000_led_on"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Definable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Definable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Definable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Definable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->phy_type == e1000_phy_ife) { + e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); + } else if (hw->media_type == e1000_media_type_copper) { + E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + E1000_WRITE_REG(hw, CTRL, ctrl); + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Turns off the software controllable LED + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_led_off(struct e1000_hw *hw) +{ + uint32_t ctrl = E1000_READ_REG(hw, CTRL); + + DEBUGFUNC("e1000_led_off"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Definable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Definable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Definable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Definable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; +
} else if (hw->phy_type == e1000_phy_ife) { + e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); + } else if (hw->media_type == e1000_media_type_copper) { + E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + E1000_WRITE_REG(hw, CTRL, ctrl); + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Clears all hardware statistics counters. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +void +e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + volatile uint32_t temp; + + temp = E1000_READ_REG(hw, CRCERRS); + temp = E1000_READ_REG(hw, SYMERRS); + temp = E1000_READ_REG(hw, MPC); + temp = E1000_READ_REG(hw, SCC); + temp = E1000_READ_REG(hw, ECOL); + temp = E1000_READ_REG(hw, MCC); + temp = E1000_READ_REG(hw, LATECOL); + temp = E1000_READ_REG(hw, COLC); + temp = E1000_READ_REG(hw, DC); + temp = E1000_READ_REG(hw, SEC); + temp = E1000_READ_REG(hw, RLEC); + temp = E1000_READ_REG(hw, XONRXC); + temp = E1000_READ_REG(hw, XONTXC); + temp = E1000_READ_REG(hw, XOFFRXC); + temp = E1000_READ_REG(hw, XOFFTXC); + temp = E1000_READ_REG(hw, FCRUC); + + if (hw->mac_type != e1000_ich8lan) { + temp = E1000_READ_REG(hw, PRC64); + temp = E1000_READ_REG(hw, PRC127); + temp = E1000_READ_REG(hw, PRC255); + temp = E1000_READ_REG(hw, PRC511); + temp = E1000_READ_REG(hw, PRC1023); + temp = E1000_READ_REG(hw, PRC1522); + } + + temp = E1000_READ_REG(hw, GPRC); + temp = E1000_READ_REG(hw, BPRC); + temp = E1000_READ_REG(hw, MPRC); + temp = E1000_READ_REG(hw, GPTC); + temp = E1000_READ_REG(hw, GORCL); + temp = E1000_READ_REG(hw, GORCH); + temp = E1000_READ_REG(hw, GOTCL); + temp = E1000_READ_REG(hw, GOTCH); + temp = E1000_READ_REG(hw, RNBC); + temp = E1000_READ_REG(hw, RUC); + temp = E1000_READ_REG(hw, RFC); + temp = E1000_READ_REG(hw, ROC); + temp = E1000_READ_REG(hw, RJC); + temp = E1000_READ_REG(hw, TORL); + temp = E1000_READ_REG(hw, TORH); + temp = E1000_READ_REG(hw, TOTL); + temp = E1000_READ_REG(hw, TOTH); + temp = E1000_READ_REG(hw, TPR); + temp = E1000_READ_REG(hw, TPT); + + if (hw->mac_type != e1000_ich8lan) { + temp = E1000_READ_REG(hw, PTC64); + temp = E1000_READ_REG(hw, PTC127); + temp = E1000_READ_REG(hw, PTC255); + temp = E1000_READ_REG(hw, PTC511); + temp = E1000_READ_REG(hw, PTC1023); + temp = E1000_READ_REG(hw, PTC1522); + } + + temp = E1000_READ_REG(hw, MPTC); + temp = E1000_READ_REG(hw, BPTC); + + if (hw->mac_type < e1000_82543) return; + + temp = E1000_READ_REG(hw, ALGNERRC); + temp = E1000_READ_REG(hw, RXERRC); + temp = E1000_READ_REG(hw, TNCRS); + temp = E1000_READ_REG(hw, CEXTERR); + temp = E1000_READ_REG(hw, TSCTC); + temp = E1000_READ_REG(hw, TSCTFC); + + if (hw->mac_type <= e1000_82544) return; + + temp = E1000_READ_REG(hw, MGTPRC); + temp = E1000_READ_REG(hw, MGTPDC); + temp = E1000_READ_REG(hw, MGTPTC); + + if (hw->mac_type <= e1000_82547_rev_2) return; + + temp = E1000_READ_REG(hw, IAC); + temp = E1000_READ_REG(hw, ICRXOC); + + if (hw->mac_type == e1000_ich8lan) return; + + temp = E1000_READ_REG(hw, ICRXPTC); + temp = E1000_READ_REG(hw, ICRXATC); + temp = E1000_READ_REG(hw, ICTXPTC); + temp = E1000_READ_REG(hw, ICTXATC); + temp = E1000_READ_REG(hw, ICTXQEC); + temp = E1000_READ_REG(hw, ICTXQMTC); + temp = E1000_READ_REG(hw, ICRXDMTC); +} + +/****************************************************************************** + * Resets Adaptive IFS to its 
default state. + * + * hw - Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to TRUE. However, you must initialize + * hw->current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + *****************************************************************************/ +void +e1000_reset_adaptive(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_reset_adaptive"); + + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = FALSE; + E1000_WRITE_REG(hw, AIT, 0); + } else { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + } +} + +/****************************************************************************** + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. + * + * hw - Struct containing variables accessed by shared code; the transmit and + * collision counts since the last callback are taken from + * hw->tx_packet_delta and hw->collision_delta + *****************************************************************************/ +void +e1000_update_adaptive(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_update_adaptive"); + + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = TRUE; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = hw->ifs_min_val; + else + hw->current_ifs_val += hw->ifs_step_size; + E1000_WRITE_REG(hw, AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = FALSE; + E1000_WRITE_REG(hw, AIT, 0); + } + } + } else { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + } +} + +/****************************************************************************** + * Adjusts the statistics counters when a frame is accepted by TBI_ACCEPT + * + * hw - Struct containing variables accessed by shared code + * stats - Pointer to the statistics counters to adjust + * frame_len - The length of the frame in question + * mac_addr - The Ethernet destination address of the frame in question + *****************************************************************************/ +void +e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + uint32_t frame_len, + uint8_t *mac_addr) +{ + uint64_t carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast?
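The lost-carry test just above deserves a worked example; this standalone sketch applies the same check to a counter kept as two 32-bit halves, as done for GORCL/GORCH:

#include <stdint.h>
#include <stdio.h>

/* If bit 31 of the low half was set before the add and clear after,
 * the carry out of the low word was lost and the high half must be
 * incremented (valid for increments below 2^31).
 */
static void add64(uint32_t *lo, uint32_t *hi, uint32_t n)
{
	uint32_t carry_bit = *lo & 0x80000000u;

	*lo += n;
	if (carry_bit && !(*lo & 0x80000000u))
		(*hi)++;
}

int main(void)
{
	uint32_t lo = 0xFFFFFFF0u, hi = 0;

	add64(&lo, &hi, 0x20);   /* wraps past 2^32: expect hi == 1 */
	printf("hi=%u lo=0x%08X\n", hi, lo);
	return 0;
}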
Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff)) + /* Broadcast packet */ + stats->bprc++; + else if (*mac_addr & 0x01) + /* Multicast packet */ + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. + */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +/****************************************************************************** + * Gets the current PCI bus type, speed, and width of the hardware + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +void +e1000_get_bus_info(struct e1000_hw *hw) +{ + uint32_t status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_unknown; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + case e1000_82572: + case e1000_82573: + hw->bus_type = e1000_bus_type_pci_express; + hw->bus_speed = e1000_bus_speed_2500; + hw->bus_width = e1000_bus_width_pciex_1; + break; + case e1000_82571: + case e1000_ich8lan: + case e1000_80003es2lan: + hw->bus_type = e1000_bus_type_pci_express; + hw->bus_speed = e1000_bus_speed_2500; + hw->bus_width = e1000_bus_width_pciex_4; + break; + default: + status = E1000_READ_REG(hw, STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} +/****************************************************************************** + * Reads a value from one of the device's registers using port I/O (as opposed + * to memory mapped I/O). Only 82544 and newer devices support port I/O.
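The two port I/O helpers that follow implement a classic indexed-register window: the offset is written through an address port, and the data is then read or written through a data port at io_base + 4. Below is a memory-backed sketch of that pattern; the io_* helpers and fake_regs array are hypothetical stand-ins for real port accesses:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[64];   /* stand-in for the device register file */
static uint32_t fake_index;      /* stand-in for the address port latch   */

static void io_write(int is_addr_port, uint32_t val)
{
	if (is_addr_port)
		fake_index = val;                  /* select a register offset */
	else
		fake_regs[fake_index >> 2] = val;  /* write the selected register */
}

static uint32_t io_read(void)
{
	return fake_regs[fake_index >> 2];         /* read the selected register */
}

int main(void)
{
	io_write(1, 0x8);        /* select offset 0x8 */
	io_write(0, 0xABCD);     /* write through the data port */
	io_write(1, 0x8);        /* re-select, then read it back */
	printf("reg[0x8] = 0x%X\n", io_read());
	return 0;
}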
+ * + * hw - Struct containing variables accessed by shared code + * offset - offset to read from + *****************************************************************************/ +uint32_t +e1000_read_reg_io(struct e1000_hw *hw, + uint32_t offset) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + return e1000_io_read(hw, io_data); +} + +/****************************************************************************** + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + * + * hw - Struct containing variables accessed by shared code + * offset - offset to write to + * value - value to write + *****************************************************************************/ +void +e1000_write_reg_io(struct e1000_hw *hw, + uint32_t offset, + uint32_t value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + + +/****************************************************************************** + * Estimates the cable length. + * + * hw - Struct containing variables accessed by shared code + * min_length - The estimated minimum length + * max_length - The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. + *****************************************************************************/ +int32_t +e1000_get_cable_length(struct e1000_hw *hw, + uint16_t *min_length, + uint16_t *max_length) +{ + int32_t ret_val; + uint16_t agc_value = 0; + uint16_t i, phy_data; + uint16_t cable_length; + + DEBUGFUNC("e1000_get_cable_length"); + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + break; + } + } else if (hw->phy_type == e1000_phy_gg82563) { + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, + &phy_data); + if (ret_val) + return ret_val; + cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH; + + switch (cable_length) { + case e1000_gg_cable_length_60: + *min_length = 0; + *max_length = e1000_igp_cable_length_60; + break; + case e1000_gg_cable_length_60_115: + *min_length = e1000_igp_cable_length_60; + *max_length = 
e1000_igp_cable_length_115; + break; + case e1000_gg_cable_length_115_150: + *min_length = e1000_igp_cable_length_115; + *max_length = e1000_igp_cable_length_150; + break; + case e1000_gg_cable_length_150: + *min_length = e1000_igp_cable_length_150; + *max_length = e1000_igp_cable_length_180; + break; + default: + return -E1000_ERR_PHY; + break; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + uint16_t cur_agc_value; + uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = + {IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D}; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + + ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } else if (hw->phy_type == e1000_phy_igp_2 || + hw->phy_type == e1000_phy_igp_3) { + uint16_t cur_agc_index, max_agc_index = 0; + uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1; + uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = + {IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D}; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of coarse and + * fine gain values. The result is a number that can be put into + * the lookup table to obtain the approximate cable length. */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters.
*/ + *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + *max_length = agc_value + IGP02E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/****************************************************************************** + * Check the cable polarity + * + * hw - Struct containing variables accessed by shared code + * polarity - output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. + *****************************************************************************/ +int32_t +e1000_check_polarity(struct e1000_hw *hw, + uint16_t *polarity) +{ + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_check_polarity"); + + if ((hw->phy_type == e1000_phy_m88) || + (hw->phy_type == e1000_phy_gg82563)) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT; + } else if (hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_3 || + hw->phy_type == e1000_phy_igp_2) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to + * find the polarity status */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? 1 : 0; + } else { + /* For 10 Mbps, read the polarity bit in the status register. (for + * 100 Mbps this bit is always 0) */ + *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED; + } + } else if (hw->phy_type == e1000_phy_ife) { + ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL, + &phy_data); + if (ret_val) + return ret_val; + *polarity = (phy_data & IFE_PESC_POLARITY_REVERSED) >> + IFE_PESC_POLARITY_REVERSED_SHIFT; + } + return E1000_SUCCESS; +} + +/****************************************************************************** + * Check if Downshift occurred + * + * hw - Struct containing variables accessed by shared code + * downshift - output parameter : 0 - No Downshift occurred. + * 1 - Downshift occurred. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established.
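To make the IGP2/IGP3 cable-length averaging above concrete, this standalone sketch sums four per-channel estimates, drops the minimum and maximum, and averages the remaining two channels (the channel values here are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical per-channel cable length estimates in meters */
	uint16_t chan[4] = { 52, 55, 48, 90 };
	uint16_t sum = 0, min = chan[0], max = chan[0];
	int i;

	for (i = 0; i < 4; i++) {
		sum += chan[i];
		if (chan[i] < min) min = chan[i];
		if (chan[i] > max) max = chan[i];
	}

	/* drop the outliers, average the remaining two channels */
	sum -= (uint16_t)(min + max);
	printf("estimated length: %u m\n", sum / 2);
	return 0;
}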
+ *****************************************************************************/ +int32_t +e1000_check_downshift(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_check_downshift"); + + if (hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_3 || + hw->phy_type == e1000_phy_igp_2) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0; + } else if ((hw->phy_type == e1000_phy_m88) || + (hw->phy_type == e1000_phy_gg82563)) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } else if (hw->phy_type == e1000_phy_ife) { + /* e1000_phy_ife supports 10/100 speed only */ + hw->speed_downgraded = FALSE; + } + + return E1000_SUCCESS; +} + +/***************************************************************************** + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + ****************************************************************************/ + +int32_t +e1000_config_dsp_after_link_change(struct e1000_hw *hw, + boolean_t link_up) +{ + int32_t ret_val; + uint16_t phy_data, phy_saved_data, speed, duplex, i; + uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = + {IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D}; + uint16_t min_length, max_length; + + DEBUGFUNC("e1000_config_dsp_after_link_change"); + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if ((hw->dsp_config_state == e1000_dsp_config_enabled) && + min_length >= e1000_igp_cable_length_50) { + + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } + + if ((hw->ffe_config_state == e1000_ffe_config_enabled) && + (min_length < e1000_igp_cable_length_50)) { + + uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + uint32_t idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + usec_delay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + 
ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be restored at + * the end of the routines. */ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msec_delay_irq(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be restored at + * the end of the routines. */ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msec_delay_irq(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/***************************************************************************** + * Set PHY to class A mode + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. 
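The DSP reconfiguration in e1000_config_dsp_after_link_change above brackets its work between muting and unmuting the PHY transmitter via register 0x2F5B; here is a condensed standalone sketch of that save/mute/modify/restore shape, with a plain array standing in for the PHY register file:

#include <stdint.h>
#include <stdio.h>

static uint16_t phy[0x10000];   /* stand-in for the PHY register file */

int main(void)
{
	uint16_t saved = phy[0x2F5B];   /* save transmitter control */

	phy[0x2F5B] = 0x0003;           /* mute the transmitter, as above */
	/* ... DSP/FFE register writes would go here ... */
	phy[0x2F5B] = saved;            /* restore; transmitter re-enabled */

	printf("0x2F5B restored to 0x%04X\n", phy[0x2F5B]);
	return 0;
}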
+ * + * hw - Struct containing variables accessed by shared code + ****************************************************************************/ +static int32_t +e1000_set_phy_mode(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t eeprom_data; + + DEBUGFUNC("e1000_set_phy_mode"); + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data); + if (ret_val) { + return ret_val; + } + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = FALSE; + } + } + + return E1000_SUCCESS; +} + +/***************************************************************************** + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vice versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. + * hw: Struct containing variables accessed by shared code + * active - true to enable lplu, false to disable lplu. + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + ****************************************************************************/ + +int32_t +e1000_set_d3_lplu_state(struct e1000_hw *hw, + boolean_t active) +{ + uint32_t phy_ctrl = 0; + int32_t ret_val; + uint16_t phy_data; + DEBUGFUNC("e1000_set_d3_lplu_state"); + + if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2 + && hw->phy_type != e1000_phy_igp_3) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used for + * Dx transitions and states */ + if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } else if (hw->mac_type == e1000_ich8lan) { + /* MAC writes into PHY register based on the state transition + * and starts auto-negotiation. SW driver can overwrite the settings + * in CSR PHY power control E1000_PHY_CTRL register. */ + phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); + } else { + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); + if (ret_val) + return ret_val; + } else { + if (hw->mac_type == e1000_ich8lan) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); + } else { + phy_data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during + * Dx states where the power conservation is most important. During + * driver activity we should enable SmartSpeed, so performance is + * maintained.
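Since LPLU and SmartSpeed are mutually exclusive, the D3 and D0 paths both follow the same policy; here is a simplified standalone sketch with boolean stand-ins for the PHY bits (the driver's three-valued smart_speed setting is reduced to a single preference for brevity):

#include <stdbool.h>
#include <stdio.h>

struct phy_state { bool lplu; bool smart_speed; };

/* Enabling LPLU forces SmartSpeed off; disabling LPLU restores
 * SmartSpeed to the configured preference.
 */
static void set_lplu(struct phy_state *p, bool active, bool smart_speed_pref)
{
	p->lplu = active;
	p->smart_speed = active ? false : smart_speed_pref;
}

int main(void)
{
	struct phy_state p = { false, true };

	set_lplu(&p, true, true);
	printf("lplu=%d smart_speed=%d\n", p.lplu, p.smart_speed);
	set_lplu(&p, false, true);
	printf("lplu=%d smart_speed=%d\n", p.lplu, p.smart_speed);
	return 0;
}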
*/ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); + if (ret_val) + return ret_val; + } else { + if (hw->mac_type == e1000_ich8lan) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); + } else { + phy_data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); + if (ret_val) + return ret_val; + + } + return E1000_SUCCESS; +} + +/***************************************************************************** + * + * This function sets the lplu d0 state according to the active flag. When + * activating lplu this function also disables smart speed and vice versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. + * hw: Struct containing variables accessed by shared code + * active - true to enable lplu, false to disable lplu. + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + ****************************************************************************/ + +int32_t +e1000_set_d0_lplu_state(struct e1000_hw *hw, + boolean_t active) +{ + uint32_t phy_ctrl = 0; + int32_t ret_val; + uint16_t phy_data; + DEBUGFUNC("e1000_set_d0_lplu_state"); + + if (hw->mac_type <= e1000_82547_rev_2) + return E1000_SUCCESS; + + if (hw->mac_type == e1000_ich8lan) { + phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); + } else { + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_ich8lan) { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); + } else { + phy_data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during + * Dx states where the power conservation is most important. During + * driver activity we should enable SmartSpeed, so performance is + * maintained.
*/ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + + + } else { + + if (hw->mac_type == e1000_ich8lan) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); + } else { + phy_data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); + if (ret_val) + return ret_val; + + } + return E1000_SUCCESS; +} + +/****************************************************************************** + * Change VCO speed register to improve Bit Error Rate performance of SERDES. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static int32_t +e1000_set_vco_speed(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t default_page = 0; + uint16_t phy_data; + + DEBUGFUNC("e1000_set_vco_speed"); + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + + +/***************************************************************************** + * This function reads the cookie from ARC ram. + * + * returns: - E1000_SUCCESS . 
+ ****************************************************************************/ +int32_t +e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) +{ + uint8_t i; + uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET; + uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH; + + length = (length >> 2); + offset = (offset >> 2); + + for (i = 0; i < length; i++) { + *((uint32_t *) buffer + i) = + E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i); + } + return E1000_SUCCESS; +} + + +/***************************************************************************** + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. + * It busy-waits if the previous command is not completed. + * + * returns: - E1000_ERR_HOST_INTERFACE_COMMAND if the interface is not ready + * or on timeout + * - E1000_SUCCESS for success. + ****************************************************************************/ +int32_t +e1000_mng_enable_host_if(struct e1000_hw * hw) +{ + uint32_t hicr; + uint8_t i; + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, HICR); + if ((hicr & E1000_HICR_EN) == 0) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* Check that the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timed out.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + return E1000_SUCCESS; +} + +/***************************************************************************** + * This function writes the buffer content at the given offset on the host + * interface. It handles alignment so that the writes are done in the most + * efficient way, and it accumulates the byte sum of the buffer into the + * *sum parameter. + * + * returns - E1000_SUCCESS for success. + ****************************************************************************/ +int32_t +e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, + uint16_t length, uint16_t offset, uint8_t *sum) +{ + uint8_t *tmp; + uint8_t *bufptr = buffer; + uint32_t data = 0; + uint16_t remaining, i, j, prev_bytes; + + /* *sum is only the running sum of the data; it is not a checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) { + return -E1000_ERR_PARAM; + } + + tmp = (uint8_t *)&data; + prev_bytes = offset & 0x3; + offset &= 0xFFFC; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset); + for (j = prev_bytes; j < sizeof(uint32_t); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area.
*/ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(uint32_t); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(uint32_t); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); + } + + return E1000_SUCCESS; +} + + +/***************************************************************************** + * This function writes the command header after performing the checksum + * calculation. + * + * returns - E1000_SUCCESS for success. + ****************************************************************************/ +int32_t +e1000_mng_write_cmd_header(struct e1000_hw * hw, + struct e1000_host_mng_command_header * hdr) +{ + uint16_t i; + uint8_t sum; + uint8_t *buffer; + + /* Write the whole command header structure which includes sum of + * the buffer */ + + uint16_t length = sizeof(struct e1000_host_mng_command_header); + + sum = hdr->checksum; + hdr->checksum = 0; + + buffer = (uint8_t *) hdr; + i = length; + while (i--) + sum += buffer[i]; + + hdr->checksum = 0 - sum; + + length >>= 2; + /* The device driver writes the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); + E1000_WRITE_FLUSH(hw); + } + + return E1000_SUCCESS; +} + + +/***************************************************************************** + * This function indicates to ARC that a new command is pending which completes + * one write operation by the driver. + * + * returns - E1000_SUCCESS for success. + ****************************************************************************/ +int32_t +e1000_mng_write_commit(struct e1000_hw * hw) +{ + uint32_t hicr; + + hicr = E1000_READ_REG(hw, HICR); + /* Setting this bit tells the ARC that a new command is pending. */ + E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C); + + return E1000_SUCCESS; +} + + +/***************************************************************************** + * This function checks the mode of the firmware. + * + * returns - TRUE when the mode is IAMT, otherwise FALSE. + ****************************************************************************/ +boolean_t +e1000_check_mng_mode(struct e1000_hw *hw) +{ + uint32_t fwsm; + + fwsm = E1000_READ_REG(hw, FWSM); + + if (hw->mac_type == e1000_ich8lan) { + if ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) + return TRUE; + } else if ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) + return TRUE; + + return FALSE; +} + + +/***************************************************************************** + * This function writes the DHCP information.
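The command header checksum just written is a plain two's-complement byte sum: the field is zeroed, every byte is summed, and the negation is stored so that the whole structure re-sums to zero. A standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t csum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *buf++;
	return sum;
}

int main(void)
{
	/* hypothetical 8-byte header; the checksum byte is the last one */
	uint8_t hdr[8] = { 0x64, 0x10, 0, 0, 0, 0, 0, 0 };

	hdr[7] = (uint8_t)(0 - csum(hdr, sizeof(hdr)));
	printf("verify: %u (expect 0)\n", csum(hdr, sizeof(hdr)));
	return 0;
}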
+ ****************************************************************************/ +int32_t +e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer, + uint16_t length) +{ + int32_t ret_val; + struct e1000_host_mng_command_header hdr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val == E1000_SUCCESS) { + ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr), + &(hdr.checksum)); + if (ret_val == E1000_SUCCESS) { + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val == E1000_SUCCESS) + ret_val = e1000_mng_write_commit(hw); + } + } + return ret_val; +} + + +/***************************************************************************** + * This function calculates the checksum. + * + * returns - checksum of buffer contents. + ****************************************************************************/ +uint8_t +e1000_calculate_mng_checksum(char *buffer, uint32_t length) +{ + uint8_t sum = 0; + uint32_t i; + + if (!buffer) + return 0; + + for (i=0; i < length; i++) + sum += buffer[i]; + + return (uint8_t) (0 - sum); +} + +/***************************************************************************** + * This function checks whether tx pkt filtering needs to be enabled or not. + * + * returns - TRUE for packet filtering or FALSE. + ****************************************************************************/ +boolean_t +e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + /* called in init as well as watchdog timer functions */ + + int32_t ret_val, checksum; + boolean_t tx_filter = FALSE; + struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie); + uint8_t *buffer = (uint8_t *) &(hw->mng_cookie); + + if (e1000_check_mng_mode(hw)) { + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val == E1000_SUCCESS) { + ret_val = e1000_host_if_read_cookie(hw, buffer); + if (ret_val == E1000_SUCCESS) { + checksum = hdr->checksum; + hdr->checksum = 0; + if ((hdr->signature == E1000_IAMT_SIGNATURE) && + checksum == e1000_calculate_mng_checksum((char *)buffer, + E1000_MNG_DHCP_COOKIE_LENGTH)) { + if (hdr->status & + E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT) + tx_filter = TRUE; + } else + tx_filter = TRUE; + } else + tx_filter = TRUE; + } + } + + hw->tx_pkt_filtering = tx_filter; + return tx_filter; +} + +/****************************************************************************** + * Verifies the hardware needs to allow ARPs to be processed by the host + * + * hw - Struct containing variables accessed by shared code + * + * returns: - TRUE/FALSE + * + *****************************************************************************/ +uint32_t +e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + uint32_t manc; + uint32_t fwsm, factps; + + if (hw->asf_firmware_present) { + manc = E1000_READ_REG(hw, MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return FALSE; + if (e1000_arc_subsystem_valid(hw) == TRUE) { + fwsm = E1000_READ_REG(hw, FWSM); + factps = E1000_READ_REG(hw, FACTPS); + + if (((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) && + (factps & E1000_FACTPS_MNGCG)) + return TRUE; + } else + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return TRUE; + } + return FALSE; +} + +static int32_t +e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t mii_status_reg; + uint16_t i; + + /* Polarity 
reversal workaround for forced 10F/10H links. */
+
+    /* Disable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out if the no-link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for the Link Status bit
+         * to be clear.
+         */
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
+            break;
+        msec_delay_irq(100);
+    }
+
+    /* Recommended delay time after link has been lost */
+    msec_delay_irq(1000);
+
+    /* Now we will re-enable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    msec_delay_irq(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+    if (ret_val)
+        return ret_val;
+    msec_delay_irq(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+    if (ret_val)
+        return ret_val;
+    msec_delay_irq(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out if the link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for the Link Status bit
+         * to be set.
+         */
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if (mii_status_reg & MII_SR_LINK_STATUS)
+            break;
+        msec_delay_irq(100);
+    }
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Disables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
+ *
+ ***************************************************************************/
+void
+e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+
+    DEBUGFUNC("e1000_set_pci_express_master_disable");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return;
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+    ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+}
+
+/***************************************************************************
+ *
+ * Enables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
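+ *
+ * Note: this is the inverse of e1000_set_pci_express_master_disable()
+ * above; it clears E1000_CTRL_GIO_MASTER_DISABLE in CTRL and, like its
+ * counterpart, is a no-op on adapters that are not PCI-Express.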
+ * + ***************************************************************************/ +void +e1000_enable_pciex_master(struct e1000_hw *hw) +{ + uint32_t ctrl; + + DEBUGFUNC("e1000_enable_pciex_master"); + + if (hw->bus_type != e1000_bus_type_pci_express) + return; + + ctrl = E1000_READ_REG(hw, CTRL); + ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, CTRL, ctrl); +} + +/******************************************************************************* + * + * Disables PCI-Express master access and verifies there are no pending requests + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't + * caused the master requests to be disabled. + * E1000_SUCCESS master requests disabled. + * + ******************************************************************************/ +int32_t +e1000_disable_pciex_master(struct e1000_hw *hw) +{ + int32_t timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ + + DEBUGFUNC("e1000_disable_pciex_master"); + + if (hw->bus_type != e1000_bus_type_pci_express) + return E1000_SUCCESS; + + e1000_set_pci_express_master_disable(hw); + + while (timeout) { + if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) + break; + else + usec_delay(100); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return E1000_SUCCESS; +} + +/******************************************************************************* + * + * Check for EEPROM Auto Read bit done. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + * + ******************************************************************************/ +int32_t +e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + int32_t timeout = AUTO_READ_DONE_TIMEOUT; + + DEBUGFUNC("e1000_get_auto_rd_done"); + + switch (hw->mac_type) { + default: + msec_delay(5); + break; + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_80003es2lan: + case e1000_ich8lan: + while (timeout) { + if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) + break; + else msec_delay(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Auto read by HW from EEPROM has not completed.\n"); + return -E1000_ERR_RESET; + } + break; + } + + /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high. + * Need to wait for PHY configuration completion before accessing NVM + * and PHY. */ + if (hw->mac_type == e1000_82573) + msec_delay(25); + + return E1000_SUCCESS; +} + +/*************************************************************************** + * Checks if the PHY configuration is done + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. 
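+ *
+ * Note: 82571/82572 and 80003es2lan poll EEMNGCTL for a CFG_DONE bit
+ * (selected per port on the 80003es2lan via E1000_STATUS_FUNC_1);
+ * all other MAC types simply use a fixed delay.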
+ * + ***************************************************************************/ +int32_t +e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + int32_t timeout = PHY_CFG_TIMEOUT; + uint32_t cfg_mask = E1000_EEPROM_CFG_DONE; + + DEBUGFUNC("e1000_get_phy_cfg_done"); + + switch (hw->mac_type) { + default: + msec_delay_irq(10); + break; + case e1000_80003es2lan: + /* Separate *_CFG_DONE_* bit for each port */ + if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) + cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1; + fallthrough; + case e1000_82571: + case e1000_82572: + while (timeout) { + if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask) + break; + else + msec_delay(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + break; + } + + return E1000_SUCCESS; +} + +/*************************************************************************** + * + * Using the combination of SMBI and SWESMBI semaphore bits when resetting + * adapter or Eeprom access. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_EEPROM if fail to access EEPROM. + * E1000_SUCCESS at any other case. + * + ***************************************************************************/ +int32_t +e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) +{ + int32_t timeout; + uint32_t swsm; + + DEBUGFUNC("e1000_get_hw_eeprom_semaphore"); + + if (!hw->eeprom_semaphore_present) + return E1000_SUCCESS; + + if (hw->mac_type == e1000_80003es2lan) { + /* Get the SW semaphore. */ + if (e1000_get_software_semaphore(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + } + + /* Get the FW semaphore. */ + timeout = hw->eeprom.word_size + 1; + while (timeout) { + swsm = E1000_READ_REG(hw, SWSM); + swsm |= E1000_SWSM_SWESMBI; + E1000_WRITE_REG(hw, SWSM, swsm); + /* if we managed to set the bit we got the semaphore. */ + swsm = E1000_READ_REG(hw, SWSM); + if (swsm & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + timeout--; + } + + if (!timeout) { + /* Release semaphores */ + e1000_put_hw_eeprom_semaphore(hw); + DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/*************************************************************************** + * This function clears HW semaphore bits. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - None. + * + ***************************************************************************/ +void +e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) +{ + uint32_t swsm; + + DEBUGFUNC("e1000_put_hw_eeprom_semaphore"); + + if (!hw->eeprom_semaphore_present) + return; + + swsm = E1000_READ_REG(hw, SWSM); + if (hw->mac_type == e1000_80003es2lan) { + /* Release both semaphores. */ + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + } else + swsm &= ~(E1000_SWSM_SWESMBI); + E1000_WRITE_REG(hw, SWSM, swsm); +} + +/*************************************************************************** + * + * Obtaining software semaphore bit (SMBI) before resetting PHY. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_RESET if fail to obtain semaphore. + * E1000_SUCCESS at any other case. 
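+ *
+ * Note: only the 80003es2lan uses this semaphore. Per the comment in
+ * the loop below, reading SWSM with SMBI clear sets the bit as a side
+ * effect, so a read returning SMBI == 0 means we now hold the semaphore.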
+ * + ***************************************************************************/ +int32_t +e1000_get_software_semaphore(struct e1000_hw *hw) +{ + int32_t timeout = hw->eeprom.word_size + 1; + uint32_t swsm; + + DEBUGFUNC("e1000_get_software_semaphore"); + + if (hw->mac_type != e1000_80003es2lan) + return E1000_SUCCESS; + + while (timeout) { + swsm = E1000_READ_REG(hw, SWSM); + /* If SMBI bit cleared, it is now set and we hold the semaphore */ + if (!(swsm & E1000_SWSM_SMBI)) + break; + msec_delay_irq(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_RESET; + } + + return E1000_SUCCESS; +} + +/*************************************************************************** + * + * Release semaphore bit (SMBI). + * + * hw: Struct containing variables accessed by shared code + * + ***************************************************************************/ +void +e1000_release_software_semaphore(struct e1000_hw *hw) +{ + uint32_t swsm; + + DEBUGFUNC("e1000_release_software_semaphore"); + + if (hw->mac_type != e1000_80003es2lan) + return; + + swsm = E1000_READ_REG(hw, SWSM); + /* Release the SW semaphores.*/ + swsm &= ~E1000_SWSM_SMBI; + E1000_WRITE_REG(hw, SWSM, swsm); +} + +/****************************************************************************** + * Checks if PHY reset is blocked due to SOL/IDER session, for example. + * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to + * the caller to figure out how to deal with it. + * + * hw - Struct containing variables accessed by shared code + * + * returns: - E1000_BLK_PHY_RESET + * E1000_SUCCESS + * + *****************************************************************************/ +int32_t +e1000_check_phy_reset_block(struct e1000_hw *hw) +{ + uint32_t manc = 0; + uint32_t fwsm = 0; + + if (hw->mac_type == e1000_ich8lan) { + fwsm = E1000_READ_REG(hw, FWSM); + return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS + : E1000_BLK_PHY_RESET; + } + + if (hw->mac_type > e1000_82547_rev_2) + manc = E1000_READ_REG(hw, MANC); + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +uint8_t +e1000_arc_subsystem_valid(struct e1000_hw *hw) +{ + uint32_t fwsm; + + /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC + * may not be provided a DMA clock when no manageability features are + * enabled. We do not want to perform any reads/writes to these registers + * if this is the case. We read FWSM to determine the manageability mode. + */ + switch (hw->mac_type) { + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_80003es2lan: + fwsm = E1000_READ_REG(hw, FWSM); + if ((fwsm & E1000_FWSM_MODE_MASK) != 0) + return TRUE; + break; + case e1000_ich8lan: + return TRUE; + default: + break; + } + return FALSE; +} + + +/****************************************************************************** + * Configure PCI-Ex no-snoop + * + * hw - Struct containing variables accessed by shared code. + * no_snoop - Bitmap of no-snoop events. 
+ * + * returns: E1000_SUCCESS + * + *****************************************************************************/ +int32_t +e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) +{ + uint32_t gcr_reg = 0; + + DEBUGFUNC("e1000_set_pci_ex_no_snoop"); + + if (hw->bus_type == e1000_bus_type_unknown) + e1000_get_bus_info(hw); + + if (hw->bus_type != e1000_bus_type_pci_express) + return E1000_SUCCESS; + + if (no_snoop) { + gcr_reg = E1000_READ_REG(hw, GCR); + gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL); + gcr_reg |= no_snoop; + E1000_WRITE_REG(hw, GCR, gcr_reg); + } + if (hw->mac_type == e1000_ich8lan) { + uint32_t ctrl_ext; + + E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL); + + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + } + + return E1000_SUCCESS; +} + +/*************************************************************************** + * + * Get software semaphore FLAG bit (SWFLAG). + * SWFLAG is used to synchronize the access to all shared resource between + * SW, FW and HW. + * + * hw: Struct containing variables accessed by shared code + * + ***************************************************************************/ +int32_t +e1000_get_software_flag(struct e1000_hw *hw) +{ + int32_t timeout = PHY_CFG_TIMEOUT; + uint32_t extcnf_ctrl; + + DEBUGFUNC("e1000_get_software_flag"); + + if (hw->mac_type == e1000_ich8lan) { + while (timeout) { + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); + + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + msec_delay_irq(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT("FW or HW locks the resource too long.\n"); + return -E1000_ERR_CONFIG; + } + } + + return E1000_SUCCESS; +} + +/*************************************************************************** + * + * Release software semaphore FLAG bit (SWFLAG). + * SWFLAG is used to synchronize the access to all shared resource between + * SW, FW and HW. + * + * hw: Struct containing variables accessed by shared code + * + ***************************************************************************/ +void +e1000_release_software_flag(struct e1000_hw *hw) +{ + uint32_t extcnf_ctrl; + + DEBUGFUNC("e1000_release_software_flag"); + + if (hw->mac_type == e1000_ich8lan) { + extcnf_ctrl= E1000_READ_REG(hw, EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); + } + + return; +} + +/*************************************************************************** + * + * Disable dynamic power down mode in ife PHY. + * It can be used to workaround band-gap problem. + * + * hw: Struct containing variables accessed by shared code + * + ***************************************************************************/ +int32_t +e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) +{ + uint16_t phy_data; + int32_t ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_ife_disable_dynamic_power_down"); + + if (hw->phy_type == e1000_phy_ife) { + ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN; + ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data); + } + + return ret_val; +} + +/*************************************************************************** + * + * Enable dynamic power down mode in ife PHY. 
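+ * (This is the counterpart of e1000_ife_disable_dynamic_power_down()
+ * above; it clears IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN again.)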
+ * It can be used to workaround band-gap problem. + * + * hw: Struct containing variables accessed by shared code + * + ***************************************************************************/ +int32_t +e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) +{ + uint16_t phy_data; + int32_t ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_ife_enable_dynamic_power_down"); + + if (hw->phy_type == e1000_phy_ife) { + ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN; + ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data); + } + + return ret_val; +} + +/****************************************************************************** + * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access + * register. + * + * hw - Struct containing variables accessed by shared code + * offset - offset of word in the EEPROM to read + * data - word read from the EEPROM + * words - number of words to read + *****************************************************************************/ +int32_t +e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, + uint16_t *data) +{ + int32_t error = E1000_SUCCESS; + uint32_t flash_bank = 0; + uint32_t act_offset = 0; + uint32_t bank_offset = 0; + uint16_t word = 0; + uint16_t i = 0; + + /* We need to know which is the valid flash bank. In the event + * that we didn't allocate eeprom_shadow_ram, we may not be + * managing flash_bank. So it cannot be trusted and needs + * to be updated with each read. + */ + /* Value of bit 22 corresponds to the flash bank we're on. */ + flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0; + + /* Adjust offset appropriately if we're on bank 1 - adjust for word size */ + bank_offset = flash_bank * (hw->flash_bank_size * 2); + + error = e1000_get_software_flag(hw); + if (error != E1000_SUCCESS) + return error; + + for (i = 0; i < words; i++) { + if (hw->eeprom_shadow_ram != NULL && + hw->eeprom_shadow_ram[offset+i].modified == TRUE) { + data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word; + } else { + /* The NVM part needs a byte offset, hence * 2 */ + act_offset = bank_offset + ((offset + i) * 2); + error = e1000_read_ich8_word(hw, act_offset, &word); + if (error != E1000_SUCCESS) + break; + data[i] = word; + } + } + + e1000_release_software_flag(hw); + + return error; +} + +/****************************************************************************** + * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access + * register. Actually, writes are written to the shadow ram cache in the hw + * structure hw->e1000_shadow_ram. e1000_commit_shadow_ram flushes this to + * the NVM, which occurs when the NVM checksum is updated. + * + * hw - Struct containing variables accessed by shared code + * offset - offset of word in the EEPROM to write + * words - number of words to write + * data - words to write to the EEPROM + *****************************************************************************/ +int32_t +e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, + uint16_t *data) +{ + uint32_t i = 0; + int32_t error = E1000_SUCCESS; + + error = e1000_get_software_flag(hw); + if (error != E1000_SUCCESS) + return error; + + /* A driver can write to the NVM only if it has eeprom_shadow_ram + * allocated. Subsequent reads to the modified words are read from + * this cached structure as well. 
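+ * (Each cache entry pairs an eeprom_word with a "modified" flag; see
+ * struct e1000_shadow_ram in e1000_hw.h.)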
+ * Writes go only into this cached structure until a subsequent call to
+ * e1000_update_eeprom_checksum(), which commits the changes to the NVM
+ * and clears the "modified" field.
+ */
+    if (hw->eeprom_shadow_ram != NULL) {
+        for (i = 0; i < words; i++) {
+            if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
+                hw->eeprom_shadow_ram[offset+i].modified = TRUE;
+                hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
+            } else {
+                error = -E1000_ERR_EEPROM;
+                break;
+            }
+        }
+    } else {
+        /* Drivers have the option to not allocate eeprom_shadow_ram as
+         * long as they don't perform any NVM writes. Attempting a write
+         * without it will result in this error.
+         */
+        error = -E1000_ERR_EEPROM;
+    }
+
+    e1000_release_software_flag(hw);
+
+    return error;
+}
+
+/******************************************************************************
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+int32_t
+e1000_ich8_cycle_init(struct e1000_hw *hw)
+{
+    union ich8_hws_flash_status hsfsts;
+    int32_t error = E1000_ERR_EEPROM;
+    int32_t i = 0;
+
+    DEBUGFUNC("e1000_ich8_cycle_init");
+
+    hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+
+    /* Check the Flash Descriptor Valid bit in the HW status register */
+    if (hsfsts.hsf_status.fldesvalid == 0) {
+        DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.");
+        return error;
+    }
+
+    /* Clear FCERR and DAEL in the HW status register by writing a 1 */
+    hsfsts.hsf_status.flcerr = 1;
+    hsfsts.hsf_status.dael = 1;
+
+    E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+
+    /* Either we should have a hardware SPI cycle-in-progress bit to check
+     * against in order to start a new cycle, or the FDONE bit should read
+     * as 1 after a hardware reset, so it can serve as an indication of
+     * whether a cycle is in progress or has completed. We should also have
+     * a software semaphore mechanism guarding FDONE and the
+     * cycle-in-progress bit, so that accesses by two threads are
+     * serialized and two threads don't start a cycle at the same time. */
+
+    if (hsfsts.hsf_status.flcinprog == 0) {
+        /* There is no cycle running at present, so we can start a cycle */
+        /* Begin by setting Flash Cycle Done. */
+        hsfsts.hsf_status.flcdone = 1;
+        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+        error = E1000_SUCCESS;
+    } else {
+        /* Otherwise poll for some time so the current cycle has a chance
+         * to end before giving up. */
+        for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) {
+            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcinprog == 0) {
+                error = E1000_SUCCESS;
+                break;
+            }
+            usec_delay(1);
+        }
+        if (error == E1000_SUCCESS) {
+            /* The previous cycle ended while we waited,
+             * now set the Flash Cycle Done.
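+ * (FDONE is presumably write-one-to-clear, like the FCERR/DAEL bits
+ * handled above, so writing 1 clears any stale done status.)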
*/ + hsfsts.hsf_status.flcdone = 1; + E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); + } else { + DEBUGOUT("Flash controller busy, cannot get access"); + } + } + return error; +} + +/****************************************************************************** + * This function starts a flash cycle and waits for its completion + * + * hw - The pointer to the hw structure + ****************************************************************************/ +int32_t +e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + int32_t error = E1000_ERR_EEPROM; + uint32_t i = 0; + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone == 1) + break; + usec_delay(1); + i++; + } while (i < timeout); + if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) { + error = E1000_SUCCESS; + } + return error; +} + +/****************************************************************************** + * Reads a byte or word from the NVM using the ICH8 flash access registers. + * + * hw - The pointer to the hw structure + * index - The index of the byte or word to read. + * size - Size of data to read, 1=byte 2=word + * data - Pointer to the word to store the value read. + *****************************************************************************/ +int32_t +e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, + uint32_t size, uint16_t* data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + uint32_t flash_linear_address; + uint32_t flash_data = 0; + int32_t error = -E1000_ERR_EEPROM; + int32_t count = 0; + + DEBUGFUNC("e1000_read_ich8_data"); + + if (size < 1 || size > 2 || data == 0x0 || + index > ICH8_FLASH_LINEAR_ADDR_MASK) + return error; + + flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) + + hw->flash_base_addr; + + do { + usec_delay(1); + /* Steps */ + error = e1000_ich8_cycle_init(hw); + if (error != E1000_SUCCESS) + break; + + hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
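+ * fldbcount is therefore programmed as size - 1 below.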
*/ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ; + E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); + + /* Write the last 24 bits of index into Flash Linear address field in + * Flash Address */ + /* TODO: TBD maybe check the index against the size of flash */ + + E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); + + error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it and try the whole + * sequence a few more times, else read in (shift in) the Flash Data0, + * the order is least significant byte first msb to lsb */ + if (error == E1000_SUCCESS) { + flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0); + if (size == 1) { + *data = (uint8_t)(flash_data & 0x000000FF); + } else if (size == 2) { + *data = (uint16_t)(flash_data & 0x0000FFFF); + } + break; + } else { + /* If we've gotten here, then things are probably completely hosed, + * but if the error condition is detected, it won't hurt to give + * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) { + /* Repeat for some time before giving up. */ + continue; + } else if (hsfsts.hsf_status.flcdone == 0) { + DEBUGOUT("Timeout error - flash cycle did not complete."); + break; + } + } + } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT); + + return error; +} + +/****************************************************************************** + * Writes One /two bytes to the NVM using the ICH8 flash access registers. + * + * hw - The pointer to the hw structure + * index - The index of the byte/word to read. + * size - Size of data to read, 1=byte 2=word + * data - The byte(s) to write to the NVM. + *****************************************************************************/ +int32_t +e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, + uint16_t data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + uint32_t flash_linear_address; + uint32_t flash_data = 0; + int32_t error = -E1000_ERR_EEPROM; + int32_t count = 0; + + DEBUGFUNC("e1000_write_ich8_data"); + + if (size < 1 || size > 2 || data > size * 0xff || + index > ICH8_FLASH_LINEAR_ADDR_MASK) + return error; + + flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) + + hw->flash_base_addr; + + do { + usec_delay(1); + /* Steps */ + error = e1000_ich8_cycle_init(hw); + if (error != E1000_SUCCESS) + break; + + hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
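+ * As in the read path above, fldbcount is programmed as size - 1.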
*/ + hsflctl.hsf_ctrl.fldbcount = size -1; + hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE; + E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); + + /* Write the last 24 bits of index into Flash Linear address field in + * Flash Address */ + E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); + + if (size == 1) + flash_data = (uint32_t)data & 0x00FF; + else + flash_data = (uint32_t)data; + + E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data); + + /* check if FCERR is set to 1 , if set to 1, clear it and try the whole + * sequence a few more times else done */ + error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT); + if (error == E1000_SUCCESS) { + break; + } else { + /* If we're here, then things are most likely completely hosed, + * but if the error condition is detected, it won't hurt to give + * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) { + /* Repeat for some time before giving up. */ + continue; + } else if (hsfsts.hsf_status.flcdone == 0) { + DEBUGOUT("Timeout error - flash cycle did not complete."); + break; + } + } + } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT); + + return error; +} + +/****************************************************************************** + * Reads a single byte from the NVM using the ICH8 flash access registers. + * + * hw - pointer to e1000_hw structure + * index - The index of the byte to read. + * data - Pointer to a byte to store the value read. + *****************************************************************************/ +int32_t +e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) +{ + int32_t status = E1000_SUCCESS; + uint16_t word = 0; + + status = e1000_read_ich8_data(hw, index, 1, &word); + if (status == E1000_SUCCESS) { + *data = (uint8_t)word; + } + + return status; +} + +/****************************************************************************** + * Writes a single byte to the NVM using the ICH8 flash access registers. + * Performs verification by reading back the value and then going through + * a retry algorithm before giving up. + * + * hw - pointer to e1000_hw structure + * index - The index of the byte to write. + * byte - The byte to write to the NVM. + *****************************************************************************/ +int32_t +e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) +{ + int32_t error = E1000_SUCCESS; + int32_t program_retries; + uint8_t temp_byte = 0; + + e1000_write_ich8_byte(hw, index, byte); + usec_delay(100); + + for (program_retries = 0; program_retries < 100; program_retries++) { + e1000_read_ich8_byte(hw, index, &temp_byte); + if (temp_byte == byte) + break; + usec_delay(10); + e1000_write_ich8_byte(hw, index, byte); + usec_delay(100); + } + if (program_retries == 100) + error = E1000_ERR_EEPROM; + + return error; +} + +/****************************************************************************** + * Writes a single byte to the NVM using the ICH8 flash access registers. + * + * hw - pointer to e1000_hw structure + * index - The index of the byte to read. + * data - The byte to write to the NVM. 
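+ *
+ * Note: unlike e1000_verify_write_ich8_byte() above, no read-back
+ * verification is performed here.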
+ *****************************************************************************/ +int32_t +e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) +{ + int32_t status = E1000_SUCCESS; + uint16_t word = (uint16_t)data; + + status = e1000_write_ich8_data(hw, index, 1, word); + + return status; +} + +/****************************************************************************** + * Reads a word from the NVM using the ICH8 flash access registers. + * + * hw - pointer to e1000_hw structure + * index - The starting byte index of the word to read. + * data - Pointer to a word to store the value read. + *****************************************************************************/ +int32_t +e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) +{ + int32_t status = E1000_SUCCESS; + status = e1000_read_ich8_data(hw, index, 2, data); + return status; +} + +/****************************************************************************** + * Writes a word to the NVM using the ICH8 flash access registers. + * + * hw - pointer to e1000_hw structure + * index - The starting byte index of the word to read. + * data - The word to write to the NVM. + *****************************************************************************/ +int32_t +e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) +{ + int32_t status = E1000_SUCCESS; + status = e1000_write_ich8_data(hw, index, 2, data); + return status; +} + +/****************************************************************************** + * Erases the bank specified. Each bank is a 4k block. Segments are 0 based. + * segment N is 4096 * N + flash_reg_addr. + * + * hw - pointer to e1000_hw structure + * segment - 0 for first segment, 1 for second segment, etc. + *****************************************************************************/ +int32_t +e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + uint32_t flash_linear_address; + int32_t count = 0; + int32_t error = E1000_ERR_EEPROM; + int32_t iteration, seg_size; + int32_t sector_size; + int32_t j = 0; + int32_t error_flag = 0; + + hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); + + /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */ + /* 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector can be + * calculated as = segment * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 
+ * The start index for the nth Hw sector can be calculated + * as = segment * 4096 + * 10: Error condition + * 11: The Hw sector size is much bigger than the size asked to + * erase...error condition */ + if (hsfsts.hsf_status.berasesz == 0x0) { + /* Hw sector size 256 */ + sector_size = seg_size = ICH8_FLASH_SEG_SIZE_256; + iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256; + } else if (hsfsts.hsf_status.berasesz == 0x1) { + sector_size = seg_size = ICH8_FLASH_SEG_SIZE_4K; + iteration = 1; + } else if (hsfsts.hsf_status.berasesz == 0x3) { + sector_size = seg_size = ICH8_FLASH_SEG_SIZE_64K; + iteration = 1; + } else { + return error; + } + + for (j = 0; j < iteration ; j++) { + do { + count++; + /* Steps */ + error = e1000_ich8_cycle_init(hw); + if (error != E1000_SUCCESS) { + error_flag = 1; + break; + } + + /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash + * Control */ + hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE; + E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); + + /* Write the last 24 bits of an index within the block into Flash + * Linear address field in Flash Address. This probably needs to + * be calculated here based off the on-chip segment size and the + * software segment size assumed (4K) */ + /* TBD */ + flash_linear_address = segment * sector_size + j * seg_size; + flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK; + flash_linear_address += hw->flash_base_addr; + + E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); + + error = e1000_ich8_flash_cycle(hw, 1000000); + /* Check if FCERR is set to 1. If 1, clear it and try the whole + * sequence a few more times else Done */ + if (error == E1000_SUCCESS) { + break; + } else { + hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) { + /* repeat for some time before giving up */ + continue; + } else if (hsfsts.hsf_status.flcdone == 0) { + error_flag = 1; + break; + } + } + } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag); + if (error_flag == 1) + break; + } + if (error_flag != 1) + error = E1000_SUCCESS; + return error; +} + +/****************************************************************************** + * + * Reverse duplex setting without breaking the link. 
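+ *
+ * Note: igp3 PHYs only. The duplex bit in PHY_CTRL is toggled (XOR with
+ * MII_CR_FULL_DUPLEX) and IGP3_PHY_MISC_DUPLEX_MANUAL_SET is raised,
+ * apparently so the new setting takes effect without dropping the link.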
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ *****************************************************************************/
+int32_t
+e1000_duplex_reversal(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data ^= MII_CR_FULL_DUPLEX;
+
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= IGP3_PHY_MISC_DUPLEX_MANUAL_SET;
+    ret_val = e1000_write_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, phy_data);
+
+    return ret_val;
+}
+
+int32_t
+e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+                                      uint32_t cnf_base_addr, uint32_t cnf_size)
+{
+    uint32_t ret_val = E1000_SUCCESS;
+    uint16_t word_addr, reg_data, reg_addr;
+    uint16_t i;
+
+    /* cnf_base_addr is in DWORDs */
+    word_addr = (uint16_t)(cnf_base_addr << 1);
+
+    /* cnf_size is returned in units of DWORDs */
+    for (i = 0; i < cnf_size; i++) {
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_get_software_flag(hw);
+        if (ret_val != E1000_SUCCESS)
+            return ret_val;
+
+        ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data);
+
+        e1000_release_software_flag(hw);
+    }
+
+    return ret_val;
+}
+
+
+int32_t
+e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+{
+    uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    /* Check if SW needs to configure the PHY */
+    reg_data = E1000_READ_REG(hw, FEXTNVM);
+    if (!(reg_data & FEXTNVM_SW_CONFIG))
+        return E1000_SUCCESS;
+
+    /* Wait for basic configuration to complete before proceeding */
+    loop = 0;
+    do {
+        reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE;
+        usec_delay(100);
+        loop++;
+    } while ((!reg_data) && (loop < 50));
+
+    /* Clear the Init Done bit for the next init event */
+    reg_data = E1000_READ_REG(hw, STATUS);
+    reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
+    E1000_WRITE_REG(hw, STATUS, reg_data);
+
+    /* Make sure HW does not configure LCD from PHY extended configuration
+       before SW configuration */
+    reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+    if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
+        reg_data = E1000_READ_REG(hw, EXTCNF_SIZE);
+        cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
+        cnf_size >>= 16;
+        if (cnf_size) {
+            reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+            cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
+            /* cnf_base_addr is in DWORDs */
+            cnf_base_addr >>= 16;
+
+            /* Configure LCD from extended configuration region. */
+            ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
+                                                            cnf_size);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h
new file mode 100644
index 0000000..93cbe37
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h
@@ -0,0 +1,3454 @@
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+
+
+/* Forward declarations of structures used by the shared code */
+struct e1000_hw;
+struct e1000_hw_stats;
+
+/* Enumerated types specific to the e1000 hardware */
+/* Media Access Controllers */
+typedef enum {
+    e1000_undefined = 0,
+    e1000_82542_rev2_0,
+    e1000_82542_rev2_1,
+    e1000_82543,
+    e1000_82544,
+    e1000_82540,
+    e1000_82545,
+    e1000_82545_rev_3,
+    e1000_82546,
+    e1000_82546_rev_3,
+    e1000_82541,
+    e1000_82541_rev_2,
+    e1000_82547,
+    e1000_82547_rev_2,
+    e1000_82571,
+    e1000_82572,
+    e1000_82573,
+    e1000_80003es2lan,
+    e1000_ich8lan,
+    e1000_num_macs
+} e1000_mac_type;
+
+typedef enum {
+    e1000_eeprom_uninitialized = 0,
+    e1000_eeprom_spi,
+    e1000_eeprom_microwire,
+    e1000_eeprom_flash,
+    e1000_eeprom_ich8,
+    e1000_eeprom_none, /* No NVM support */
+    e1000_num_eeprom_types
+} e1000_eeprom_type;
+
+/* Media Types */
+typedef enum {
+    e1000_media_type_copper = 0,
+    e1000_media_type_fiber = 1,
+    e1000_media_type_internal_serdes = 2,
+    e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+    e1000_10_half = 0,
+    e1000_10_full = 1,
+    e1000_100_half = 2,
+    e1000_100_full = 3
+} e1000_speed_duplex_type;
+
+/* Flow Control Settings */
+typedef enum {
+    e1000_fc_none = 0,
+    e1000_fc_rx_pause = 1,
+    e1000_fc_tx_pause = 2,
+    e1000_fc_full = 3,
+    e1000_fc_default = 0xFF
+} e1000_fc_type;
+
+struct e1000_shadow_ram {
+    uint16_t eeprom_word;
+    boolean_t modified;
+};
+
+/* PCI bus types */
+typedef enum {
+    e1000_bus_type_unknown = 0,
+    e1000_bus_type_pci,
+    e1000_bus_type_pcix,
+    e1000_bus_type_pci_express,
+    e1000_bus_type_reserved
+} e1000_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+    e1000_bus_speed_unknown = 0,
+    e1000_bus_speed_33,
+    e1000_bus_speed_66,
+    e1000_bus_speed_100,
+    e1000_bus_speed_120,
+    e1000_bus_speed_133,
+    e1000_bus_speed_2500,
+    e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+    e1000_bus_width_unknown = 0,
+    e1000_bus_width_32,
+    e1000_bus_width_64,
+    e1000_bus_width_pciex_1,
+    e1000_bus_width_pciex_2,
+    e1000_bus_width_pciex_4,
+    e1000_bus_width_reserved
+} e1000_bus_width;
+
+/* PHY status info structure and supporting enums */
+typedef enum {
+    e1000_cable_length_50 = 0,
+    e1000_cable_length_50_80,
+    e1000_cable_length_80_110,
+    e1000_cable_length_110_140,
+    e1000_cable_length_140,
+
e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated, + e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + uint32_t idle_errors; + uint32_t receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + uint16_t word_size; + uint16_t opcode_bits; + uint16_t address_bits; + uint16_t delay_usec; + uint16_t page_size; + boolean_t use_eerd; + boolean_t use_eewr; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + + + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define 
E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 + +/* Function prototypes */ +/* Initialization */ +int32_t e1000_reset_hw(struct e1000_hw *hw); +int32_t e1000_init_hw(struct e1000_hw *hw); +int32_t e1000_id_led_init(struct e1000_hw * hw); +int32_t e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +int32_t e1000_setup_link(struct e1000_hw *hw); +int32_t e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw); +int32_t e1000_check_for_link(struct e1000_hw *hw); +int32_t e1000_get_speed_and_duplex(struct e1000_hw *hw, uint16_t * speed, uint16_t * duplex); +int32_t e1000_wait_autoneg(struct e1000_hw *hw); +int32_t e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); +int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); +int32_t e1000_phy_hw_reset(struct e1000_hw *hw); +int32_t e1000_phy_reset(struct e1000_hw *hw); +void e1000_phy_powerdown_workaround(struct e1000_hw *hw); +int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); +int32_t e1000_duplex_reversal(struct e1000_hw *hw); +int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size); +int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw); +int32_t e1000_detect_gig_phy(struct e1000_hw *hw); +int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +int32_t e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +int32_t e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +int32_t e1000_get_cable_length(struct e1000_hw *hw, uint16_t *min_length, uint16_t *max_length); +int32_t e1000_check_polarity(struct e1000_hw *hw, uint16_t *polarity); +int32_t e1000_check_downshift(struct e1000_hw *hw); +int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); +int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); +int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); + +/* EEPROM Functions */ +int32_t e1000_init_eeprom_params(struct e1000_hw *hw); +boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw); +int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); +int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); +int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd); + +/* MNG HOST IF functions */ +uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled 
*/ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + uint8_t command_id; + uint8_t checksum; + uint16_t reserved1; + uint16_t reserved2; + uint16_t command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ +}; +#ifdef E1000_BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie{ + uint32_t signature; + uint16_t vlan_id; + uint8_t reserved0; + uint8_t status; + uint32_t reserved1; + uint8_t checksum; + uint8_t reserved3; + uint16_t reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie{ + uint32_t signature; + uint8_t status; + uint8_t reserved0; + uint16_t vlan_id; + uint32_t reserved1; + uint16_t reserved2; + uint8_t reserved3; + uint8_t checksum; +}; +#endif + +int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, + uint16_t length); +boolean_t e1000_check_mng_mode(struct e1000_hw *hw); +boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +int32_t e1000_mng_enable_host_if(struct e1000_hw *hw); +int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, + uint16_t length, uint16_t offset, uint8_t *sum); +int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw, + struct e1000_host_mng_command_header* hdr); + +int32_t e1000_mng_write_commit(struct e1000_hw *hw); + +int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); +int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); +int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); +int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); +int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); +int32_t e1000_read_mac_addr(struct e1000_hw * hw); +int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); +void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); +void e1000_release_software_flag(struct e1000_hw *hw); +int32_t e1000_get_software_flag(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +void e1000_init_rx_addrs(struct e1000_hw *hw); +void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count); +uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); +void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); +void e1000_write_vfta(struct e1000_hw *hw, uint32_t offset, uint32_t value); +void e1000_clear_vfta(struct e1000_hw *hw); + +/* LED functions */ +int32_t e1000_setup_led(struct e1000_hw *hw); +int32_t e1000_cleanup_led(struct e1000_hw *hw); +int32_t e1000_led_on(struct e1000_hw *hw); +int32_t e1000_led_off(struct e1000_hw *hw); +int32_t e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_clear_hw_cntrs(struct e1000_hw *hw); +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, uint32_t frame_len, uint8_t * mac_addr); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void 
e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); +void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); +/* Port I/O is only supported on 82544 and newer */ +uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); +uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset); +void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); +void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value); +int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up); +int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active); +int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active); +void e1000_set_pci_express_master_disable(struct e1000_hw *hw); +void e1000_enable_pciex_master(struct e1000_hw *hw); +int32_t e1000_disable_pciex_master(struct e1000_hw *hw); +int32_t e1000_get_auto_rd_done(struct e1000_hw *hw); +int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw); +int32_t e1000_get_software_semaphore(struct e1000_hw *hw); +void e1000_release_software_semaphore(struct e1000_hw *hw); +int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); +int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw); +void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw); +int32_t e1000_commit_shadow_ram(struct e1000_hw *hw); +uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw); +int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop); + +int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, + uint8_t *data); +int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, + uint8_t byte); +int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, + uint8_t byte); +int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, + uint16_t *data); +int32_t e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, + uint16_t word); +int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, + uint32_t size, uint16_t *data); +int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, + uint32_t size, uint16_t data); +int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, + uint16_t words, uint16_t *data); +int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, + uint16_t words, uint16_t *data); +int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment); +int32_t e1000_ich8_cycle_init(struct e1000_hw *hw); +int32_t e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout); +int32_t e1000_phy_ife_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +int32_t e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw); +int32_t e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define 
E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB + +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D + + +#define NODE_ADDRESS_SIZE 6 +#define ETH_LENGTH_OF_ADDRESS 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* With FCS */ +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MAXIMUM_ETHERNET_PACKET_SIZE \ + (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. 
Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Additional interrupts need to be handled for e1000_ich8lan: + DSW = The FW changed the status of the DISSW bit in FWSM + PHYINT = The LAN connected device generates an interrupt + EPRST = Manageability reset event */ +#define IMS_ICH8LAN_ENABLE_MASK (\ + E1000_IMS_DSW | \ + E1000_IMS_PHYINT | \ + E1000_IMS_EPRST) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAR_ENTRIES_ICH8LAN 7 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + uint64_t buffer_addr; /* Address of the descriptor's data buffer */ + uint16_t length; /* Length of data DMAed into data buffer */ + uint16_t csum; /* Packet checksum */ + uint8_t status; /* Descriptor status */ + uint8_t errors; /* Descriptor Errors */ + uint16_t special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + uint64_t buffer_addr; + uint64_t reserved; + } read; + struct { + struct { + uint32_t mrq; /* Multiple Rx Queues */ + union { + uint32_t rss; /* RSS Hash */ + struct { + uint16_t ip_id; /* IP id */ + uint16_t csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + uint32_t status_error; /* ext status/error */ + uint16_t length; + uint16_t vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + uint64_t buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + uint32_t mrq; /* Multiple Rx Queues */ + union { + uint32_t rss; /* RSS Hash */ + struct { + uint16_t ip_id; /* IP id */ + uint16_t csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + uint32_t status_error; /* ext status/error */ + uint16_t length0; /* length of buffer 0 */ + uint16_t vlan; /* VLAN tag */ + } middle; + struct { + uint16_t header_status; + uint16_t length[3]; /* length of buffers 1-3 */ + } upper; + uint64_t reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
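+
+/*
+ * Illustrative sketch (not part of the original header): software owns a
+ * receive descriptor once hardware sets the DD status bit, and the frame
+ * is complete when EOP is also set. Assuming a driver-side ring of
+ * struct e1000_rx_desc entries, where rx_ring, next_to_clean,
+ * deliver_frame() and advance() are placeholders:
+ *
+ *	struct e1000_rx_desc *desc = &rx_ring[next_to_clean];
+ *
+ *	while (desc->status & E1000_RXD_STAT_DD) {
+ *		if (desc->status & E1000_RXD_STAT_EOP)
+ *			deliver_frame(desc, desc->length);
+ *		desc->status = 0;
+ *		desc = advance(desc);
+ *	}
+ */
+#define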
E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + uint64_t buffer_addr; /* Address of the descriptor's data buffer */ + union { + uint32_t data; + struct { + uint16_t length; /* Data buffer length */ + uint8_t cso; /* Checksum offset */ + uint8_t cmd; /* Descriptor control */ + } flags; + } lower; + union { + uint32_t data; + struct { + uint8_t status; /* Descriptor status */ + uint8_t css; /* Checksum start */ + uint16_t special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + 
union { + uint32_t ip_config; + struct { + uint8_t ipcss; /* IP checksum start */ + uint8_t ipcso; /* IP checksum offset */ + uint16_t ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + uint32_t tcp_config; + struct { + uint8_t tucss; /* TCP checksum start */ + uint8_t tucso; /* TCP checksum offset */ + uint16_t tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + uint32_t cmd_and_length; /* */ + union { + uint32_t data; + struct { + uint8_t status; /* Descriptor status */ + uint8_t hdr_len; /* Header length */ + uint16_t mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + uint64_t buffer_addr; /* Address of the descriptor's buffer address */ + union { + uint32_t data; + struct { + uint16_t length; /* Data buffer length */ + uint8_t typ_len_ext; /* */ + uint8_t cmd; /* */ + } flags; + } lower; + union { + uint32_t data; + struct { + uint8_t status; /* Descriptor status */ + uint8_t popts; /* Packet Options */ + uint16_t special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +#define E1000_NUM_UNICAST_ICH8LAN 7 +#define E1000_MC_TBL_SIZE_ICH8LAN 32 + + +/* Receive Address Register */ +struct e1000_rar { + volatile uint32_t low; /* receive address low */ + volatile uint32_t high; /* receive address high */ +}; + +/* Number of entries in the Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 +#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile uint32_t ipv4_addr; /* IP Address (RW) */ + volatile uint32_t reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP4AT_SIZE_ICH8LAN 3 +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile uint8_t ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile uint32_t length; /* Flexible Filter Length (RW) */ + volatile uint32_t reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile uint32_t mask; /* Flexible Filter Mask (RW) */ + volatile uint32_t reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile uint32_t value; /* Flexible Filter Value (RW) */ + volatile uint32_t reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. 
+ * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDBAL 
0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
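+
+/*
+ * Illustrative sketch (not part of the original header): the statistics
+ * registers above and below are read-to-clear (R/clr), so a driver
+ * periodically folds them into 64-bit software counters instead of
+ * reading them twice. Assuming the E1000_READ_REG() accessor macro and
+ * a struct e1000_hw_stats *stats (defined later in this header):
+ *
+ *	stats->crcerrs += E1000_READ_REG(hw, CRCERRS);
+ *	stats->mpc += E1000_READ_REG(hw, MPC);
+ *	stats->colc += E1000_READ_REG(hw, COLC);
+ */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count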
- R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* 
Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner.
+ */ +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL 
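+
+/*
+ * Illustrative sketch (not part of the original header): shared code can
+ * use the 82542 aliases to resolve a register offset from the detected
+ * MAC type, assuming the e1000_mac_type enumerators e1000_82542_rev2_0
+ * and e1000_82542_rev2_1 from the companion definitions:
+ *
+ *	static uint32_t example_rdtr_offset(struct e1000_hw *hw)
+ *	{
+ *		if (hw->mac_type == e1000_82542_rev2_0 ||
+ *		    hw->mac_type == e1000_82542_rev2_1)
+ *			return E1000_82542_RDTR;
+ *		return E1000_RDTR;
+ *	}
+ */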
+#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 
E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + uint64_t crcerrs; + uint64_t algnerrc; + uint64_t symerrs; + uint64_t rxerrc; + uint64_t mpc; + uint64_t scc; + uint64_t ecol; + uint64_t mcc; + uint64_t latecol; + uint64_t colc; + uint64_t dc; + uint64_t tncrs; + uint64_t sec; + uint64_t cexterr; + uint64_t rlec; + uint64_t xonrxc; + uint64_t xontxc; + uint64_t xoffrxc; + uint64_t xofftxc; + uint64_t fcruc; + uint64_t prc64; + uint64_t prc127; + uint64_t prc255; + uint64_t prc511; + uint64_t prc1023; + uint64_t prc1522; + uint64_t gprc; + uint64_t bprc; + uint64_t mprc; + uint64_t gptc; + uint64_t gorcl; + uint64_t gorch; + uint64_t gotcl; + uint64_t gotch; + uint64_t rnbc; + uint64_t ruc; + uint64_t rfc; + uint64_t roc; + uint64_t rjc; + uint64_t mgprc; + uint64_t mgpdc; + uint64_t mgptc; + uint64_t torl; + uint64_t torh; + uint64_t totl; + uint64_t toth; + uint64_t tpr; + uint64_t tpt; + uint64_t ptc64; + uint64_t ptc127; + uint64_t ptc255; + uint64_t ptc511; + uint64_t ptc1023; + uint64_t ptc1522; + uint64_t mptc; + uint64_t bptc; + uint64_t tsctc; + uint64_t tsctfc; + uint64_t iac; + uint64_t icrxptc; + uint64_t icrxatc; + uint64_t ictxptc; + uint64_t ictxatc; + uint64_t ictxqec; + uint64_t ictxqmtc; + uint64_t icrxdmtc; + uint64_t icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + uint8_t *hw_addr; + uint8_t *flash_address; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + uint32_t phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + uint32_t flash_bank_size; + uint32_t flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + uint32_t asf_firmware_present; + uint32_t eeprom_semaphore_present; + uint32_t swfw_sync_present; + uint32_t swfwhw_semaphore_present; + unsigned long io_base; + uint32_t phy_id; + uint32_t 
phy_revision; + uint32_t phy_addr; + uint32_t original_fc; + uint32_t txcw; + uint32_t autoneg_failed; + uint32_t max_frame_size; + uint32_t min_frame_size; + uint32_t mc_filter_type; + uint32_t num_mc_addrs; + uint32_t collision_delta; + uint32_t tx_packet_delta; + uint32_t ledctl_default; + uint32_t ledctl_mode1; + uint32_t ledctl_mode2; + boolean_t tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + uint16_t phy_spd_default; + uint16_t autoneg_advertised; + uint16_t pci_cmd_word; + uint16_t fc_high_water; + uint16_t fc_low_water; + uint16_t fc_pause_time; + uint16_t current_ifs_val; + uint16_t ifs_min_val; + uint16_t ifs_max_val; + uint16_t ifs_step_size; + uint16_t ifs_ratio; + uint16_t device_id; + uint16_t vendor_id; + uint16_t subsystem_id; + uint16_t subsystem_vendor_id; + uint8_t revision_id; + uint8_t autoneg; + uint8_t mdix; + uint8_t forced_speed_duplex; + uint8_t wait_autoneg_complete; + uint8_t dma_fairness; + uint8_t mac_addr[NODE_ADDRESS_SIZE]; + uint8_t perm_mac_addr[NODE_ADDRESS_SIZE]; + boolean_t disable_polarity_correction; + boolean_t speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + boolean_t get_link_status; + boolean_t serdes_link_down; + boolean_t tbi_compatibility_en; + boolean_t tbi_compatibility_on; + boolean_t laa_is_present; + boolean_t phy_reset_disable; + boolean_t fc_send_xon; + boolean_t fc_strict_ieee; + boolean_t report_tx_early; + boolean_t adaptive_ifs; + boolean_t ifs_params_forced; + boolean_t in_ifs_mode; + boolean_t mng_reg_access_disabled; + boolean_t leave_av_bit_off; + boolean_t kmrn_lock_loss_workaround_disabled; +}; + + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66MHz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH8_NVM_SIG_WORD 0x13 +#define E1000_ICH8_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
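+
+/*
+ * Illustrative sketch (not part of the original header): a bounded,
+ * polled EEPROM word read through EERD, assuming the E1000_WRITE_REG()/
+ * E1000_READ_REG() accessor macros and a udelay() helper:
+ *
+ *	static int example_eerd_read(struct e1000_hw *hw, uint16_t offset,
+ *				     uint16_t *data)
+ *	{
+ *		uint32_t eerd;
+ *		int i;
+ *
+ *		E1000_WRITE_REG(hw, EERD, ((uint32_t)offset <<
+ *				E1000_EERD_ADDR_SHIFT) | E1000_EERD_START);
+ *		for (i = 0; i < 1000; i++) {
+ *			eerd = E1000_READ_REG(hw, EERD);
+ *			if (eerd & E1000_EERD_DONE) {
+ *				*data = eerd >> E1000_EERD_DATA_SHIFT;
+ *				return 0;
+ *			}
+ *			udelay(5);
+ *		}
+ *		return -1;
+ *	}
+ */
+
+/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define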
EEPROM_STATUS_WEN_SPI 0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000
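+
+/*
+ * Illustrative sketch (not part of the original header): a polled PHY
+ * register read through MDIC, assuming the E1000_WRITE_REG()/
+ * E1000_READ_REG() accessor macros and a udelay() helper (a real driver
+ * also bounds the poll):
+ *
+ *	uint32_t mdic;
+ *
+ *	E1000_WRITE_REG(hw, MDIC,
+ *		((uint32_t)reg_addr << E1000_MDIC_REG_SHIFT) |
+ *		(hw->phy_addr << E1000_MDIC_PHY_SHIFT) |
+ *		E1000_MDIC_OP_READ);
+ *	do {
+ *		udelay(50);
+ *		mdic = E1000_READ_REG(hw, MDIC);
+ *	} while (!(mdic & E1000_MDIC_READY));
+ *	if (!(mdic & E1000_MDIC_ERROR))
+ *		phy_data = (uint16_t)(mdic & E1000_MDIC_DATA_MASK);
+ */
+
+#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES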
0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
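+
+/*
+ * Illustrative sketch (not part of the original header): ICR is
+ * read-to-clear, so an interrupt handler reads it once and dispatches on
+ * the latched bits. Assuming the E1000_READ_REG() accessor macro, where
+ * handle_link_change() and handle_rx() are placeholders for the driver's
+ * own routines:
+ *
+ *	uint32_t icr = E1000_READ_REG(hw, ICR);
+ *
+ *	if (!(icr & E1000_ICR_INT_ASSERTED))
+ *		return 0;
+ *	if (icr & E1000_ICR_LSC)
+ *		handle_link_change(hw);
+ *	if (icr & (E1000_ICR_RXT0 | E1000_ICR_RXDMT0))
+ *		handle_rx(hw);
+ */
+
+/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min.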
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
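+
+/*
+ * Illustrative note (not part of the original header): the SZ_* field
+ * encodings above are reused, so the resulting buffer size depends on
+ * BSEX. For example, 0x00010000 selects 1024-byte buffers with BSEX
+ * clear:
+ *
+ *	rctl &= ~E1000_RCTL_BSEX;
+ *	rctl |= E1000_RCTL_SZ_2048;
+ *
+ * versus, for jumbo buffers with the same field value:
+ *
+ *	rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX;
+ */
+#define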
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_FW_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
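The PSRCTL usage note above packs four receive buffer sizes into one register value; the following minimal sketch restates it as a function, using only the masks and shifts defined above plus a hypothetical power-of-two ROUNDUP() helper (not part of this header):

/* Hypothetical helper: round v up to the next multiple of m (m a power of two). */
#define ROUNDUP(v, m) (((v) + (m) - 1) & ~((m) - 1))

static uint32_t e1000_pack_psrctl(uint32_t value0, uint32_t value1,
				  uint32_t value2, uint32_t value3)
{
	uint32_t psrctl = 0;

	psrctl |= (ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
		  E1000_PSRCTL_BSIZE0_MASK;	/* bits 6:0, 128-byte units */
	psrctl |= (ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
		  E1000_PSRCTL_BSIZE1_MASK;	/* bits 13:8, 1 KB units */
	psrctl |= (ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
		  E1000_PSRCTL_BSIZE2_MASK;	/* bits 21:16, 1 KB units */
	psrctl |= (ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
		  E1000_PSRCTL_BSIZE3_MASK;	/* bits 29:24, 1 KB units */

	return psrctl;
}

With the documented defaults (256, 4096, 4096, 0) this yields 0x00040402.

+ +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x000000FF /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x0000FF00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH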
0x00FF0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* 
PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* 
Enable MNG packets to host + * memory */ +#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + uint8_t command_id; + uint8_t command_length; + uint8_t command_options; /* I/F bits for command, status for return */ + uint8_t checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head, 4 bytes */ + uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data, 0..252 bytes */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers */ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000
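For reference, a sketch of how the checksum field of the command header above is typically maintained; it assumes (without the driver source at hand) the common convention that all bytes of the command block, header included, must sum to zero modulo 256:

/* Hypothetical helper: fix up the checksum so the whole block (header +
 * command_length data bytes) sums to zero modulo 256 -- an assumed
 * convention, not confirmed by this header. */
static void e1000_hi_set_checksum(struct e1000_host_command_info *cmd)
{
	uint8_t *p = (uint8_t *)cmd;
	uint8_t sum = 0;
	unsigned int i;
	unsigned int len = sizeof(cmd->command_header) +
			   cmd->command_header.command_length;

	cmd->command_header.checksum = 0;
	for (i = 0; i < len; i++)
		sum += p[i];
	cmd->command_header.checksum = (uint8_t)(0 - sum);
}

+ +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE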
0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_RESERVED_82573 0xF746 +#define ID_LED_DEFAULT_82573 0x1811 +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_OFF2 << 8) | \ + (ID_LED_DEF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + +/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS) */ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009 +#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000 0x00000008 +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808
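The EEPROM_SUM rule stated earlier in this block (all words up to and including EEPROM_CHECKSUM_REG must add up to 0xBABA) is easy to verify; a minimal sketch, with eeprom_read_word() standing in for whatever word-read routine the surrounding driver provides:

/* eeprom_read_word() is a hypothetical accessor, not part of this header. */
extern uint16_t eeprom_read_word(uint16_t offset);

static int e1000_eeprom_checksum_ok(void)
{
	uint16_t sum = 0;
	uint16_t i;

	for (i = 0; i <= EEPROM_CHECKSUM_REG; i++)
		sum += eeprom_read_word(i);	/* 16-bit wraparound is intended */

	return sum == EEPROM_SUM;		/* 0xBABA when the image is intact */
}

+ +/* The historical defaults for the flow control values are given below. */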
+#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for the EEPROM auto read done bit after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((uint32_t)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * errors = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = TRUE; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = FALSE; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. + */
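A minimal illustration of how the relocation shifts above are meant to be applied; eeprom_word0f and eeprom_word0a are hypothetical words already read from EEPROM offsets 0x0F and 0x0A:

uint16_t eeprom_word0f = 0x3000;	/* sample: both pause bits (13:12) set */
uint16_t eeprom_word0a = 0x01E0;	/* sample: all SWDPIO bits (8:5) set */
uint32_t txcw = 0, ctrl = 0;

/* EEPROM pause bits 13:12 land in TXCW bits 8:7 (E1000_TXCW_PAUSE_MASK) */
txcw |= (eeprom_word0f & EEPROM_WORD0F_PAUSE_MASK) >> PAUSE_SHIFT;
/* EEPROM SWDPIO bits 8:5 land in CTRL bits 25:22 */
ctrl |= (uint32_t)(eeprom_word0a & EEPROM_WORD0A_SWDPIO) << SWDPIO_SHIFT;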
+#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values */ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 +#define
IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_SPEC_STATUS \ + GG82563_REG(0, 17) /* PHY Specific Status */ +#define GG82563_PHY_INT_ENABLE \ + GG82563_REG(0, 18) /* Interrupt Enable */ +#define GG82563_PHY_SPEC_STATUS_2 \ + GG82563_REG(0, 19) /* PHY Specific Status 2 */ +#define GG82563_PHY_RX_ERR_CNTR \ + GG82563_REG(0, 21) /* Receive Error Counter */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ +#define GG82563_PHY_TEST_CLK_CTRL \ + GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL_2 \ + GG82563_REG(2, 26) /* MAC Specific Control 2 */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PORT_RESET \ + GG82563_REG(193, 17) /* Port Reset */ +#define GG82563_PHY_REVISION_ID \ + GG82563_REG(193, 18) /* Revision ID */ +#define GG82563_PHY_DEVICE_ID \ + GG82563_REG(193, 19) /* Device ID */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ +#define GG82563_PHY_RATE_ADAPT_CTRL \ + GG82563_REG(193, 25) /* Rate Adaptation Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \ + GG82563_REG(194, 16) /* FIFO's Control/Status */ +#define GG82563_PHY_KMRN_CTRL \ + GG82563_REG(194, 17) /* Control */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ +#define GG82563_PHY_KMRN_DIAGNOSTIC \ + GG82563_REG(194, 19) /* Diagnostic */ +#define GG82563_PHY_ACK_TIMEOUTS \ + GG82563_REG(194, 20) /* Acknowledge Timeouts */ +#define GG82563_PHY_ADV_ABILITY \ + GG82563_REG(194, 21) /* Advertised Ability */ +#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \ + GG82563_REG(194, 23) /* Link Partner Advertised Ability */ +#define GG82563_PHY_ADV_NEXT_PAGE \ + GG82563_REG(194, 24) /* Advertised Next Page */ +#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \ + GG82563_REG(194, 25) /* Link Partner Advertised Next page */ +#define GG82563_PHY_KMRN_MISC \ + GG82563_REG(194, 26) /* Misc. 
*/ + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* Page received */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Next Page able */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = additional NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = additional NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define
M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. + */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers store the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* GG82563 PHY Specific Control Register (Page 0, Register 16) */ +#define GG82563_PSCR_DISABLE_JABBER 0x0001 /* 1=Disable Jabber */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Polarity Reversal Disabled */ +#define GG82563_PSCR_POWER_DOWN 0x0004 /* 1=Power Down */ +#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE 0x0008 /* 1=Transmitter Disabled */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI configuration */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX configuration */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Automatic crossover */ +#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE 0x0080 /* 1=Enable Extended Distance */ +#define GG82563_PSCR_ENERGY_DETECT_MASK 0x0300 +#define GG82563_PSCR_ENERGY_DETECT_OFF 0x0000 /* 00,01=Off */ +#define GG82563_PSCR_ENERGY_DETECT_RX 0x0200 /* 10=Sense on Rx only (Energy Detect) */ +#define GG82563_PSCR_ENERGY_DETECT_RX_TM 0x0300 /* 11=Sense and Tx NLP */ +#define GG82563_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force Link Good */ +#define GG82563_PSCR_DOWNSHIFT_ENABLE 0x0800 /* 1=Enable Downshift */ +#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK 0x7000 +#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT 12 + +/* PHY Specific Status Register (Page 0, Register 17) */ +#define
GG82563_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define GG82563_PSSR_POLARITY 0x0002 /* 1=Polarity Reversed */ +#define GG82563_PSSR_LINK 0x0008 /* 1=Link is Up */ +#define GG82563_PSSR_ENERGY_DETECT 0x0010 /* 1=Sleep, 0=Active */ +#define GG82563_PSSR_DOWNSHIFT 0x0020 /* 1=Downshift */ +#define GG82563_PSSR_CROSSOVER_STATUS 0x0040 /* 1=MDIX, 0=MDI */ +#define GG82563_PSSR_RX_PAUSE_ENABLED 0x0100 /* 1=Receive Pause Enabled */ +#define GG82563_PSSR_TX_PAUSE_ENABLED 0x0200 /* 1=Transmit Pause Enabled */ +#define GG82563_PSSR_LINK_UP 0x0400 /* 1=Link Up */ +#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */ +#define GG82563_PSSR_PAGE_RECEIVED 0x1000 /* 1=Page Received */ +#define GG82563_PSSR_DUPLEX 0x2000 /* 1-Full-Duplex */ +#define GG82563_PSSR_SPEED_MASK 0xC000 +#define GG82563_PSSR_SPEED_10MBPS 0x0000 /* 00=10Mbps */ +#define GG82563_PSSR_SPEED_100MBPS 0x4000 /* 01=100Mbps */ +#define GG82563_PSSR_SPEED_1000MBPS 0x8000 /* 10=1000Mbps */ + +/* PHY Specific Status Register 2 (Page 0, Register 19) */ +#define GG82563_PSSR2_JABBER 0x0001 /* 1=Jabber */ +#define GG82563_PSSR2_POLARITY_CHANGED 0x0002 /* 1=Polarity Changed */ +#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */ +#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT 0x0020 /* 1=Downshift Detected */ +#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE 0x0040 /* 1=Crossover Changed */ +#define GG82563_PSSR2_FALSE_CARRIER 0x0100 /* 1=False Carrier */ +#define GG82563_PSSR2_SYMBOL_ERROR 0x0200 /* 1=Symbol Error */ +#define GG82563_PSSR2_LINK_STATUS_CHANGED 0x0400 /* 1=Link Status Changed */ +#define GG82563_PSSR2_AUTO_NEG_COMPLETED 0x0800 /* 1=Auto-Neg Completed */ +#define GG82563_PSSR2_PAGE_RECEIVED 0x1000 /* 1=Page Received */ +#define GG82563_PSSR2_DUPLEX_CHANGED 0x2000 /* 1=Duplex Changed */ +#define GG82563_PSSR2_SPEED_CHANGED 0x4000 /* 1=Speed Changed */ +#define GG82563_PSSR2_AUTO_NEG_ERROR 0x8000 /* 1=Auto-Neg Error */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_10BT_POLARITY_FORCE 0x0002 /* 1=Force Negative Polarity */ +#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK 0x000C +#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL 0x0000 /* 00,01=Normal Operation */ +#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS 0x0008 /* 10=Select 112ns Sequence */ +#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS 0x000C /* 11=Select 16ns Sequence */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Negotiation */ +#define GG82563_PSCR2_1000BT_DISABLE 0x4000 /* 1=Disable 1000BASE-T */ +#define GG82563_PSCR2_TRANSMITER_TYPE_MASK 0x8000 +#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B 0x0000 /* 0=Class B */ +#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A 0x8000 /* 1=Class A */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ 0x0006 +#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M; + 1 = 50-80M; + 2 = 80-110M; + 3 = 110-140M; + 4 = >140M */ + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */ +#define GG82563_KMCR_FORCE_LINK_UP 0x0040 /* 1=Force Link Up 
*/ +#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT 0x0080 +#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK 0x0400 +#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT 0x0400 /* 1=6.25MHz, 0=0.8MHz */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Power Management Control Register (Page 193, Register 20) */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 /* 1=Enable SERDES Electrical Idle */ +#define GG82563_PMCR_DISABLE_PORT 0x0002 /* 1=Disable Port */ +#define GG82563_PMCR_DISABLE_SERDES 0x0004 /* 1=Disable SERDES */ +#define GG82563_PMCR_REVERSE_AUTO_NEG 0x0008 /* 1=Enable Reverse Auto-Negotiation */ +#define GG82563_PMCR_DISABLE_1000_NON_D0 0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */ +#define GG82563_PMCR_DISABLE_1000 0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */ +#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A 0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */ +#define GG82563_PMCR_FORCE_POWER_STATE 0x0080 /* 1=Force Power State */ +#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK 0x0300 +#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR 0x0000 /* 00=Dr */ +#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U 0x0100 /* 01=D0u */ +#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A 0x0200 /* 10=D0a */ +#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3 0x0300 /* 11=D3 */ + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding Use */ + + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define L1LXT971A_PHY_ID 0x001378E0 +#define GG82563_E_PHY_ID 0x01410CA0
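The IDs above are compared against a 32-bit value assembled from the two IEEE ID words; a sketch, where phy_read() is a hypothetical MII read helper and the low nibble of PHY_ID2 is treated as the revision (cf. M88E1011_I_REV_4):

/* Hypothetical MII register read helper, not part of this header. */
extern uint16_t phy_read(uint8_t reg);

static int phy_is_m88e1000(void)
{
	uint32_t phy_id = ((uint32_t)phy_read(PHY_ID1) << 16) |
			  (phy_read(PHY_ID2) & 0xFFF0); /* mask off revision nibble */

	return phy_id == M88E1000_E_PHY_ID || phy_id == M88E1000_I_PHY_ID;
}

+ + +/* Bits... + * 15-5: page + * 4-0: register offset + */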
+#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
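To make the paging scheme concrete: PHY_REG(769, 17) packs the page into bits 15:5 and the offset into bits 4:0, so IGP3_PHY_PORT_CTRL expands to (769 << 5) | 17 = 0x6031. Hypothetical inverse helpers (not part of this header) recover both fields:

#define PHY_REG_PAGE(addr) ((addr) >> PHY_PAGE_SHIFT)		/* 0x6031 >> 5 == 769 */
#define PHY_REG_OFFSET(addr) ((addr) & MAX_PHY_REG_ADDRESS)	/* 0x6031 & 0x1F == 17 */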
Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET	0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL	PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND	0x0080
+
+#define IGP03E1000_E_PHY_ID	0x02A80390
+#define IFE_E_PHY_ID	0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID	0x02A80320
+#define IFE_C_E_PHY_ID	0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL	0x10 /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL	0x11 /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER	0x13 /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT	0x14 /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME	0x15 /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR	0x16 /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR	0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR	0x18 /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT	0x19 /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER	0x1A /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED	0x1B /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL	0x1C /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL	0x1D /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE	0x2000 /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN	0x0400 /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN	0x0200 /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED	0x0100 /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK	0x007C /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED	0x0002 /* Auto-negotiation speed result 1=100Mb/s, 0=10Mb/s */
+#define IFE_PESC_DUPLEX	0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT	8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN	0x0100 /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY	0x0020 /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE	0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE	0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT	5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT	4
+
+#define IFE_PMC_AUTO_MDIX	0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX	0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS	0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE	0x0010 /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT	6
+#define IFE_PHC_MDIX_RESET_ALL_MASK	0x0000 /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE	0x8000 /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK	0x4000 /* 1=Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC	0x2000 /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ	0x0200 /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ	0x0400 /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK	0x0600 /* Mask for indication type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK	0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK	0x0000 /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE	0x0020 /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF
0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON	0x0007 /* Force LEDs 0 and 2 on */
+
+#define ICH8_FLASH_COMMAND_TIMEOUT	500 /* 500 ms, should be adjusted */
+#define ICH8_FLASH_CYCLE_REPEAT_COUNT	10 /* 10 cycles, should be adjusted */
+#define ICH8_FLASH_SEG_SIZE_256	256
+#define ICH8_FLASH_SEG_SIZE_4K	4096
+#define ICH8_FLASH_SEG_SIZE_64K	65536
+
+#define ICH8_CYCLE_READ	0x0
+#define ICH8_CYCLE_RESERVED	0x1
+#define ICH8_CYCLE_WRITE	0x2
+#define ICH8_CYCLE_ERASE	0x3
+
+#define ICH8_FLASH_GFPREG	0x0000
+#define ICH8_FLASH_HSFSTS	0x0004
+#define ICH8_FLASH_HSFCTL	0x0006
+#define ICH8_FLASH_FADDR	0x0008
+#define ICH8_FLASH_FDATA0	0x0010
+#define ICH8_FLASH_FRACC	0x0050
+#define ICH8_FLASH_FREG0	0x0054
+#define ICH8_FLASH_FREG1	0x0058
+#define ICH8_FLASH_FREG2	0x005C
+#define ICH8_FLASH_FREG3	0x0060
+#define ICH8_FLASH_FPR0	0x0074
+#define ICH8_FLASH_FPR1	0x0078
+#define ICH8_FLASH_SSFSTS	0x0090
+#define ICH8_FLASH_SSFCTL	0x0092
+#define ICH8_FLASH_PREOP	0x0094
+#define ICH8_FLASH_OPTYPE	0x0096
+#define ICH8_FLASH_OPMENU	0x0098
+
+#define ICH8_FLASH_REG_MAPSIZE	0x00A0
+#define ICH8_FLASH_SECTOR_SIZE	4096
+#define ICH8_GFPREG_BASE_MASK	0x1FFF
+#define ICH8_FLASH_LINEAR_ADDR_MASK	0x00FFFFFF
+
+/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+	struct ich8_hsfsts {
+#ifdef E1000_BIG_ENDIAN
+		uint16_t reserved2 :6;
+		uint16_t fldesvalid :1;
+		uint16_t flockdn :1;
+		uint16_t flcdone :1;
+		uint16_t flcerr :1;
+		uint16_t dael :1;
+		uint16_t berasesz :2;
+		uint16_t flcinprog :1;
+		uint16_t reserved1 :2;
+#else
+		uint16_t flcdone :1; /* bit 0 Flash Cycle Done */
+		uint16_t flcerr :1; /* bit 1 Flash Cycle Error */
+		uint16_t dael :1; /* bit 2 Direct Access error Log */
+		uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */
+		uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */
+		uint16_t reserved1 :2; /* bit 7:6 Reserved */
+		uint16_t reserved2 :6; /* bit 13:8 Reserved */
+		uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+		uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */
+#endif
+	} hsf_status;
+	uint16_t regval;
+};
+
+/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+	struct ich8_hsflctl {
+#ifdef E1000_BIG_ENDIAN
+		uint16_t fldbcount :2;
+		uint16_t flockdn :6;
+		uint16_t flcgo :1;
+		uint16_t flcycle :2;
+		uint16_t reserved :5;
+#else
+		uint16_t flcgo :1; /* 0 Flash Cycle Go */
+		uint16_t flcycle :2; /* 2:1 Flash Cycle */
+		uint16_t reserved :5; /* 7:3 Reserved */
+		uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */
+		uint16_t flockdn :6; /* 15:10 Reserved */
+#endif
+	} hsf_ctrl;
+	uint16_t regval;
+};
+
+/* ICH8 Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+	struct ich8_flracc {
+#ifdef E1000_BIG_ENDIAN
+		uint32_t gmwag :8;
+		uint32_t gmrag :8;
+		uint32_t grwa :8;
+		uint32_t grra :8;
+#else
+		uint32_t grra :8; /* 0:7 GbE region Read Access */
+		uint32_t grwa :8; /* 8:15 GbE region Write Access */
+		uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */
+		uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */
+#endif
+	} hsf_flregacc;
+	uint32_t regval; /* the bit-fields above span 32 bits, so regval must be 32-bit too */
+};
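+
+/* A minimal usage sketch for the overlay unions above (illustrative
+ * only: the E1000_READ_ICH8_REG16() accessor is assumed to come from
+ * e1000_osdep.h and is not defined in this header):
+ *
+ *	union ich8_hws_flash_status hsfsts;
+ *
+ *	hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ *	if (hsfsts.hsf_status.flcdone)
+ *		handle_completed_cycle();	(hypothetical helper)
+ */
+
+/* Miscellaneous PHY bit definitions.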
*/
+#define PHY_PREAMBLE	0xFFFFFFFF
+#define PHY_SOF	0x01
+#define PHY_OP_READ	0x02
+#define PHY_OP_WRITE	0x01
+#define PHY_TURNAROUND	0x02
+#define PHY_PREAMBLE_SIZE	32
+#define MII_CR_SPEED_1000	0x0040
+#define MII_CR_SPEED_100	0x2000
+#define MII_CR_SPEED_10	0x0000
+#define E1000_PHY_ADDRESS	0x01
+#define PHY_AUTO_NEG_TIME	45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME	20 /* 2.0 Seconds */
+#define PHY_REVISION_MASK	0xFFFFFFF0
+#define DEVICE_SPEED_MASK	0x00000300 /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK	0x01E0
+#define REG9_SPEED_MASK	0x0300
+#define ADVERTISE_10_HALF	0x0001
+#define ADVERTISE_10_FULL	0x0002
+#define ADVERTISE_100_HALF	0x0004
+#define ADVERTISE_100_FULL	0x0008
+#define ADVERTISE_1000_HALF	0x0010
+#define ADVERTISE_1000_FULL	0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT	0x002F /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL	0x000F /* All 10/100 speeds */
+#define AUTONEG_ADVERTISE_10_ALL	0x0003 /* 10Mbps Full & Half speeds */
+
+#endif /* _E1000_HW_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c
new file mode 100644
index 0000000..726b38a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c
@@ -0,0 +1,3184 @@
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* Change Log
+ *
+ * Port to rtnet (0.9.3) by Mathias Koehrer. Base version: e1000-7.1.9
+ * 8-Aug-2006
+ *
+ * 7.0.36 10-Mar-2006
+ *   o fixups for compilation issues on older kernels
+ * 7.0.35 3-Mar-2006
+ * 7.0.34
+ *   o Major performance fixes by understanding relationship of rx_buffer_len
+ *     to window size growth. _ps and legacy receive paths changed
+ *   o merge with kernel changes
+ *   o legacy receive path went back to single descriptor model for jumbos
+ * 7.0.33 3-Feb-2006
+ *   o Added another fix for the pass false carrier bit
+ * 7.0.32 24-Jan-2006
+ *   o Need to rebuild with new version number for the pass false carrier
+ *     fix in e1000_hw.c
+ * 7.0.30 18-Jan-2006
+ *   o fixup for tso workaround to disable it for pci-x
+ *   o fix mem leak on 82542
+ *   o fixes for 10 Mb/s connections and incorrect stats
+ * 7.0.28 01/06/2006
+ *   o hardware workaround to only set "speed mode" bit for 1G link.
+ * 7.0.26 12/23/2005
+ *   o wake on lan support modified for device ID 10B5
+ *   o fix dhcp + vlan issue not making it to the iAMT firmware
+ * 7.0.24 12/9/2005
+ *   o New hardware support for the Gigabit NIC embedded in the south bridge
+ *   o Fixes to the recycling logic (skb->tail) from IBM LTC
+ * 6.3.7 11/18/2005
+ *   o Honor eeprom setting for enabling/disabling Wake On Lan
+ * 6.3.5 11/17/2005
+ *   o Fix memory leak in rx ring handling for PCI Express adapters
+ * 6.3.4 11/8/05
+ *   o Patch from Jesper Juhl to remove redundant NULL checks for kfree
+ * 6.3.2 9/20/05
+ *   o Render logic that sets/resets DRV_LOAD as inline functions to
+ *     avoid code replication. If f/w is AMT then set DRV_LOAD only when
+ *     network interface is open.
+ *   o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
+ *   o Adjust PBA partitioning for Jumbo frames using MTU size and not
+ *     rx_buffer_len
+ * 6.3.1 9/19/05
+ *   o Use adapter->tx_timeout_factor in Tx Hung Detect logic
+ *     (e1000_clean_tx_irq)
+ *   o Support for 8086:10B5 device (Quad Port)
+ */
+
+char e1000_driver_name[] = "rt_e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+#ifndef CONFIG_E1000_NAPI
+#define DRIVERNAPI
+#else
+#define DRIVERNAPI "-NAPI"
+#endif
+#define DRV_VERSION "7.1.9" DRIVERNAPI
+char e1000_driver_version[] = DRV_VERSION;
+static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+static struct pci_device_id e1000_pci_tbl[] = {
+	INTEL_E1000_ETHERNET_DEVICE(0x1000),
+	INTEL_E1000_ETHERNET_DEVICE(0x1001),
+	INTEL_E1000_ETHERNET_DEVICE(0x1004),
+	INTEL_E1000_ETHERNET_DEVICE(0x1008),
+	INTEL_E1000_ETHERNET_DEVICE(0x1009),
+	INTEL_E1000_ETHERNET_DEVICE(0x100C),
+	INTEL_E1000_ETHERNET_DEVICE(0x100D),
+	INTEL_E1000_ETHERNET_DEVICE(0x100E),
+	INTEL_E1000_ETHERNET_DEVICE(0x100F),
+	INTEL_E1000_ETHERNET_DEVICE(0x1010),
+	INTEL_E1000_ETHERNET_DEVICE(0x1011),
+	INTEL_E1000_ETHERNET_DEVICE(0x1012),
+	INTEL_E1000_ETHERNET_DEVICE(0x1013),
+	INTEL_E1000_ETHERNET_DEVICE(0x1014),
+	INTEL_E1000_ETHERNET_DEVICE(0x1015),
+	INTEL_E1000_ETHERNET_DEVICE(0x1016),
+	INTEL_E1000_ETHERNET_DEVICE(0x1017),
+	INTEL_E1000_ETHERNET_DEVICE(0x1018),
+	INTEL_E1000_ETHERNET_DEVICE(0x1019),
+	INTEL_E1000_ETHERNET_DEVICE(0x101A),
+	INTEL_E1000_ETHERNET_DEVICE(0x101D),
+	INTEL_E1000_ETHERNET_DEVICE(0x101E),
+	INTEL_E1000_ETHERNET_DEVICE(0x1026),
+	INTEL_E1000_ETHERNET_DEVICE(0x1027),
+	INTEL_E1000_ETHERNET_DEVICE(0x1028),
+	INTEL_E1000_ETHERNET_DEVICE(0x1049),
+	INTEL_E1000_ETHERNET_DEVICE(0x104A),
+	INTEL_E1000_ETHERNET_DEVICE(0x104B),
+	INTEL_E1000_ETHERNET_DEVICE(0x104C),
+	INTEL_E1000_ETHERNET_DEVICE(0x104D),
+	INTEL_E1000_ETHERNET_DEVICE(0x105E),
+	INTEL_E1000_ETHERNET_DEVICE(0x105F),
+	INTEL_E1000_ETHERNET_DEVICE(0x1060),
+	INTEL_E1000_ETHERNET_DEVICE(0x1075),
+	INTEL_E1000_ETHERNET_DEVICE(0x1076),
+	INTEL_E1000_ETHERNET_DEVICE(0x1077),
+	INTEL_E1000_ETHERNET_DEVICE(0x1078),
+	INTEL_E1000_ETHERNET_DEVICE(0x1079),
+	INTEL_E1000_ETHERNET_DEVICE(0x107A),
+	INTEL_E1000_ETHERNET_DEVICE(0x107B),
+	INTEL_E1000_ETHERNET_DEVICE(0x107C),
+	INTEL_E1000_ETHERNET_DEVICE(0x107D),
+	INTEL_E1000_ETHERNET_DEVICE(0x107E),
+	INTEL_E1000_ETHERNET_DEVICE(0x107F),
+	INTEL_E1000_ETHERNET_DEVICE(0x108A),
+	INTEL_E1000_ETHERNET_DEVICE(0x108B),
+	INTEL_E1000_ETHERNET_DEVICE(0x108C),
+	INTEL_E1000_ETHERNET_DEVICE(0x1096),
+	INTEL_E1000_ETHERNET_DEVICE(0x1098),
+
INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x109A), + INTEL_E1000_ETHERNET_DEVICE(0x10A4), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x10B9), + INTEL_E1000_ETHERNET_DEVICE(0x10BA), + INTEL_E1000_ETHERNET_DEVICE(0x10BB), + INTEL_E1000_ETHERNET_DEVICE(0x10BC), + INTEL_E1000_ETHERNET_DEVICE(0x10C4), + INTEL_E1000_ETHERNET_DEVICE(0x10C5), + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +static int e1000_open(struct rtnet_device *netdev); +static int e1000_close(struct rtnet_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_multi(struct rtnet_device *netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static int e1000_xmit_frame(struct rtskb *skb, struct rtnet_device *netdev); +static int e1000_intr(rtdm_irq_t *irq_handle); +static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + nanosecs_abs_t *time_stamp); +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +#ifdef SIOCGMIIPHY +#endif +void e1000_set_ethtool_ops(struct rtnet_device *netdev); +#ifdef ETHTOOL_OPS_COMPAT +extern int ethtool_ioctl(struct ifreq *ifr); +#endif +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct 
rtskb *skb); + + + + + +/* Exported from other modules */ + +extern void e1000_check_options(struct e1000_adapter *adapter); + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, +}; + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver for rtnet"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static int local_debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; +module_param_named(debug, local_debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + + +#define MAX_UNITS 8 + +static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 }; +module_param_array(cards, int, NULL, 0444); +MODULE_PARM_DESC(cards, "array of cards to be supported (eg. 1,0,1)"); + + +#define kmalloc(a,b) rtdm_malloc(a) +#define vmalloc(a) rtdm_malloc(a) +#define kfree(a) rtdm_free(a) +#define vfree(a) rtdm_free(a) + + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ + +static int __init +e1000_init_module(void) +{ + int ret; + printk(KERN_INFO "%s - version %s\n", + e1000_driver_string, e1000_driver_version); + + printk(KERN_INFO "%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ + +static void __exit +e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + int flags, err = 0; + + flags = RTDM_IRQTYPE_SHARED; +#ifdef CONFIG_PCI_MSI + if (adapter->hw.mac_type > e1000_82547_rev_2) { + adapter->have_msi = TRUE; + if ((err = pci_enable_msi(adapter->pdev))) { + DPRINTK(PROBE, ERR, + "Unable to allocate MSI interrupt Error: %d\n", err); + adapter->have_msi = FALSE; + } + } + if (adapter->have_msi) + flags = 0; +#endif + rt_stack_connect(netdev, &STACK_manager); + if ((err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq, + e1000_intr, flags, netdev->name, netdev))) + DPRINTK(PROBE, ERR, + "Unable to allocate interrupt Error: %d\n", err); + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + // struct rtnet_device *netdev = adapter->netdev; + + rtdm_irq_free(&adapter->irq_handle); + +#ifdef CONFIG_PCI_MSI + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); +#endif +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ + +static void +e1000_irq_disable(struct e1000_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + E1000_WRITE_REG(&adapter->hw, IMC, ~0); + E1000_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ + +static void +e1000_irq_enable(struct e1000_adapter *adapter) +{ + if (likely(atomic_dec_and_test(&adapter->irq_sem))) { + E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(&adapter->hw); + } +} + +/** + * e1000_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * 
e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+
+static void
+e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+	uint32_t extcnf;
+
+	/* Let firmware take over control of h/w */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_ich8lan:
+		extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+
+static void
+e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+	uint32_t extcnf;
+	/* Let firmware know the driver has taken over */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_ich8lan:
+		extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
+		E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
+				extcnf | E1000_EXTCNF_CTRL_SWFLAG);
+		break;
+	default:
+		break;
+	}
+}
+
+int
+e1000_up(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int i;
+
+	/* hardware has been reset, we need to reload some things */
+
+	e1000_set_multi(netdev);
+
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	/* call E1000_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+		adapter->alloc_rx_buf(adapter, ring,
+				      E1000_DESC_UNUSED(ring));
+	}
+
+	// TODO makoehre adapter->tx_queue_len = netdev->tx_queue_len;
+
+	schedule_delayed_work(&adapter->watchdog_task, 1);
+
+	e1000_irq_enable(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+static void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+	uint16_t mii_reg = 0;
+
+	/* Just clear the power down bit to wake the phy back up */
+	if (adapter->hw.media_type == e1000_media_type_copper) {
+		/* according to the manual, the phy will retain its
+		 * settings
across a power-down/up cycle */ + e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) && + e1000_check_mng_mode(&adapter->hw); + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is TRUE * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active */ + if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && + adapter->hw.mac_type != e1000_ich8lan && + adapter->hw.media_type == e1000_media_type_copper && + !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && + !mng_mode_enabled && + !e1000_check_phy_reset_block(&adapter->hw)) { + uint16_t mii_reg = 0; + e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); + mdelay(1); + } +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + cancel_work_sync(&adapter->reset_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); +} + +void +e1000_down(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + + e1000_irq_disable(adapter); + + e1000_down_and_stop(adapter); + + // TODO makoehre netdev->tx_queue_len = adapter->tx_queue_len; + adapter->link_speed = 0; + adapter->link_duplex = 0; + rtnetif_carrier_off(netdev); + rtnetif_stop_queue(netdev); + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void +e1000_reinit_locked(struct e1000_adapter *adapter) +{ + WARN_ON(in_interrupt()); + if (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + e1000_down(adapter); + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void +e1000_reset(struct e1000_adapter *adapter) +{ + uint32_t pba; + uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. + */ + + switch (adapter->hw.mac_type) { + case e1000_82547: + case e1000_82547_rev_2: + pba = E1000_PBA_30K; + break; + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + pba = E1000_PBA_38K; + break; + case e1000_82573: + pba = E1000_PBA_12K; + break; + case e1000_ich8lan: + pba = E1000_PBA_8K; + break; + default: + pba = E1000_PBA_48K; + break; + } + + if ((adapter->hw.mac_type != e1000_82573) && + (adapter->netdev->mtu > E1000_RXBUFFER_8192)) + pba -= 8; /* allocate more FIFO for Tx */ + + + if (adapter->hw.mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + + E1000_WRITE_REG(&adapter->hw, PBA, pba); + + /* flow control settings */ + /* Set the FC high water mark to 90% of the FIFO size. + * Required to clear last 3 LSB */ + fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; + /* We can't use 90% on small FIFOs because the remainder + * would be less than 1 full frame. In this case, we size + * it to allow at least a full frame above the high water + * mark. 
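+	 * For example, with the default 48 KB packet buffer (pba = 48),
+	 * ((48 * 9216) / 10) & 0xFFF8 works out to 44232 bytes, i.e.
+	 * roughly 90% of the 49152-byte FIFO rounded down to a multiple
+	 * of 8.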
*/ + if (pba < E1000_PBA_16K) + fc_high_water_mark = (pba * 1024) - 1600; + + adapter->hw.fc_high_water = fc_high_water_mark; + adapter->hw.fc_low_water = fc_high_water_mark - 8; + if (adapter->hw.mac_type == e1000_80003es2lan) + adapter->hw.fc_pause_time = 0xFFFF; + else + adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; + adapter->hw.fc_send_xon = 1; + adapter->hw.fc = adapter->hw.original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(&adapter->hw); + if (adapter->hw.mac_type >= e1000_82544) + E1000_WRITE_REG(&adapter->hw, WUC, 0); + if (e1000_init_hw(&adapter->hw)) + DPRINTK(PROBE, ERR, "Hardware Error\n"); + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE); + + E1000_WRITE_REG(&adapter->hw, AIT, 0); // Set adaptive interframe spacing to zero + + // e1000_reset_adaptive(&adapter->hw); + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); + + if (!adapter->smart_power_down && + (adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == e1000_82572)) { + uint16_t phy_data = 0; + /* speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed */ + e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, + &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, + phy_data); + } + +} + +static void +e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e1000_reinit_locked(adapter); +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ **/ + +static int e1000_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct rtnet_device *netdev; + struct e1000_adapter *adapter; + unsigned long mmio_start, mmio_len; + unsigned long flash_start, flash_len; + + static int cards_found = 0; + static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ + int i, err; + uint16_t eeprom_data; + uint16_t eeprom_apme_mask = E1000_EEPROM_APME; + + if (cards[cards_found++] == 0) + { + return -ENODEV; + } + + if ((err = pci_enable_device(pdev))) + return err; + + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) || + (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))) { + if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) && + (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + E1000_ERR("No usable DMA configuration, aborting\n"); + return err; + } + } + + if ((err = pci_request_regions(pdev, e1000_driver_name))) + return err; + + pci_set_master(pdev); + + netdev = rt_alloc_etherdev(sizeof(struct e1000_adapter), 48); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + memset(netdev->priv, 0, sizeof(struct e1000_adapter)); + + rt_rtdev_connect(netdev, &RTDEV_manager); + + + // SET_NETDEV_DEV(netdev, &pdev->dev); + netdev->vers = RTDEV_VERS_2_0; + netdev->sysbind = &pdev->dev; + + pci_set_drvdata(pdev, netdev); + adapter = netdev->priv; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + adapter->msg_enable = (1 << local_debug) - 1; + + mmio_start = pci_resource_start(pdev, BAR_0); + mmio_len = pci_resource_len(pdev, BAR_0); + + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_ioremap; + } + + for (i = BAR_1; i <= BAR_5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + adapter->hw.io_base = pci_resource_start(pdev, i); + break; + } + } + + netdev->open = &e1000_open; + netdev->stop = &e1000_close; + netdev->hard_start_xmit = &e1000_xmit_frame; + // netdev->get_stats = &e1000_get_stats; + // netdev->set_multicast_list = &e1000_set_multi; + // netdev->set_mac_address = &e1000_set_mac; + // netdev->change_mtu = &e1000_change_mtu; + // netdev->do_ioctl = &e1000_ioctl; + // e1000_set_ethtool_ops(netdev); + strcpy(netdev->name, pci_name(pdev)); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + netdev->base_addr = adapter->hw.io_base; + + adapter->bd_number = cards_found - 1; + + /* setup the private structure */ + + if ((err = e1000_sw_init(adapter))) + goto err_sw_init; + + /* Flash BAR mapping must happen after e1000_sw_init + * because it depends on mac_type */ + if ((adapter->hw.mac_type == e1000_ich8lan) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) { + err = -EIO; + goto err_flashmap; + } + } + + if ((err = e1000_check_phy_reset_block(&adapter->hw))) + DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); + + /* if ksp3, indicate if it's port a being setup */ + if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 && + e1000_ksp3_port_a == 0) + adapter->ksp3_port_a = 1; + e1000_ksp3_port_a++; + /* Reset for multiple KP3 adapters */ + if (e1000_ksp3_port_a == 4) + e1000_ksp3_port_a = 0; + + netdev->features |= NETIF_F_LLTX; + + adapter->en_mng_pt = 
e1000_enable_mng_pass_thru(&adapter->hw); + + /* initialize eeprom parameters */ + + if (e1000_init_eeprom_params(&adapter->hw)) { + E1000_ERR("EEPROM initialization failed\n"); + return -EIO; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state */ + + e1000_reset_hw(&adapter->hw); + + /* make sure the EEPROM is good */ + + if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) { + DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + + /* copy the MAC address out of the EEPROM */ + + if (e1000_read_mac_addr(&adapter->hw)) + DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); +#ifdef ETHTOOL_GPERMADDR + memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { +#else + if (!is_valid_ether_addr(netdev->dev_addr)) { +#endif + DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + e1000_read_part_num(&adapter->hw, &(adapter->part_num)); + + e1000_get_bus_info(&adapter->hw); + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, + (void (*)(struct work_struct *))e1000_reset_task); + + /* we're going to reset, so assume we have no link for now */ + + rtnetif_carrier_off(netdev); + rtnetif_stop_queue(netdev); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (adapter->hw.mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(&adapter->hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_ich8lan: + e1000_read_eeprom(&adapter->hw, + EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_ICH8_APME; + break; + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82571: + case e1000_80003es2lan: + if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ + e1000_read_eeprom(&adapter->hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_eeprom(&adapter->hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->wol |= E1000_WUFC_MAG; + + /* print bus type/speed/width info */ + { + struct e1000_hw *hw = &adapter->hw; + DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", + ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : + (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")), + ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" : + (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" : + (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" : + (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" : + (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"), + ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : + (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" : + (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" : + "32-bit")); + } + + printk(KERN_INFO "e1000: hw "); + for (i = 0; i < 6; i++) + printk(KERN_CONT "%2.2x%c", netdev->dev_addr[i], i == 5 ? 
'\n' : ':');
+
+	/* reset the hardware with the new settings */
+	e1000_reset(adapter);
+
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up. For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (adapter->hw.mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	strcpy(netdev->name, "rteth%d");
+	if ((err = rt_register_rtnetdev(netdev)))
+		goto err_register;
+
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+	return 0;
+
+err_register:
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+err_flashmap:
+err_sw_init:
+err_eeprom:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void e1000_remove(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	uint32_t manc;
+
+	e1000_down_and_stop(adapter);
+
+	if (adapter->hw.mac_type >= e1000_82540 &&
+	    adapter->hw.mac_type != e1000_ich8lan &&
+	    adapter->hw.media_type == e1000_media_type_copper) {
+		manc = E1000_READ_REG(&adapter->hw, MANC);
+		if (manc & E1000_MANC_SMBUS_EN) {
+			manc |= E1000_MANC_ARP_EN;
+			E1000_WRITE_REG(&adapter->hw, MANC, manc);
+		}
+	}
+
+	/* Release control of h/w to f/w. If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
+
+	rt_unregister_rtnetdev(netdev);
+
+	if (!e1000_check_phy_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+
+	iounmap(adapter->hw.hw_addr);
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	pci_release_regions(pdev);
+
+	rtdev_free(netdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
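+ *
+ * As a concrete example, with the default 1500-byte MTU the code below
+ * computes max_frame_size = 1500 + 14 (Ethernet header) + 4 (FCS) =
+ * 1518 bytes.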
+ **/ + +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct rtnet_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; + adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; + hw->max_frame_size = netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + + if (e1000_set_mac_type(hw)) { + DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + + hw->wait_autoneg_complete = FALSE; + hw->tbi_compatibility_en = TRUE; + hw->adaptive_ifs = FALSE; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = FALSE; + hw->master_slave = E1000_MASTER_SLAVE; + } + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + + if (e1000_alloc_queues(adapter)) { + DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + atomic_set(&adapter->irq_sem, 1); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. + **/ + +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + int size; + + size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; + adapter->tx_ring = kmalloc(size, GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + memset(adapter->tx_ring, 0, size); + + size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; + adapter->rx_ring = kmalloc(size, GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + memset(adapter->rx_ring, 0, size); + + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
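+ *
+ * Under RTnet this entry point is typically reached when the interface
+ * is configured from user space, e.g. "rtifconfig rteth0 up 10.0.0.1"
+ * (the address shown is illustrative only).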
+ **/ + +static int +e1000_open(struct rtnet_device *netdev) +{ + struct e1000_adapter *adapter = netdev->priv; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate transmit descriptors */ + + if ((err = e1000_setup_all_tx_resources(adapter))) + goto err_setup_tx; + + /* allocate receive descriptors */ + + if ((err = e1000_setup_all_rx_resources(adapter))) + goto err_setup_rx; + + err = e1000_request_irq(adapter); + if (err) + goto err_up; + + e1000_power_up_phy(adapter); + + if ((err = e1000_up(adapter))) + goto err_up; + + /* If AMT is enabled, let the firmware know that the network + * interface is now open */ + if (adapter->hw.mac_type == e1000_82573 && + e1000_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + /* Wait for the hardware to come up */ + msleep(3000); + + return E1000_SUCCESS; + +err_up: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ + +static int +e1000_close(struct rtnet_device *netdev) +{ + struct e1000_adapter *adapter = netdev->priv; + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + + /* If AMT is enabled, let the firmware know that the network + * interface is now closed */ + if (adapter->hw.mac_type == e1000_82573 && + e1000_check_mng_mode(&adapter->hw)) + e1000_release_hw_control(adapter); + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static boolean_t +e1000_check_64k_bound(struct e1000_adapter *adapter, + void *start, unsigned long len) +{ + unsigned long begin = (unsigned long) start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 */ + if (adapter->hw.mac_type == e1000_82545 || + adapter->hw.mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) != 0 ? 
FALSE : TRUE; + } + + return TRUE; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ + +static int +e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_buffer) * txdr->count; + txdr->buffer_info = vmalloc(size); + if (!txdr->buffer_info) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; + } + memset(txdr->buffer_info, 0, size); + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + E1000_ROUNDUP(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_ATOMIC); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes " + "at %p\n", txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_ATOMIC); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + DPRINTK(PROBE, ERR, + "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + rtdm_lock_init(&txdr->tx_lock); + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ + +int +e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Tx Queue %u failed\n", i); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
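+ *
+ * A legacy Tx descriptor is 16 bytes, so a ring of e.g. 256
+ * descriptors (the driver default) programs TDLEN = 256 * 16 = 4096
+ * bytes, exactly one 4 KB page.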
+ **/ + +static void +e1000_configure_tx(struct e1000_adapter *adapter) +{ + uint64_t tdba; + struct e1000_hw *hw = &adapter->hw; + uint32_t tdlen, tctl, tipg, tarc; + uint32_t ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + E1000_WRITE_REG(hw, TDLEN, tdlen); + E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); + E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); + E1000_WRITE_REG(hw, TDT, 0); + E1000_WRITE_REG(hw, TDH, 0); + adapter->tx_ring[0].tdh = E1000_TDH; + adapter->tx_ring[0].tdt = E1000_TDT; + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + case e1000_80003es2lan: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + E1000_WRITE_REG(hw, TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = E1000_READ_REG(hw, TCTL); + + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + +#ifdef DISABLE_MULR + /* disable Multiple Reads for debugging */ + tctl &= ~E1000_TCTL_MULR; +#endif + + if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { + tarc = E1000_READ_REG(hw, TARC0); + tarc |= ((1 << 25) | (1 << 21)); + E1000_WRITE_REG(hw, TARC0, tarc); + tarc = E1000_READ_REG(hw, TARC1); + tarc |= (1 << 25); + if (tctl & E1000_TCTL_MULR) + tarc &= ~(1 << 28); + else + tarc |= (1 << 28); + E1000_WRITE_REG(hw, TARC1, tarc); + } else if (hw->mac_type == e1000_80003es2lan) { + tarc = E1000_READ_REG(hw, TARC0); + tarc |= 1; + if (hw->media_type == e1000_media_type_internal_serdes) + tarc |= (1 << 20); + E1000_WRITE_REG(hw, TARC0, tarc); + tarc = E1000_READ_REG(hw, TARC1); + tarc |= 1; + E1000_WRITE_REG(hw, TARC1, tarc); + } + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
*/ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = 1; + + E1000_WRITE_REG(hw, TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ + +static int +e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_buffer) * rxdr->count; + rxdr->buffer_info = vmalloc(size); + if (!rxdr->buffer_info) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + memset(rxdr->buffer_info, 0, size); + + size = sizeof(struct e1000_ps_page) * rxdr->count; + rxdr->ps_page = kmalloc(size, GFP_KERNEL); + if (!rxdr->ps_page) { + vfree(rxdr->buffer_info); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + memset(rxdr->ps_page, 0, size); + + size = sizeof(struct e1000_ps_page_dma) * rxdr->count; + rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); + if (!rxdr->ps_page_dma) { + vfree(rxdr->buffer_info); + kfree(rxdr->ps_page); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + memset(rxdr->ps_page_dma, 0, size); + + if (adapter->hw.mac_type <= e1000_82547_rev_2) + desc_len = sizeof(struct e1000_rx_desc); + else + desc_len = sizeof(union e1000_rx_desc_packet_split); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + E1000_ROUNDUP(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_ATOMIC); + + if (!rxdr->desc) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); +setup_rx_desc_die: + vfree(rxdr->buffer_info); + kfree(rxdr->ps_page); + kfree(rxdr->ps_page_dma); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes " + "at %p\n", rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_ATOMIC); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + DPRINTK(PROBE, ERR, + "Unable to allocate memory " + "for the receive descriptor ring\n"); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + DPRINTK(PROBE, ERR, + "Unable to allocate aligned memory " + "for the receive descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the 
rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ + +int +e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Rx Queue %u failed\n", i); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) +static void +e1000_setup_rctl(struct e1000_adapter *adapter) +{ + uint32_t rctl; +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT + uint32_t pages = 0; +#endif + + rctl = E1000_READ_REG(&adapter->hw, RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* FIXME: disable the stripping of CRC because it breaks + * BMC firmware connected over SMBUS + if (adapter->hw.mac_type > e1000_82543) + rctl |= E1000_RCTL_SECRC; + */ + + if (adapter->hw.tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_256: + rctl |= E1000_RCTL_SZ_256; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_512: + rctl |= E1000_RCTL_SZ_512; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_1024: + rctl |= E1000_RCTL_SZ_1024; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + adapter->rx_ps_pages = 0; + + E1000_WRITE_REG(&adapter->hw, RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
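+ *
+ * Note that the ITR register counts in 256 ns units, so the write of
+ * 1000000000 / (itr * 256) below caps the interrupt rate at roughly
+ * adapter->itr interrupts per second (e.g. the common default of
+ * itr = 8000 yields a register value of 488).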
+ **/ + +static void +e1000_configure_rx(struct e1000_adapter *adapter) +{ + uint64_t rdba; + struct e1000_hw *hw = &adapter->hw; + uint32_t rdlen, rctl, rxcsum, ctrl_ext; + + { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = NULL; /* unused */ + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = E1000_READ_REG(hw, RCTL); + E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); + if (adapter->itr > 1) + E1000_WRITE_REG(hw, ITR, + 1000000000 / (adapter->itr * 256)); + } + + if (hw->mac_type >= e1000_82571) { + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); + /* Reset delay timers after every interrupt */ + ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + E1000_WRITE_REG(hw, IAM, ~0); + E1000_WRITE_FLUSH(hw); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + E1000_WRITE_REG(hw, RDLEN, rdlen); + E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); + E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); + E1000_WRITE_REG(hw, RDT, 0); + E1000_WRITE_REG(hw, RDH, 0); + adapter->rx_ring[0].rdh = E1000_RDH; + adapter->rx_ring[0].rdt = E1000_RDT; + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = E1000_READ_REG(hw, RXCSUM); + if (adapter->rx_csum == TRUE) { + rxcsum |= E1000_RXCSUM_TUOFL; + + } else { + rxcsum &= ~E1000_RXCSUM_TUOFL; + /* don't need to clear IPPCSE as it defaults to 0 */ + } + E1000_WRITE_REG(hw, RXCSUM, rxcsum); + } + + + /* Enable Receives */ + E1000_WRITE_REG(hw, RCTL, rctl); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ + +static void +e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ + +void +e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + if (buffer_info->dma) { + dma_unmap_page(&adapter->pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + } + if (buffer_info->skb) + kfree_rtskb(buffer_info->skb); + memset(buffer_info, 0, sizeof(struct e1000_buffer)); +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ + +static void +e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_buffer *buffer_info; + unsigned long size; + 
unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->tdh); + writel(0, adapter->hw.hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ + +static void +e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ + +static void +e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + kfree(rx_ring->ps_page); + rx_ring->ps_page = NULL; + kfree(rx_ring->ps_page_dma); + rx_ring->ps_page_dma = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ + +void +e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ + +static void +e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->skb) { + dma_unmap_single(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + + kfree_rtskb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct e1000_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + size = sizeof(struct e1000_ps_page) * rx_ring->count; + memset(rx_ring->ps_page, 0, size); + size = sizeof(struct e1000_ps_page_dma) * rx_ring->count; + memset(rx_ring->ps_page_dma, 0, size); + + /* Zero out the descriptor ring */ + + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->rdh); + writel(0, adapter->hw.hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ + +static void +e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain 
operations + */ +static void +e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + uint32_t rctl; + + e1000_pci_clear_mwi(&adapter->hw); + + rctl = E1000_READ_REG(&adapter->hw, RCTL); + rctl |= E1000_RCTL_RST; + E1000_WRITE_REG(&adapter->hw, RCTL, rctl); + E1000_WRITE_FLUSH(&adapter->hw); + mdelay(5); + + if (rtnetif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void +e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + uint32_t rctl; + + rctl = E1000_READ_REG(&adapter->hw, RCTL); + rctl &= ~E1000_RCTL_RST; + E1000_WRITE_REG(&adapter->hw, RCTL, rctl); + E1000_WRITE_FLUSH(&adapter->hw); + mdelay(5); + + if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(&adapter->hw); + + if (rtnetif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + **/ + +static void +e1000_set_multi(struct rtnet_device *netdev) +{ + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + uint32_t rctl; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? + E1000_NUM_MTA_REGISTERS_ICH8LAN : + E1000_NUM_MTA_REGISTERS; + + if (adapter->hw.mac_type == e1000_ich8lan) + rar_entries = E1000_RAR_ENTRIES_ICH8LAN; + + /* reserve RAR[14] for LAA over-write work-around */ + if (adapter->hw.mac_type == e1000_82571) + rar_entries--; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = E1000_READ_REG(hw, RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + } else if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + rctl &= ~E1000_RCTL_UPE; + } else { + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + } + + E1000_WRITE_REG(hw, RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 multicast addresses into the exact filters 1-14 + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + * -- with 82571 controllers only 0-13 entries are filled here + */ + + for (i = 1; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(hw); + } + + /* clear the old settings from the multicast hash table */ + + for (i = 0; i < mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + E1000_WRITE_FLUSH(hw); + } + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct 
e1000_adapter, + phy_info_task.work); + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ + +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct rtnet_device *netdev = adapter->netdev; + uint32_t tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((E1000_READ_REG(&adapter->hw, TDT) == + E1000_READ_REG(&adapter->hw, TDH)) && + (E1000_READ_REG(&adapter->hw, TDFT) == + E1000_READ_REG(&adapter->hw, TDFH)) && + (E1000_READ_REG(&adapter->hw, TDFTS) == + E1000_READ_REG(&adapter->hw, TDFHS))) { + tctl = E1000_READ_REG(&adapter->hw, TCTL); + E1000_WRITE_REG(&adapter->hw, TCTL, + tctl & ~E1000_TCTL_EN); + E1000_WRITE_REG(&adapter->hw, TDFT, + adapter->tx_head_addr); + E1000_WRITE_REG(&adapter->hw, TDFH, + adapter->tx_head_addr); + E1000_WRITE_REG(&adapter->hw, TDFTS, + adapter->tx_head_addr); + E1000_WRITE_REG(&adapter->hw, TDFHS, + adapter->tx_head_addr); + E1000_WRITE_REG(&adapter->hw, TCTL, tctl); + E1000_WRITE_FLUSH(&adapter->hw); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + rtnetif_wake_queue(netdev); + } else { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct rtnet_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + uint32_t link, tctl; + int32_t ret_val; + + ret_val = e1000_check_for_link(&adapter->hw); + if ((ret_val == E1000_ERR_PHY) && + (adapter->hw.phy_type == e1000_phy_igp_3) && + (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kumeran_lock_loss_workaround() */ + DPRINTK(LINK, INFO, + "Gigabit has been disabled, downgrading speed\n"); + } + if (adapter->hw.mac_type == e1000_82573) { + e1000_enable_tx_pkt_filtering(&adapter->hw); + } + + if ((adapter->hw.media_type == e1000_media_type_internal_serdes) && + !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) + link = !adapter->hw.serdes_link_down; + else + link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; + + if (link) { + if (!rtnetif_carrier_ok(netdev)) { + boolean_t txb2b = 1; + e1000_get_speed_and_duplex(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + + DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex"); + + /* tweak tx_queue_len according to speed/duplex + * and adjust the timeout factor */ + // TODO makoehre netdev->tx_queue_len = adapter->tx_queue_len; + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = 0; + // TODO makoehre netdev->tx_queue_len = 10; + adapter->tx_timeout_factor = 8; + break; + case SPEED_100: + txb2b = 0; + // TODO makoehre netdev->tx_queue_len = 100; + /* maybe add some timeout factor ? 
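(at 10 Mbps the factor of 8 above compensates for the slower drain; at 100 Mbps the default factor of 1 is kept) 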
*/ + break; + } + + if ((adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == e1000_82572) && + txb2b == 0) { +#define SPEED_MODE_BIT (1 << 21) + uint32_t tarc0; + tarc0 = E1000_READ_REG(&adapter->hw, TARC0); + tarc0 &= ~SPEED_MODE_BIT; + E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); + } + + + /* enable transmits in the hardware, need to do this + * after setting TARC0 */ + tctl = E1000_READ_REG(&adapter->hw, TCTL); + tctl |= E1000_TCTL_EN; + E1000_WRITE_REG(&adapter->hw, TCTL, tctl); + + rtnetif_carrier_on(netdev); + rtnetif_wake_queue(netdev); + schedule_delayed_work(&adapter->phy_info_task, 2 * HZ); + adapter->smartspeed = 0; + } + } else { + if (rtnetif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + DPRINTK(LINK, INFO, "NIC Link is Down\n"); + rtnetif_carrier_off(netdev); + rtnetif_stop_queue(netdev); + schedule_delayed_work(&adapter->phy_info_task, 2 * HZ); + + /* 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives in the ISR and + * reset device here in the watchdog + */ + if (adapter->hw.mac_type == e1000_80003es2lan) + /* reset device */ + schedule_work(&adapter->reset_task); + } + + e1000_smartspeed(adapter); + } + + + adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + // e1000_update_adaptive(&adapter->hw); + + if (!rtnetif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + } + } + + /* Dynamic mode for Interrupt Throttle Rate (ITR) */ + if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; Total + * asymmetrical Tx or Rx gets ITR=8000; everyone + * else is between 2000-8000. */ + uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000; + uint32_t dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = TRUE; + + /* With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
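(LAA: locally administered address.) 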
Set the appropriate LAA in RAR[0] */ + if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) + e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); + + /* Reschedule the task */ + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + + +static boolean_t +e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, + struct rtskb *skb) +{ + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + uint8_t css; + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + css = skb->h.raw - skb->data; + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = css + skb->csum; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); + + buffer_info->time_stamp = jiffies; + + if (unlikely(++i == tx_ring->count)) i = 0; + tx_ring->next_to_use = i; + + return TRUE; + } + + return FALSE; +} + +#define E1000_MAX_TXD_PWR 12 +#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) + +static int +e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, + struct rtskb *skb, unsigned int first, unsigned int max_per_txd, + unsigned int nr_frags, unsigned int mss) +{ + struct e1000_buffer *buffer_info; + unsigned int len = skb->len; + unsigned int offset = 0, size, count = 0, i; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* work-around for errata 10 and it applies + * to all controllers in PCI-X mode + * The fix is to make sure that the first descriptor of a + * packet is smaller than 2048 - 16 - 16 (or 2016) bytes + */ + if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && + (size > 2015) && count == 0)) + size = 2015; + + /* Workaround for potential 82544 hang in PCI-X. Avoid + * terminating buffers within evenly-aligned dwords. */ + if (unlikely(adapter->pcix_82544 && + !((unsigned long)(skb->data + offset + size - 1) & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->dma = + dma_map_single(&adapter->pdev->dev, + skb->data + offset, + size, + DMA_TO_DEVICE); + buffer_info->time_stamp = jiffies; + + len -= size; + offset += size; + count++; + if (unlikely(++i == tx_ring->count)) i = 0; + } + + + i = (i == 0) ? 
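/* back up one slot (wrapping at 0) to the last descriptor actually filled: it keeps the skb and becomes next_to_watch */ 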
tx_ring->count - 1 : i - 1; + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; +} + +static void +e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, + int tx_flags, int count, nanosecs_abs_t *xmit_stamp) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + if (xmit_stamp) + *xmit_stamp = cpu_to_be64(rtdm_clock_read() + *xmit_stamp); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tdt); +} + +/** + * 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. + **/ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int +e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct rtskb *skb) +{ + uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR; + + E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int +e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct rtskb *skb) +{ + struct e1000_hw *hw = &adapter->hw; + uint16_t length, offset; + if (skb->len > MINIMUM_DHCP_PACKET_SIZE) { + struct ethhdr *eth = (struct ethhdr *) skb->data; + if ((htons(ETH_P_IP) == eth->h_proto)) { + const struct iphdr *ip = + (struct iphdr *)((uint8_t *)skb->data+14); + if (IPPROTO_UDP == ip->protocol) { + struct udphdr *udp = + (struct udphdr *)((uint8_t *)ip + + (ip->ihl << 2)); + if (ntohs(udp->dest) == 67) { + offset = (uint8_t *)udp + 8 - skb->data; + length = skb->len - offset; + + return e1000_mng_write_dhcp_info(hw, + (uint8_t *)udp + 8, + length); + } + } + } + } + return 0; +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static int +e1000_xmit_frame(struct rtskb *skb, struct rtnet_device *netdev) +{ + struct e1000_adapter *adapter = netdev->priv; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = 
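/* 1 << E1000_MAX_TXD_PWR, i.e. 4096 bytes per descriptor */ 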
E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb->len; + rtdm_lockctx_t context; + unsigned int nr_frags = 0; + unsigned int mss = 0; + int count = 0; + + /* This goes back to the question of how to logically map a tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. */ + tx_ring = adapter->tx_ring; + + if (unlikely(skb->len <= 0)) { + kfree_rtskb(skb); + return NETDEV_TX_OK; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + count++; + + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + + if (adapter->hw.tx_pkt_filtering && + (adapter->hw.mac_type == e1000_82573)) + e1000_transfer_dhcp_info(adapter, skb); + + rtdm_lock_get_irqsave(&tx_ring->tx_lock, context); + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time */ + if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) { + rtnetif_stop_queue(netdev); + rtdm_lock_put_irqrestore(&tx_ring->tx_lock, context); + rtdm_printk("FATAL: rt_e1000 ran into tail close to head situation!\n"); + return NETDEV_TX_BUSY; + } + + if (unlikely(adapter->hw.mac_type == e1000_82547)) { + if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { + rtnetif_stop_queue(netdev); + rtdm_lock_put_irqrestore(&tx_ring->tx_lock, context); + + /* FIXME: warn the user earlier, i.e. on startup if + half-duplex is detected! */ + rtdm_printk("FATAL: rt_e1000 ran into 82547 " + "controller bug!\n"); + return NETDEV_TX_BUSY; + } + } + + first = tx_ring->next_to_use; + + if (likely(e1000_tx_csum(adapter, tx_ring, skb))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + e1000_tx_queue(adapter, tx_ring, tx_flags, + e1000_tx_map(adapter, tx_ring, skb, first, + max_per_txd, nr_frags, mss), + skb->xmit_stamp); + + rtdm_lock_put_irqrestore(&tx_ring->tx_lock, context); + + return NETDEV_TX_OK; +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure + **/ + +static int +e1000_intr(rtdm_irq_t *irq_handle) + /* int irq, void *data, struct pt_regs *regs) */ +{ + + struct rtnet_device *netdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + uint32_t rctl, icr = E1000_READ_REG(hw, ICR); + int i; + nanosecs_abs_t time_stamp = rtdm_clock_read(); + + if (unlikely(!icr)) { + return RTDM_IRQ_NONE; /* Not our interrupt */ + } + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (rtnetif_carrier_ok(netdev) && + (adapter->hw.mac_type == e1000_80003es2lan)) { + /* disable receives */ + rctl = E1000_READ_REG(hw, RCTL); + E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); + } + /* FIXME: we need to handle this via some yet-to-be-invented + error manager (Linux bottom-half and/or kthread) + mod_timer(&adapter->watchdog_timer, jiffies);*/ + } + + /* Writing IMC and IMS is needed for 82547. 
+ * Due to Hub Link bus being occupied, an interrupt + * de-assertion message is not able to be sent. + * When an interrupt assertion message is generated later, + * two messages are re-ordered and sent out. + * That causes APIC to think 82547 is in de-assertion + * state, while 82547 is in assertion state, resulting + * in dead lock. Writing IMC forces 82547 into + * de-assertion state. + */ + if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) { + atomic_inc(&adapter->irq_sem); + E1000_WRITE_REG(hw, IMC, ~0); + } + + adapter->data_received = 0; + + for (i = 0; i < E1000_MAX_INTR; i++) + if (unlikely(!e1000_clean_rx_irq(adapter, adapter->rx_ring, + &time_stamp) & + !e1000_clean_tx_irq(adapter, adapter->tx_ring))) + break; + + if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) + e1000_irq_enable(adapter); + + + if (adapter->data_received) + rt_mark_stack_mgr(netdev); + return RTDM_IRQ_HANDLED; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + **/ + +static boolean_t +e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct rtnet_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + boolean_t cleaned = FALSE; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { + for (cleaned = FALSE; !cleaned; ) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + memset(tx_desc, 0, sizeof(struct e1000_tx_desc)); + + if (unlikely(++i == tx_ring->count)) i = 0; + } + + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(cleaned && rtnetif_queue_stopped(netdev) && + rtnetif_carrier_ok(netdev))) { + rtdm_lock_get(&tx_ring->tx_lock); + if (rtnetif_queue_stopped(netdev) && + (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) + rtnetif_wake_queue(netdev); + rtdm_lock_put(&tx_ring->tx_lock); + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = FALSE; + if (tx_ring->buffer_info[eop].dma && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) + && !(E1000_READ_REG(&adapter->hw, STATUS) & + E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)((tx_ring - adapter->tx_ring) / + sizeof(struct e1000_tx_ring)), + readl(adapter->hw.hw_addr + tx_ring->tdh), + readl(adapter->hw.hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + rtnetif_stop_queue(netdev); + } + } + return cleaned; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and 
error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ + +static void +e1000_rx_checksum(struct e1000_adapter *adapter, + uint32_t status_err, uint32_t csum, + struct rtskb *skb) +{ + uint16_t status = (uint16_t)status_err; + uint8_t errors = (uint8_t)(status_err >> 24); + skb->ip_summed = CHECKSUM_NONE; + + /* 82543 or newer only */ + if (unlikely(adapter->hw.mac_type < e1000_82543)) return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (adapter->hw.mac_type <= e1000_82547_rev_2) { + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + } else { + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (adapter->hw.mac_type > e1000_82547_rev_2) { + /* IP fragment with UDP payload */ + /* Hardware complements the payload checksum, so we undo it + * and then put the value in host order for further stack use. + */ + csum = ntohl(csum ^ 0xFFFF); + skb->csum = csum; + skb->ip_summed = CHECKSUM_PARTIAL; + } + adapter->hw_csum_good++; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + **/ + +static boolean_t +e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + nanosecs_abs_t *time_stamp) +{ + struct rtnet_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + uint32_t length; + uint8_t last_byte; + unsigned int i; + int cleaned_count = 0; + boolean_t cleaned = FALSE; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct rtskb *skb, *next_skb; + u8 status; + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + next_skb = next_buffer->skb; + prefetch(next_skb->data - NET_IP_ALIGN); + + cleaned = TRUE; + cleaned_count++; + dma_unmap_single(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + + length = le16_to_cpu(rx_desc->length); + + if (unlikely(!(status & E1000_RXD_STAT_EOP))) { + /* All receives must fit into a single buffer */ + E1000_DBG("%s: Receive packet consumed multiple" + " buffers\n", netdev->name); + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + last_byte = *(skb->data + length - 1); + if (TBI_ACCEPT(&adapter->hw, status, + rx_desc->errors, length, last_byte)) { + length--; + } else { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + } + + /* code added for copybreak, this should improve + * performance for small packets with large amounts + * of reassembly being done in the stack */ + rtskb_put(skb, length); + + /* end copybreak code */ + + /* Receive Checksum Offload */ + 
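/* the status byte is passed in bits 0-7 and the error byte in bits 24-31 of one word; e1000_rx_checksum() unpacks them the same way */ + 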
e1000_rx_checksum(adapter, + (uint32_t)(status) | + ((uint32_t)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + skb->protocol = rt_eth_type_trans(skb, netdev); + skb->time_stamp = *time_stamp; + rtnetif_rx(skb); + adapter->data_received = 1; // Set flag for the main interrupt routine + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + return cleaned; +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ + +static void +e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct rtnet_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct rtskb *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + if (!(skb = buffer_info->skb)) + skb = rtnetdev_alloc_rtskb(netdev, bufsz); + else { + rtskb_trim(skb, 0); + goto map_skb; + } + + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + struct rtskb *oldskb = skb; + DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " + "at %p\n", bufsz, skb->data); + /* Try again, without freeing the previous */ + skb = rtnetdev_alloc_rtskb(netdev, bufsz); + /* Failed allocation, critical failure */ + if (!skb) { + kfree_rtskb(oldskb); + break; + } + + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + /* give up */ + kfree_rtskb(skb); + kfree_rtskb(oldskb); + break; /* while !buffer_info->skb */ + } else { + /* Use new allocation */ + kfree_rtskb(oldskb); + } + } + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + rtskb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + DPRINTK(RX_ERR, ERR, + "dma align check failed: %u bytes at %p\n", + adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + kfree_rtskb(skb); + buffer_info->skb = NULL; + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + break; /* while !buffer_info->skb */ + } + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to 
complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. + * @adapter: board private structure + **/ + +static void +e1000_smartspeed(struct e1000_adapter *adapter) +{ + uint16_t phy_status; + uint16_t phy_ctrl; + + if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || + !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back */ + e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(&adapter->hw) && + !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(&adapter->hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(&adapter->hw) && + !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + + + +void +e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; +#ifdef HAVE_PCI_SET_MWI + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + DPRINTK(PROBE, ERR, "Error in setting MWI\n"); +#else + pci_write_config_word(adapter->pdev, PCI_COMMAND, + adapter->hw.pci_cmd_word | + PCI_COMMAND_INVALIDATE); +#endif +} + +void +e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + +#ifdef HAVE_PCI_SET_MWI + pci_clear_mwi(adapter->pdev); +#else + pci_write_config_word(adapter->pdev, PCI_COMMAND, + adapter->hw.pci_cmd_word & + ~PCI_COMMAND_INVALIDATE); +#endif +} + +void +e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) +{ + struct e1000_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void +e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) +{ + struct e1000_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +uint32_t +e1000_io_read(struct e1000_hw *hw, unsigned long port) +{ + return inl(port); +} + +void +e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) +{ + outl(value, port); +} + + +int +e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) +{ + adapter->hw.autoneg = 0; + + /* Fiber NICs only allow 1000 Mbps full duplex */ + if ((adapter->hw.media_type == e1000_media_type_fiber) && + spddplx != (SPEED_1000 + 
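/* spddplx is the arithmetic sum of a SPEED_* value and a DUPLEX_* code, as in the switch cases below */ 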
DUPLEX_FULL)) { + DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; + } + + switch (spddplx) { + case SPEED_10 + DUPLEX_HALF: + adapter->hw.forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + adapter->hw.forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + adapter->hw.forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + adapter->hw.forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + adapter->hw.autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; + } + return 0; +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h new file mode 100644 index 0000000..8de3048 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h @@ -0,0 +1,148 @@ +/******************************************************************************* + + + Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <asm/io.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include "kcompat.h" + +#define usec_delay(x) udelay(x) +#ifndef msec_delay +#define msec_delay(x) do { if(in_interrupt()) { \ + /* Don't mdelay in interrupt context! */ \ + BUG(); \ + } else { \ + msleep(x); \ + } } while (0) + +/* Some workarounds require millisecond delays and are run during interrupt + * context. Most notably, when establishing link, the phy may need tweaking + * but cannot process phy register reads/writes faster than millisecond + * intervals...and we establish link due to a "link status change" interrupt. + */ +#define msec_delay_irq(x) mdelay(x) +#endif + +#define PCI_COMMAND_REGISTER PCI_COMMAND +#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE + +typedef enum { +#undef FALSE + FALSE = 0, +#undef TRUE + TRUE = 1 +} boolean_t; + +#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) + +#ifdef DBG +#define DEBUGOUT(S) printk(KERN_DEBUG S "\n") +#define DEBUGOUT1(S, A...) 
printk(KERN_DEBUG S "\n", A) +#else +#define DEBUGOUT(S) +#define DEBUGOUT1(S, A...) +#endif + +#define DEBUGFUNC(F) DEBUGOUT(F) +#define DEBUGOUT2 DEBUGOUT1 +#define DEBUGOUT3 DEBUGOUT2 +#define DEBUGOUT7 DEBUGOUT3 + +#ifdef __BIG_ENDIAN +#define E1000_BIG_ENDIAN __BIG_ENDIAN +#endif + +#define E1000_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_READ_REG(a, reg) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) + +#define E1000_WRITE_ICH8_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH8_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH8_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH8_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + + +#endif /* _E1000_OSDEP_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c new file mode 100644 index 0000000..42e94d5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c @@ -0,0 +1,906 @@ +/******************************************************************************* + + + Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. 
+ + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when e1000_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labeled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. + */ + +#define E1000_PARAM(X, desc) \ + static const int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(E1000_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static int num_##X = 0; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ + +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ + +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ + +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ + +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. 
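+ * In mask terms: bit 0 = 10 Half, bit 1 = 10 Full, bit 2 = 100 Half, + * bit 3 = 100 Full and bit 5 = 1000 Full (bit 4, 1000 Half, is not + * supported), which is why the copper default and mask are 0x2F.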
+ * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ + +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ + +E1000_PARAM(FlowControl, "Flow Control setting"); + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ + +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + * + * Default Value: 0 for rtnet + */ + +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + * + * Default Value: 0 + */ + +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); + +/* Receive Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + * + * Default Value: 0 + */ + +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + * + * Default Value: 0 for rtnet + */ + +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic) + * + * Default Value: 0 for rtnet + */ + +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ + +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +/* Enable Kumeran Lock Loss workaround + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ + +E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); + +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +#define DEFAULT_RADV 0 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +#define DEFAULT_TIDV 0 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +#define DEFAULT_TADV 0 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +#define DEFAULT_ITR 0 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(int *value, struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name); + return 0; + 
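/* values other than 0/1 drop out of this switch and are reported as invalid below */ + 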
} + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + DPRINTK(PROBE, INFO, + "%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + DPRINTK(PROBE, INFO, "%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ + +void e1000_check_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (bd >= E1000_MAX_NIC) { + DPRINTK(PROBE, NOTICE, + "Warning: no configuration for board #%i\n", bd); + DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); +#ifndef module_param_array + bd = E1000_MAX_NIC; +#endif + } + + { /* Transmit Descriptor Count */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { .min = E1000_MIN_TXD }} + }; + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + opt.arg.r.max = mac_type < e1000_82544 ? + E1000_MAX_TXD : E1000_MAX_82544_TXD; + +#ifdef module_param_array + if (num_TxDescriptors > bd) { +#endif + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + E1000_ROUNDUP(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); +#ifdef module_param_array + } else { + tx_ring->count = opt.def; + } +#endif + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { .min = E1000_MIN_RXD }} + }; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + opt.arg.r.max = mac_type < e1000_82544 ? 
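/* pre-82544 controllers take at most 256 descriptors, later ones 4096 */ 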
E1000_MAX_RXD : + E1000_MAX_82544_RXD; + +#ifdef module_param_array + if (num_RxDescriptors > bd) { +#endif + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + E1000_ROUNDUP(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); +#ifdef module_param_array + } else { + rx_ring->count = opt.def; + } +#endif + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + struct e1000_option opt = { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_XsumRX > bd) { +#endif + int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; +#ifdef module_param_array + } else { + adapter->rx_csum = opt.def; + } +#endif + } + { /* Flow Control */ + + struct e1000_opt_list fc_list[] = + {{ e1000_fc_none, "Flow Control Disabled" }, + { e1000_fc_rx_pause,"Flow Control Receive Only" }, + { e1000_fc_tx_pause,"Flow Control Transmit Only" }, + { e1000_fc_full, "Flow Control Enabled" }, + { e1000_fc_default, "Flow Control Hardware Default" }}; + + struct e1000_option opt = { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = e1000_fc_default, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + +#ifdef module_param_array + if (num_FlowControl > bd) { +#endif + int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; +#ifdef module_param_array + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } +#endif + } + { /* Transmit Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + +#ifdef module_param_array + if (num_TxIntDelay > bd) { +#endif + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->tx_int_delay = opt.def; + } +#endif + } + { /* Transmit Absolute Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + +#ifdef module_param_array + if (num_TxAbsIntDelay > bd) { +#endif + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->tx_abs_int_delay = opt.def; + } +#endif + } + { /* Receive Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + +#ifdef module_param_array + if (num_RxIntDelay > bd) { +#endif + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->rx_int_delay = opt.def; + } +#endif + } + { /* Receive Absolute Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " 
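/*
 * A minimal sketch (hypothetical helper): the Tx/Rx delay options above
 * program registers that tick in 1.024 microsecond units, so a
 * wall-clock delay converts as below; e.g. 128 us -> 125 ticks.
 */
static inline u32 e1000_usec_to_delay_ticks(u32 usec)
{
	return (usec * 1000) / 1024;
}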
__MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + +#ifdef module_param_array + if (num_RxAbsIntDelay > bd) { +#endif + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->rx_abs_int_delay = opt.def; + } +#endif + } + { /* Interrupt Throttling Rate */ + struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + break; + case 1: + DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", + opt.name); + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + break; + } +#ifdef module_param_array + } else { + adapter->itr = opt.def; + } +#endif + } + { /* Smart Power Down */ + struct e1000_option opt = { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + +#ifdef module_param_array + if (num_SmartPowerDownEnable > bd) { +#endif + int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; +#ifdef module_param_array + } else { + adapter->smart_power_down = opt.def; + } +#endif + } + { /* Kumeran Lock Loss Workaround */ + struct e1000_option opt = { + .type = enable_option, + .name = "Kumeran Lock Loss Workaround", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_KumeranLockLoss > bd) { +#endif + int kmrn_lock_loss = KumeranLockLoss[bd]; + e1000_validate_option(&kmrn_lock_loss, &opt, adapter); + adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss; +#ifdef module_param_array + } else { + adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def; + } +#endif + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ + +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; +#ifndef module_param_array + bd = bd > E1000_MAX_NIC ? 
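/*
 * A minimal sketch (hypothetical helper) of the InterruptThrottleRate
 * contract enforced above: 0 and 1 are special (off / dynamic) and skip
 * range validation; anything else must fall within 100..100000
 * interrupts/sec, i.e. an interrupt interval between 10 us and 10 ms.
 */
static inline int e1000_itr_is_special(int itr)
{
	return itr == 0 || itr == 1;	/* 0 = off, 1 = dynamic mode */
}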
E1000_MAX_NIC : bd; + if ((Speed[bd] != OPTION_UNSET)) { +#else + if (num_Speed > bd) { +#endif + DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " + "parameter ignored\n"); + } + +#ifndef module_param_array + if ((Duplex[bd] != OPTION_UNSET)) { +#else + if (num_Duplex > bd) { +#endif + DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " + "parameter ignored\n"); + } + +#ifndef module_param_array + if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) { +#else + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { +#endif + DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " + "not valid for fiber adapters, " + "parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ + +static void e1000_check_copper_options(struct e1000_adapter *adapter) +{ + int speed, dplx, an; + int bd = adapter->bd_number; +#ifndef module_param_array + bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; +#endif + + { /* Speed */ + struct e1000_opt_list speed_list[] = {{ 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + struct e1000_option opt = { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + +#ifdef module_param_array + if (num_Speed > bd) { +#endif + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); +#ifdef module_param_array + } else { + speed = opt.def; + } +#endif + } + { /* Duplex */ + struct e1000_opt_list dplx_list[] = {{ 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + struct e1000_option opt = { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (e1000_check_phy_reset_block(&adapter->hw)) { + DPRINTK(PROBE, INFO, + "Link active due to SoL/IDER Session. 
" + "Speed/Duplex/AutoNeg parameter ignored.\n"); + return; + } +#ifdef module_param_array + if (num_Duplex > bd) { +#endif + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); +#ifdef module_param_array + } else { + dplx = opt.def; + } +#endif + } + +#ifdef module_param_array + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { +#else + if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) { +#endif + DPRINTK(PROBE, INFO, + "AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + struct e1000_option opt = { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + +#ifdef module_param_array + if (num_AutoNeg > bd) { +#endif + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); +#ifdef module_param_array + } else { + an = opt.def; + } +#endif + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; +#ifdef module_param_array + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) +#else + if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET) +#endif + DPRINTK(PROBE, INFO, + "Speed and duplex autonegotiation enabled\n"); + break; + case HALF_DUPLEX: + DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at " + "Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at " + "Full Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + DPRINTK(PROBE, INFO, "10 Mbps Speed specified " + "without Duplex\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + 
DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + DPRINTK(PROBE, INFO, "100 Mbps Speed specified " + "without Duplex\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at " + "100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without " + "Duplex\n"); + DPRINTK(PROBE, INFO, + "Using Autonegotiation at 1000 Mbps " + "Full Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + HALF_DUPLEX: + DPRINTK(PROBE, INFO, + "Half Duplex is not supported at 1000 Mbps\n"); + DPRINTK(PROBE, INFO, + "Using Autonegotiation at 1000 Mbps " + "Full Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + FULL_DUPLEX: + DPRINTK(PROBE, INFO, + "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + DPRINTK(PROBE, INFO, + "Speed, AutoNeg and MDI-X specifications are " + "incompatible. Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h new file mode 100644 index 0000000..0acb218 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h @@ -0,0 +1,446 @@ +/******************************************************************************* + + + Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#include <linux/version.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/pagemap.h> +#include <linux/list.h> +#include <linux/sched.h> +#include <asm/io.h> + +#include <rtnet_port.h> + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +/* Useful settings for rtnet */ +#undef MAX_SKB_FRAGS +#undef NETIF_F_TSO +#undef E1000_COUNT_ICR +#undef NETIF_F_HW_VLAN_TX +#undef CONFIG_NET_POLL_CONTROLLER +#undef ETHTOOL_OPS_COMPAT +#undef ETHTOOL_GPERMADDR + +#ifndef HAVE_FREE_NETDEV +#define free_netdev(x) kfree(x) +#endif + +#undef E1000_NAPI +#undef CONFIG_E1000_NAPI + +#undef CONFIG_E1000_DISABLE_PACKET_SPLIT +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 + + +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif + +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif +#undef CONFIG_PM + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +/*****************************************************************************/ +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif +/*****************************************************************************/ + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + uint32_t cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + uint32_t n_stats; + uint32_t testinfo_len; + uint32_t eedump_len; + uint32_t regdump_len; +}; + +struct ethtool_stats { + uint32_t cmd; + uint32_t n_stats; + uint64_t data[0]; +}; + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), +}; +struct ethtool_test { + uint32_t cmd; + uint32_t flags; + uint32_t reserved; + uint32_t len; + uint64_t data[0]; +}; +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + uint32_t cmd; + uint32_t magic; + uint32_t offset; + uint32_t len; + uint8_t data[0]; +}; + +struct ethtool_value { + uint32_t cmd; + uint32_t data; +}; + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* Ethtool version without link support */ +#endif /* Ethtool version without eeprom support */ +#endif /* Ethtool version without test support */ +#endif /* Ethtool version without strings support */ +#endif /* Ethtool version wihtout adapter id support */ +#endif /* Ethtool version without statistics support */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. 
+ */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autonet' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
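/*
 * Illustrative use of the compat ethtool_coalesce layout above (values
 * invented, variable name hypothetical): fire an Rx interrupt after
 * 50 us or 32 frames, whichever comes first.
 */
static struct _kc_ethtool_coalesce example_coalesce = {
	.cmd = ETHTOOL_SCOALESCE,
	.rx_coalesce_usecs = 50,
	.rx_max_coalesced_frames = 32,
	.stats_block_coalesce_usecs = 1000000,	/* must not be zero */
	.rate_sample_interval = 2,		/* must not be zero */
};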
*/ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 /* driver took care of the packet */ +#endif + +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 /* driver tx path was busy */ +#endif + +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ +#endif + +/* if we do not have the infrastructure to detect if skb_header is cloned * + * just return false in all cases */ +#ifndef SKB_DATAREF_SHIFT +#define skb_header_cloned(x) 0 +#endif /* SKB_DATAREF_SHIFT not defined */ + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#define USE_DRIVER_SHUTDOWN_HANDLER + +#ifndef SA_PROBEIRQ +#define SA_PROBEIRQ 0 +#endif + +#endif /* _KCOMPAT_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c new file mode 100644 index 0000000..e1159e5 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c @@ -0,0 +1,1515 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 80003ES2LAN Gigabit Ethernet Controller (Copper) + * 80003ES2LAN Gigabit Ethernet Controller (Serdes) + */ + +#include "e1000.h" + +#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 +#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 +#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 +#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F + +#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 +#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 +#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 + +#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 +#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 +#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 + +#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C +#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 + +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ +#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 + +#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 +#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 + +/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 + /* 1=Reverse Auto-Negotiation */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M + 1 = 50-80M + 2 = 80-110M + 3 = 110-140M + 4 = >140M */ + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Max number of times Kumeran read/write should be validated */ +#define GG82563_MAX_KMRN_RETRY 0x5 + +/* Power Management Control Register (Page 193, Register 20) */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 + /* 1=Enable SERDES Electrical Idle */ + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ + +/* + * A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". 
+ */ +static const u16 e1000_gg82563_cable_length_table[] = { + 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; +#define GG82563_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_gg82563_cable_length_table) + +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); +static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw); +static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data); +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } else { + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000e_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; + + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + + return 0; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. 
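/*
 * Worked example of the paired-table layout above: the five minimums sit
 * at indices 0-4 and the matching maximums at indices 5-9, so DSP
 * distance code 1 gives min = table[1] = 60 m and max = table[6] = 115 m
 * in e1000_get_cable_length_80003es2lan() further down.
 */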
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_mac_operations *func = &mac->ops; + + /* Set media type */ + switch (adapter->pdev->device) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = + (er32(FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + /* Adaptive IFS not supported */ + mac->adaptive_ifs = false; + + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; + func->check_for_link = e1000e_check_for_copper_link; + break; + case e1000_media_type_fiber: + func->setup_physical_interface = e1000e_setup_fiber_serdes_link; + func->check_for_link = e1000e_check_for_fiber_link; + break; + case e1000_media_type_internal_serdes: + func->setup_physical_interface = e1000e_setup_fiber_serdes_link; + func->check_for_link = e1000e_check_for_serdes_link; + break; + default: + return -E1000_ERR_CONFIG; + break; + } + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return 0; +} + +static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_80003es2lan(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_80003es2lan(hw); + if (rc) + return rc; + + return 0; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. + **/ +static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. + **/ +static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the Kumeran interface. 
+ * + **/ +static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the Kumeran interface + **/ +static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. + **/ +static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + return ret_val; + + ret_val = e1000e_acquire_nvm(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. + **/ +static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 i = 0; + s32 timeout = 50; + + while (i < timeout) { + if (e1000e_get_hw_semaphore(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = er32(SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000e_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); + + return 0; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (e1000e_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = er32(SW_FW_SYNC); + swfw_sync &= ~mask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. 
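/*
 * A minimal sketch (hypothetical helper) of the SW_FW_SYNC word driven
 * by the acquire/release pair above: software claim bits occupy the low
 * 16 bits and the firmware's mirror bits the high 16 (fwmask =
 * mask << 16), so a resource is free only when both are clear.
 */
static inline int swfw_sync_is_free(u32 swfw_sync, u16 mask)
{
	return !(swfw_sync & ((u32)mask | ((u32)mask << 16)));
}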
+ **/ +static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* + * Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { + /* + * The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + ret_val = -E1000_ERR_PHY; + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + udelay(200); + + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + } else { + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. + **/ +static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* + * Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { + /* + * The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + udelay(200); + + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + } else { + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. 
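/*
 * A minimal sketch (hypothetical helper): how the paged accessors above
 * split a combined GG82563 offset, assuming GG82563_REG() packs the page
 * number above GG82563_PAGE_SHIFT; registers 30/31 additionally go
 * through the alternate page-select register.
 */
static inline void gg82563_split_offset(u32 offset, u16 *page, u16 *reg)
{
	*page = (u16)offset >> GG82563_PAGE_SHIFT;
	*reg = offset & MAX_PHY_REG_ADDRESS;
}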
+ **/ +static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return e1000e_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. + **/ +static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (er32(EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("GG82563 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= MII_CR_RESET; + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (hw->phy.autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link " + "on GG82563 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* + * We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* + * Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* + * In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. 
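/*
 * A minimal sketch (hypothetical helper) mirroring the TX_CLK fix-up in
 * e1000_phy_force_speed_duplex_80003es2lan() above: a PHY reset clobbers
 * the Tx clock selection, so it is restored to 2.5 MHz when forcing
 * 10 Mb/s and 25 MHz otherwise.
 */
static inline u16 gg82563_tx_clk_bits(int forced_10mbps)
{
	return forced_10mbps ? GG82563_MSCR_TX_CLK_10MBPS_2_5
			     : GG82563_MSCR_TX_CLK_100MBPS_25;
}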
+ **/ +static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + goto out; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + **/ +static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000e_get_speed_and_duplex_copper(hw, + speed, + duplex); + hw->phy.ops.cfg_on_link_up(hw); + } else { + ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, + speed, + duplex); + } + + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + **/ +static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + ctrl = er32(CTRL); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + e1000_release_phy_80003es2lan(hw); + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + er32(ICR); + + ret_val = e1000_check_alt_mac_addr_generic(hw); + + return ret_val; +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. + **/ +static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 kum_reg_data; + u16 i; + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) + e_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. 
*/ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000e_setup_link(hw); + + /* Disable IBIST slave mode (far-end loopback) */ + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + reg_data = er32(TXDCTL(1)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(1), reg_data); + + /* Enable retransmit on late collisions */ + reg_data = er32(TCTL); + reg_data |= E1000_TCTL_RTLC; + ew32(TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = er32(TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + ew32(TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = er32(TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* default to true to enable the MDIC W/A */ + hw->dev_spec.e80003es2lan.mdic_wa_enable = true; + + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, + &i); + if (!ret_val) { + if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + hw->dev_spec.e80003es2lan.mdic_wa_enable = false; + } + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
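/*
 * A minimal sketch (hypothetical helper) of the decision taken in
 * e1000_init_hw_80003es2lan() above: the 200 us MDIC "page select"
 * workaround defaults to enabled and is turned off only when the Kumeran
 * opmode reports in-band MDIO.
 */
static inline int mdic_workaround_needed(u16 kmrn_opmode)
{
	return (kmrn_opmode & E1000_KMRNCTRLSTA_OPMODE_MASK) !=
		E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO;
}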
+ **/ +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->phy.media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC(1), reg); +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl_ext; + u16 data; + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Bypass Rx and Tx FIFO's */ + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, + E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + if (ret_val) + return ret_val; + + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, + &data); + if (ret_val) + return ret_val; + data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, + data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); + if (ret_val) + return ret_val; + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); + ew32(CTRL_EXT, ctrl_ext); + + ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); + if (ret_val) + return ret_val; + + /* + * Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already 
initialized them. We only initialize + * them if the HW is not in IAMT mode. + */ + if (!e1000e_check_mng_mode(hw)) { + /* Enable Electrical Idle on the PHY */ + data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; + ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data); + if (ret_val) + return ret_val; + } + + /* + * Workaround: Disable padding in Kumeran interface in the MAC + * and in the PHY to avoid CRC errors. + */ + ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_ICR_DIS_PADDING; + ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data); + if (ret_val) + return ret_val; + + return 0; +} + +/** + * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2 + * @hw: pointer to the HW structure + * + * Essentially a wrapper for setting up all things "copper" related. + * This is a function pointer entry point called by the mac module. + **/ +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* + * Set the mac to wait the maximum time between each + * iteration and increase the max iterations when + * polling the phy; this fixes erroneous timeouts at 10Mbps. + */ + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), + 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), + &reg_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), + reg_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + &reg_data); + if (ret_val) + return ret_val; + reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + reg_data); + if (ret_val) + return ret_val; + + ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw); + if (ret_val) + return ret_val; + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up + * @hw: pointer to the HW structure + * + * Configure the KMRN interface by applying last minute quirks for + * 10/100 operation. + **/ +static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 speed; + u16 duplex; + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed, + &duplex); + if (ret_val) + return ret_val; + + if (speed == SPEED_1000) + ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); + else + ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex); + } + + return ret_val; +} + +/** + * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation + * @hw: pointer to the HW structure + * @duplex: current duplex setting + * + * Configure the KMRN interface by applying last minute quirks for + * 10/100 operation.
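+ * + * [Editor's note, an illustrative summary, not part of the original + * patch: the "quirks" below are (1) loading the Kumeran HD_CTRL + * register with its 10/100 default, (2) reloading the IPGT field of + * TIPG with the 10/100 default, and (3) passing false carrier + * indications only at half duplex, clearing + * GG82563_KMCR_PASS_FALSE_CARRIER otherwise.]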
+ **/ +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) +{ + s32 ret_val; + u32 tipg; + u32 i = 0; + u16 reg_data, reg_data2; + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = er32(TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN; + ew32(TIPG, tipg); + + do { + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2); + if (ret_val) + return ret_val; + i++; + } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); + + if (duplex == HALF_DUPLEX) + reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; + else + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + + ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); + + return ret_val; +} + +/** + * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation + * @hw: pointer to the HW structure + * + * Configure the KMRN interface by applying last minute quirks for + * gigabit operation. + **/ +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data, reg_data2; + u32 tipg; + u32 i = 0; + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = er32(TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, tipg); + + do { + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2); + if (ret_val) + return ret_val; + i++; + } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); + + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); + + return ret_val; +} + +/** + * e1000_read_kmrn_reg_80003es2lan - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquire semaphore, then read the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release the semaphore before exiting. + **/ +static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + u32 kmrnctrlsta; + s32 ret_val = 0; + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + kmrnctrlsta = er32(KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_kmrn_reg_80003es2lan - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquire semaphore, then write the data to PHY register + * at the offset using the kumeran interface. Release semaphore + * before exiting.
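+ * + * [Editor's note, an illustrative worked example, not part of the + * original patch: on both the read and write paths the Kumeran window + * packs the register offset into KMRNCTRLSTA bits 20:16 and the + * payload into bits 15:0, with bit 21 (REN) requesting a read. For + * example, writing 0x003F to offset 0x09 issues + * ((0x09 << 16) & 0x001F0000) | 0x003F = 0x0009003F, and the 2us + * udelay gives the MAC time to complete the transaction before the + * result, if any, is read back.]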
+ **/ +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val = 0; + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_read_mac_addr_80003es2lan - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static const struct e1000_mac_operations es2_mac_ops = { + .read_mac_addr = e1000_read_mac_addr_80003es2lan, + .id_led_init = e1000e_id_led_init, + .blink_led = e1000e_blink_led_generic, + .check_mng_mode = e1000e_check_mng_mode_generic, + /* check_for_link dependent on media type */ + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + .get_link_up_info = e1000_get_link_up_info_80003es2lan, + .led_on = e1000e_led_on_generic, + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_generic, + .reset_hw = e1000_reset_hw_80003es2lan, + .init_hw = e1000_init_hw_80003es2lan, + .setup_link = e1000e_setup_link, + /* setup_physical_interface dependent on media type */ + .setup_led = e1000e_setup_led_generic, +}; + +static const struct e1000_phy_operations es2_phy_ops = { + .acquire = e1000_acquire_phy_80003es2lan, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = 
e1000_phy_force_speed_duplex_80003es2lan, + .get_cfg_done = e1000_get_cfg_done_80003es2lan, + .get_cable_length = e1000_get_cable_length_80003es2lan, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, + .release = e1000_release_phy_80003es2lan, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = NULL, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, + .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, +}; + +static const struct e1000_nvm_operations es2_nvm_ops = { + .acquire = e1000_acquire_nvm_80003es2lan, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_80003es2lan, + .update = e1000e_update_nvm_checksum_generic, + .valid_led_default = e1000e_valid_led_default, + .validate = e1000e_validate_nvm_checksum_generic, + .write = e1000_write_nvm_80003es2lan, +}; + +const struct e1000_info e1000_es2_info = { + .mac = e1000_80003es2lan, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_RX_NEEDS_RESTART /* errata */ + | FLAG_TARC_SET_BIT_ZERO /* errata */ + | FLAG_APME_CHECK_PORT_B + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, + .flags2 = FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_80003es2lan, + .mac_ops = &es2_mac_ops, + .phy_ops = &es2_phy_ops, + .nvm_ops = &es2_nvm_ops, +}; + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c new file mode 100644 index 0000000..1a3fa39 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c @@ -0,0 +1,2112 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Copper) + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82571EB Dual Port Gigabit Mezzanine Adapter + * 82571EB Quad Port Gigabit Mezzanine Adapter + * 82571PT Gigabit PT Quad Port Server ExpressModule + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + * 82574L Gigabit Network Connection + * 82583V Gigabit Network Connection + */ + +#include "e1000.h" + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF + +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ + +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +static s32 e1000_setup_link_82571(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); +static void e1000_clear_vfta_82571(struct e1000_hw *hw); +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); +static s32 e1000_led_on_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. 
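+ * + * [Editor's note, illustrative, not part of the original patch: the + * switch below maps controller to PHY driver. 82571/82572 use the + * IGP2 PHY, 82573 the M88 PHY, and 82574/82583 the BM PHY, with the + * 82574-class parts also getting their own semaphore and LPLU + * helpers.]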
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + break; + case e1000_82574: + case e1000_82583: + phy->type = e1000_phy_bm; + phy->ops.acquire = e1000_get_hw_semaphore_82574; + phy->ops.release = e1000_put_hw_semaphore_82574; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; + break; + default: + return -E1000_ERR_PHY; + break; + } + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id_82571(hw); + if (ret_val) { + e_dbg("Error getting PHY ID\n"); + return ret_val; + } + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82574: + case e1000_82583: + if (phy->id != BME1000_E_PHY_ID_R2) + ret_val = -E1000_ERR_PHY; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); + + return ret_val; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* + * Autonomous Flash update bit must be cleared due + * to Flash update issue. + */ + eecd &= ~E1000_EECD_AUPDEN; + ew32(EECD, eecd); + break; + } + fallthrough; + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + break; + } + + /* Function Pointers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + nvm->ops.acquire = e1000_get_hw_semaphore_82574; + nvm->ops.release = e1000_put_hw_semaphore_82574; + break; + default: + break; + } + + return 0; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. 
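+ * + * [Editor's note, illustrative, not part of the original patch: the + * media type is inferred from the PCI device ID (fiber and SerDes IDs + * are listed explicitly, everything else defaults to copper), and + * setup_physical_interface, check_for_link and get_link_up_info are + * then wired per media type.]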
+ * @adapter: board private structure + **/ +static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_mac_operations *func = &mac->ops; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; + + /* Set media type */ + switch (adapter->pdev->device) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82572EI_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + func->setup_physical_interface = e1000_setup_copper_link_82571; + func->check_for_link = e1000e_check_for_copper_link; + func->get_link_up_info = e1000e_get_speed_and_duplex_copper; + break; + case e1000_media_type_fiber: + func->setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000e_check_for_fiber_link; + func->get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + case e1000_media_type_internal_serdes: + func->setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000_check_for_serdes_link_82571; + func->get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + default: + return -E1000_ERR_CONFIG; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + func->set_lan_id = e1000_set_lan_id_single_port; + func->check_mng_mode = e1000e_check_mng_mode_generic; + func->led_on = e1000e_led_on_generic; + func->blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + /* + * ARC supported; valid only if manageability features are + * enabled. + */ + mac->arc_subsystem_valid = + (er32(FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + break; + case e1000_82574: + case e1000_82583: + func->set_lan_id = e1000_set_lan_id_single_port; + func->check_mng_mode = e1000_check_mng_mode_82574; + func->led_on = e1000_led_on_82574; + break; + default: + func->check_mng_mode = e1000e_check_mng_mode_generic; + func->led_on = e1000e_led_on_generic; + func->blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + break; + } + + /* + * Ensure that the inter-port SWSM.SMBI lock bit is clear before + * first NVM or PHY access. This should be done for single-port + * devices, and for one port only on dual-port devices so that + * for those devices we can still use the SMBI lock to synchronize + * inter-port accesses to the PHY & NVM.
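+ * + * [Editor's note, an illustrative sketch, not part of the original + * patch: the logic below amounts to + * + * if (82571/82572 && !(SWSM2 & E1000_SWSM2_LOCK)) { + * SWSM2 |= LOCK; force_clear_smbi = true; (first port up) + * } else if (82571/82572) { + * force_clear_smbi = false; (other port owns the lock) + * } else { + * force_clear_smbi = true; (single-port device) + * } + * + * followed by clearing a stale SWSM.SMBI bit whenever + * force_clear_smbi is set, so a lock leaked by a boot agent cannot + * block later PHY/NVM accesses.]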
+ */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + swsm2 = er32(SWSM2); + + if (!(swsm2 & E1000_SWSM2_LOCK)) { + /* Only do this for the first interface on this card */ + ew32(SWSM2, + swsm2 | E1000_SWSM2_LOCK); + force_clear_smbi = true; + } else + force_clear_smbi = false; + break; + default: + force_clear_smbi = true; + break; + } + + if (force_clear_smbi) { + /* Make sure SWSM.SMBI is clear */ + swsm = er32(SWSM); + if (swsm & E1000_SWSM_SMBI) { + /* This bit should not be set on a first interface, and + * indicates that the bootagent or EFI code has + * improperly left this bit enabled + */ + e_dbg("Please update your 82571 Bootagent\n"); + } + ew32(SWSM, swsm & ~E1000_SWSM_SMBI); + } + + /* + * Initialize device specific counter of SMBI acquisition + * timeouts. + */ + hw->dev_spec.e82571.smb_counter = 0; + + return 0; +} + +static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + static int global_quad_port_a; /* global port a indication */ + struct pci_dev *pdev = adapter->pdev; + int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; + s32 rc; + + rc = e1000_init_mac_params_82571(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_82571(hw); + if (rc) + return rc; + + /* tag quad port adapters first, it's used below */ + switch (pdev->device) { + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + adapter->flags |= FLAG_IS_QUAD_PORT; + /* mark the first port */ + if (global_quad_port_a == 0) + adapter->flags |= FLAG_IS_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + global_quad_port_a++; + if (global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + break; + } + + switch (adapter->hw.mac.type) { + case e1000_82571: + /* these dual ports don't have WoL on port B at all */ + if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || + (pdev->device == E1000_DEV_ID_82571EB_SERDES) || + (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && + (is_port_b)) + adapter->flags &= ~FLAG_HAS_WOL; + /* quad ports only support WoL on port A */ + if (adapter->flags & FLAG_IS_QUAD_PORT && + (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) + adapter->flags &= ~FLAG_HAS_WOL; + /* Does not support WoL on any port */ + if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) + adapter->flags &= ~FLAG_HAS_WOL; + break; + case e1000_82573: + if (pdev->device == E1000_DEV_ID_82573L) { + adapter->flags |= FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = DEFAULT_JUMBO; + } + break; + default: + break; + } + + return 0; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id = 0; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
+ */ + phy->id = IGP01E1000_I_PHY_ID; + break; + case e1000_82573: + return e1000e_get_phy_id(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + s32 sw_timeout = hw->nvm.word_size + 1; + s32 fw_timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* + * If we have timed out 3 times trying to acquire + * the inter-port SMBI semaphore, there is old code + * operating on the other port, and it is not + * releasing SMBI. Modify the number of times that + * we try for the semaphore to interwork with this + * older code. + */ + if (hw->dev_spec.e82571.smb_counter > 2) + sw_timeout = 1; + + /* Get the SW semaphore */ + while (i < sw_timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == sw_timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + hw->dev_spec.e82571.smb_counter++; + } + /* Get the FW semaphore. */ + for (i = 0; i < fw_timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == fw_timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_82571(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82571 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} +/** + * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore during reset. + * + **/ +static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + s32 ret_val = 0; + s32 i = 0; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + do { + ew32(EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + usleep_range(2000, 4000); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + + if (i == MDIO_OWNERSHIP_TIMEOUT) { + /* Release semaphores */ + e1000_put_hw_semaphore_82573(hw); + e_dbg("Driver can't access the PHY\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82573 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used during reset.
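+ * + * [Editor's note, illustrative, not part of the original patch: on + * 82574/82583 these 82573-style EXTCNF_CTRL.MDIO_SW_OWNERSHIP helpers + * are wrapped by e1000_get/put_hw_semaphore_82574 below, which + * additionally hold the driver-global swflag_mutex so that only one + * kernel context at a time can own the PHY/NVM window.]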
+ * + **/ +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +static DEFINE_MUTEX(swflag_mutex); + +/** + * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM. + * + **/ +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) +{ + s32 ret_val; + + mutex_lock(&swflag_mutex); + ret_val = e1000_get_hw_semaphore_82573(hw); + if (ret_val) + mutex_unlock(&swflag_mutex); + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82574 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + * + **/ +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) +{ + e1000_put_hw_semaphore_82573(hw); + mutex_unlock(&swflag_mutex); +} + +/** + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. + * LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = er32(POEMB); + + if (active) + data |= E1000_PHY_CTRL_D0A_LPLU; + else + data &= ~E1000_PHY_CTRL_D0A_LPLU; + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * The low power link up (lplu) state is set to the power management level D3 + * when active is true, else clear lplu for D3. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = er32(POEMB); + + if (!active) { + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_PHY_CTRL_NOND0A_LPLU; + } + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + switch (hw->mac.type) { + case e1000_82573: + break; + default: + ret_val = e1000e_acquire_nvm(hw); + break; + } + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/ +static void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000e_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* + * If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. + */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return ret_val; + + /* Check for pending operations. */ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* + * The enabling of and the actual reset must be done + * in two write cycles. + */ + ew32(HICR, E1000_HICR_FW_RESET_ENABLE); + e1e_flush(); + ew32(HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = er32(EECD) | E1000_EECD_FLUPD; + ew32(EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. 
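+ * + * [Editor's note, an illustrative worked example, not part of the + * original patch: each EEWR command word packs the data into bits + * 31:16 and the word address into the address field, then sets the + * START bit, so writing 0xBEEF to word 0x03 issues + * (0xBEEF << E1000_NVM_RW_REG_DATA) | (0x03 << E1000_NVM_RW_ADDR_SHIFT) + * | E1000_NVM_RW_REG_START, and completion is polled via + * e1000e_poll_eerd_eewr_done() both before and after the write.]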
+ * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eewr = 0; + s32 ret_val = 0; + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = (data[i] << E1000_NVM_RW_REG_DATA) | + ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START; + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + ew32(EEWR, eewr); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. + **/ +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + if (er32(EEMNGCTL) & + E1000_NVM_CFG_DONE_PORT_0) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
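+ * + * [Editor's note, illustrative, not part of the original patch: + * SmartSpeed is the IGP PHY's automatic downshift to a lower speed + * after repeated failed gigabit link attempts; below it is re-enabled + * only when phy->smart_speed == e1000_smart_speed_on, cleared when + * e1000_smart_speed_off, and left untouched in the default/auto case.]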
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +static s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + s32 ret_val; + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + /* + * Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. + */ + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_get_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1000_get_hw_semaphore_82574(hw); + break; + default: + break; + } + if (ret_val) + e_dbg("Cannot acquire MDIO ownership\n"); + + ctrl = er32(CTRL); + + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* Must release MDIO ownership and mutex after MAC reset. */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + e1000_put_hw_semaphore_82574(hw); + break; + default: + break; + } + + if (hw->nvm.type == e1000_nvm_flash_hw) { + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + } + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* + * Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + msleep(25); + break; + default: + break; + } + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + er32(ICR); + + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + e1000e_set_laa_state_82571(hw, true); + } + + /* Reinitialize the 82571 serdes link state machine */ + if (hw->phy.media_type == e1000_media_type_internal_serdes) + hw->mac.serdes_link_state = e1000_serdes_link_down; + + return 0; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
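+ * + * [Editor's note, an illustrative summary, not part of the original + * patch: the sequence below is errata bit setup, LED init (non-fatal + * on failure), VFTA clear, receive address setup (reserving the last + * RAR when a locally administered address is in use), MTA zeroing, + * link and flow-control setup, Tx descriptor write-back policy, and a + * final read of the statistics registers to clear them.]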
+ **/ +static s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) + e_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. */ + /* + * If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + e1000e_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000_setup_link_82571(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + switch (mac->type) { + case e1000_82573: + e1000e_enable_tx_pkt_filtering(hw); + fallthrough; + case e1000_82574: + case e1000_82583: + reg_data = er32(GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + ew32(GCR, reg_data); + break; + default: + reg_data = er32(TXDCTL(1)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL(1), reg_data); + break; + } + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
+ **/ +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + break; + default: + break; + } + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg &= ~((1 << 29) | (1 << 30)); + reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC(1), reg); + break; + default: + break; + } + + /* Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = er32(CTRL); + reg &= ~(1 << 29); + ew32(CTRL, reg); + break; + default: + break; + } + + /* Extended Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = er32(CTRL_EXT); + reg &= ~(1 << 23); + reg |= (1 << 22); + ew32(CTRL_EXT, reg); + break; + default: + break; + } + + if (hw->mac.type == e1000_82571) { + reg = er32(PBA_ECC); + reg |= E1000_PBA_ECC_CORR_EN; + ew32(PBA_ECC, reg); + } + /* + * Workaround for hardware errata. + * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572. + */ + + if ((hw->mac.type == e1000_82571) || + (hw->mac.type == e1000_82572)) { + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; + ew32(CTRL_EXT, reg); + } + + /* PCI-Ex Control Registers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + reg = er32(GCR); + reg |= (1 << 22); + ew32(GCR, reg); + + /* + * Workaround for hardware errata documented in the errata + * sheets: some error-prone or unreliable PCIe completions + * can occur, particularly with ASPM enabled, and without + * this fix the issue can cause Tx timeouts. + */ + reg = er32(GCR2); + reg |= 1; + ew32(GCR2, reg); + break; + default: + break; + } +} + +/** + * e1000_clear_vfta_82571 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +static void e1000_clear_vfta_82571(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->mng_cookie.vlan_id != 0) { + /* + * The VFTA is a 4096b bit-field, each identifying + * a single VLAN ID. The following operations + * determine which 32b entry (i.e. offset) into the + * array we want to set the VLAN ID (i.e. bit) of + * the manageability unit. + */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + break; + default: + break; + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* + * If the offset we want to clear matches the offset of the + * manageability VLAN ID, then clear all bits except that of + * the manageability unit. + */ + vfta_value = (offset == vfta_offset) ?
vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); + e1e_flush(); + } +} + +/** + * e1000_check_mng_mode_82574 - Check manageability is enabled + * @hw: pointer to the HW structure + * + * Reads the NVM Initialization Control Word 2 and returns true + * (>0) if any manageability is enabled, else false (0). + **/ +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw) +{ + u16 data; + + e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; +} + +/** + * e1000_led_on_82574 - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +static s32 e1000_led_on_82574(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + + ctrl = hw->mac.ledctl_mode2; + if (!(E1000_STATUS_LU & er32(STATUS))) { + /* + * If no link, then turn LED on by setting the invert bit + * for each LED that's "on" (0x0E) in ledctl_mode2. + */ + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8)); + } + ew32(LEDCTL, ctrl); + + return 0; +} + +/** + * e1000_check_phy_82574 - check 82574 phy hung state + * @hw: pointer to the HW structure + * + * Returns whether phy is hung or not + **/ +bool e1000_check_phy_82574(struct e1000_hw *hw) +{ + u16 status_1kbt = 0; + u16 receive_errors = 0; + bool phy_hung = false; + s32 ret_val = 0; + + /* + * Read the PHY Receive Error counter first; if it is at its max + * (all F's), read the Base1000T status register. If both are at + * their max, the PHY is hung. + */ + ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); + + if (ret_val) + goto out; + if (receive_errors == E1000_RECEIVE_ERROR_MAX) { + ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); + if (ret_val) + goto out; + if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == + E1000_IDLE_ERROR_COUNT_MASK) + phy_hung = true; + } +out: + return phy_hung; +} + +/** + * e1000_setup_link_82571 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_82571(struct e1000_hw *hw) +{ + /* + * 82573 does not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->fc.requested_mode == e1000_fc_default) + hw->fc.requested_mode = e1000_fc_full; + break; + default: + break; + } + + return e1000e_setup_link(hw); +} + +/** + * e1000_setup_copper_link_82571 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link; once link is established, collision distance and flow control + * are configured.
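+ * + * [Editor's note, illustrative, not part of the original patch: + * setting CTRL.SLU while clearing E1000_CTRL_FRCSPD and + * E1000_CTRL_FRCDPX, as done below, tells the MAC to bring the link + * up and to take speed and duplex from the PHY's autonegotiation + * result instead of forcing them.]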
+ **/ +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + case e1000_phy_bm: + ret_val = e1000e_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000e_copper_link_setup_igp(hw); + break; + default: + return -E1000_ERR_PHY; + break; + } + + if (ret_val) + return ret_val; + + ret_val = e1000e_setup_copper_link(hw); + + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. + **/ +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twiddling their thumbs + * if another tool failed to take it out of loopback mode. + */ + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000e_setup_fiber_serdes_link(hw); +} + +/** + * e1000_check_for_serdes_link_82571 - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Reports the link state as up or down. + * + * If autonegotiation is supported by the link partner, the link state is + * determined by the result of autonegotiation. This is the most likely case. + * If autonegotiation is not supported by the link partner, and the link + * has a valid signal, force the link up. + * + * The link state is represented internally here by 4 states: + * + * 1) down + * 2) autoneg_progress + * 3) autoneg_complete (the link successfully autonegotiated) + * 4) forced_up (the link has been forced up, it did not autonegotiate) + * + **/ +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + u32 txcw; + u32 i; + s32 ret_val = 0; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { + + /* Receiver is synchronized with no invalid bits. */ + switch (mac->serdes_link_state) { + case e1000_serdes_link_autoneg_complete: + if (!(status & E1000_STATUS_LU)) { + /* + * We have lost link, retry autoneg before + * reporting link failure + */ + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("AN_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_forced_up: + /* + * If we are receiving /C/ ordered sets, re-enable + * auto-negotiation in the TXCW register and disable + * forced link in the Device Control register in an + * attempt to auto-negotiate with our link partner. + * If the partner code word is null, stop forcing + * and restart auto negotiation. 
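+ * + * [Editor's note, illustrative, not part of the original patch: /C/ + * ordered sets show up as the E1000_RXCW_C bit, and a null code word + * as a clear E1000_RXCW_CW bit, in RXCW; that is exactly the + * condition tested below before TXCW autonegotiation is re-enabled + * and CTRL.SLU is dropped.]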
+ */ + if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { + /* Enable autoneg, and unforce link up */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("FORCED_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_autoneg_progress: + if (rxcw & E1000_RXCW_C) { + /* + * We received /C/ ordered sets, meaning the + * link partner has autonegotiated, and we can + * trust the Link Up (LU) status bit. + */ + if (status & E1000_STATUS_LU) { + mac->serdes_link_state = + e1000_serdes_link_autoneg_complete; + e_dbg("AN_PROG -> AN_UP\n"); + mac->serdes_has_link = true; + } else { + /* Autoneg completed, but failed. */ + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("AN_PROG -> DOWN\n"); + } + } else { + /* + * The link partner did not autoneg. + * Force link up and full duplex, and change + * state to forced. + */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error config flow control\n"); + break; + } + mac->serdes_link_state = + e1000_serdes_link_forced_up; + mac->serdes_has_link = true; + e_dbg("AN_PROG -> FORCED_UP\n"); + } + break; + + case e1000_serdes_link_down: + default: + /* + * The link was down but the receiver has now gained + * valid sync, so let's see if we can bring the link + * up. + */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("DOWN -> AN_PROG\n"); + break; + } + } else { + if (!(rxcw & E1000_RXCW_SYNCH)) { + mac->serdes_has_link = false; + mac->serdes_link_state = e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + } else { + /* + * Check several times; if Sync and Config are both + * consistently 1, simply ignore the Invalid bit and + * restart autoneg. + */ + for (i = 0; i < AN_RETRY_COUNT; i++) { + udelay(10); + rxcw = er32(RXCW); + if ((rxcw & E1000_RXCW_IV) && + !((rxcw & E1000_RXCW_SYNCH) && + (rxcw & E1000_RXCW_C))) { + mac->serdes_has_link = false; + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + break; + } + } + + if (i == AN_RETRY_COUNT) { + txcw = er32(TXCW); + txcw |= E1000_TXCW_ANE; + ew32(TXCW, txcw); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("ANYSTATE -> AN_PROG\n"); + } + } + } + + return ret_val; +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration.
+/**
+ * e1000_valid_led_default_82571 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (*data == ID_LED_RESERVED_F746)
+			*data = ID_LED_DEFAULT_82573;
+		break;
+	default:
+		if (*data == ID_LED_RESERVED_0000 ||
+		    *data == ID_LED_RESERVED_FFFF)
+			*data = ID_LED_DEFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * e1000e_get_laa_state_82571 - Get locally administered address state
+ * @hw: pointer to the HW structure
+ *
+ * Retrieve and return the current locally administered address state.
+ **/
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
+{
+	if (hw->mac.type != e1000_82571)
+		return false;
+
+	return hw->dev_spec.e82571.laa_is_present;
+}
+
+/**
+ * e1000e_set_laa_state_82571 - Set locally administered address state
+ * @hw: pointer to the HW structure
+ * @state: enable/disable locally administered address
+ *
+ * Enable/Disable the current locally administered address state.
+ **/
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+	if (hw->mac.type != e1000_82571)
+		return;
+
+	hw->dev_spec.e82571.laa_is_present = state;
+
+	/* If workaround is activated... */
+	if (state)
+		/*
+		 * Hold a copy of the LAA in RAR[14]. This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed, the actual LAA is in one of the RARs and no
+		 * incoming packets directed to this port are dropped.
+		 * Eventually the LAA will be in RAR[0] and RAR[14].
+		 */
+		e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
+}
+
+/**
+ * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the EEPROM has completed the update. After updating the
+ * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If
+ * the checksum fix is not implemented, we need to set the bit and update
+ * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
+ * we need to return bad checksum.
+ **/
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 data;
+
+	if (nvm->type != e1000_nvm_flash_hw)
+		return 0;
+
+	/*
+	 * Check bit 4 of word 10h. If it is 0, firmware is done updating
+	 * 10h-12h. Checksum may need to be fixed.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (!(data & 0x10)) {
+		/*
+		 * Read 0x23 and check bit 15. This bit is a 1
+		 * when the checksum has already been fixed. If
+		 * the checksum is still wrong and this bit is a
+		 * 1, we need to return bad checksum. Otherwise,
+		 * we need to set this bit to a 1 and update the
+		 * checksum.
+		 */
+		ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
+		if (ret_val)
+			return ret_val;
+
+		if (!(data & 0x8000)) {
+			data |= 0x8000;
+			ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
+			if (ret_val)
+				return ret_val;
+			ret_val = e1000e_update_nvm_checksum(hw);
+		}
+	}
+
+	return 0;
+}
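
The checksum being repaired here follows the usual e1000e rule, stated later in defines.h: the first 0x40 NVM words must sum to NVM_SUM (0xBABA), with the checksum word stored at NVM_CHECKSUM_REG (0x3F). A simplified sketch of the validate step (illustrative only; the driver uses its own validate/update helpers):

static s32 example_validate_nvm_checksum(struct e1000_hw *hw)
{
	u16 checksum = 0;
	u16 i, nvm_data;
	s32 ret_val;

	/* Sum words 0x00 through NVM_CHECKSUM_REG (0x3F) inclusive. */
	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
		if (ret_val)
			return ret_val;
		checksum += nvm_data;
	}

	/* The checksum word is chosen so that the total comes to 0xBABA. */
	return (checksum == (u16)NVM_SUM) ? 0 : -E1000_ERR_NVM;
}
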
+/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (hw->mac.type == e1000_82571) {
+		/*
+		 * If there's an alternate MAC address place it in RAR0
+		 * so that it will override the Si installed default perm
+		 * address.
+		 */
+		ret_val = e1000_check_alt_mac_addr_generic(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during
+ * a driver unload, or when Wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!(phy->ops.check_reset_block))
+		return;
+
+	/* If the management interface is not enabled, then power down */
+	if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+}
+
+/**
+ * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+{
+	e1000e_clear_hw_cntrs_base(hw);
+
+	er32(PRC64);
+	er32(PRC127);
+	er32(PRC255);
+	er32(PRC511);
+	er32(PRC1023);
+	er32(PRC1522);
+	er32(PTC64);
+	er32(PTC127);
+	er32(PTC255);
+	er32(PTC511);
+	er32(PTC1023);
+	er32(PTC1522);
+
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
+
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
+
+	er32(IAC);
+	er32(ICRXOC);
+
+	er32(ICRXPTC);
+	er32(ICRXATC);
+	er32(ICTXPTC);
+	er32(ICTXATC);
+	er32(ICTXQEC);
+	er32(ICTXQMTC);
+	er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations e82571_mac_ops = {
+	/* .check_mng_mode: mac type dependent */
+	/* .check_for_link: media type dependent */
+	.id_led_init = e1000e_id_led_init,
+	.cleanup_led = e1000e_cleanup_led_generic,
+	.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
+	.get_bus_info = e1000e_get_bus_info_pcie,
+	.set_lan_id = e1000_set_lan_id_multi_port_pcie,
+	/* .get_link_up_info: media type dependent */
+	/* .led_on: mac type dependent */
+	.led_off = e1000e_led_off_generic,
+	.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
+	.write_vfta = e1000_write_vfta_generic,
+	.clear_vfta = e1000_clear_vfta_82571,
+	.reset_hw = e1000_reset_hw_82571,
+	.init_hw = e1000_init_hw_82571,
+	.setup_link = e1000_setup_link_82571,
+	/* .setup_physical_interface: media type dependent */
+	.setup_led = e1000e_setup_led_generic,
+	.read_mac_addr = e1000_read_mac_addr_82571,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_igp = {
+	.acquire = e1000_get_hw_semaphore_82571,
+	.check_polarity = e1000_check_polarity_igp,
+	.check_reset_block = e1000e_check_reset_block_generic,
+	.commit = NULL,
+	.force_speed_duplex = e1000e_phy_force_speed_duplex_igp,
+	.get_cfg_done = e1000_get_cfg_done_82571,
+	.get_cable_length = e1000e_get_cable_length_igp_2,
+	.get_info = e1000e_get_phy_info_igp,
+	.read_reg = e1000e_read_phy_reg_igp,
+	.release = e1000_put_hw_semaphore_82571,
+	.reset = e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
+	.set_d3_lplu_state = e1000e_set_d3_lplu_state,
+	.write_reg = e1000e_write_phy_reg_igp,
+	.cfg_on_link_up = NULL,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_m88 = {
+	.acquire = e1000_get_hw_semaphore_82571,
+	.check_polarity = e1000_check_polarity_m88,
+	.check_reset_block = e1000e_check_reset_block_generic,
+	.commit = e1000e_phy_sw_reset,
+	.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
+	.get_cfg_done =
e1000e_get_cfg_done, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_m88, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_m88, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_phy_operations e82_phy_ops_bm = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_bm2, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_bm2, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_nvm_operations e82571_nvm_ops = { + .acquire = e1000_acquire_nvm_82571, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_82571, + .update = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate = e1000_validate_nvm_checksum_82571, + .write = e1000_write_nvm_82571, +}; + +const struct e1000_info e1000_82571_info = { + .mac = e1000_82571, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_RESET_OVERWRITES_LAA /* errata */ + | FLAG_TARC_SPEED_MODE_BIT /* errata */ + | FLAG_APME_CHECK_PORT_B, + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82572_info = { + .mac = e1000_82572, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_TARC_SPEED_MODE_BIT, /* errata */ + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82573_info = { + .mac = e1000_82573, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_SWSM_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L1 + | FLAG2_DISABLE_ASPM_L0S, + .pba = 20, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_m88, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82574_info = { + .mac = e1000_82574, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_MSIX + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_CHECK_PHY_HANG + | FLAG2_DISABLE_ASPM_L0S + | FLAG2_NO_DISABLE_RX, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = 
&e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82583_info = { + .mac = e1000_82583, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L0S + | FLAG2_NO_DISABLE_RX, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile new file mode 100644 index 0000000..6a488cb --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile @@ -0,0 +1,12 @@ +ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include + +obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000E) += rt_e1000e.o + +rt_e1000e-y := \ + 82571.o \ + 80003es2lan.o \ + ich8lan.o \ + lib.o \ + netdev.o \ + param.o \ + phy.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h new file mode 100644 index 0000000..ffa4c02 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h @@ -0,0 +1,852 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC + +/* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000004 /* Force SMBus mode*/ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_LSECCK 0x00001000 +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 
0x00020000 /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ *   psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *               E1000_PSRCTL_BSIZE0_MASK) |
+ *              ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *               E1000_PSRCTL_BSIZE1_MASK) |
+ *              ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *               E1000_PSRCTL_BSIZE2_MASK) |
+ *              ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *               E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256], default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512], default=4096
+ *       value3 = [0..64512], default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM 0x1
+#define E1000_SWFW_PHY0_SM 0x2
+#define E1000_SWFW_PHY1_SM 0x4
+#define E1000_SWFW_CSR_SM 0x8
+
+/* Device Control */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+
+/*
+ * Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+
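
To make the PSRCTL usage note above concrete, here is the register value for the documented defaults (value0=256, value1=4096, value2=4096, value3=0; these are already multiples of their units, so ROUNDUP is a no-op). The helper is purely illustrative:

static inline u32 example_default_psrctl(void)
{
	u32 psrctl = 0;

	/* value0 = 256 bytes: 256 >> 7 = 2 (field counts 128-byte units) */
	psrctl |= (256 >> E1000_PSRCTL_BSIZE0_SHIFT) & E1000_PSRCTL_BSIZE0_MASK;
	/* value1 = 4096 bytes: 4096 >> 2 = 0x400 (field = 4, in 1 KB units) */
	psrctl |= (4096 >> E1000_PSRCTL_BSIZE1_SHIFT) & E1000_PSRCTL_BSIZE1_MASK;
	/* value2 = 4096 bytes: 4096 << 6 = 0x40000 (field = 4, in 1 KB units) */
	psrctl |= (4096 << E1000_PSRCTL_BSIZE2_SHIFT) & E1000_PSRCTL_BSIZE2_MASK;
	/* value3 = 0 contributes nothing */

	return psrctl; /* 0x00040402 */
}
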
+/* Device Status */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex. 0=half, 1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up. 0=no, 1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
+				ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_PHY_LED0_MODE_MASK 0x00000007
+#define E1000_PHY_LED0_IVRT 0x00000008
+#define E1000_PHY_LED0_MASK 0x0000001F
+
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
+
+#define E1000_LEDCTL_MODE_LINK_UP 0x2
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+
+/* Transmit Control */
+#define E1000_TCTL_EN 0x00000002 /* enable Tx */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT
0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ + +/* Header split receive */ +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ + +#define E1000_PBS_16K E1000_PBA_16K + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ + +/* PBA ECC Register */ +#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ +#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ +#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */ +#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ +#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ + +/* + * This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of desc. still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
+ */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half 
Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +/* NVM Addressing bits based on type (0-small, 1-large) */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_INIT_CONTROL2_REG 0x000F +#define 
NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F + +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ + +#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 + +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
*/ +/* + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 +#define I82577_E_PHY_ID 0x01540050 +#define I82578_E_PHY_ID 0x004DD040 +#define I82579_E_PHY_ID 0x01540090 +#define I217_E_PHY_ID 0x015400A0 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) + * 0=Normal 10BASE-T Rx Threshold + */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 +#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C + +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) + +/* + * Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ + +/* MDI Control */ +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* SerDes Control */ +#define E1000_GEN_POLL_TIMEOUT 640 + +/* FW Semaphore */ +#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 +#define E1000_FWSM_WLOCK_MAC_SHIFT 7 + +#endif /* _E1000_DEFINES_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h new file mode 100644 index 0000000..d6fa3d4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h @@ -0,0 +1,764 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include <linux/bitops.h> +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/crc32.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> + +#include <rtnet_port.h> + +#include "hw.h" + +struct e1000_info; + +#define e_dbg(format, arg...) \ + pr_debug(format, ## arg) +#define e_err(format, arg...) 
\ + pr_err(format, ## arg) +#define e_info(format, arg...) \ + pr_info(format, ## arg) +#define e_warn(format, arg...) \ + pr_warn(format, ## arg) +#define e_notice(format, arg...) \ + pr_notice(format, ## arg) + + +/* Interrupt modes, as used by the IntMode parameter */ +#define E1000E_INT_MODE_LEGACY 0 +#define E1000E_INT_MODE_MSI 1 +#define E1000E_INT_MODE_MSIX 2 + +/* Tx/Rx descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 4096 +#define E1000_MIN_TXD 64 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 4096 +#define E1000_MIN_RXD 64 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* Early Receive defines */ +#define E1000_ERT_2048 0x100 + +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_APME 0x0400 + +#define E1000_MNG_VLAN_NONE (-1) + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +#define DEFAULT_JUMBO 9234 + +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 + +#define PHY_UPPER_SHIFT 21 +#define BM_PHY_REG(page, reg) \ + (((reg) & MAX_PHY_REG_ADDRESS) |\ + (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ + (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_STATS_PAGE 778 +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */ +#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. 
Count */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_LINK_UP 0x0040 + +#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 + +/* Time to wait before putting the device into D3 if there's no link (in ms). */ +#define LINK_TIMEOUT 100 + +#define DEFAULT_RDTR 0 +#define DEFAULT_RADV 8 +#define BURST_RDTR 0x20 +#define BURST_RADV 0x20 + +/* + * in the case of WTHRESH, it appears at least the 82571/2 hardware + * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when + * WTHRESH=4, and since we want 64 bytes at a time written back, set + * it to 5 + */ +#define E1000_TXDCTL_DMA_BURST_ENABLE \ + (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ + E1000_TXDCTL_COUNT_DESC | \ + (5 << 16) | /* wthresh must be +1 more than desired */\ + (1 << 8) | /* hthresh */ \ + 0x1f) /* pthresh */ + +#define E1000_RXDCTL_DMA_BURST_ENABLE \ + (0x01000000 | /* set descriptor granularity */ \ + (4 << 16) | /* set writeback threshold */ \ + (4 << 8) | /* set prefetch threshold */ \ + 0x20) /* set hthresh */ + +#define E1000_TIDV_FPD (1 << 31) +#define E1000_RDTR_FPD (1 << 31) + +enum e1000_boards { + board_82571, + board_82572, + board_82573, + board_82574, + board_82583, + board_80003es2lan, + board_ich8lan, + board_ich9lan, + board_ich10lan, + board_pchlan, + board_pch2lan, + board_pch_lpt, +}; + +struct e1000_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + +/* + * wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_buffer { + dma_addr_t dma; + struct rtskb *skb; + union { + /* Tx */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + /* Rx */ + struct { + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_ring { + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. 
in ring */ + + u16 next_to_use; + u16 next_to_clean; + + u16 head; + u16 tail; + + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + char name[IFNAMSIZ + 5]; + u32 ims_val; + u32 itr_val; + u16 itr_register; + int set_itr; + + struct rtskb *rx_skb_top; + + rtdm_lock_t lock; +}; + +/* PHY register snapshot values */ +struct e1000_phy_regs { + u16 bmcr; /* basic mode control register */ + u16 bmsr; /* basic mode status register */ + u16 advertise; /* auto-negotiation advertisement */ + u16 lpa; /* link partner ability register */ + u16 expansion; /* auto-negotiation expansion reg */ + u16 ctrl1000; /* 1000BASE-T control register */ + u16 stat1000; /* 1000BASE-T status register */ + u16 estatus; /* extended status register */ +}; + +/* board specific private data structure */ +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct e1000_info *ei; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* + * Tx + */ + struct e1000_ring *tx_ring /* One per active queue */ + ____cacheline_aligned_in_smp; + + struct napi_struct napi; + + unsigned int restart_queue; + u32 txd_cmd; + + bool detect_tx_hung; + u8 tx_timeout_factor; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* Tx stats */ + u64 tpt_old; + u64 colc_old; + u32 gotc; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + + /* + * Rx + */ + bool (*clean_rx) (struct e1000_adapter *adapter, + nanosecs_abs_t *time_stamp) + ____cacheline_aligned_in_smp; + void (*alloc_rx_buf) (struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp); + struct e1000_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* Rx stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct rtnet_device *netdev; + struct pci_dev *pdev; + + rtdm_irq_t irq_handle; + rtdm_irq_t rx_irq_handle; + rtdm_irq_t tx_irq_handle; + rtdm_nrtsig_t mod_timer_sig; + rtdm_nrtsig_t downshift_sig; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + spinlock_t stats64_lock; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + /* Snapshot of PHY registers */ + struct e1000_phy_regs phy_regs; + + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + + bool fc_autoneg; + + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + + bool idle_check; + int phy_hang_count; +}; + +struct e1000_info { + enum e1000_mac_type mac; 
+ unsigned int flags; + unsigned int flags2; + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); + const struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + const struct e1000_nvm_operations *nvm_ops; +}; + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_AMT (1 << 0) +#define FLAG_HAS_FLASH (1 << 1) +#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) +#define FLAG_HAS_WOL (1 << 3) +#define FLAG_HAS_ERT (1 << 4) +#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) +#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) +#define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_READ_ONLY_NVM (1 << 8) +#define FLAG_IS_ICH (1 << 9) +#define FLAG_HAS_MSIX (1 << 10) +#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) +#define FLAG_IS_QUAD_PORT_A (1 << 12) +#define FLAG_IS_QUAD_PORT (1 << 13) +#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) +#define FLAG_APME_IN_WUC (1 << 15) +#define FLAG_APME_IN_CTRL3 (1 << 16) +#define FLAG_APME_CHECK_PORT_B (1 << 17) +#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) +#define FLAG_NO_WAKE_UCAST (1 << 19) +#define FLAG_MNG_PT_ENABLED (1 << 20) +#define FLAG_RESET_OVERWRITES_LAA (1 << 21) +#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) +#define FLAG_TARC_SET_BIT_ZERO (1 << 23) +#define FLAG_RX_NEEDS_RESTART (1 << 24) +#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) +#define FLAG_SMART_POWER_DOWN (1 << 26) +#define FLAG_MSI_ENABLED (1 << 27) +/* reserved (1 << 28) */ +#define FLAG_TSO_FORCE (1 << 29) +#define FLAG_RX_RESTART_NOW (1 << 30) +#define FLAG_MSI_TEST_FAILED (1 << 31) + +#define FLAG2_CRC_STRIPPING (1 << 0) +#define FLAG2_HAS_PHY_WAKEUP (1 << 1) +#define FLAG2_IS_DISCARDING (1 << 2) +#define FLAG2_DISABLE_ASPM_L1 (1 << 3) +#define FLAG2_HAS_PHY_STATS (1 << 4) +#define FLAG2_HAS_EEE (1 << 5) +#define FLAG2_DMA_BURST (1 << 6) +#define FLAG2_DISABLE_ASPM_L0S (1 << 7) +#define FLAG2_DISABLE_AIM (1 << 8) +#define FLAG2_CHECK_PHY_HANG (1 << 9) +#define FLAG2_NO_DISABLE_RX (1 << 10) +#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_ACCESS_SHARED_RESOURCE, + __E1000_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char e1000e_driver_name[]; +extern const char e1000e_driver_version[]; + +extern void e1000e_check_options(struct e1000_adapter *adapter); +extern void e1000e_set_ethtool_ops(struct net_device *netdev); + +extern int e1000e_up(struct e1000_adapter *adapter); +extern void e1000e_down(struct e1000_adapter *adapter); +extern void e1000e_reinit_locked(struct e1000_adapter *adapter); +extern void e1000e_reset(struct e1000_adapter *adapter); +extern void e1000e_power_up_phy(struct e1000_adapter *adapter); +extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); +extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); +extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 + 
*stats); +extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); +extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); +extern void e1000e_get_hw_control(struct e1000_adapter *adapter); +extern void e1000e_release_hw_control(struct e1000_adapter *adapter); + +extern unsigned int copybreak; + +extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); + +extern const struct e1000_info e1000_82571_info; +extern const struct e1000_info e1000_82572_info; +extern const struct e1000_info e1000_82573_info; +extern const struct e1000_info e1000_82574_info; +extern const struct e1000_info e1000_82583_info; +extern const struct e1000_info e1000_ich8_info; +extern const struct e1000_info e1000_ich9_info; +extern const struct e1000_info e1000_ich10_info; +extern const struct e1000_info e1000_pch_info; +extern const struct e1000_info e1000_pch2_info; +extern const struct e1000_info e1000_pch_lpt_info; +extern const struct e1000_info e1000_es2_info; + +extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); + +extern s32 e1000e_commit_phy(struct e1000_hw *hw); + +extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); + +extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); +extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); + +extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); +extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); +extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); + +extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_setup_led_generic(struct e1000_hw *hw); +extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); +extern s32 e1000e_led_on_generic(struct e1000_hw *hw); +extern s32 e1000e_led_off_generic(struct e1000_hw *hw); +extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); +extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +extern void e1000_set_lan_id_single_port(struct e1000_hw *hw); +extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); +extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); +extern s32 e1000e_id_led_init(struct e1000_hw *hw); +extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); +extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); +extern s32 e1000e_setup_link(struct e1000_hw *hw); +extern void e1000_clear_vfta_generic(struct e1000_hw *hw); +extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, + u32 
mc_addr_count); +extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); +extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); +extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); +extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); +extern void e1000e_config_collision_dist(struct e1000_hw *hw); +extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); +extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); +extern s32 e1000e_blink_led_generic(struct e1000_hw *hw); +extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); +extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +extern void e1000e_reset_adaptive(struct e1000_hw *hw); +extern void e1000e_update_adaptive(struct e1000_hw *hw); + +extern s32 e1000e_setup_copper_link(struct e1000_hw *hw); +extern s32 e1000e_get_phy_id(struct e1000_hw *hw); +extern void e1000e_put_hw_semaphore(struct e1000_hw *hw); +extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); +extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); +extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); +extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); +extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); +extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); +extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, + u16 *phy_reg); +extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, + u16 *phy_reg); +extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +extern s32 e1000e_phy_reset_dsp(struct e1000_hw 
*hw); +extern void e1000_power_up_phy_copper(struct e1000_hw *hw); +extern void e1000_power_down_phy_copper(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_check_downshift(struct e1000_hw *hw); +extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, + u16 *data); +extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, + u16 data); +extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); +extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +extern s32 e1000_check_polarity_82577(struct e1000_hw *hw); +extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw); + +extern s32 e1000_check_polarity_m88(struct e1000_hw *hw); +extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); +extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); +extern bool e1000_check_phy_82574(struct e1000_hw *hw); + +static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + return hw->phy.ops.reset(hw); +} + +static inline s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + return hw->phy.ops.check_reset_block(hw); +} + +static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg(hw, offset, data); +} + +static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg_locked(hw, offset, data); +} + +static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg(hw, offset, data); +} + +static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg_locked(hw, offset, data); +} + +static inline s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + return hw->phy.ops.get_cable_length(hw); +} + +extern s32 e1000e_acquire_nvm(struct e1000_hw *hw); +extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); +extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); +extern void e1000e_release_nvm(struct e1000_hw *hw); +extern void e1000e_reload_nvm(struct e1000_hw *hw); +extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); + +static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.validate(hw); +} + +static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.update(hw); +} + +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 
offset, u16 words, u16 *data) +{ + return hw->nvm.ops.read(hw, offset, words, data); +} + +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + return hw->nvm.ops.write(hw, offset, words, data); +} + +static inline s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + return hw->phy.ops.get_info(hw); +} + +static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) +{ + return hw->mac.ops.check_mng_mode(hw); +} + +extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); +extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); +extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); + +static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->hw_addr + reg); +} + +static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +#endif /* _E1000_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h new file mode 100644 index 0000000..247f79e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h @@ -0,0 +1,997 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include <linux/types.h> + +struct e1000_hw; +struct e1000_adapter; + +#include "defines.h" + +#define er32(reg) __er32(hw, E1000_##reg) +#define ew32(reg,val) __ew32(hw, E1000_##reg, (val)) +#define e1e_flush() er32(STATUS) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ + (writel((value), ((a)->hw_addr + reg + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) \ + (readl((a)->hw_addr + reg + ((offset) << 2))) + +enum e1e_registers { + E1000_CTRL = 0x00000, /* Device Control - RW */ + E1000_STATUS = 0x00008, /* Device Status - RO */ + E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */ + E1000_EERD = 0x00014, /* EEPROM Read - RW */ + E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */ + E1000_FLA = 0x0001C, /* Flash Access - RW */ + E1000_MDIC = 0x00020, /* MDI Control - RW */ + E1000_SCTL = 0x00024, /* SerDes Control - RW */ + E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ + E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ + E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ + E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ + E1000_FCT = 0x00030, /* Flow Control Type - RW */ + E1000_VET = 0x00038, /* VLAN Ether Type - RW */ + E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ + E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ + E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ + E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ + E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ + E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */ + E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ + E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */ + E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */ +#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2)) + E1000_RCTL = 0x00100, /* Rx Control - RW */ + E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ + E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ + E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */ + E1000_TCTL = 0x00400, /* Tx Control - RW */ + E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */ + E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */ + E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */ + E1000_LEDCTL = 0x00E00, /* LED Control - RW */ + E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ + E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ + E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ +#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ + E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ + E1000_PBS = 0x01008, /* Packet Buffer Size */ + E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ + E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ + E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ + E1000_PBA_ECC = 0x01100, /* PBA ECC Register */ + E1000_ERT = 0x02008, /* Early Rx Threshold - RW */ + E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ + E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ + E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ + E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ + E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ + E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */ + E1000_RDH = 0x02810, 
/* Rx Descriptor Head - RW */
+	E1000_RDT = 0x02818,	/* Rx Descriptor Tail - RW */
+	E1000_RDTR = 0x02820,	/* Rx Delay Timer - RW */
+	E1000_RXDCTL_BASE = 0x02828,	/* Rx Descriptor Control - RW */
+#define E1000_RXDCTL(_n)	(E1000_RXDCTL_BASE + (_n << 8))
+	E1000_RADV = 0x0282C,	/* Rx Interrupt Absolute Delay Timer - RW */
+
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL_REG(current_rx_queue)
+ *
+ */
+#define E1000_RDBAL_REG(_n)	(E1000_RDBAL + (_n << 8))
+	E1000_KABGTXD = 0x03004,	/* AFE Band Gap Transmit Ref Data */
+	E1000_TDBAL = 0x03800,	/* Tx Descriptor Base Address Low - RW */
+	E1000_TDBAH = 0x03804,	/* Tx Descriptor Base Address High - RW */
+	E1000_TDLEN = 0x03808,	/* Tx Descriptor Length - RW */
+	E1000_TDH = 0x03810,	/* Tx Descriptor Head - RW */
+	E1000_TDT = 0x03818,	/* Tx Descriptor Tail - RW */
+	E1000_TIDV = 0x03820,	/* Tx Interrupt Delay Value - RW */
+	E1000_TXDCTL_BASE = 0x03828,	/* Tx Descriptor Control - RW */
+#define E1000_TXDCTL(_n)	(E1000_TXDCTL_BASE + (_n << 8))
+	E1000_TADV = 0x0382C,	/* Tx Interrupt Absolute Delay Val - RW */
+	E1000_TARC_BASE = 0x03840,	/* Tx Arbitration Count (0) */
+#define E1000_TARC(_n)	(E1000_TARC_BASE + (_n << 8))
+	E1000_CRCERRS = 0x04000,	/* CRC Error Count - R/clr */
+	E1000_ALGNERRC = 0x04004,	/* Alignment Error Count - R/clr */
+	E1000_SYMERRS = 0x04008,	/* Symbol Error Count - R/clr */
+	E1000_RXERRC = 0x0400C,	/* Receive Error Count - R/clr */
+	E1000_MPC = 0x04010,	/* Missed Packet Count - R/clr */
+	E1000_SCC = 0x04014,	/* Single Collision Count - R/clr */
+	E1000_ECOL = 0x04018,	/* Excessive Collision Count - R/clr */
+	E1000_MCC = 0x0401C,	/* Multiple Collision Count - R/clr */
+	E1000_LATECOL = 0x04020,	/* Late Collision Count - R/clr */
+	E1000_COLC = 0x04028,	/* Collision Count - R/clr */
+	E1000_DC = 0x04030,	/* Defer Count - R/clr */
+	E1000_TNCRS = 0x04034,	/* Tx-No CRS - R/clr */
+	E1000_SEC = 0x04038,	/* Sequence Error Count - R/clr */
+	E1000_CEXTERR = 0x0403C,	/* Carrier Extension Error Count - R/clr */
+	E1000_RLEC = 0x04040,	/* Receive Length Error Count - R/clr */
+	E1000_XONRXC = 0x04048,	/* XON Rx Count - R/clr */
+	E1000_XONTXC = 0x0404C,	/* XON Tx Count - R/clr */
+	E1000_XOFFRXC = 0x04050,	/* XOFF Rx Count - R/clr */
+	E1000_XOFFTXC = 0x04054,	/* XOFF Tx Count - R/clr */
+	E1000_FCRUC = 0x04058,	/* Flow Control Rx Unsupported Count - R/clr */
+	E1000_PRC64 = 0x0405C,	/* Packets Rx (64 bytes) - R/clr */
+	E1000_PRC127 = 0x04060,	/* Packets Rx (65-127 bytes) - R/clr */
+	E1000_PRC255 = 0x04064,	/* Packets Rx (128-255 bytes) - R/clr */
+	E1000_PRC511 = 0x04068,	/* Packets Rx (256-511 bytes) - R/clr */
+	E1000_PRC1023 = 0x0406C,	/* Packets Rx (512-1023 bytes) - R/clr */
+	E1000_PRC1522 = 0x04070,	/* Packets Rx (1024-1522 bytes) - R/clr */
+	E1000_GPRC = 0x04074,	/* Good Packets Rx Count - R/clr */
+	E1000_BPRC = 0x04078,	/* Broadcast Packets Rx Count - R/clr */
+	E1000_MPRC = 0x0407C,	/* Multicast Packets Rx Count - R/clr */
+	E1000_GPTC = 0x04080,	/* Good Packets Tx Count - R/clr */
+	E1000_GORCL = 0x04088,	/* Good Octets Rx Count Low - R/clr */
+	E1000_GORCH = 0x0408C,	/* Good Octets Rx Count High - R/clr */
+	E1000_GOTCL = 0x04090,	/* Good Octets Tx Count Low - R/clr */
+	E1000_GOTCH = 0x04094,	/* Good Octets Tx Count High - R/clr */
+	E1000_RNBC = 0x040A0,	/* Rx No Buffers Count - R/clr */
+	E1000_RUC = 0x040A4,	/* Rx Undersize Count - R/clr */
+	E1000_RFC = 0x040A8,	/* Rx Fragment Count - R/clr */
+	E1000_ROC = 0x040AC,	/*
Rx Oversize Count - R/clr */ + E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */ + E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */ + E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ + E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */ + E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */ + E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */ + E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */ + E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */ + E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */ + E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */ + E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */ + E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */ + E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */ + E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */ + E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */ + E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */ + E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */ + E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */ + E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */ + E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */ + E1000_IAC = 0x04100, /* Interrupt Assertion Count */ + E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ + E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ + E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */ + E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */ + E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */ + E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ + E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ + E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ + E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */ + E1000_RFCTL = 0x05008, /* Receive Filter Control */ + E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ + E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */ +#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8)) +#define E1000_RA (E1000_RAL(0)) + E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ +#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) + E1000_SHRAL_PCH_LPT_BASE = 0x05408, +#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8)) + E1000_SHRAH_PCH_LTP_BASE = 0x0540C, +#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8)) + E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ + E1000_WUC = 0x05800, /* Wakeup Control - RW */ + E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ + E1000_WUS = 0x05810, /* Wakeup Status - RO */ + E1000_MANC = 0x05820, /* Management Control - RW */ + E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ + E1000_HOST_IF = 0x08800, /* Host Interface */ + + E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */ + E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ + E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */ +#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4)) + E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ + E1000_GCR = 0x05B00, /* PCI-Ex Control */ + E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */ + E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ + E1000_SWSM = 0x05B50, /* SW Semaphore */ + E1000_FWSM = 0x05B54, /* FW Semaphore */ + E1000_SWSM2 = 0x05B58, /* 
Driver-only SW semaphore */
+	E1000_FFLT_DBG = 0x05F04,	/* Debug Register */
+	E1000_PCH_RAICC_BASE = 0x05F50,	/* Receive Address Initial CRC */
+#define E1000_PCH_RAICC(_n)	(E1000_PCH_RAICC_BASE + ((_n) * 4))
+#define E1000_CRC_OFFSET	E1000_PCH_RAICC_BASE
+	E1000_HICR = 0x08F00,	/* Host Interface Control */
+};
+
+#define E1000_MAX_PHY_ADDR	4
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG	0x10	/* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS	0x11	/* Status */
+#define IGP01E1000_PHY_PORT_CTRL	0x12	/* Control */
+#define IGP01E1000_PHY_LINK_HEALTH	0x13	/* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT	0x19	/* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT	0x1F	/* Page Select */
+#define BM_PHY_PAGE_SELECT	22	/* Page Select for BM */
+#define IGP_PAGE_SHIFT	5
+#define PHY_REG_MASK	0x1F
+
+#define BM_WUC_PAGE	800
+#define BM_WUC_ADDRESS_OPCODE	0x11
+#define BM_WUC_DATA_OPCODE	0x12
+#define BM_WUC_ENABLE_PAGE	769
+#define BM_WUC_ENABLE_REG	17
+#define BM_WUC_ENABLE_BIT	(1 << 2)
+#define BM_WUC_HOST_WU_BIT	(1 << 4)
+#define BM_WUC_ME_WU_BIT	(1 << 5)
+
+#define BM_WUC	PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC	PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS	PHY_REG(BM_WUC_PAGE, 3)
+
+#define IGP01E1000_PHY_PCS_INIT_REG	0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK	0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX	0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX	0x2000	/* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED	0x0080
+
+#define IGP02E1000_PM_SPD	0x0001	/* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU	0x0002	/* For D0a states */
+#define IGP02E1000_PM_D3_LPLU	0x0004	/* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE	0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED	0x0002
+#define IGP01E1000_PSSR_MDIX	0x0800
+#define IGP01E1000_PSSR_SPEED_MASK	0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS	0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM	4
+#define IGP02E1000_PHY_AGC_A	0x11B1
+#define IGP02E1000_PHY_AGC_B	0x12B1
+#define IGP02E1000_PHY_AGC_C	0x14B1
+#define IGP02E1000_PHY_AGC_D	0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT	9	/* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK	0x7F
+#define IGP02E1000_AGC_RANGE	15
+
+/* manage.c */
+#define E1000_VFTA_ENTRY_SHIFT	5
+#define E1000_VFTA_ENTRY_MASK	0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK	0x1F
+
+#define E1000_HICR_EN	0x01	/* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C	0x02
+#define E1000_HICR_FW_RESET_ENABLE	0x40
+#define E1000_HICR_FW_RESET	0x80
+
+#define E1000_FWSM_MODE_MASK	0xE
+#define E1000_FWSM_MODE_SHIFT	1
+
+#define E1000_MNG_IAMT_MODE	0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH	0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET	0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT	10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD	64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING	0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN	0x2
+
+/* nvm.c */
+#define E1000_STM_OPCODE	0xDB00
+
+#define E1000_KMRNCTRLSTA_OFFSET	0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT	16
+#define E1000_KMRNCTRLSTA_REN	0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET	0x1	/* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET	0x3	/* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS	0x4	/* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM	0x9	/* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE	0x0200	/* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK	0x1000	/* Nearend Loopback mode */
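+
+/*
+ * Editor's note, an illustrative sketch rather than driver code: the
+ * KMRNCTRLSTA fields above combine into a single register write when a
+ * Kumeran (MAC-PHY interconnect) register is accessed.  A read is built
+ * roughly as
+ *
+ *	v = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ *	     E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ *	ew32(KMRNCTRLSTA, v);
+ *	udelay(2);
+ *	data = (u16)er32(KMRNCTRLSTA);
+ *
+ * where E1000_KMRNCTRLSTA_REN requests the read-back; a write omits REN
+ * and ORs the value into the low 16 bits instead.
+ */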
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 +#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82574L 0x10D3 +#define E1000_DEV_ID_82574LA 0x10F6 +#define E1000_DEV_ID_82583V 0x150C + +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB + +#define E1000_DEV_ID_ICH8_82567V_3 0x1501 +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_BM 0x10E5 +#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define E1000_DEV_ID_ICH9_IGP_M 0x10BF +#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB +#define E1000_DEV_ID_ICH9_IGP_C 0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 +#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC +#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD +#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE +#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE +#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF +#define E1000_DEV_ID_ICH10_D_BM_V 0x1525 +#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA +#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB +#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF +#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 +#define E1000_DEV_ID_PCH2_LV_LM 0x1502 +#define E1000_DEV_ID_PCH2_LV_V 0x1503 +#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A +#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B +#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A +#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 + +#define E1000_REVISION_4 4 + +#define 
E1000_FUNC_1 1 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 + +enum e1000_mac_type { + e1000_82571, + e1000_82572, + e1000_82573, + e1000_82574, + e1000_82583, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, + e1000_ich10lan, + e1000_pchlan, + e1000_pch2lan, + e1000_pch_lpt, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_bm, + e1000_phy_82578, + e1000_phy_82577, + e1000_phy_82579, + e1000_phy_i217, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity{ + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of 
buffers 1-3 */
+		} upper;
+		__le64 reserved;
+	} wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+	__le64 buffer_addr;	/* Address of the descriptor's data buffer */
+	union {
+		__le32 data;
+		struct {
+			__le16 length;	/* Data buffer length */
+			u8 cso;	/* Checksum offset */
+			u8 cmd;	/* Descriptor control */
+		} flags;
+	} lower;
+	union {
+		__le32 data;
+		struct {
+			u8 status;	/* Descriptor status */
+			u8 css;	/* Checksum start */
+			__le16 special;
+		} fields;
+	} upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+	union {
+		__le32 ip_config;
+		struct {
+			u8 ipcss;	/* IP checksum start */
+			u8 ipcso;	/* IP checksum offset */
+			__le16 ipcse;	/* IP checksum end */
+		} ip_fields;
+	} lower_setup;
+	union {
+		__le32 tcp_config;
+		struct {
+			u8 tucss;	/* TCP checksum start */
+			u8 tucso;	/* TCP checksum offset */
+			__le16 tucse;	/* TCP checksum end */
+		} tcp_fields;
+	} upper_setup;
+	__le32 cmd_and_length;
+	union {
+		__le32 data;
+		struct {
+			u8 status;	/* Descriptor status */
+			u8 hdr_len;	/* Header length */
+			__le16 mss;	/* Maximum segment size */
+		} fields;
+	} tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+	__le64 buffer_addr;	/* Address of the descriptor's data buffer */
+	union {
+		__le32 data;
+		struct {
+			__le16 length;	/* Data buffer length */
+			u8 typ_len_ext;
+			u8 cmd;
+		} flags;
+	} lower;
+	union {
+		__le32 data;
+		struct {
+			u8 status;	/* Descriptor status */
+			u8 popts;	/* Packet Options */
+			__le16 special;
+		} fields;
+	} upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64 crcerrs;
+	u64 algnerrc;
+	u64 symerrs;
+	u64 rxerrc;
+	u64 mpc;
+	u64 scc;
+	u64 ecol;
+	u64 mcc;
+	u64 latecol;
+	u64 colc;
+	u64 dc;
+	u64 tncrs;
+	u64 sec;
+	u64 cexterr;
+	u64 rlec;
+	u64 xonrxc;
+	u64 xontxc;
+	u64 xoffrxc;
+	u64 xofftxc;
+	u64 fcruc;
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc;
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mgprc;
+	u64 mgpdc;
+	u64 mgptc;
+	u64 tor;
+	u64 tot;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 tsctc;
+	u64 tsctfc;
+	u64 iac;
+	u64 icrxptc;
+	u64 icrxatc;
+	u64 ictxptc;
+	u64 ictxatc;
+	u64 ictxqec;
+	u64 ictxqmtc;
+	u64 icrxdmtc;
+	u64 icrxoc;
+};
+
+struct e1000_phy_stats {
+	u32 idle_errors;
+	u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u8 status;
+	u8 reserved0;
+	u16 vlan_id;
+	u32 reserved1;
+	u16 reserved2;
+	u8 reserved3;
+	u8 checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+	u8 command_id;
+	u8 command_length;
+	u8 command_options;
+	u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH	252
+struct e1000_host_command_info {
+	struct e1000_host_command_header command_header;
+	u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+	u8 command_id;
+	u8 checksum;
+	u16 reserved1;
+	u16 reserved2;
+	u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH	0x6F8
+struct e1000_host_mng_command_info {
+	struct e1000_host_mng_command_header command_header;
+	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+/* Function pointers and static data for the MAC.
*/ +struct e1000_mac_operations { + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); +}; + +/* + * When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*cfg_on_link_up)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); +}; + +/* Function pointers for the NVM. 
*/ +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; + enum e1000_serdes_link_state serdes_link_state; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_width width; + + u16 func; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +#define E1000_ICH8_SHADOW_RAM_WORDS 2048 + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; + bool nvm_k1_enabled; + bool eee_disable; + u16 eee_lp_ability; +}; + +struct e1000_hw { + struct e1000_adapter *adapter; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_80003es2lan 
e80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; +}; + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c new file mode 100644 index 0000000..8bdcf3d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c @@ -0,0 +1,4446 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82562G 10/100 Network Connection + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567V Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82567LF-3 Gigabit Network Connection + * 82567LM-3 Gigabit Network Connection + * 82567LM-4 Gigabit Network Connection + * 82577LM Gigabit Network Connection + * 82577LC Gigabit Network Connection + * 82578DM Gigabit Network Connection + * 82578DC Gigabit Network Connection + * 82579LM Gigabit Network Connection + * 82579V Gigabit Network Connection + */ + +#include "e1000.h" + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_PR0 0x0074 + +#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 
0x00008000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_OFF2 << 8) | \ + (ID_LED_DEF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 +#define E1000_ICH_NVM_SIG_VALUE 0x80 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ + +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ + +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 + +/* PHY Low Power Idle Control */ +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 +#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 + +/* EMI Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ + +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) +#define I217_SxCTRL_MASK 0x1000 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_MASK 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define I217_MEMPWR_MASK 0x0010 + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 +#define E1000_STRAP_SMT_FREQ_SHIFT 12 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define 
E1000_NVM_K1_ENABLE	0x1	/* NVM Enable K1 bit */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL	PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW	0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA	PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK	0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT	12
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+	struct ich8_hsfsts {
+		u16 flcdone :1;	/* bit 0 Flash Cycle Done */
+		u16 flcerr :1;	/* bit 1 Flash Cycle Error */
+		u16 dael :1;	/* bit 2 Direct Access error Log */
+		u16 berasesz :2;	/* bit 4:3 Sector Erase Size */
+		u16 flcinprog :1;	/* bit 5 flash cycle in Progress */
+		u16 reserved1 :2;	/* bit 7:6 Reserved */
+		u16 reserved2 :6;	/* bit 13:8 Reserved */
+		u16 fldesvalid :1;	/* bit 14 Flash Descriptor Valid */
+		u16 flockdn :1;	/* bit 15 Flash Config Lock-Down */
+	} hsf_status;
+	u16 regval;
+};
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+	struct ich8_hsflctl {
+		u16 flcgo :1;	/* 0 Flash Cycle Go */
+		u16 flcycle :2;	/* 2:1 Flash Cycle */
+		u16 reserved :5;	/* 7:3 Reserved */
+		u16 fldbcount :2;	/* 9:8 Flash Data Byte Count */
+		u16 flockdn :6;	/* 15:10 Reserved */
+	} hsf_ctrl;
+	u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+	struct ich8_flracc {
+		u32 grra :8;	/* 0:7 GbE region Read Access */
+		u32 grwa :8;	/* 8:15 GbE region Write Access */
+		u32 gmrag :8;	/* 23:16 GbE Master Read Access Grant */
+		u32 gmwag :8;	/* 31:24 GbE Master Write Access Grant */
+	} hsf_flregacc;
+	u32 regval;	/* must be 32 bits wide to alias the bitfields above */
+};
+
+/* ICH Flash Protected Region */
+union ich8_flash_protected_range {
+	struct ich8_pr {
+		u32 base:13;	/* 0:12 Protected Range Base */
+		u32 reserved1:2;	/* 13:14 Reserved */
+		u32 rpe:1;	/* 15 Read Protection Enable */
+		u32 limit:13;	/* 16:28 Protected Range Limit */
+		u32 reserved2:2;	/* 29:30 Reserved */
+		u32 wpe:1;	/* 31 Write Protection Enable */
+	} range;
+	u32 regval;
+};
+
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+						u32 offset, u8 byte);
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 *data);
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u16 *data);
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 size, u16 *data);
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
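+
+/*
+ * Editor's note, an illustrative sketch rather than driver code: the
+ * HSFSTS/HSFCTL unions above are used by pairing each bitfield view with
+ * its 16-bit register image, e.g. to kick off a flash cycle and poll for
+ * completion:
+ *
+ *	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ *	hsflctl.hsf_ctrl.flcgo = 1;
+ *	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+ *	do {
+ *		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ *		if (hsfsts.hsf_status.flcdone)
+ *			break;
+ *		udelay(1);
+ *	} while (i++ < timeout);
+ *
+ * (er16flash()/ew16flash() are defined below.)
+ */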
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); + +static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) +{ + return readw(hw->flash_address + reg); +} + +static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->flash_address + reg); +} + +static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) +{ + writew(val, hw->flash_address + reg); +} + +static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->flash_address + reg); +} + +#define er16flash(reg) __er16flash(hw, (reg)) +#define er32flash(reg) __er32flash(hw, (reg)) +#define ew16flash(reg,val) __ew16flash(hw, (reg), (val)) +#define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) + +static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; + ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; + ew32(CTRL, ctrl); + e1e_flush(); + udelay(10); + ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + ew32(CTRL, ctrl); +} + +/** + * e1000_init_phy_params_pchlan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 fwsm; + s32 ret_val = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* + * The MAC-PHY interconnect may still be in SMBus mode + * after Sx->S0. If the manageability engine (ME) is + * disabled, then toggle the LANPHYPC Value bit to force + * the interconnect to PCIe mode. + */ + fwsm = er32(FWSM); + if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) { + e1000_toggle_lanphypc_value_ich8lan(hw); + msleep(50); + + /* + * Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if (hw->mac.type == e1000_pch2lan) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + + /* + * Reset the PHY before any access to it. Doing so, ensures that + * the PHY is in a known good state before we read/write PHY registers. + * The generic reset is sufficient here, because we haven't determined + * the PHY type yet. 
+ */ + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + phy->id = e1000_phy_unknown; + switch (hw->mac.type) { + default: + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + goto out; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + fallthrough; + case e1000_pch2lan: + case e1000_pch_lpt: + /* + * In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + goto out; + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + goto out; + break; + } + phy->type = e1000e_get_phy_type_from_id(phy->id); + + switch (phy->type) { + case e1000_phy_82577: + case e1000_phy_82579: + case e1000_phy_i217: + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.commit = e1000e_phy_sw_reset; + break; + case e1000_phy_82578: + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000e_get_cable_length_m88; + phy->ops.get_info = e1000e_get_phy_info_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + + /* + * We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + e_dbg("Cannot determine PHY addr. 
Erroring out\n"); + return ret_val; + } + } + + phy->id = 0; + while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + usleep_range(1000, 2000); + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; + phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; + phy->ops.get_info = e1000e_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + phy->ops.get_info = e1000_get_phy_info_ife; + phy->ops.check_polarity = e1000_check_polarity_ife; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; + break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.commit = e1000e_phy_sw_reset; + phy->ops.get_info = e1000e_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. + **/ +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg, sector_base_addr, sector_end_addr; + u16 i; + + /* Can't read flash registers if the register set isn't mapped. */ + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + nvm->type = e1000_nvm_flash_sw; + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* + * sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + + /* + * find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = (sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT; + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + + nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + return 0; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. 
+ **/ +static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type function pointer */ + hw->phy.media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* LED operations */ + switch (mac->type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; + /* ID LED init */ + mac->ops.id_led_init = e1000e_id_led_init; + /* blink LED */ + mac->ops.blink_led = e1000e_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000e_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_ich8lan; + mac->ops.led_off = e1000_led_off_ich8lan; + break; + case e1000_pch_lpt: + case e1000_pchlan: + case e1000_pch2lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_pchlan; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_pchlan; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_pchlan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_pchlan; + mac->ops.led_off = e1000_led_off_pchlan; + break; + default: + break; + } + + if (mac->type == e1000_pch_lpt) { + mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch_lpt; + } + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); + + /* Gate automatic PHY configuration by hardware on managed + * 82579 and i217 + */ + if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) && + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + return 0; +} + +/** + * e1000_set_eee_pchlan - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure. The bits in + * the LPI Control register will remain set only if/when link is up. + **/ +static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + s32 ret_val = 0; + u16 phy_reg; + + if ((hw->phy.type != e1000_phy_82579) && + (hw->phy.type != e1000_phy_i217)) + return ret_val; + + ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); + if (ret_val) + return ret_val; + + if (dev_spec->eee_disable) + phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; + else + phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; + + ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); + + if (ret_val) + return ret_val; + + if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) { + /* Save off link partner's EEE ability */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, + I217_EEE_LP_ABILITY); + if (ret_val) + goto release; + e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability); + + /* EEE is not supported in 100Half, so ignore partner's EEE + * in 100 ability if full-duplex is not advertised. 
+ */ + e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg); + if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS)) + dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED; +release: + hw->phy.ops.release(hw); + } + + return 0; +} + +/** + * e1000_check_for_copper_link_ich8lan - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see if the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + u16 phy_reg; + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + goto out; + } + + /* Clear link partner's EEE ability */ + hw->dev_spec.ich8lan.eee_lp_ability = 0; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + switch (hw->mac.type) { + case e1000_pch2lan: + ret_val = e1000_k1_workaround_lv(hw); + if (ret_val) + goto out; + fallthrough; + case e1000_pchlan: + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1000_link_stall_workaround_hv(hw); + if (ret_val) + goto out; + } + + /* + * Workaround for PCHx parts in half-duplex: + * Set the number of preambles removed from the packet + * when it is passed from the PHY to the MAC to prevent + * the MAC from misinterpreting the packet type. + */ + e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); + phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; + + if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) + phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + + e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); + break; + default: + break; + } + + /* + * Check if there was DownShift; it must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* Enable/Disable EEE after link up */ + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + goto out; + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000e_config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_ich8lan(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_ich8lan(hw); + if (rc) + return rc; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + rc = e1000_init_phy_params_ich8lan(hw); + break; + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + rc = e1000_init_phy_params_pchlan(hw); + break; + default: + break; + } + if (rc) + return rc; + + /* + * Disable Jumbo Frame support on parts with Intel 10/100 PHY or + * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). + */ + if ((adapter->hw.phy.type == e1000_phy_ife) || + ((adapter->hw.mac.type >= e1000_pch2lan) && + (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { + adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; + + hw->mac.ops.blink_led = NULL; + } + + if ((adapter->hw.mac.type == e1000_ich8lan) && + (adapter->hw.phy.type != e1000_phy_ife)) + adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + + /* Enable workaround for 82579 w/ ME enabled */ + if ((adapter->hw.mac.type == e1000_pch2lan) && + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; + + /* Disable EEE by default until IEEE802.3az spec is finalized */ + if (adapter->flags2 & FLAG2_HAS_EEE) + adapter->hw.dev_spec.ich8lan.eee_disable = true; + + return 0; +} + +static DEFINE_MUTEX(nvm_mutex); + +/** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_lock(&nvm_mutex); + + return 0; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. + **/ +static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_unlock(&nvm_mutex); +} + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. 
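+ *
+ * Acquisition is two-stage: wait for any current owner to clear the
+ * EXTCNF_CTRL.SWFLAG bit, then set it and read it back until hardware
+ * confirms ownership. A minimal sketch of the expected caller pattern
+ * (mirroring e1000_rar_set_pch_lpt() below):
+ *
+ *	if (!e1000_acquire_swflag_ich8lan(hw)) {
+ *		... PHY/MAC CSR accesses ...
+ *		e1000_release_swflag_ich8lan(hw);
+ *	}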
+ **/ +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = 0; + + if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE, + &hw->adapter->state)) { + WARN(1, "e1000e: %s: contention for Phy access\n", + hw->adapter->netdev->name); + return -E1000_ERR_PHY; + } + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("SW has already locked the resource.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + timeout = SW_FLAG_TIMEOUT; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("Failed to acquire the semaphore, FW or HW has it: " + "FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", + er32(FWSM), extcnf_ctrl); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + if (ret_val) + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); + + return ret_val; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. + **/ +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + } else { + e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); + } + + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); +} + +/** + * e1000_check_mng_mode_ich8lan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has any manageability enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_mng_mode_pchlan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has iAMT enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_rar_set_pch_lpt - Set receive address registers + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address register array at index to the address passed + * in by addr. For LPT, RAR[0] is the base address register that is to + * contain the MAC address. SHRA[0-10] are the shared receive address + * registers that are shared between the Host and manageability engine (ME). 
+ **/ +static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + u32 wlock_mac; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return; + } + + /* The manageability engine (ME) can lock certain SHRAR registers that + * it is using - those registers are unavailable for use. + */ + if (index < hw->mac.rar_entry_count) { + wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + /* Check if all SHRAR registers are locked */ + if (wlock_mac == 1) + goto out; + + if ((wlock_mac == 0) || (index <= wlock_mac)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + + if (ret_val) + goto out; + + ew32(SHRAL_PCH_LPT(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH_PCH_LPT(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && + (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) + return; + } + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); +} + +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + + return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; +} + +/** + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states + * @hw: pointer to the HW structure + * + * Assumes semaphore already acquired. + * + **/ +static s32 e1000_write_smbus_addr(struct e1000_hw *hw) +{ + u16 phy_data; + u32 strap = er32(STRAP); + u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> + E1000_STRAP_SMT_FREQ_SHIFT; + s32 ret_val = 0; + + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; + + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~HV_SMB_ADDR_MASK; + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + + if (hw->phy.type == e1000_phy_i217) { + /* Restore SMBus frequency */ + if (freq--) { + phy_data &= ~HV_SMB_ADDR_FREQ_MASK; + phy_data |= (freq & (1 << 0)) << + HV_SMB_ADDR_FREQ_LOW_SHIFT; + phy_data |= (freq & (1 << 1)) << + (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); + } else { + e_dbg("Unsupported SMB frequency in PHY\n"); + } + } + + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); + +out: + return ret_val; +} + +/** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. 
+ **/ +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val = 0; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + /* + * Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. + */ + switch (hw->mac.type) { + case e1000_ich8lan: + if (phy->type != e1000_phy_igp_3) + return ret_val; + + if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || + (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + } + fallthrough; + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + break; + default: + return ret_val; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + goto out; + + /* + * Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = er32(EXTCNF_CTRL); + if ((hw->mac.type < e1000_pch2lan) && + (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) + goto out; + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto out; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if (((hw->mac.type == e1000_pchlan) && + !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || + (hw->mac.type > e1000_pchlan)) { + /* + * HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. + */ + ret_val = e1000_write_smbus_addr(hw); + if (ret_val) + goto out; + + data = er32(LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto out; + } + + /* Configure LCD from extended configuration region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, + &reg_data); + if (ret_val) + goto out; + + ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), + 1, &reg_addr); + if (ret_val) + goto out; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr &= PHY_REG_MASK; + reg_addr |= phy_page; + + ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, + reg_data); + if (ret_val) + goto out; + } + +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig. + * If link is down, the function will restore the default K1 setting located + * in the NVM. 
+ **/ +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = 0; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + if (hw->mac.type != e1000_pchlan) + goto out; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK; + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK; + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), + 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release(hw); +out: + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @k1_enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val = 0; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + ret_val = e1000e_read_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + goto out; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + goto out; + + udelay(20); + ctrl_ext = er32(CTRL_EXT); + ctrl_reg = er32(CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + ew32(CTRL, reg); + + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + e1e_flush(); + udelay(20); + ew32(CTRL, ctrl_reg); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + udelay(20); + +out: + return ret_val; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determine whether HW should configure LPLU and Gbe Disable. 
+ **/ +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + if (hw->mac.type < e1000_pchlan) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pchlan) { + mac_reg = er32(EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto out; + } + + mac_reg = er32(FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto out; + + mac_reg = er32(PHY_CTRL); + + ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + + /* Set Restart auto-neg to activate the bits */ + if (!e1000_check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + } else { + if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU)) + oem_reg |= HV_OEM_BITS_LPLU; + } + + ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); + +out: + hw->phy.ops.release(hw); + + return ret_val; +} + + +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + **/ +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data |= HV_KMRN_MDIO_SLOW; + + ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); + + return ret_val; +} + +/** + * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_data; + + if (hw->mac.type != e1000_pchlan) + return ret_val; + + /* Set MDIO slow mode before any other MDIO access */ + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + goto out; + } + + if (((hw->phy.type == e1000_phy_82577) && + ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || + ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { + /* Disable generation of early preamble */ + ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); + if (ret_val) + return ret_val; + + /* Preamble tuning for SSC */ + ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); + if (ret_val) + return ret_val; + } + + if (hw->phy.type == e1000_phy_82578) { + /* + * Return registers to default by doing a soft reset then + * writing 0x3140 to the control register. + */ + if (hw->phy.revision < 2) { + e1000e_phy_sw_reset(hw); + ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140); + } + } + + /* Select page 0 */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + hw->phy.addr = 1; + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + hw->phy.ops.release(hw); + if (ret_val) + goto out; + + /* + * Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. 
+ */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + if (ret_val) + goto out; + + /* Workaround for link disconnects on a busy hub in half duplex */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, + phy_data & 0x00FF); +release: + hw->phy.ops.release(hw); +out: + return ret_val; +} + +/** + * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @hw: pointer to the HW structure + **/ +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) +{ + u32 mac_reg; + u16 i, phy_reg = 0; + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + mac_reg = er32(RAL(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), + (u16)((mac_reg >> 16) & 0xFFFF)); + + mac_reg = er32(RAH(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), + (u16)((mac_reg & E1000_RAH_AV) + >> 16)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation + * with 82579 PHY + * @hw: pointer to the HW structure + * @enable: flag to enable/disable workaround when enabling/disabling jumbos + **/ +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) +{ + s32 ret_val = 0; + u16 phy_reg, data; + u32 mac_reg; + u16 i; + + if (hw->mac.type < e1000_pch2lan) + goto out; + + /* disable Rx path while enabling/disabling workaround */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); + if (ret_val) + goto out; + + if (enable) { + /* + * Write Rx addresses (rar_entry_count for RAL/H, +4 for + * SHRAL/H) and initial CRC values to the MAC + */ + for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + u8 mac_addr[ETH_ALEN] = {0}; + u32 addr_high, addr_low; + + addr_high = er32(RAH(i)); + if (!(addr_high & E1000_RAH_AV)) + continue; + addr_low = er32(RAL(i)); + mac_addr[0] = (addr_low & 0xFF); + mac_addr[1] = ((addr_low >> 8) & 0xFF); + mac_addr[2] = ((addr_low >> 16) & 0xFF); + mac_addr[3] = ((addr_low >> 24) & 0xFF); + mac_addr[4] = (addr_high & 0xFF); + mac_addr[5] = ((addr_high >> 8) & 0xFF); + + ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); + } + + /* Write Rx addresses to the PHY */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + /* Enable jumbo frame workaround in the MAC */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(1 << 14); + mac_reg |= (7 << 15); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg |= E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + goto out; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data | (1 << 0)); + if (ret_val) + goto out; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + goto out; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + goto out; + + /* Enable 
jumbo frame workaround in the PHY */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + data |= (0x37 << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data &= ~(1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x1A << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + goto out; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100); + if (ret_val) + goto out; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); + if (ret_val) + goto out; + } else { + /* Write MAC register values back to h/w defaults */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(0xF << 14); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg &= ~E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + goto out; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data & ~(1 << 0)); + if (ret_val) + goto out; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + goto out; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + goto out; + + /* Write PHY register values back to h/w defaults */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data |= (1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + goto out; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x8 << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + goto out; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); + if (ret_val) + goto out; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); + if (ret_val) + goto out; + } + + /* re-enable Rx path after enabling/disabling workaround */ + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); + +out: + return ret_val; +} + +/** + * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. 
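+ * @hw: pointer to the HW structure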
+ **/ +static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (hw->mac.type < e1000_pch2lan) + goto out; + + /* Set MDIO slow mode before any other MDIO access */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + +out: + return ret_val; +} + +/** + * e1000_k1_workaround_lv - K1 Si workaround + * @hw: pointer to the HW structure + * + * Workaround to set the K1 beacon duration for 82579 parts + **/ +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 status_reg = 0; + u32 mac_reg; + u16 phy_reg; + + if (hw->mac.type != e1000_pch2lan) + goto out; + + /* Set K1 beacon duration based on 1Gbps speed or otherwise */ + ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); + if (ret_val) + goto out; + + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + + ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); + if (ret_val) + goto out; + + if (status_reg & HV_M_STATUS_SPEED_1000) { + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; + phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + } else { + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; + phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + } + ew32(FEXTNVM4, mac_reg); + ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); + } + +out: + return ret_val; +} + +/** + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware + * @hw: pointer to the HW structure + * @gate: boolean set to true to gate, false to ungate + * + * Gate/ungate the automatic PHY configuration via hardware; perform + * the configuration via software instead. + **/ +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) +{ + u32 extcnf_ctrl; + + if (hw->mac.type != e1000_pch2lan) + return; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (gate) + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + else + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; + + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_lan_init_done_ich8lan - Check for PHY config completion + * @hw: pointer to the HW structure + * + * Check the appropriate indication that the MAC has finished configuring the + * PHY after a software reset. + **/ +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) +{ + u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; + + /* Wait for basic configuration to complete before proceeding */ + do { + data = er32(STATUS); + data &= E1000_STATUS_LAN_INIT_DONE; + udelay(100); + } while ((!data) && --loop); + + /* + * If basic configuration is incomplete before the above loop + * count reaches 0, loading the configuration from NVM will + * leave the PHY in a bad state possibly resulting in no link. 
+ */ + if (loop == 0) + e_dbg("LAN_INIT_DONE not set, increase timeout\n"); + + /* Clear the Init Done bit for the next init event */ + data = er32(STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + ew32(STATUS, data); +} + +/** + * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset + * @hw: pointer to the HW structure + **/ +static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 reg; + + if (e1000_check_reset_block(hw)) + goto out; + + /* Allow time for h/w to get to quiescent state after reset */ + usleep_range(10000, 20000); + + /* Perform any necessary post-reset workarounds */ + switch (hw->mac.type) { + case e1000_pchlan: + ret_val = e1000_hv_phy_workarounds_ich8lan(hw); + if (ret_val) + goto out; + break; + case e1000_pch2lan: + ret_val = e1000_lv_phy_workarounds_ich8lan(hw); + if (ret_val) + goto out; + break; + default: + break; + } + + /* Clear the host wakeup bit after lcd reset */ + if (hw->mac.type >= e1000_pchlan) { + e1e_rphy(hw, BM_PORT_GEN_CFG, &reg); + reg &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, reg); + } + + /* Configure the LCD with the extended configuration region in NVM */ + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + goto out; + + /* Configure the LCD with the OEM bits in NVM */ + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + + if (hw->mac.type == e1000_pch2lan) { + /* Ungate automatic PHY configuration on non-managed 82579 */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + /* Set EEE LPI Update Timer to 200usec */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, + I82579_LPI_UPDATE_TIMER); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, + 0x1387); +release: + hw->phy.ops.release(hw); + } + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY. + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + return ret_val; + + return e1000_post_phy_reset_ich8lan(hw); +} + +/** + * e1000_set_lplu_state_pchlan - Set Low Power Link Up state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU state according to the active flag. For PCH, if the OEM write + * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set + * the phy speed. This function will manually set the LPLU bit and restart + * auto-neg as hw would do. D3 and D0 LPLU will call the same function + * since it configures the same bit. 
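+ * Setting HV_OEM_BITS_RESTART_AN along with the LPLU bit forces the PHY to
+ * re-negotiate, so the new power setting takes effect without waiting for
+ * the next link transition.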
+ **/ +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val = 0; + u16 oem_reg; + + ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + if (phy->type == e1000_phy_ife) + return ret_val; + + phy_ctrl = er32(PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val; + u16 data; + + phy_ctrl = er32(PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* + * Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return 0; +} + +/** + * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 + * @hw: pointer to the HW structure + * @bank: pointer to the variable that returns the active bank + * + * Reads signature byte from the NVM using the flash access registers. + * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
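+ *
+ * For example, bank 0 is probed at byte offset E1000_ICH_NVM_SIG_WORD * 2
+ * + 1 = 0x27 (the high byte of word 0x13), and the bank is valid when
+ * (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE,
+ * i.e. bits 7:6 of that byte read 10b.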
+ **/ +static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) +{ + u32 eecd; + struct e1000_nvm_info *nvm = &hw->nvm; + u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); + u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u8 sig_byte = 0; + s32 ret_val = 0; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + eecd = er32(EECD); + if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == + E1000_EECD_SEC1VAL_VALID_MASK) { + if (eecd & E1000_EECD_SEC1VAL) + *bank = 1; + else + *bank = 0; + + return 0; + } + e_dbg("Unable to determine valid NVM bank via EEC - " + "reading flash signature\n"); + fallthrough; + default: + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return 0; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + + bank1_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return 0; + } + + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. + **/ +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u16 i, word; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + for (i = 0; i < words; i++) { + if (dev_spec->shadow_ram[offset+i].modified) { + data[i] = dev_spec->shadow_ram[offset+i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (hsfsts.hsf_status.fldesvalid == 0) { + e_dbg("Flash descriptor invalid. 
" + "SW Sequencing must be used.\n"); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + /* + * Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after hardware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (hsfsts.hsf_status.flcinprog == 0) { + /* + * There is no cycle running at present, + * so we can start a cycle. + * Begin by setting Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + ret_val = 0; + } else { + s32 i = 0; + + /* + * Otherwise poll for sometime so the current + * cycle has a chance to end before giving up. + */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcinprog == 0) { + ret_val = 0; + break; + } + udelay(1); + } + if (ret_val == 0) { + /* + * Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + } else { + e_dbg("Flash controller busy, cannot get access\n"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. + **/ +static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + u32 i = 0; + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone == 1) + break; + udelay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) + return 0; + + return ret_val; +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_byte_ich8lan - Read byte from flash + * @hw: pointer to the HW structure + * @offset: The offset of the byte to read. + * @data: Pointer to a byte to store the value read. + * + * Reads a single byte from the NVM using the flash access registers. 
+ **/ +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data) +{ + s32 ret_val; + u16 word = 0; + + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + if (ret_val) + return ret_val; + + *data = (u8)word; + + return 0; +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != 0) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* + * Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (ret_val == 0) { + flash_data = er32flash(ICH_FLASH_FDATA0); + if (size == 1) + *data = (u8)(flash_data & 0x000000FF); + else if (size == 2) + *data = (u16)(flash_data & 0x0000FFFF); + break; + } else { + /* + * If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) { + /* Repeat for some time before giving up. */ + continue; + } else if (hsfsts.hsf_status.flcdone == 0) { + e_dbg("Timeout error - flash cycle " + "did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. 
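+ *
+ * In this implementation the words only land in the driver's shadow RAM;
+ * they are committed to flash by a later checksum update. Hedged sketch of
+ * the expected sequence via the ops table (word_offset is a placeholder):
+ *
+ *    u16 val = 0x1234;                             /* example value */
+ *    hw->nvm.ops.write(hw, word_offset, 1, &val);  /* cached in shadow RAM */
+ *    hw->nvm.ops.update(hw);                       /* commits to flash */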
+ **/ +static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + nvm->ops.acquire(hw); + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset+i].modified = true; + dev_spec->shadow_ram[offset+i].value = data[i]; + } + + nvm->ops.release(hw); + + return 0; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* + * We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + /* + * Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM + */ + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, i + + old_bank_offset, + &data); + if (ret_val) + break; + } + + /* + * If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + udelay(100); + /* Write the bytes to the new bank. */ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + udelay(100); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* + * Don't bother writing the segment valid bits if sector + * programming failed. 
+ */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* + * Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); + if (ret_val) + goto release; + + data &= 0xBFFF; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset * 2 + 1, + (u8)(data >> 8)); + if (ret_val) + goto release; + + /* + * And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* + * Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + e1000e_reload_nvm(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. + * If the bit is 0, that the EEPROM had been modified, but the checksum was not + * calculated, in which case we need to calculate the checksum and set bit 6. + **/ +static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + /* + * Read 0x19 and check bit 6. If this bit is 0, the checksum + * needs to be fixed. This bit is an indication that the NVM + * was prepared by OEM software and did not calculate the + * checksum...a likely scenario. + */ + ret_val = e1000_read_nvm(hw, 0x19, 1, &data); + if (ret_val) + return ret_val; + + if ((data & 0x40) == 0) { + data |= 0x40; + ret_val = e1000_write_nvm(hw, 0x19, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only + * @hw: pointer to the HW structure + * + * To prevent malicious write/erase of the NVM, set it to be read-only + * so that the hardware ignores all write/erase cycles of the NVM via + * the flash control registers. The shadow-ram copy of the NVM will + * still be updated, however any updates to this copy will not stick + * across driver reloads. 
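+ *
+ * Sketch of the range math used below (assuming GFPREG encodes the GbE
+ * region's first and last flash sectors in its low and high words; the
+ * sector-unit interpretation is an assumption, not stated in this file):
+ *
+ *    pr0.range.base  = gfpreg & FLASH_GFPREG_BASE_MASK;          /* first */
+ *    pr0.range.limit = (gfpreg >> 16) & FLASH_GFPREG_BASE_MASK;  /* last */
+ *    pr0.range.wpe   = true;                /* write-protection enable */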
+ **/ +void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_flash_protected_range pr0; + union ich8_hws_flash_status hsfsts; + u32 gfpreg; + + nvm->ops.acquire(hw); + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* Write-protect GbE Sector of NVM */ + pr0.regval = er32flash(ICH_FLASH_PR0); + pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK; + pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK); + pr0.range.wpe = true; + ew32flash(ICH_FLASH_PR0, pr0.regval); + + /* + * Lock down a subset of GbE Flash Control Registers, e.g. + * PR0 to prevent the write-protection from being lifted. + * Once FLOCKDN is set, the registers protected by it cannot + * be written until FLOCKDN is cleared by a hardware reset. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + hsfsts.hsf_status.flockdn = true; + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + nvm->ops.release(hw); +} + +/** + * e1000_write_flash_data_ich8lan - Writes bytes to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte/word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: The byte(s) to write to the NVM. + * + * Writes one/two bytes to the NVM using the flash access registers. + **/ +static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val; + u8 count = 0; + + if (size < 1 || size > 2 || data > size * 0xff || + offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size -1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + if (size == 1) + flash_data = (u32)data & 0x00FF; + else + flash_data = (u32)data; + + ew32flash(ICH_FLASH_FDATA0, flash_data); + + /* + * check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + if (!ret_val) + break; + + /* + * If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) + /* Repeat for some time before giving up. */ + continue; + if (hsfsts.hsf_status.flcdone == 0) { + e_dbg("Timeout error - flash cycle " + "did not complete."); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_flash_byte_ich8lan - Write a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The index of the byte to read. + * @data: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. 
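+ *
+ * A 16-bit shadow-RAM word is committed as two single-byte programs; sketch
+ * of the pattern used by e1000_update_nvm_checksum_ich8lan() above (via the
+ * retrying wrapper defined below):
+ *
+ *    e1000_retry_write_flash_byte_ich8lan(hw, act_offset, (u8)data);
+ *    e1000_retry_write_flash_byte_ich8lan(hw, act_offset + 1, (u8)(data >> 8));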
+ **/ +static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); + udelay(100); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. + **/ +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 j, iteration, sector_size; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* + * Determine HW Sector size: Read BERASE bits of hw flash status + * register + * 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. + * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = 1; + break; + case 2: + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = 1; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? flash_bank_size : 0; + + for (j = 0; j < iteration ; j++) { + do { + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* + * Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control + */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* + * Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. 
+ */ + flash_linear_addr += (j * sector_size); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_ERASE_COMMAND_TIMEOUT); + if (ret_val == 0) + break; + + /* + * Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) + /* repeat for some time before giving up */ + continue; + else if (hsfsts.hsf_status.flcdone == 0) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return 0; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. + **/ +static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT_ICH8LAN; + + return 0; +} + +/** + * e1000_id_led_init_pchlan - store LED configurations + * @hw: pointer to the HW structure + * + * PCH does not control LEDs via the LEDCTL register, rather it uses + * the PHY LED configuration register. + * + * PCH also does not have an "always on" or "always off" mode which + * complicates the ID feature. Instead of using the "on" mode to indicate + * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()), + * use "link_up" mode. The LEDs will still ID on request if there is no + * link based on logic in e1000_led_[on|off]_pchlan(). 
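+ *
+ * The NVM word packs one 4-bit mode nibble per LED while HV_LED_CONFIG uses
+ * 5-bit fields per LED, hence the two strides in the loop below (sketch):
+ *
+ *    temp  = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; /* nibble i */
+ *    shift = i * 5;                         /* 5 register bits per LED */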
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+ u16 data, i, temp, shift;
+
+ /* Get default ID LED modes */
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+ if (ret_val)
+ goto out;
+
+ mac->ledctl_default = er32(LEDCTL);
+ mac->ledctl_mode1 = mac->ledctl_default;
+ mac->ledctl_mode2 = mac->ledctl_default;
+
+ for (i = 0; i < 4; i++) {
+ temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+ shift = (i * 5);
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_on << shift);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode1 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_on << shift);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+ mac->ledctl_mode2 |= (ledctl_off << shift);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ * @hw: pointer to the HW structure
+ *
+ * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
+ * register, so the bus width is hard coded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ s32 ret_val;
+
+ ret_val = e1000e_get_bus_info_pcie(hw);
+
+ /*
+ * ICH devices are "PCI Express"-ish. They have
+ * a configuration space, but do not contain
+ * PCI Express Capability registers, so bus width
+ * must be hardcoded.
+ */
+ if (bus->width == e1000_bus_width_unknown)
+ bus->width = e1000_bus_width_pcie_x1;
+
+ return ret_val;
+}
+
+/**
+ * e1000_reset_hw_ich8lan - Reset the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Does a full reset of the hardware which includes a reset of the PHY and
+ * MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u16 reg;
+ u32 ctrl, kab;
+ s32 ret_val;
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000e_disable_pcie_master(hw);
+ if (ret_val)
+ e_dbg("PCI-E Master disable polling has failed.\n");
+
+ e_dbg("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /*
+ * Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC
+ * with the global reset.
+ */
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ e1e_flush();
+
+ usleep_range(10000, 20000);
+
+ /* Workaround for ICH8 bit corruption issue in FIFO memory */
+ if (hw->mac.type == e1000_ich8lan) {
+ /* Set Tx and Rx buffer allocation to 8k apiece. */
+ ew32(PBA, E1000_PBA_8K);
+ /* Set Packet Buffer Size to 16k. */
+ ew32(PBS, E1000_PBS_16K);
+ }
+
+ if (hw->mac.type == e1000_pchlan) {
+ /* Save the NVM K1 bit setting */
+ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
+ if (ret_val)
+ return ret_val;
+
+ if (reg & E1000_NVM_K1_ENABLE)
+ dev_spec->nvm_k1_enabled = true;
+ else
+ dev_spec->nvm_k1_enabled = false;
+ }
+
+ ctrl = er32(CTRL);
+
+ if (!e1000_check_reset_block(hw)) {
+ /*
+ * Full-chip reset requires MAC and PHY reset at the same
+ * time to make sure the interface between MAC and the
+ * external PHY is reset.
+ */
+ ctrl |= E1000_CTRL_PHY_RST;
+
+ /*
+ * Gate automatic PHY configuration by hardware on
+ * non-managed 82579
+ */
+ if ((hw->mac.type == e1000_pch2lan) &&
+ !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+ }
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+ e_dbg("Issuing a global reset to ich8lan\n");
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ /* cannot issue a flush here because it hangs the hardware */
+ msleep(20);
+
+ if (!ret_val)
+ clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+
+ if (ctrl & E1000_CTRL_PHY_RST) {
+ ret_val = hw->phy.ops.get_cfg_done(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * For PCH, this write will make sure that any noise
+ * will be detected as a CRC error and be dropped rather than show up
+ * as a bad packet to the DMA engine.
+ */
+ if (hw->mac.type == e1000_pchlan)
+ ew32(CRC_OFFSET, 0x65656565);
+
+ ew32(IMC, 0xffffffff);
+ er32(ICR);
+
+ kab = er32(KABGTXD);
+ kab |= E1000_KABGTXD_BGSQLBIAS;
+ ew32(KABGTXD, kab);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_hw_ich8lan - Initialize the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Prepares the hardware for transmit and receive by doing the following:
+ * - initialize hardware bits
+ * - initialize LED identification
+ * - setup receive address registers
+ * - setup flow control
+ * - setup transmit descriptors
+ * - clear statistics
+ **/
+static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 ctrl_ext, txdctl, snoop;
+ s32 ret_val;
+ u16 i;
+
+ e1000_initialize_hw_bits_ich8lan(hw);
+
+ /* Initialize identification LED */
+ ret_val = mac->ops.id_led_init(hw);
+ if (ret_val)
+ e_dbg("Error initializing identification LED\n");
+ /* This is not fatal and we should not stop init due to this */
+
+ /* Setup the receive address. */
+ e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+ /* Zero out the Multicast HASH table */
+ e_dbg("Zeroing the MTA\n");
+ for (i = 0; i < mac->mta_reg_count; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+ /*
+ * The 82578 Rx buffer will stall if wakeup is enabled in host and
+ * the ME. Disable wakeup by clearing the host wakeup bit.
+ * Reset the phy after disabling host wakeup to reset the Rx buffer.
+ */ + if (hw->phy.type == e1000_phy_82578) { + e1e_rphy(hw, BM_PORT_GEN_CFG, &i); + i &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, i); + ret_val = e1000_phy_hw_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* Setup link and flow control */ + ret_val = e1000_setup_link_ich8lan(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = er32(TXDCTL(0)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL(0), txdctl); + txdctl = er32(TXDCTL(1)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL(1), txdctl); + + /* + * ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. + */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32) ~(PCIE_NO_SNOOP_ALL); + e1000e_set_pcie_no_snoop(hw, snoop); + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return 0; +} +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. + **/ +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Extended Device Control */ + reg = er32(CTRL_EXT); + reg |= (1 << 22); + /* Enable PHY low-power state when MAC is at D3 w/o WoL */ + if (hw->mac.type >= e1000_pchlan) + reg |= E1000_CTRL_EXT_PHYPDEN; + ew32(CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + ew32(TARC(1), reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = er32(STATUS); + reg &= ~(1 << 31); + ew32(STATUS, reg); + } + + /* + * work-around descriptor data corruption issue during nfs v2 udp + * traffic, just disable the nfs filtering capability + */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + ew32(RFCTL, reg); +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. 
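+ *
+ * When no mode was explicitly requested, the default resolves as in this
+ * sketch of the logic below:
+ *
+ *    if (hw->fc.requested_mode == e1000_fc_default)
+ *        hw->fc.requested_mode = (hw->mac.type == e1000_pchlan) ?
+ *            e1000_fc_rx_pause :  /* Tx flow control hangs PCH h/w */
+ *            e1000_fc_full;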
+ **/ +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + + if (e1000_check_reset_block(hw)) + return 0; + + /* + * ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + /* Workaround h/w hang when Tx flow control enabled */ + if (hw->mac.type == e1000_pchlan) + hw->fc.requested_mode = e1000_fc_rx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + } + + /* + * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Continue to configure the copper link. */ + ret_val = e1000_setup_copper_link_ich8lan(hw); + if (ret_val) + return ret_val; + + ew32(FCTTV, hw->fc.pause_time); + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ew32(FCRTV_PCH, hw->fc.refresh_time); + + ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), + hw->fc.pause_time); + if (ret_val) + return ret_val; + } + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* + * Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. 
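+ *
+ * That is, program the Kumeran timeouts register to its maximum and widen
+ * the inband parameter, as done immediately below (sketch):
+ *
+ *    e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
+ *    reg_data |= 0x3F;    /* raise the polling iteration count */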
+ */
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->phy.type) {
+ case e1000_phy_igp_3:
+ ret_val = e1000e_copper_link_setup_igp(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_bm:
+ case e1000_phy_82578:
+ ret_val = e1000e_copper_link_setup_m88(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_82577:
+ case e1000_phy_82579:
+ case e1000_phy_i217:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ case e1000_phy_ife:
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+ switch (hw->phy.mdix) {
+ case 1:
+ reg_data &= ~IFE_PMC_FORCE_MDIX;
+ break;
+ case 2:
+ reg_data |= IFE_PMC_FORCE_MDIX;
+ break;
+ case 0:
+ default:
+ reg_data |= IFE_PMC_AUTO_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ break;
+ }
+ return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to store current link speed
+ * @duplex: pointer to store the current link duplex
+ *
+ * Calls the generic get_speed_and_duplex to retrieve the current link
+ * information and then calls the Kumeran lock loss workaround for links at
+ * gigabit speeds.
+ **/
+static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac.type == e1000_ich8lan) &&
+ (hw->phy.type == e1000_phy_igp_3) &&
+ (*speed == SPEED_1000)) {
+ ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ * @hw: pointer to the HW structure
+ *
+ * Work-around for 82566 Kumeran PCS lock loss:
+ * On link status change (i.e. PCI reset, speed change) and link is up and
+ * speed is gigabit-
+ * 0) if workaround is optionally disabled do nothing
+ * 1) wait 1ms for Kumeran link to come up
+ * 2) check Kumeran Diagnostic register PCS lock loss bit
+ * 3) if not set the link is locked (all is good), otherwise...
+ * 4) reset the PHY
+ * 5) repeat up to 10 times
+ * Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 phy_ctrl;
+ s32 ret_val;
+ u16 i, data;
+ bool link;
+
+ if (!dev_spec->kmrn_lock_loss_workaround_enabled)
+ return 0;
+
+ /*
+ * Make sure link is up before proceeding. If not just return.
+ * Attempting this while link is negotiating fouled up link + * stability + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (!link) + return 0; + + for (i = 0; i < 10; i++) { + /* read once to clear */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + /* and again to get new status */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + + /* check for PCS lock */ + if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) + return 0; + + /* Issue PHY reset */ + e1000_phy_hw_reset(hw); + mdelay(5); + } + /* Disable GigE link negotiation */ + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, phy_ctrl); + + /* + * Call gig speed drop workaround on Gig disable before accessing + * any PHY registers + */ + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* unable to acquire PCS lock */ + return -E1000_ERR_PHY; +} + +/** + * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state + * @hw: pointer to the HW structure + * @state: boolean value used to set the current Kumeran workaround state + * + * If ICH8, set the current Kumeran workaround state (enabled - true + * /disabled - false). + **/ +void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + + if (hw->mac.type != e1000_ich8lan) { + e_dbg("Workaround applies to ICH8 only.\n"); + return; + } + + dev_spec->kmrn_lock_loss_workaround_enabled = state; +} + +/** + * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 + * @hw: pointer to the HW structure + * + * Workaround for 82566 power-down on D3 entry: + * 1) disable gigabit link + * 2) write VR power-down enable + * 3) read it back + * Continue if successful, else issue LCD reset and repeat + **/ +void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + u16 data; + u8 retry = 0; + + if (hw->phy.type != e1000_phy_igp_3) + return; + + /* Try the workaround twice (if needed) */ + do { + /* Disable link */ + reg = er32(PHY_CTRL); + reg |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, reg); + + /* + * Call gig speed drop workaround on Gig disable before + * accessing any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* Write VR power-down enable */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); + + /* Read it back and test */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) + break; + + /* Issue PHY reset and repeat at most one more time */ + reg = er32(CTRL); + ew32(CTRL, reg | E1000_CTRL_PHY_RST); + retry++; + } while (retry); +} + +/** + * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working + * @hw: pointer to the HW structure + * + * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), + * LPLU, Gig disable, MDIC PHY reset): + * 1) Set Kumeran Near-end loopback + * 2) Clear Kumeran Near-end loopback + * Should only be called for ICH8[m] devices with any 1G Phy. 
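+ *
+ * The body reduces to a set/clear toggle of the near-end loopback bit
+ * (sketch of the code below):
+ *
+ *    e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, &reg_data);
+ *    e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ *                          reg_data | E1000_KMRNCTRLSTA_DIAG_NELPBK);
+ *    e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ *                          reg_data & ~E1000_KMRNCTRLSTA_DIAG_NELPBK);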
+ **/
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data;
+
+ if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
+ return;
+
+ ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ &reg_data);
+ if (ret_val)
+ return;
+ reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ reg_data);
+ if (ret_val)
+ return;
+ reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+ reg_data);
+}
+
+/**
+ * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
+ * @hw: pointer to the HW structure
+ *
+ * During S0 to Sx transition, it is possible the link remains at gig
+ * instead of negotiating to a lower speed. Before going to Sx, set
+ * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
+ * to a lower speed. For PCH and newer parts, the OEM bits PHY register
+ * (LED, GbE disable and LPLU configurations) also needs to be written.
+ * Parts that support (and are linked to a partner which supports) EEE in
+ * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
+ * than 10Mbps w/o EEE.
+ **/
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 phy_ctrl;
+ s32 ret_val;
+
+ phy_ctrl = er32(PHY_CTRL);
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
+
+ if (hw->phy.type == e1000_phy_i217) {
+ u16 phy_reg;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (!dev_spec->eee_disable) {
+ u16 eee_advert;
+
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+ I217_EEE_ADVERTISEMENT);
+ if (ret_val)
+ goto release;
+ e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
+
+ /* Disable LPLU if both link partners support 100BaseT
+ * EEE and 100Full is advertised on both ends of the
+ * link.
+ */
+ if ((eee_advert & I217_EEE_100_SUPPORTED) &&
+ (dev_spec->eee_lp_ability &
+ I217_EEE_100_SUPPORTED) &&
+ (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
+ phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_NOND0A_LPLU);
+ }
+
+ /* For i217 Intel Rapid Start Technology support,
+ * when the system is going into Sx and no manageability engine
+ * is present, the driver must configure proxy to reset only on
+ * power good. LPI (Low Power Idle) state must also reset only
+ * on power good, as well as the MTA (Multicast table array).
+ * The SMBus release must also be disabled on LCD reset.
+ */
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+
+ /* Enable proxy to reset only on power good. */
+ e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
+ phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
+ e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
+
+ /* Set bit enable LPI (EEE) to reset only on
+ * power good.
+ */
+ e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
+ phy_reg |= I217_SxCTRL_MASK;
+ e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
+
+ /* Disable the SMB release on LCD reset.
*/ + e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + phy_reg &= ~I217_MEMPWR; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + } + + /* Enable MTA to reset for Intel Rapid Start Technology + * Support + */ + e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + phy_reg |= I217_CGFREG_MASK; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); + +release: + hw->phy.ops.release(hw); + } +out: + ew32(PHY_CTRL, phy_ctrl); + + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + if (hw->mac.type >= e1000_pchlan) { + e1000_oem_bits_config_ich8lan(hw, false); + e1000_phy_hw_reset_ich8lan(hw); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + e1000_write_smbus_addr(hw); + hw->phy.ops.release(hw); + } +} + +/** + * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 + * @hw: pointer to the HW structure + * + * During Sx to S0 transitions on non-managed devices or managed devices + * on which PHY resets are not blocked, if the PHY registers cannot be + * accessed properly by the s/w toggle the LANPHYPC value to power cycle + * the PHY. + * On i217, setup Intel Rapid Start Technology. + **/ +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + if (hw->mac.type != e1000_pch2lan) + return; + + fwsm = er32(FWSM); + if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) { + u16 phy_id1, phy_id2; + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to acquire PHY semaphore in resume\n"); + return; + } + + /* For i217 Intel Rapid Start Technology support when the system + * is transitioning from Sx and no manageability engine is present + * configure SMBus to restore on reset, disable proxy, and enable + * the reset on MTA (Multicast table array). + */ + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to setup iRST\n"); + return; + } + + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Restore clear on SMB if no manageability engine + * is present + */ + ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + if (ret_val) + goto _release; + phy_reg |= I217_MEMPWR_MASK; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + + /* Disable Proxy */ + e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); + } + /* Enable reset on MTA */ + ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + if (ret_val) + goto _release; + phy_reg &= ~I217_CGFREG_MASK; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); + _release: + if (ret_val) + e_dbg("Error %d in resume workarounds\n", ret_val); + hw->phy.ops.release(hw); + } + + /* Test access to the PHY registers by reading the ID regs */ + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); + if (ret_val) + goto release; + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); + if (ret_val) + goto release; + + if (hw->phy.id == ((u32)(phy_id1 << 16) | + (u32)(phy_id2 & PHY_REVISION_MASK))) + goto release; + + e1000_toggle_lanphypc_value_ich8lan(hw); + + hw->phy.ops.release(hw); + msleep(50); + e1000_phy_hw_reset(hw); + msleep(50); + return; + } + +release: + hw->phy.ops.release(hw); + + return; +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. 
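+ *
+ * IFE PHYs drive their LEDs from a PHY register rather than LEDCTL, so the
+ * restore path differs by PHY type (sketch of the body below):
+ *
+ *    if (hw->phy.type == e1000_phy_ife)
+ *        e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+ *    else
+ *        ew32(LEDCTL, hw->mac.ledctl_default);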
+ **/ +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000_led_on_ich8lan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); + + ew32(LEDCTL, hw->mac.ledctl_mode2); + return 0; +} + +/** + * e1000_led_off_ich8lan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | + IFE_PSCL_PROBE_LEDS_OFF)); + + ew32(LEDCTL, hw->mac.ledctl_mode1); + return 0; +} + +/** + * e1000_setup_led_pchlan - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use. + **/ +static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) +{ + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); +} + +/** + * e1000_cleanup_led_pchlan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) +{ + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); +} + +/** + * e1000_led_on_pchlan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +static s32 e1000_led_on_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode2; + u32 i, led; + + /* + * If no link, then turn LED on by setting the invert bit + * for each LED that's mode is "link_up" in ledctl_mode2. + */ + if (!(er32(STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return e1e_wphy(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_led_off_pchlan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +static s32 e1000_led_off_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode1; + u32 i, led; + + /* + * If no link, then turn LED off by clearing the invert bit + * for each LED that's mode is "link_up" in ledctl_mode1. + */ + if (!(er32(STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return e1e_wphy(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset + * @hw: pointer to the HW structure + * + * Read appropriate register for the config done bit for completion status + * and configure the PHY through s/w for EEPROM-less parts. + * + * NOTE: some silicon which is EEPROM-less will fail trying to read the + * config done bit, so only an error is logged and continues. 
If we were + * to return with error, EEPROM-less silicon would not be able to be reset + * or change link. + **/ +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 bank = 0; + u32 status; + + e1000e_get_cfg_done(hw); + + /* Wait for indication from h/w that it has completed basic config */ + if (hw->mac.type >= e1000_ich10lan) { + e1000_lan_init_done_ich8lan(hw); + } else { + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + e_dbg("Auto Read Done did not complete\n"); + ret_val = 0; + } + } + + /* Clear PHY Reset Asserted bit */ + status = er32(STATUS); + if (status & E1000_STATUS_PHYRA) + ew32(STATUS, status & ~E1000_STATUS_PHYRA); + else + e_dbg("PHY Reset Asserted not set - needs delay\n"); + + /* If EEPROM is not marked present, init the IGP 3 PHY manually */ + if (hw->mac.type <= e1000_ich9lan) { + if (((er32(EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) { + e1000e_phy_init_script_igp3(hw); + } + } else { + if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { + /* Maybe we should do a basic PHY config */ + e_dbg("EEPROM not present\n"); + ret_val = -E1000_ERR_CONFIG; + } + } + + return ret_val; +} + +/** + * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. 
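+ *
+ * The MAC statistics registers are clear-on-read (cf. the comment at the
+ * end of e1000_init_hw_ich8lan()), so a discarded read suffices (sketch):
+ *
+ *    er32(ALGNERRC);    /* the read itself zeroes the counter */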
+ **/ +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u16 phy_data; + s32 ret_val; + + e1000e_clear_hw_cntrs_base(hw); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + /* Clear PHY statistics registers */ + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); +release: + hw->phy.ops.release(hw); + } +} + +static const struct e1000_mac_operations ich8_mac_ops = { + .id_led_init = e1000e_id_led_init, + /* check_mng_mode dependent on mac type */ + .check_for_link = e1000_check_for_copper_link_ich8lan, + /* cleanup_led dependent on mac type */ + .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, + .get_bus_info = e1000_get_bus_info_ich8lan, + .set_lan_id = e1000_set_lan_id_single_port, + .get_link_up_info = e1000_get_link_up_info_ich8lan, + /* led_on dependent on mac type */ + /* led_off dependent on mac type */ + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .reset_hw = e1000_reset_hw_ich8lan, + .init_hw = e1000_init_hw_ich8lan, + .setup_link = e1000_setup_link_ich8lan, + .setup_physical_interface= e1000_setup_copper_link_ich8lan, + /* id_led_init dependent on mac type */ +}; + +static const struct e1000_phy_operations ich8_phy_ops = { + .acquire = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit = NULL, + .get_cfg_done = e1000_get_cfg_done_ich8lan, + .get_cable_length = e1000e_get_cable_length_igp_2, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_release_swflag_ich8lan, + .reset = e1000_phy_hw_reset_ich8lan, + .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, + .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, + .write_reg = e1000e_write_phy_reg_igp, +}; + +static const struct e1000_nvm_operations ich8_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .read = e1000_read_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .update = e1000_update_nvm_checksum_ich8lan, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + +const struct e1000_info e1000_ich8_info = { + .mac = e1000_ich8lan, + .flags = FLAG_HAS_WOL + | FLAG_IS_ICH + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 8, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants 
= e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_ich9_info = { + .mac = e1000_ich9lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_ERT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 10, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_ich10_info = { + .mac = e1000_ich10lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_ERT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 10, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_info = { + .mac = e1000_pchlan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS, + .pba = 26, + .max_hw_frame_size = 4096, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch2_info = { + .mac = e1000_pch2lan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_lpt_info = { + .mac = e1000_pch_lpt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c new file mode 100644 index 0000000..fbdccdc --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c @@ -0,0 +1,2693 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +/** + * e1000e_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + struct e1000_adapter *adapter = hw->adapter; + u16 pcie_link_status, cap_offset; + + cap_offset = pci_pcie_cap(adapter->pdev); + if (!cap_offset) { + bus->width = e1000_bus_width_unknown; + } else { + pci_read_config_word(adapter->pdev, + cap_offset + PCIE_LINK_STATUS, + &pcie_link_status); + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> + PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return 0; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* + * The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = er32(STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + e1e_flush(); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
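+ *
+ * A hypothetical caller setting the filter bit for VLAN id "vid" would
+ * compute (illustrative sketch; assumes the usual 128 x 32-bit table and
+ * that old_vfta holds the previously read register value):
+ *
+ *    u32 index = (vid >> 5) & 0x7F;  /* which 32-bit VFTA register */
+ *    u32 bit = 1u << (vid & 0x1F);   /* which bit within it */
+ *    e1000_write_vfta_generic(hw, index, old_vfta | bit);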
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+	e1e_flush();
+}
+
+/**
+ * e1000e_init_rx_addrs - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: receive address registers
+ *
+ * Sets up the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+	u8 mac_addr[ETH_ALEN] = {0};
+
+	/* Setup the receive address */
+	e_dbg("Programming MAC Address into RAR[0]\n");
+
+	e1000e_rar_set(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
+	for (i = 1; i < rar_count; i++)
+		e1000e_rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the NVM for an alternate MAC address.  An alternate MAC address
+ * can be set up by pre-boot software and must be treated like a permanent
+ * address, overriding the actual permanent MAC address.  If an alternate
+ * MAC address is found it is programmed into RAR0, replacing the permanent
+ * address that was installed into RAR0 by the Si on reset.  This function
+ * will return SUCCESS unless it encounters an error while reading the
+ * EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+	u32 i;
+	s32 ret_val = 0;
+	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+	u8 alt_mac_addr[ETH_ALEN];
+
+	ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+	if (ret_val)
+		goto out;
+
+	/* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+	if (!((nvm_data & NVM_COMPAT_LOM) ||
+	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
+	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
+		goto out;
+
+	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+				 &nvm_alt_mac_addr_offset);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+	    (nvm_alt_mac_addr_offset == 0x0000))
+		/* There is no Alternate MAC Address */
+		goto out;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+	for (i = 0; i < ETH_ALEN; i += 2) {
+		offset = nvm_alt_mac_addr_offset + (i >> 1);
+		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error\n");
+			goto out;
+		}
+
+		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+	}
+
+	/* if multicast bit is set, the alternate address will not be used */
+	if (is_multicast_ether_addr(alt_mac_addr)) {
+		e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+		goto out;
+	}
+
+	/*
+	 * We have a valid alternate MAC address, and we want to treat it the
+	 * same as the normal permanent MAC address stored by the HW into the
+	 * RAR. Do this by mapping this address into RAR0.
+	 */
+	e1000e_rar_set(hw, alt_mac_addr, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000e_rar_set - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
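+ *
+ * Worked example (editorial, not original Intel text): for the address
+ * 00:11:22:33:44:55 the code below packs RAL = 0x33221100 and
+ * RAH = 0x00005544, then ORs in E1000_RAH_AV to mark the entry valid.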
+ **/ +void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* + * Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); +} + +/** + * e1000_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * e1000_mta_set_generic() + **/ +static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* + * The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000e_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
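+ *
+ * Illustrative mapping (editorial; reuses the case-0 example from
+ * e1000_hash_mc_addr() above): hash_value 0x563 lands in register
+ * (0x563 >> 5) & (mta_reg_count - 1) = 0x2B at bit 0x563 & 0x1F = 0x03,
+ * which is exactly what the shadow-update loop below computes for a
+ * 128-register MTA.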
+ **/ +void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + e1e_flush(); +} + +/** + * e1000e_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + er32(MPTC); + er32(BPTC); +} + +/** + * e1000e_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000e_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return 0; + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return ret_val; /* No link detected */ + + mac->get_link_status = false; + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + return ret_val; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000e_config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000e_check_for_fiber_link - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return 0; +} + +/** + * e1000e_check_for_serdes_link - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. 
		 */
+		ret_val = e1000e_config_fc_after_link_up(hw);
+		if (ret_val) {
+			e_dbg("Error configuring flow control\n");
+			return ret_val;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+		ew32(TXCW, mac->txcw);
+		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = true;
+	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+		/*
+		 * If we force link for non-auto-negotiation switch, check
+		 * link status based on MAC synchronization for internal
+		 * serdes media type.
+		 */
+		/* SYNCH bit and IV bit are sticky. */
+		udelay(10);
+		rxcw = er32(RXCW);
+		if (rxcw & E1000_RXCW_SYNCH) {
+			if (!(rxcw & E1000_RXCW_IV)) {
+				mac->serdes_has_link = true;
+				e_dbg("SERDES: Link up - forced.\n");
+			}
+		} else {
+			mac->serdes_has_link = false;
+			e_dbg("SERDES: Link down - force failed.\n");
+		}
+	}
+
+	if (E1000_TXCW_ANE & er32(TXCW)) {
+		status = er32(STATUS);
+		if (status & E1000_STATUS_LU) {
+			/* SYNCH bit and IV bit are sticky, so reread rxcw. */
+			udelay(10);
+			rxcw = er32(RXCW);
+			if (rxcw & E1000_RXCW_SYNCH) {
+				if (!(rxcw & E1000_RXCW_IV)) {
+					mac->serdes_has_link = true;
+					e_dbg("SERDES: Link up - autoneg "
+					      "completed successfully.\n");
+				} else {
+					mac->serdes_has_link = false;
+					e_dbg("SERDES: Link down - invalid "
+					      "codewords detected in autoneg.\n");
+				}
+			} else {
+				mac->serdes_has_link = false;
+				e_dbg("SERDES: Link down - no sync.\n");
+			}
+		} else {
+			mac->serdes_has_link = false;
+			e_dbg("SERDES: Link down - autoneg failed\n");
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * e1000_set_default_fc_generic - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM for the default values for flow control and store the
+ * values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 nvm_data;
+
+	/*
+	 * Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+		hw->fc.requested_mode = e1000_fc_none;
+	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+		 NVM_WORD0F_ASM_DIR)
+		hw->fc.requested_mode = e1000_fc_tx_pause;
+	else
+		hw->fc.requested_mode = e1000_fc_full;
+
+	return 0;
+}
+
+/**
+ * e1000e_setup_link - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
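+ *
+ * Call-order sketch (editorial summary of the body below):
+ *   1. return early if the PHY reset is blocked (link is already up);
+ *   2. resolve e1000_fc_default from the NVM word via
+ *      e1000_set_default_fc_generic();
+ *   3. call the media-specific setup_physical_interface() op;
+ *   4. program the FCT/FCAH/FCAL registers and the FCTTV pause timer;
+ *   5. finish in e1000e_set_fc_watermarks().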
+ **/ +s32 e1000e_setup_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + + /* + * In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (e1000_check_reset_block(hw)) + return 0; + + /* + * If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* + * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = mac->ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc.pause_time); + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + /* + * Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. 
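+		 * (Editorial note: advertising ASM_DIR without the PAUSE bit
+		 * in TXCW is what encodes this asymmetric, transmit-only
+		 * setting.)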
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ew32(TXCW, txcw);
+	mac->txcw = txcw;
+
+	return 0;
+}
+
+/**
+ * e1000_poll_fiber_serdes_link_generic - Poll for link up
+ * @hw: pointer to the HW structure
+ *
+ * Polls for link up by reading the status register; if link fails to come
+ * up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 i, status;
+	s32 ret_val;
+
+	/*
+	 * If we have a signal (the cable is plugged in, or assumed true for
+	 * serdes media) then poll for a "Link-Up" indication in the Device
+	 * Status Register.  Time-out if a link isn't seen in 500 milliseconds
+	 * (Auto-negotiation should complete in less than 500 milliseconds
+	 * even if the other end is doing it in SW).
+	 */
+	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+		usleep_range(10000, 20000);
+		status = er32(STATUS);
+		if (status & E1000_STATUS_LU)
+			break;
+	}
+	if (i == FIBER_LINK_UP_LIMIT) {
+		e_dbg("Never got a valid link from auto-neg!!!\n");
+		mac->autoneg_failed = 1;
+		/*
+		 * AutoNeg failed to achieve a link, so we'll call
+		 * mac->check_for_link. This routine will force the
+		 * link up if we detect a signal. This will allow us to
+		 * communicate with non-autonegotiating link partners.
+		 */
+		ret_val = mac->ops.check_for_link(hw);
+		if (ret_val) {
+			e_dbg("Error while checking for link\n");
+			return ret_val;
+		}
+		mac->autoneg_failed = 0;
+	} else {
+		mac->autoneg_failed = 0;
+		e_dbg("Valid Link Found\n");
+	}
+
+	return 0;
+}
+
+/**
+ * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes
+ * links. Upon successful setup, poll for link.
+ **/
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+
+	/* Take the link out of reset */
+	ctrl &= ~E1000_CTRL_LRST;
+
+	e1000e_config_collision_dist(hw);
+
+	ret_val = e1000_commit_fc_settings_generic(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Since auto-negotiation is enabled, take the link out of reset (the
+	 * link will be in reset, because we previously reset the chip). This
+	 * will restart auto-negotiation.  If auto-negotiation is successful
+	 * then the link-up status bit will be set and the flow control enable
+	 * bits (RFCE and TFCE) will be set according to their negotiated value.
+	 */
+	e_dbg("Auto-negotiation enabled\n");
+
+	ew32(CTRL, ctrl);
+	e1e_flush();
+	usleep_range(1000, 2000);
+
+	/*
+	 * For these adapters, the SW definable pin 1 is set when the optics
+	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
+	 * indication.
+	 */
+	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
+		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+	} else {
+		e_dbg("No signal detected\n");
+	}
+
+	return 0;
+}
+
+/**
+ * e1000e_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
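+ * (Editorial aside: concretely, the body clears the TCTL.COLD field and
+ * ORs in E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT.)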
+ * Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000e_config_collision_dist(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+	tctl = er32(TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	ew32(TCTL, tctl);
+	e1e_flush();
+}
+
+/**
+ * e1000e_set_fc_watermarks - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers.  If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
+{
+	u32 fcrtl = 0, fcrth = 0;
+
+	/*
+	 * Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (hw->fc.current_mode & e1000_fc_tx_pause) {
+		/*
+		 * We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = hw->fc.low_water;
+		fcrtl |= E1000_FCRTL_XONE;
+		fcrth = hw->fc.high_water;
+	}
+	ew32(FCRTL, fcrtl);
+	ew32(FCRTH, fcrth);
+
+	return 0;
+}
+
+/**
+ * e1000e_force_mac_fc - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings.  TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC.  Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000e_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	ctrl = er32(CTRL);
+
+	/*
+	 * Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disables flow control
+	 * according to the "hw->fc.current_mode" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ew32(CTRL, ctrl);
+
+	return 0;
+}
+
+/**
+ * e1000e_config_fc_after_link_up - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced.
If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* + * Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000e_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000e_force_mac_fc(hw); + } + + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + + /* + * Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* + * Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + e_dbg("Copper PHY and Auto Neg " + "has not completed.\n"); + return ret_val; + } + + /* + * The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = + e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* + * Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. 
+ * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\r\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = " + "Rx PAUSE frames only.\r\n"); + } + } + /* + * For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); + } + /* + * For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); + } else { + /* + * Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\r\n"); + } + + /* + * Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* + * Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + e_dbg("%u Mbps, %s Duplex\n", + *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, + *duplex == FULL_DUPLEX ? 
"Full" : "Half"); + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return 0; +} + +/** + * e1000e_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000e_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000e_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (er32(EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + e_dbg("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000e_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 e1000e_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000e_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. + **/ +s32 e1000e_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + if (hw->mac.ops.setup_led != e1000e_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->mac.ledctl_mode1); + } + + return 0; +} + +/** + * e1000e_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) +{ + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000e_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. 
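+ *
+ * (Illustration, assuming the standard LEDCTL encoding where each LED
+ * owns one byte: the non-fiber path below scans every byte of
+ * ledctl_mode2 and sets the BLINK bit for each LED whose mode equals
+ * E1000_LEDCTL_MODE_LED_ON, so only LEDs configured "on" blink.)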
+ **/ +s32 e1000e_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* + * set the blink bit for each LED that's "on" (0x0E) + * in ledctl_mode2 + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << + (i * 8)); + } + + ew32(LEDCTL, ledctl_blink); + + return 0; +} + +/** + * e1000e_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000e_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000e_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_set_pcie_no_snoop - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + if (no_snoop) { + gcr = er32(GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + ew32(GCR, gcr); + } +} + +/** + * e1000e_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000e_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + ew32(CTRL, ctrl); + + while (timeout) { + if (!(er32(STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + e_dbg("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return 0; +} + +/** + * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. 
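+ *
+ * (Editorial note: "default" here means current_ifs_val is zeroed, the
+ * IFS_MIN/IFS_MAX/IFS_STEP/IFS_RATIO constants are reloaded, and the
+ * AIT register is cleared; nothing is read back from hardware.)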
+ **/ +void e1000e_reset_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + goto out; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + ew32(AIT, 0); +out: + return; +} + +/** + * e1000e_update_adaptive - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. + **/ +void e1000e_update_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + goto out; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + ew32(AIT, mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + ew32(AIT, 0); + } + } +out: + return; +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + e1e_flush(); + + udelay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. 
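+ *
+ * Worked example (editorial, not original Intel text): reading the byte
+ * 0xA5 takes eight iterations of the loop below; each one left-shifts
+ * "data" and ORs in the sampled DO bit, MSB first, so the function
+ * returns 0x00A5.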
+ **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = er32(EERD); + else + reg = er32(EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return 0; + + udelay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000e_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000e_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = er32(EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + ew32(EECD, eecd | E1000_EECD_REQ); + eecd = er32(EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = er32(EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000e_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000e_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = er32(EECD); + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
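+ *
+ * (Editorial sketch of the SPI path below: clear the SK and CS bits,
+ * then poll the part's status register with NVM_RDSR_OPCODE_SPI until
+ * the NVM_STATUS_RDY_SPI bit clears or NVM_MAX_RETRY_SPI attempts run
+ * out.)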
+ **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u8 spi_stat_reg; + + if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + e1e_flush(); + udelay(1); + + /* + * Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + e_dbg("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return 0; +} + +/** + * e1000e_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + ew32(EERD, eerd); + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); + } + + return ret_val; +} + +/** + * e1000e_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 widx = 0; + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + } + + usleep_range(10000, 20000); + nvm->ops.release(hw); + return 0; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + e_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + /* + * if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + e_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + e_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + e_dbg("NVM PBA number section 
invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + e_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + goto out; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = er32(RAH(0)); + rar_low = er32(RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + e_dbg("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + e_dbg("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000e_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
+ **/ +void e1000e_reload_nvm(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + if (!(hw->mac.arc_subsystem_valid)) { + e_dbg("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = er32(HICR); + if ((hicr & E1000_HICR_EN) == 0) { + e_dbg("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = er32(HICR); + if (!(hicr & E1000_HICR_C)) + break; + mdelay(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + e_dbg("Previous command timeout failed .\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return 0; +} + +/** + * e1000e_check_mng_mode_generic - check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = er32(FWSM); + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!e1000e_check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + goto out; + } + + /* + * If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) { + hw->mac.tx_pkt_filtering = false; + goto out; + } + + /* Read in the header. Length and offset are in dwords. 
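[Editor's note] The cookie verification below hinges on the two's-complement byte checksum implemented by e1000_calculate_checksum() above: the stored checksum byte is chosen so that all bytes of the buffer, checksum included, sum to zero modulo 256. A minimal sketch of that property (calc_csum() and the cookie buffer are illustrative; only the arithmetic is taken from the driver):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint8_t calc_csum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)(0 - sum);
}

int main(void)
{
	uint8_t cookie[16];

	memset(cookie, 0xA5, sizeof(cookie));
	cookie[0] = 0;                                /* zero the checksum slot */
	cookie[0] = calc_csum(cookie, sizeof(cookie));

	/* with the checksum in place, the whole buffer sums to zero */
	assert(calc_csum(cookie, sizeof(cookie)) == 0);
	return 0;
}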
*/ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* + * If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + goto out; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { + hw->mac.tx_pkt_filtering = false; + goto out; + } + +out: + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + e1e_flush(); + } + + return 0; +} + +/** + * e1000_mng_host_if_write - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block into the + * ram area. 
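[Editor's note] The loop that follows streams bytes into 32-bit register writes, zero-padding a short tail and accumulating a running byte sum on the side. Here is that packing pattern reduced to plain memory (pack_dwords() and its buffers are illustrative stand-ins for E1000_WRITE_REG_ARRAY() on E1000_HOST_IF; the leading-unaligned-bytes case handled above is omitted for brevity):

#include <stdint.h>
#include <stdio.h>

static void pack_dwords(uint32_t *dst, const uint8_t *src, size_t len,
			uint8_t *sum)
{
	uint32_t data;
	uint8_t *tmp = (uint8_t *)&data;
	size_t full = len / 4, rem = len % 4, i, j;

	for (i = 0; i < full; i++) {
		for (j = 0; j < 4; j++) {
			tmp[j] = *src++;
			*sum += tmp[j];
		}
		dst[i] = data;          /* one 32-bit write per dword */
	}
	if (rem) {
		for (j = 0; j < 4; j++) {
			tmp[j] = (j < rem) ? *src++ : 0;  /* zero-pad the tail */
			*sum += tmp[j];
		}
		dst[i] = data;
	}
}

int main(void)
{
	uint8_t msg[] = "dhcp-cookie";
	uint32_t out[4] = { 0 };
	uint8_t sum = 0;

	pack_dwords(out, msg, sizeof(msg), &sum);
	printf("sum=0x%02x dword0=0x%08x\n", sum, out[0]);
	return 0;
}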
+ */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + + return 0; +} + +/** + * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = er32(HICR); + ew32(HICR, hicr | E1000_HICR_C); + + return 0; +} + +/** + * e1000e_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.has_fwsm) { + fwsm = er32(FWSM); + factps = er32(FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else if ((hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82583)) { + u16 data; + + factps = er32(FACTPS); + e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((data & E1000_NVM_INIT_CTRL2_MNGM) == + (e1000_mng_mode_pt << 13))) { + ret_val = true; + goto out; + } + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + +out: + return ret_val; +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c new file mode 100644 index 0000000..20073aa --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c @@ -0,0 +1,4423 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#include <linux/slab.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/cpu.h> +#include <linux/smp.h> +#include <linux/version.h> +#include <linux/pm_qos.h> +#include <linux/pm_runtime.h> +#include <linux/aer.h> +#include <linux/prefetch.h> + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) +#include <linux/pci-aspm.h> +#endif + +#include "e1000.h" + +#define RT_E1000E_NUM_RXD 64 + +#define DRV_EXTRAVERSION "-k-rt" + +#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION +char e1000e_driver_name[] = "rt_e1000e"; +const char e1000e_driver_version[] = DRV_VERSION; + +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); + +static const struct e1000_info *e1000_info_tbl[] = { + [board_82571] = &e1000_82571_info, + [board_82572] = &e1000_82572_info, + [board_82573] = &e1000_82573_info, + [board_82574] = &e1000_82574_info, + [board_82583] = &e1000_82583_info, + [board_80003es2lan] = &e1000_es2_info, + [board_ich8lan] = &e1000_ich8_info, + [board_ich9lan] = &e1000_ich9_info, + [board_ich10lan] = &e1000_ich10_info, + [board_pchlan] = &e1000_pch_info, + [board_pch2lan] = &e1000_pch2_info, + [board_pch_lpt] = &e1000_pch_lpt_info, +}; + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ + +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ + +static const struct e1000_reg_info e1000_reg_info_tbl[] = { + + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* Rx Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN, "RDLEN"}, + {E1000_RDH, "RDH"}, + {E1000_RDT, "RDT"}, + {E1000_RDTR, "RDTR"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_ERT, "ERT"}, + {E1000_RDBAL, "RDBAL"}, + {E1000_RDBAH, "RDBAH"}, + {E1000_RDFH, "RDFH"}, + {E1000_RDFT, "RDFT"}, + {E1000_RDFHS, "RDFHS"}, + {E1000_RDFTS, "RDFTS"}, + {E1000_RDFPC, "RDFPC"}, + + /* Tx Registers */ + 
{E1000_TCTL, "TCTL"}, + {E1000_TDBAL, "TDBAL"}, + {E1000_TDBAH, "TDBAH"}, + {E1000_TDLEN, "TDLEN"}, + {E1000_TDH, "TDH"}, + {E1000_TDT, "TDT"}, + {E1000_TIDV, "TIDV"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TADV, "TADV"}, + {E1000_TARC(0), "TARC"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFTS, "TDFTS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {} +}; + +/* + * e1000_regdump - register printout routine + */ +static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_RXDCTL(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TXDCTL(n)); + break; + case E1000_TARC(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TARC(n)); + break; + default: + printk(KERN_INFO "%-15s %08x\n", + reginfo->name, __er32(hw, reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); + printk(KERN_INFO "%-15s ", rname); + for (n = 0; n < 2; n++) + printk(KERN_CONT "%08x ", regs[n]); + printk(KERN_CONT "\n"); +} + +/* + * e1000e_dump - Print registers, Tx-ring and Rx-ring + */ +static void e1000e_dump(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_reg_info *reginfo; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc; + struct my_u0 { + u64 a; + u64 b; + } *u0; + struct e1000_buffer *buffer_info; + struct e1000_ring *rx_ring = adapter->rx_ring; + union e1000_rx_desc_packet_split *rx_desc_ps; + union e1000_rx_desc_extended *rx_desc; + struct my_u1 { + u64 a; + u64 b; + u64 c; + u64 d; + } *u1; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + printk(KERN_INFO "Device Name state " + "trans_start last_rx\n"); + printk(KERN_INFO "%-15s\n", netdev->name); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + printk(KERN_INFO " Register Name Value\n"); + for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; + reginfo->name; reginfo++) { + e1000_regdump(hw, reginfo); + } + + /* Print Tx Ring Summary */ + if (!netdev || !rtnetif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" + " leng ntw timestamp\n"); + buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; + printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); + + /* Print Tx Ring */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) 
for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Legacy format\n"); + printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Context format\n"); + printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" + " [bi->dma ] leng ntw timestamp bi->skb " + "<-- Ext Data format\n"); + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " + "%04X %3X %016llX %p", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->length, buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp, + buffer_info->skb); + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC/U\n"); + else if (i == tx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == tx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + + if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, phys_to_virt(buffer_info->dma), + buffer_info->length, true); + } + + /* Print Rx Ring Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); + printk(KERN_INFO "Queue [NTU] [NTC]\n"); + printk(KERN_INFO " %5d %5X %5X\n", 0, + rx_ring->next_to_use, rx_ring->next_to_clean); + + /* Print Rx Ring */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); + switch (adapter->rx_ps_pages) { + case 1: + case 2: + case 3: + /* [Extended] Packet Split Receive Descriptor Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address 0 [63:0] | + * +-----------------------------------------------------+ + * 8 | Buffer Address 1 [63:0] | + * +-----------------------------------------------------+ + * 16 | Buffer Address 2 [63:0] | + * +-----------------------------------------------------+ + * 24 | Buffer Address 3 [63:0] | + * +-----------------------------------------------------+ + */ + printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " + "[buffer 1 63:0 ] " + "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " + "[bi->skb] <-- Ext Pkt Split format\n"); + /* [Extended] Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 13 12 8 7 4 3 0 + * +------------------------------------------------------+ 
+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | + * | Checksum | Ident | | Queue | | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " + "[vl l0 ee es] " + "[ l3 l2 l1 hs] [reserved ] ---------------- " + "[bi->skb] <-- Ext Rx Write-Back format\n"); + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc_ps; + staterr = + le32_to_cpu(rx_desc_ps->wb.middle.status_error); + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX %016llX %016llX " + "---------------- %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb); + } else { + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %016llX %016llX %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + buffer_info->skb); + + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + phys_to_virt(buffer_info->dma), + adapter->rx_ps_bsize0, true); + } + + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + } + break; + default: + case 0: + /* Extended Receive Descriptor (Read) Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * 8 | Reserved | + * +-----------------------------------------------------+ + */ + printk(KERN_INFO "R [desc] [buf addr 63:0 ] " + "[reserved 63:0 ] [bi->dma ] " + "[bi->skb] <-- Ext (Read) format\n"); + /* Extended Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 24 23 4 3 0 + * +------------------------------------------------------+ + * | RSS Hash | | | | + * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | + * | Packet | IP | | | Type | + * | Checksum | Ident | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + printk(KERN_INFO "RWB[desc] [cs ipid mrq] " + "[vt ln xe xs] " + "[bi->skb] <-- Ext (Write-Back) format\n"); + + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + printk(KERN_INFO "RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + buffer_info->skb); + } else { + printk(KERN_INFO "R [0x%03X] %016llX " + "%016llX %016llX %p", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb); + + if (netif_msg_pktdata(adapter)) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 
+ 1, + phys_to_virt + (buffer_info->dma), + adapter->rx_buffer_len, + true); + } + + if (i == rx_ring->next_to_use) + printk(KERN_CONT " NTU\n"); + else if (i == rx_ring->next_to_clean) + printk(KERN_CONT " NTC\n"); + else + printk(KERN_CONT "\n"); + } + } + +exit: + return; +} + +void e1000e_mod_watchdog_timer(rtdm_nrtsig_t *nrt_sig, void *data) +{ + struct timer_list *timer = data; + + mod_timer(timer, jiffies + 1); +} + +void e1000e_trigger_downshift(rtdm_nrtsig_t *nrt_sig, void *data) +{ + struct work_struct *downshift_task = data; + + schedule_work(downshift_task); +} + +/** + * e1000_desc_unused - calculate if we have unused descriptors + **/ +static int e1000_desc_unused(struct e1000_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct rtskb *skb) +{ + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + /* Ignore Checksum bit is set */ + if (status & E1000_RXD_STAT_IXSM) + return; + /* TCP/UDP checksum error bit is set */ + if (errors & E1000_RXD_ERR_TCPE) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* TCP/UDP Checksum has not been calculated */ + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (status & E1000_RXD_STAT_TCPCS) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + /* + * IP fragment with UDP payload + * Hardware complements the payload checksum, so we undo it + * and then put the value in host order for further stack use. + */ + __sum16 sum = (__force __sum16)htons(csum); + skb->csum = csum_unfold(~sum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + adapter->hw_csum_good++; +} + +/** + * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() + * @hw: pointer to the HW structure + * @tail: address of tail descriptor register + * @i: value to write to tail descriptor register + * + * When updating the tail register, the ME could be accessing Host CSR + * registers at the same time. Normally, this is handled in h/w by an + * arbiter but on some parts there is a bug that acknowledges Host accesses + * later than it should which could result in the descriptor register to + * have an incorrect value. Workaround this by checking the FWSM register + * which has bit 24 set while ME is accessing Host CSR registers, wait + * if it is set and try again a number of times. 
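[Editor's note] The workaround described here reduces to: poll until the ME-access indication clears or a retry budget runs out, perform the tail write anyway, then treat "budget exhausted and readback mismatch" as failure. A sketch of that poll-write-verify shape against ordinary memory (busy(), LIMIT and the fake_* variables are illustrative; the driver polls FWSM bit 24 with udelay(50) between polls):

#include <stdint.h>
#include <stdio.h>

#define LIMIT 2000

static volatile uint32_t fake_fwsm;  /* bit 0: "ME is accessing host CSRs" */
static volatile uint32_t fake_tail;  /* stands in for the RDT/TDT register */

static int busy(void)
{
	return fake_fwsm & 1;
}

static int update_tail(uint32_t val)
{
	unsigned int j;

	for (j = 0; j < LIMIT && busy(); j++)
		;                    /* the driver delays 50 us per iteration */

	fake_tail = val;             /* the writel(i, tail) step */

	/* gave up waiting and the write did not stick: report failure */
	if (j == LIMIT && fake_tail != val)
		return -1;
	return 0;
}

int main(void)
{
	printf("rc=%d tail=%u\n", update_tail(42), fake_tail);
	return 0;
}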
+ **/ +static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, + unsigned int i) +{ + unsigned int j = 0; + + while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && + (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) + udelay(50); + + writel(i, tail); + + if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) + return E1000_ERR_SWFW_SYNC; + + return 0; +} + +static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) +{ + u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); + struct e1000_hw *hw = &adapter->hw; + + if (e1000e_update_tail_wa(hw, tail, i)) { + u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e_err("ME firmware caused invalid RDT - resetting\n"); + rtdm_schedule_nrt_work(&adapter->reset_task); + } +} + +static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) +{ + u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); + struct e1000_hw *hw = &adapter->hw; + + if (e1000e_update_tail_wa(hw, tail, i)) { + u32 tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + e_err("ME firmware caused invalid TDT - resetting\n"); + rtdm_schedule_nrt_work(&adapter->reset_task); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + int cleaned_count, gfp_t gfp) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + union e1000_rx_desc_extended *rx_desc; + struct e1000_buffer *buffer_info; + struct rtskb *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + rtskb_trim(skb, 0); + goto map_skb; + } + + skb = rtnetdev_alloc_rtskb(adapter->netdev, bufsz); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + rtskb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; +map_skb: + buffer_info->dma = rtskb_data_dma_addr(skb, 0); + + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
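[Editor's note] The ordering requirement spelled out in this comment, descriptor contents first, tail index second, can be expressed portably with C11 fences. A hedged sketch (ring, tail and publish() are illustrative; in the driver the fence is the wmb() just below and the store is the writel() of the tail index):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING 64

struct desc {
	uint64_t addr;
};

static struct desc ring[RING];
static _Atomic unsigned int tail;   /* plays the role of the RDT register */

static void publish(unsigned int i, uint64_t dma)
{
	ring[i % RING].addr = dma;                  /* fill the descriptor */
	atomic_thread_fence(memory_order_release);  /* the driver's wmb() */
	atomic_store_explicit(&tail, i, memory_order_relaxed);
}

int main(void)
{
	publish(1, 0x1000);
	printf("tail=%u\n", atomic_load(&tail));
	return 0;
}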
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(adapter, i); + else + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + rx_ring->next_to_use = i; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + nanosecs_abs_t *time_stamp) +{ + struct rtnet_device *netdev = adapter->netdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + union e1000_rx_desc_extended *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length, staterr; + unsigned int i; + int cleaned_count = 0; + bool data_received = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + struct rtskb *skb; + + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned_count++; + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->wb.upper.length); + + /* + * !EOP means multiple descriptors were used to store a single + * packet, if that's the case we need to toss it. In fact, we + * need to toss every packet with the EOP bit clear and the + * next frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + /* All receives must fit into a single buffer */ + e_dbg("Receive packet consumed multiple buffers\n"); + /* recycle */ + buffer_info->skb = skb; + if (staterr & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + /* adjust length to remove Ethernet CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) + length -= 4; + + total_rx_bytes += length; + total_rx_packets++; + + rtskb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, staterr, + le16_to_cpu(rx_desc->wb.lower.hi_dword. 
+ csum_ip.csum), skb); + + skb->protocol = rt_eth_type_trans(skb, netdev); + skb->time_stamp = *time_stamp; + rtnetif_rx(skb); + data_received = true; + +next_desc: + rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return data_received; +} + +static void e1000_put_txbuf(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + buffer_info->dma = 0; + if (buffer_info->skb) { + kfree_rtskb(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ + for (; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + } + + e1000_put_txbuf(adapter, buffer_info); + tx_desc->upper.data = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + if (i == tx_ring->next_to_use) + break; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + if (count && rtnetif_carrier_ok(netdev) && + e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
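[Editor's note] The wake-up test below compares e1000_desc_unused() against TX_WAKE_THRESHOLD, and the helper's arithmetic is worth spelling out: one slot is deliberately sacrificed so that next_to_clean == next_to_use always means "empty" rather than "full". A standalone sketch of the same computation (desc_unused() mirrors the driver's e1000_desc_unused()):

#include <assert.h>

/* free slots in a ring of `count` entries, one slot kept empty */
static int desc_unused(unsigned int count, unsigned int ntc, unsigned int ntu)
{
	if (ntc > ntu)
		return ntc - ntu - 1;
	return count + ntc - ntu - 1;
}

int main(void)
{
	assert(desc_unused(64, 0, 0) == 63);   /* empty ring */
	assert(desc_unused(64, 10, 9) == 0);   /* completely full ring */
	return 0;
}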
+ */ + smp_mb(); + + if (rtnetif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->state))) { + rtnetif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* + * Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = 0; + if (tx_ring->buffer_info[i].time_stamp && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + rtnetif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + buffer_info->dma = 0; + + if (buffer_info->skb) { + kfree_rtskb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + kfree_rtskb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +static void e1000e_downshift_workaround(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, downshift_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); +} + +/** + * e1000_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static int e1000_intr_msi(rtdm_irq_t *irq_handle) +{ + struct e1000_adapter *adapter = + rtdm_irq_get_arg(irq_handle, struct e1000_adapter); + struct e1000_hw *hw = &adapter->hw; + nanosecs_abs_t time_stamp = rtdm_clock_read(); + u32 icr = er32(ICR); + + /* + * read ICR disables interrupts using IAM + */ + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* + * ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + rtdm_schedule_nrt_work(&adapter->downshift_task); + + /* + * 80003ES2LAN workaround-- For packet buffer work-around on + * link down event; disable receives here in the ISR and reset + * adapter in watchdog + */ + if (rtnetif_carrier_ok(adapter->netdev) && + adapter->flags & FLAG_RX_NEEDS_RESTART) { + /* disable receives */ + u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RX_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + rtdm_nrtsig_pend(&adapter->mod_timer_sig); + } + + if (!e1000_clean_tx_irq(adapter)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(ICS, adapter->tx_ring->ims_val); + + if (e1000_clean_rx_irq(adapter, &time_stamp)) + rt_mark_stack_mgr(adapter->netdev); + + return RTDM_IRQ_HANDLED; 
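[Editor's note] e1000_intr(), defined next, opens with the classic shared-line gate: with legacy INTx the vector may be shared, so the handler reads ICR and returns "none" when INT_ASSERTED is clear, letting other handlers on the line run. A sketch of that gate in isolation (fake_isr() and the enum are illustrative; ICR_INT_ASSERTED mirrors E1000_ICR_INT_ASSERTED, bit 31):

#include <stdint.h>
#include <stdio.h>

#define ICR_INT_ASSERTED (1u << 31)

enum fake_irqreturn { FAKE_IRQ_NONE, FAKE_IRQ_HANDLED };

static enum fake_irqreturn fake_isr(uint32_t icr)
{
	if (!icr || !(icr & ICR_INT_ASSERTED))
		return FAKE_IRQ_NONE;  /* not ours: let other handlers run */

	/* ... service the events flagged in the remaining ICR bits ... */
	return FAKE_IRQ_HANDLED;
}

int main(void)
{
	printf("%d %d\n", fake_isr(0), fake_isr(ICR_INT_ASSERTED | 0x4));
	return 0;
}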
+} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static int e1000_intr(rtdm_irq_t *irq_handle) +{ + struct e1000_adapter *adapter = + rtdm_irq_get_arg(irq_handle, struct e1000_adapter); + struct e1000_hw *hw = &adapter->hw; + nanosecs_abs_t time_stamp = rtdm_clock_read(); + u32 rctl, icr = er32(ICR); + + if (!icr || test_bit(__E1000_DOWN, &adapter->state)) + return RTDM_IRQ_NONE; /* Not our interrupt */ + + /* + * IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return RTDM_IRQ_NONE; + + /* + * Interrupt Auto-Mask...upon reading ICR, + * interrupts are masked. No need for the + * IMC write + */ + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* + * ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + rtdm_nrtsig_pend(&adapter->downshift_sig); + + /* + * 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (rtnetif_carrier_ok(adapter->netdev) && + (adapter->flags & FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RX_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + rtdm_nrtsig_pend(&adapter->mod_timer_sig); + } + + if (!e1000_clean_tx_irq(adapter)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(ICS, adapter->tx_ring->ims_val); + + if (e1000_clean_rx_irq(adapter, &time_stamp)) + rt_mark_stack_mgr(adapter->netdev); + + return RTDM_IRQ_HANDLED; +} + +static irqreturn_t e1000_msix_other(int irq, void *data) +{ + struct rtnet_device *netdev = data; + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (!(icr & E1000_ICR_INT_ASSERTED)) { + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_OTHER); + return IRQ_NONE; + } + + if (icr & adapter->eiac_mask) + ew32(ICS, (icr & adapter->eiac_mask)); + + if (icr & E1000_ICR_OTHER) { + if (!(icr & E1000_ICR_LSC)) + goto no_link_interrupt; + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + +no_link_interrupt: + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); + + return IRQ_HANDLED; +} + + +static int e1000_intr_msix_tx(rtdm_irq_t *irq_handle) +{ + struct e1000_adapter *adapter = + rtdm_irq_get_arg(irq_handle, struct e1000_adapter); + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + if (!e1000_clean_tx_irq(adapter)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(ICS, tx_ring->ims_val); + + return RTDM_IRQ_HANDLED; +} + +static int e1000_intr_msix_rx(rtdm_irq_t *irq_handle) +{ + struct e1000_adapter *adapter = + rtdm_irq_get_arg(irq_handle, struct e1000_adapter); + nanosecs_abs_t time_stamp = rtdm_clock_read(); + + /* Write the ITR value calculated at the end of the + * previous interrupt. 
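[Editor's note] The write just below converts a target interrupt rate into 82574 EITR units: the register counts intervals of 256 ns, hence 10^9 / (rate * 256). A small sketch of the arithmetic (itr_to_eitr() is an illustrative helper, not a driver function):

#include <stdio.h>

static unsigned int itr_to_eitr(unsigned int irqs_per_sec)
{
	return 1000000000u / (irqs_per_sec * 256u);
}

int main(void)
{
	/* 20000 ints/s -> 50000 ns interval -> 195 units of 256 ns */
	printf("%u\n", itr_to_eitr(20000));
	return 0;
}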
+ */ + if (adapter->rx_ring->set_itr) { + writel(1000000000 / (adapter->rx_ring->itr_val * 256), + adapter->hw.hw_addr + adapter->rx_ring->itr_register); + adapter->rx_ring->set_itr = 0; + } + + if (e1000_clean_rx_irq(adapter, &time_stamp)) + rt_mark_stack_mgr(adapter->netdev); + + return RTDM_IRQ_HANDLED; +} + +/** + * e1000_configure_msix - Configure MSI-X hardware + * + * e1000_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void e1000_configure_msix(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_ring *tx_ring = adapter->tx_ring; + int vector = 0; + u32 ctrl_ext, ivar = 0; + + adapter->eiac_mask = 0; + + /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ + if (hw->mac.type == e1000_82574) { + u32 rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_ACK_DIS; + ew32(RFCTL, rfctl); + } + +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + /* Configure Rx vector */ + rx_ring->ims_val = E1000_IMS_RXQ0; + adapter->eiac_mask |= rx_ring->ims_val; + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + rx_ring->itr_register); + else + writel(1, hw->hw_addr + rx_ring->itr_register); + ivar = E1000_IVAR_INT_ALLOC_VALID | vector; + + /* Configure Tx vector */ + tx_ring->ims_val = E1000_IMS_TXQ0; + vector++; + if (tx_ring->itr_val) + writel(1000000000 / (tx_ring->itr_val * 256), + hw->hw_addr + tx_ring->itr_register); + else + writel(1, hw->hw_addr + tx_ring->itr_register); + adapter->eiac_mask |= tx_ring->ims_val; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); + + /* set vector for Other Causes, e.g. link changes */ + vector++; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + E1000_EITR_82574(vector)); + else + writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + + /* Cause Tx interrupts on every write back */ + ivar |= (1 << 31); + + ew32(IVAR, ivar); + + /* enable MSI-X PBA support */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask Other interrupts upon ICR read */ +#define E1000_EIAC_MASK_82574 0x01F00000 + ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); + ctrl_ext |= E1000_CTRL_EXT_EIAME; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~FLAG_MSI_ENABLED; + } +} + +/** + * e1000e_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. 
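[Editor's note] The function below is a three-rung fallback ladder, MSI-X, then MSI, then legacy INTx, built on a switch with deliberate fallthrough. A minimal control-flow sketch (try_msix() and try_msi() are hypothetical stand-ins for pci_enable_msix_range() and pci_enable_msi()):

#include <stdio.h>

enum int_mode { MODE_MSIX, MODE_MSI, MODE_LEGACY };

static int try_msix(void) { return -1; }  /* pretend MSI-X is unavailable */
static int try_msi(void)  { return 0; }

static enum int_mode pick_mode(enum int_mode wanted)
{
	switch (wanted) {
	case MODE_MSIX:
		if (try_msix() == 0)
			return MODE_MSIX;
		/* fall through, as the driver does */
	case MODE_MSI:
		if (try_msi() == 0)
			return MODE_MSI;
		/* fall through */
	case MODE_LEGACY:
	default:
		return MODE_LEGACY;
	}
}

int main(void)
{
	printf("mode=%d\n", pick_mode(MODE_MSIX));
	return 0;
}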
+ **/ +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) +{ + int err; + int i; + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + if (adapter->flags & FLAG_HAS_MSIX) { + adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ + adapter->msix_entries = kcalloc(adapter->num_vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < adapter->num_vectors; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + adapter->num_vectors, + adapter->num_vectors); + if (err == 0) + return; + } + /* MSI-X failed, so fall through and try MSI */ + e_err("Failed to initialize MSI-X interrupts. " + "Falling back to MSI interrupts.\n"); + e1000e_reset_interrupt_capability(adapter); + } + adapter->int_mode = E1000E_INT_MODE_MSI; + fallthrough; + case E1000E_INT_MODE_MSI: + if (!pci_enable_msi(adapter->pdev)) { + adapter->flags |= FLAG_MSI_ENABLED; + } else { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_err("Failed to initialize MSI interrupts. Falling " + "back to legacy interrupts.\n"); + } + fallthrough; + case E1000E_INT_MODE_LEGACY: + /* Don't do anything; this is the system default */ + break; + } + + /* store the number of vectors being used */ + adapter->num_vectors = 1; +} + +/** + * e1000_request_msix - Initialize MSI-X interrupts + * + * e1000_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. + **/ +static int e1000_request_msix(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->rx_ring->name, + sizeof(adapter->rx_ring->name) - 1, + "%s-rx-0", netdev->name); + else + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + err = rtdm_irq_request(&adapter->rx_irq_handle, + adapter->msix_entries[vector].vector, + e1000_intr_msix_rx, 0, adapter->rx_ring->name, + adapter); + if (err) + goto out; + adapter->rx_ring->itr_register = E1000_EITR_82574(vector); + adapter->rx_ring->itr_val = adapter->itr; + vector++; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->tx_ring->name, + sizeof(adapter->tx_ring->name) - 1, + "%s-tx-0", netdev->name); + else + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + err = rtdm_irq_request(&adapter->tx_irq_handle, + adapter->msix_entries[vector].vector, + e1000_intr_msix_tx, 0, adapter->tx_ring->name, + adapter); + if (err) + goto out; + adapter->tx_ring->itr_register = E1000_EITR_82574(vector); + adapter->tx_ring->itr_val = adapter->itr; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + e1000_msix_other, 0, netdev->name, netdev); + if (err) + goto out; + + e1000_configure_msix(adapter); + return 0; +out: + return err; +} + +/** + * e1000_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
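[Editor's note] e1000_request_msix() above derives per-vector IRQ names from the interface name while guarding against overflowing the fixed-size ring-name field. The same scheme in isolation (vector_name() is illustrative; the driver falls back to a plain memcpy() of the raw name when the suffix would not fit):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

static void vector_name(char *dst, size_t dstlen,
			const char *ifname, const char *suffix)
{
	if (strlen(ifname) < IFNAMSIZ - 5)
		snprintf(dst, dstlen, "%s-%s", ifname, suffix);
	else
		snprintf(dst, dstlen, "%s", ifname);  /* name too long: keep it */
}

int main(void)
{
	char name[IFNAMSIZ + 8];

	vector_name(name, sizeof(name), "rteth0", "rx-0");
	puts(name);   /* prints "rteth0-rx-0" */
	return 0;
}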
+ **/ +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + int err; + + if (adapter->msix_entries) { + err = e1000_request_msix(adapter); + if (!err) + return err; + /* fall back to MSI */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_MSI; + e1000e_set_interrupt_capability(adapter); + } + if (adapter->flags & FLAG_MSI_ENABLED) { + err = rtdm_irq_request(&adapter->irq_handle, + adapter->pdev->irq, e1000_intr_msi, + 0, netdev->name, adapter); + if (!err) + return err; + + /* fall back to legacy interrupt */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + } + + err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq, + e1000_intr, 0, netdev->name, adapter); + if (err) + e_err("Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + + if (adapter->msix_entries) { + int vector = 0; + + rtdm_irq_disable(&adapter->rx_irq_handle); + rtdm_irq_free(&adapter->rx_irq_handle); + vector++; + + rtdm_irq_disable(&adapter->tx_irq_handle); + rtdm_irq_free(&adapter->tx_irq_handle); + vector++; + + /* Other Causes interrupt vector */ + free_irq(adapter->msix_entries[vector].vector, netdev); + return; + } + + if (adapter->flags & FLAG_MSI_ENABLED) + rtdm_irq_disable(&adapter->irq_handle); + rtdm_irq_free(&adapter->irq_handle); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + if (adapter->msix_entries) + ew32(EIAC_82574, 0); + e1e_flush(); + + if (adapter->msix_entries) { + int i; + for (i = 0; i < adapter->num_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); + ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + } else { + ew32(IMS, IMS_ENABLE_MASK); + } + e1e_flush(); +} + +/** + * e1000e_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is open. + **/ +void e1000e_get_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware know the driver has taken over */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000e_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. 
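[Editor's note] Both hand-over helpers, e1000e_get_hw_control() above and e1000e_release_hw_control() below, reduce to a read-modify-write of a single handshake bit. A minimal sketch on a plain variable (DRV_LOAD stands in for E1000_CTRL_EXT_DRV_LOAD, bit 28 in the e1000 headers; the SWSM:DRV_LOAD variant is analogous):

#include <stdint.h>
#include <stdio.h>

#define DRV_LOAD (1u << 28)   /* stand-in for E1000_CTRL_EXT_DRV_LOAD */

static uint32_t ctrl_ext;     /* stand-in for the CTRL_EXT register */

static void get_hw_control(void)
{
	ctrl_ext |= DRV_LOAD;     /* tell firmware the driver owns the NIC */
}

static void release_hw_control(void)
{
	ctrl_ext &= ~DRV_LOAD;    /* hand control back to firmware */
}

int main(void)
{
	get_hw_control();
	printf("0x%08x\n", ctrl_ext);
	release_hw_control();
	printf("0x%08x\n", ctrl_ext);
	return 0;
}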
For AMT version (only with 82573) i + * of the f/w this means that the network i/f is closed. + * + **/ +void e1000e_release_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware taken over control of h/w */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * @e1000_alloc_ring - allocate memory for a ring structure + **/ +static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, + struct e1000_ring *ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +/** + * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000e_setup_tx_resources(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + int err = -ENOMEM, size; + + size = sizeof(struct e1000_buffer) * tx_ring->count; + tx_ring->buffer_info = vzalloc(size); + if (!tx_ring->buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, tx_ring); + if (err) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; +err: + vfree(tx_ring->buffer_info); + e_err("Unable to allocate memory for the transmit descriptor ring\n"); + return err; +} + +/** + * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +int e1000e_setup_rx_resources(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + int size, desc_len, err = -ENOMEM; + + size = sizeof(struct e1000_buffer) * rx_ring->count; + rx_ring->buffer_info = vzalloc(size); + if (!rx_ring->buffer_info) + goto err; + + desc_len = sizeof(union e1000_rx_desc_packet_split); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, rx_ring); + if (err) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->rx_skb_top = NULL; + + return 0; + +err: + vfree(rx_ring->buffer_info); + e_err("Unable to allocate memory for the receive descriptor ring\n"); + return err; +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(adapter, buffer_info); + } + + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * e1000e_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * + * Free all 
transmit software resources + **/ +void e1000e_free_tx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *tx_ring = adapter->tx_ring; + + e1000_clean_tx_ring(adapter); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * e1000e_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * + * Free all receive software resources + **/ + +void e1000e_free_rx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + int i; + + e1000_clean_rx_ring(adapter); + + for (i = 0; i < rx_ring->count; i++) + kfree(rx_ring->buffer_info[i].ps_pages); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->tx_ring) + goto err; + + rtdm_lock_init(&adapter->tx_ring->lock); + + adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->rx_ring) + goto err; + + return 0; +err: + e_err("Unable to allocate memory for queues\n"); + kfree(adapter->rx_ring); + kfree(adapter->tx_ring); + return -ENOMEM; +} + +static void e1000_vlan_rx_add_vid(struct rtnet_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + /* don't update vlan cookie if already programmed */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) + return; + + /* add VID to filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta |= (1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + set_bit(vid, adapter->active_vlans); +} + +static void e1000_vlan_rx_kill_vid(struct rtnet_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) { + /* release control to f/w */ + e1000e_release_hw_control(adapter); + return; + } + + /* remove VID from filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + clear_bit(vid, adapter->active_vlans); +} + +/** + * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); + ew32(RCTL, rctl); + + if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + adapter->mng_vlan_id = 
E1000_MNG_VLAN_NONE; + } + } +} + +/** + * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl |= E1000_RCTL_VFE; + rctl &= ~E1000_RCTL_CFIEN; + ew32(RCTL, rctl); + } +} + +/** + * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* disable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +/** + * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* enable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + e1000_vlan_rx_add_vid(netdev, vid); + adapter->mng_vlan_id = vid; + } + + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) + e1000_vlan_rx_kill_vid(netdev, old_vid); +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + e1000_vlan_rx_add_vid(adapter->netdev, 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, vid); +} + +static void e1000_init_manageability_pt(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 manc, manc2h, mdef, i, j; + + if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) + return; + + manc = er32(MANC); + + /* + * enable receiving management packets to the host. this will probably + * generate destination unreachable messages from the host OS, but + * the packets will be handled on SMBUS + */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h = er32(MANC2H); + + switch (hw->mac.type) { + default: + manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); + break; + case e1000_82574: + case e1000_82583: + /* + * Check if IPMI pass-through decision filter already exists; + * if so, enable it. + */ + for (i = 0, j = 0; i < 8; i++) { + mdef = er32(MDEF(i)); + + /* Ignore filters with anything other than IPMI ports */ + if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + continue; + + /* Enable this decision filter in MANC2H */ + if (mdef) + manc2h |= (1 << i); + + j |= mdef; + } + + if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + break; + + /* Create new decision filter in an empty filter */ + for (i = 0, j = 0; i < 8; i++) + if (er32(MDEF(i)) == 0) { + ew32(MDEF(i), (E1000_MDEF_PORT_623 | + E1000_MDEF_PORT_664)); + /* enable the filter just programmed */ + manc2h |= (1 << i); + j++; + break; + } + + if (!j) + e_warn("Unable to create IPMI pass-through filter\n"); + break; + } + + ew32(MANC2H, manc2h); + ew32(MANC, manc); +} + +/** + * e1000_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset.
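+ * (Summary note, not new behavior: the body below programs the ring
+ * base/head/tail via TDBAL/TDBAH/TDLEN/TDH/TDT, the inter-packet gap
+ * via TIPG, Tx interrupt delays via TIDV/TADV, and finally enables the
+ * transmitter through TCTL, applying TARC errata workarounds on the way.)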
+ **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 tdlen, tctl, tipg, tarc; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + tdba = tx_ring->dma; + tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); + ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); + ew32(TDBAH, (tdba >> 32)); + ew32(TDLEN, tdlen); + ew32(TDH, 0); + ew32(TDT, 0); + tx_ring->head = E1000_TDH; + tx_ring->tail = E1000_TDT; + + /* Set the default values for the Tx Inter Packet Gap timer */ + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ + ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ + ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ + + if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) + ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ + + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + ew32(TIDV, adapter->tx_int_delay); + /* Tx irq moderation */ + ew32(TADV, adapter->tx_abs_int_delay); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + u32 txdctl = er32(TXDCTL(0)); + txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | + E1000_TXDCTL_WTHRESH); + /* + * set up some performance related parameters to encourage the + * hardware to use the bus more efficiently in bursts, depends + * on the tx_int_delay to be enabled, + * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time + * hthresh = 1 ==> prefetch when one or more available + * pthresh = 0x1f ==> prefetch if internal cache 31 or less + * BEWARE: this seems to work but should be considered first if + * there are Tx hangs or other Tx related bugs + */ + txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; + ew32(TXDCTL(0), txdctl); + /* erratum work around: set txdctl the same for both queues */ + ew32(TXDCTL(1), txdctl); + } + + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { + tarc = er32(TARC(0)); + /* + * set the speed mode bit, we'll clear it if we're not at + * gigabit link later + */ +#define SPEED_MODE_BIT (1 << 21) + tarc |= SPEED_MODE_BIT; + ew32(TARC(0), tarc); + } + + /* errata: program both queues to unweighted RR */ + if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { + tarc = er32(TARC(0)); + tarc |= 1; + ew32(TARC(0), tarc); + tarc = er32(TARC(1)); + tarc |= 1; + ew32(TARC(1), tarc); + } + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + ew32(TCTL, tctl); + + e1000e_config_collision_dist(hw); +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 
1 : 0)) +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rfctl; + + /* Workaround Si errata on PCHx - configure jumbo frame flow */ + if (hw->mac.type >= e1000_pch2lan) { + s32 ret_val; + + if (adapter->netdev->mtu > ETH_DATA_LEN) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + + if (ret_val) + e_dbg("failed to enable jumbo frame workaround mode\n"); + } + + /* Program MC offset vector base */ + rctl = er32(RCTL); + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Do not Store bad packets */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Long Packet receive */ + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Some systems expect that the CRC is included in SMBUS traffic. The + * hardware strips the CRC before sending to both SMBUS (BMC) and to + * host memory when this is enabled + */ + if (adapter->flags2 & FLAG2_CRC_STRIPPING) + rctl |= E1000_RCTL_SECRC; + + /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ + if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { + u16 phy_data; + + e1e_rphy(hw, PHY_REG(770, 26), &phy_data); + phy_data &= 0xfff8; + phy_data |= (1 << 2); + e1e_wphy(hw, PHY_REG(770, 26), phy_data); + + e1e_rphy(hw, 22, &phy_data); + phy_data &= 0x0fff; + phy_data |= (1 << 14); + e1e_wphy(hw, 0x10, 0x2823); + e1e_wphy(hw, 0x11, 0x0003); + e1e_wphy(hw, 22, phy_data); + } + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case 2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case 4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case 8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case 16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* Enable Extended Status in all Receive Descriptors */ + rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + + adapter->rx_ps_pages = 0; + + ew32(RFCTL, rfctl); + ew32(RCTL, rctl); + /* just started the receive unit, no need to restart */ + adapter->flags &= ~FLAG_RX_RESTART_NOW; +} + +/** + * e1000_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rctl, rxcsum, ctrl_ext; + + rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + /* + * set the writeback threshold (only takes effect if the RDTR + * is set). 
set GRAN=1 and write back up to 0x4 worth, and + * enable prefetching of 0x20 Rx descriptors + * granularity = 01 + * wthresh = 04, + * hthresh = 04, + * pthresh = 0x20 + */ + ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); + ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); + + /* + * override the delay timers for enabling bursting, only if + * the value was not set by the user via module options + */ + if (adapter->rx_int_delay == DEFAULT_RDTR) + adapter->rx_int_delay = BURST_RDTR; + if (adapter->rx_abs_int_delay == DEFAULT_RADV) + adapter->rx_abs_int_delay = BURST_RADV; + } + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + /* irq moderation */ + ew32(RADV, adapter->rx_abs_int_delay); + if ((adapter->itr_setting != 0) && (adapter->itr != 0)) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH, (rdba >> 32)); + ew32(RDLEN, rdlen); + ew32(RDH, 0); + ew32(RDT, 0); + rx_ring->head = E1000_RDH; + rx_ring->tail = E1000_RDT; + + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = er32(RXCSUM); + if (adapter->netdev->features & NETIF_F_RXCSUM) { + rxcsum |= E1000_RXCSUM_TUOFL; + } else { + rxcsum &= ~E1000_RXCSUM_TUOFL; + /* no need to clear IPPCSE as it defaults to 0 */ + } + ew32(RXCSUM, rxcsum); + + /* Enable Receives */ + ew32(RCTL, rctl); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); +} + +/** + * e1000_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
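+ * As the body below shows, IFF_PROMISC maps to RCTL.UPE|MPE with the
+ * hardware VLAN filter disabled, IFF_ALLMULTI maps to RCTL.MPE alone,
+ * and the normal case clears both bits and re-enables VLAN filtering.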
+ **/ +static void e1000_set_multi(struct rtnet_device *netdev) +{ + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + /* Do not hardware filter VLANs in promisc mode */ + e1000e_vlan_filter_disable(adapter); + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + rctl &= ~E1000_RCTL_UPE; + } else { + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + } + e1000e_vlan_filter_enable(adapter); + } + + ew32(RCTL, rctl); + + e1000_update_mc_addr_list(hw, NULL, 0); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + e1000e_vlan_strip_enable(adapter); + else + e1000e_vlan_strip_disable(adapter); +} + +/** + * e1000_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + e1000_set_multi(adapter->netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability_pt(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), + GFP_KERNEL); +} + +/** + * e1000e_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000e_reset *** + **/ +void e1000e_power_up_phy(struct e1000_adapter *adapter) +{ + if (adapter->hw.phy.ops.power_up) + adapter->hw.phy.ops.power_up(&adapter->hw); + + adapter->hw.mac.ops.setup_link(&adapter->hw); +} + +/** + * e1000_power_down_phy - Power down the PHY + * + * Power down the PHY so no link is implied when interface is down. + * The PHY cannot be powered down if management or WoL is active. + */ +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + /* WoL is enabled */ + if (adapter->wol) + return; + + if (adapter->hw.phy.ops.power_down) + adapter->hw.phy.ops.power_down(&adapter->hw); +} + +/** + * e1000e_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for Rx, Tx etc. + */ +void e1000e_reset(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_fc_info *fc = &adapter->hw.fc; + struct e1000_hw *hw = &adapter->hw; + u32 tx_space, min_tx_space, min_rx_space; + u32 pba = adapter->pba; + u16 hwm; + + /* reset Packet Buffer Allocation to default */ + ew32(PBA, pba); + + if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* + * To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. 
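+ * As a worked example (assuming a 9000-byte MTU, i.e. max_frame_size =
+ * 9018, and the 16-byte e1000_tx_desc): min_tx_space =
+ * (9018 + 16 - 4) * 2 = 18060 bytes, aligned up to 18432 and shifted
+ * down to 18 KB; min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB.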
+ */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* + * the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = adapter->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* + * If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if ((tx_space < min_tx_space) && + ((min_tx_space - tx_space) < pba)) { + pba -= min_tx_space - tx_space; + + /* + * if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if ((pba < min_rx_space) && + (!(adapter->flags & FLAG_HAS_ERT))) + /* ERT enabled in e1000_configure_rx */ + pba = min_rx_space; + } + + ew32(PBA, pba); + } + + /* + * flow control settings + * + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) + fc->pause_time = 0xFFFF; + else + fc->pause_time = E1000_FC_PAUSE_TIME; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + switch (hw->mac.type) { + default: + if ((adapter->flags & FLAG_HAS_ERT) && + (adapter->netdev->mtu > ETH_DATA_LEN)) + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - (E1000_ERT_2048 << 3))); + else + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - adapter->max_frame_size)); + + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + break; + case e1000_pchlan: + /* + * Workaround PCH LOM adapter hangs with certain network + * loads. If hangs persist, try disabling Tx flow control. + */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + fc->high_water = 0x3500; + fc->low_water = 0x1500; + } else { + fc->high_water = 0x5000; + fc->low_water = 0x3000; + } + fc->refresh_time = 0x1000; + break; + case e1000_pch2lan: + case e1000_pch_lpt: + fc->high_water = 0x05C20; + fc->low_water = 0x05048; + fc->pause_time = 0x0650; + fc->refresh_time = 0x0400; + if (adapter->netdev->mtu > ETH_DATA_LEN) { + pba = 14; + ew32(PBA, pba); + } + break; + } + + /* + * Disable Adaptive Interrupt Moderation if 2 full packets cannot + * fit in receive buffer and early-receive not supported. 
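+ * (The ITR register holds the minimum inter-interrupt gap in 256 ns
+ * units, so 1000000000 / (itr * 256) converts an interrupts-per-second
+ * target into register counts; e.g. the itr = 20000 default below
+ * programs 1000000000 / (20000 * 256) = 195.)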
+ */ + if (adapter->itr_setting & 0x3) { + if (((adapter->max_frame_size * 2) > (pba << 10)) && + !(adapter->flags & FLAG_HAS_ERT)) { + if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate turned off\n"); + adapter->flags2 |= FLAG2_DISABLE_AIM; + ew32(ITR, 0); + } + } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate turned on\n"); + adapter->flags2 &= ~FLAG2_DISABLE_AIM; + adapter->itr = 20000; + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + } + + /* Allow time for pending master requests to run */ + mac->ops.reset_hw(hw); + + /* + * For parts with AMT enabled, let the firmware know + * that the network interface is in control + */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + ew32(WUC, 0); + + if (mac->ops.init_hw(hw)) + e_err("Hardware Error\n"); + + e1000_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETH_P_8021Q); + + e1000e_reset_adaptive(hw); + + if (!rtnetif_running(adapter->netdev) && + !test_bit(__E1000_TESTING, &adapter->state)) { + e1000_power_down_phy(adapter); + return; + } + + e1000_get_phy_info(hw); + + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && + !(adapter->flags & FLAG_SMART_POWER_DOWN)) { + u16 phy_data = 0; + /* + * speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed + */ + e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + } +} + +int e1000e_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->state); + + if (adapter->msix_entries) + e1000_configure_msix(adapter); + e1000_irq_enable(adapter); + + rtnetif_start_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; +} + +static void e1000e_flush_descriptors(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & FLAG2_DMA_BURST)) + return; + + /* flush pending descriptor writebacks to memory */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); +} + +void e1000e_down(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + + /* + * signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__E1000_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + rtnetif_stop_queue(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* flush both disables and wait for them to finish */ + e1e_flush(); + usleep_range(10000, 20000); + + e1000_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + rtnetif_carrier_off(netdev); + + e1000e_flush_descriptors(adapter); + 
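+ /*
+ * flushing first matters: the FPD bits set above force any pending
+ * descriptor write-backs out to memory, so the ring cleaning below
+ * sees final descriptor state instead of racing buffered DMA
+ */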
e1000_clean_tx_ring(adapter); + e1000_clean_rx_ring(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + e1000e_reset(adapter); + + /* + * TODO: for power management, we could drop the link and + * pci_disable_device here. + */ +} + +void e1000e_reinit_locked(struct e1000_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + e1000e_down(adapter); + e1000e_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->state); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_bsize0 = 128; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->stats64_lock); + + e1000e_set_interrupt_capability(adapter); + + if (e1000_alloc_queues(adapter)) + return -ENOMEM; + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + set_bit(__E1000_DOWN, &adapter->state); + return 0; +} + +/** + * e1000_intr_msi_test - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi_test(int irq, void *data) +{ + struct rtnet_device *netdev = data; + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + e_dbg("icr is %08X\n", icr); + if (icr & E1000_ICR_RXSEQ) { + adapter->flags &= ~FLAG_MSI_TEST_FAILED; + wmb(); + } + + return IRQ_HANDLED; +} + +/** + * e1000_test_msi_interrupt - Returns 0 for successful test + * @adapter: board private struct + * + * code flow taken from tg3.c + **/ +static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* poll_enable hasn't been called yet, so don't need disable */ + /* clear any pending events */ + er32(ICR); + + /* free the real vector and request a test handler */ + e1000_free_irq(adapter); + e1000e_reset_interrupt_capability(adapter); + + /* Assume that the test fails, if it succeeds then the test + * MSI irq handler will unset this flag */ + adapter->flags |= FLAG_MSI_TEST_FAILED; + + err = pci_enable_msi(adapter->pdev); + if (err) + goto msi_test_failed; + + err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, + netdev->name, netdev); + if (err) { + pci_disable_msi(adapter->pdev); + goto msi_test_failed; + } + + wmb(); + + e1000_irq_enable(adapter); + + /* fire an unusual interrupt on the test handler */ + ew32(ICS, E1000_ICS_RXSEQ); + e1e_flush(); + msleep(50); + + e1000_irq_disable(adapter); + + rmb(); + + if (adapter->flags & FLAG_MSI_TEST_FAILED) { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_info("MSI interrupt test failed, using legacy interrupt.\n"); + } else + e_dbg("MSI interrupt test succeeded!\n"); + + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + +msi_test_failed: + e1000e_set_interrupt_capability(adapter); + return 
e1000_request_irq(adapter); +} + +/** + * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored + * @adapter: board private struct + * + * code flow taken from tg3.c, called with e1000 interrupts disabled. + **/ +static int e1000_test_msi(struct e1000_adapter *adapter) +{ + int err; + u16 pci_cmd; + + if (!(adapter->flags & FLAG_MSI_ENABLED)) + return 0; + + /* disable SERR in case the MSI write causes a master abort */ + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_SERR) + pci_write_config_word(adapter->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = e1000_test_msi_interrupt(adapter); + + /* re-enable SERR */ + if (pci_cmd & PCI_COMMAND_SERR) { + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_SERR; + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + } + + return err; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int e1000_open(struct rtnet_device *netdev) +{ + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->state)) + return -EBUSY; + + pm_runtime_get_sync(&pdev->dev); + + rtnetif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000e_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000e_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + + /* + * If AMT is enabled, let the firmware know that the network + * interface is now open and reset the part to a known state. + */ + if (adapter->flags & FLAG_HAS_AMT) { + e1000e_get_hw_control(adapter); + e1000e_reset(adapter); + } + + e1000e_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + e1000_update_mng_vlan(adapter); + + /* + * before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. 
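+ * (In this RTnet port that registration happens in e1000_request_irq()
+ * below, right after rt_stack_connect() attaches the device to the
+ * RTnet stack manager.)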
+ */ + e1000_configure(adapter); + + rt_stack_connect(netdev, &STACK_manager); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* + * Work around PCIe errata with MSI interrupts causing some chipsets to + * ignore e1000e MSI messages, which means we need to test our MSI + * interrupt now + */ + if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { + err = e1000_test_msi(adapter); + if (err) { + e_err("Interrupt allocation failed\n"); + goto err_req_irq; + } + } + + /* From here on the code is the same as e1000e_up() */ + clear_bit(__E1000_DOWN, &adapter->state); + + e1000_irq_enable(adapter); + + rtnetif_start_queue(netdev); + + adapter->idle_check = true; + pm_runtime_put(&pdev->dev); + + /* fire a link status change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; + +err_req_irq: + e1000e_release_hw_control(adapter); + e1000_power_down_phy(adapter); + e1000e_free_rx_resources(adapter); +err_setup_rx: + e1000e_free_tx_resources(adapter); +err_setup_tx: + e1000e_reset(adapter); + pm_runtime_put_sync(&pdev->dev); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int e1000_close(struct rtnet_device *netdev) +{ + struct e1000_adapter *adapter = netdev->priv; + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + pm_runtime_get_sync(&pdev->dev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) { + e1000e_down(adapter); + e1000_free_irq(adapter); + } + e1000_power_down_phy(adapter); + + rt_stack_disconnect(netdev); + + e1000e_free_tx_resources(adapter); + e1000e_free_rx_resources(adapter); + + /* + * kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + + /* + * If AMT is enabled, let the firmware know that the network + * interface is now closed + */ + if ((adapter->flags & FLAG_HAS_AMT) && + !test_bit(__E1000_TESTING, &adapter->state)) + e1000e_release_hw_control(adapter); + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +/** + * e1000e_update_phy_task - work thread to update phy + * @work: pointer to our work struct + * + * this worker thread exists because we must acquire a + * semaphore to read the phy, which we could msleep while + * waiting for it, and we can't msleep in a timer. 
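+ * (That is why the timer callback below only calls
+ * rtdm_schedule_nrt_work(): the actual PHY access is deferred to this
+ * worker, which runs in sleepable Linux context.)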
+ **/ +static void e1000e_update_phy_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, update_phy_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000_get_phy_info(&adapter->hw); +} + +/* + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) +static void e1000_update_phy_info(struct timer_list *t) +{ + struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer); +#else /* < 4.14 */ +static void e1000_update_phy_info(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; +#endif /* < 4.14 */ + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + rtdm_schedule_nrt_work(&adapter->update_phy_task); +} + +/** + * e1000_phy_read_status - Update the PHY register status snapshot + * @adapter: board private structure + **/ +static void e1000_phy_read_status(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_regs *phy = &adapter->phy_regs; + + if ((er32(STATUS) & E1000_STATUS_LU) && + (adapter->hw.phy.media_type == e1000_media_type_copper)) { + int ret_val; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); + ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); + ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); + ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa); + ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion); + ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000); + ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); + ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); + if (ret_val) + e_warn("Error reading PHY register\n"); + } else { + /* + * Do not read PHY registers if link is not up + * Set values to typical power-on defaults + */ + phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); + phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | + BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | + BMSR_ERCAP); + phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | + ADVERTISE_ALL | ADVERTISE_CSMA); + phy->lpa = 0; + phy->expansion = EXPANSION_ENABLENPAGE; + phy->ctrl1000 = ADVERTISE_1000FULL; + phy->stat1000 = 0; + phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); + } +} + +static void e1000_print_link_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + + /* Link status message must follow this format for user tools */ + printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + adapter->netdev->name, + adapter->link_speed, + (adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? + "Rx/Tx" : + ((ctrl & E1000_CTRL_RFCE) ? "Rx" : + ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); +} + +static bool e1000e_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = 0; + s32 ret_val = 0; + + /* + * get_link_status is set on LSC (link status) interrupt or + * Rx sequence error interrupt. 
get_link_status will stay + * false until the check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (hw->mac.get_link_status) { + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + } else { + link_active = 1; + } + break; + case e1000_media_type_fiber: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = adapter->hw.mac.serdes_has_link; + break; + default: + case e1000_media_type_unknown: + break; + } + + if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ + e_info("Gigabit has been disabled, downgrading speed\n"); + } + + return link_active; +} + +static void e1000e_enable_receives(struct e1000_adapter *adapter) +{ + /* make sure the receive unit is started */ + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RX_RESTART_NOW)) { + struct e1000_hw *hw = &adapter->hw; + u32 rctl = er32(RCTL); + ew32(RCTL, rctl | E1000_RCTL_EN); + adapter->flags &= ~FLAG_RX_RESTART_NOW; + } +} + +static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* + * With 82574 controllers, PHY needs to be checked periodically + * for hung state and reset, if two calls return true + */ + if (e1000_check_phy_82574(hw)) + adapter->phy_hang_count++; + else + adapter->phy_hang_count = 0; + + if (adapter->phy_hang_count > 1) { + adapter->phy_hang_count = 0; + rtdm_schedule_nrt_work(&adapter->reset_task); + } +} + +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) +static void e1000_watchdog(struct timer_list *t) +{ + struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer); +#else /* < 4.14 */ +static void e1000_watchdog(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; +#endif /* < 4.14 */ + + /* Do the rest outside of interrupt context */ + rtdm_schedule_nrt_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + +static void e1000_watchdog_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, watchdog_task); + struct rtnet_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_phy_info *phy = &adapter->hw.phy; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link, tctl; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + link = e1000e_has_link(adapter); + if ((rtnetif_carrier_ok(netdev)) && link) { + e1000e_enable_receives(adapter); + goto link_up; + } + + if ((e1000e_enable_tx_pkt_filtering(hw)) && + (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) + e1000_update_mng_vlan(adapter); + + if (link) { + if (!rtnetif_carrier_ok(netdev)) { + bool txb2b = 1; + + /* update snapshot of PHY registers on LSC */ + e1000_phy_read_status(adapter); + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + e1000_print_link_info(adapter); + /* + * On supported PHYs, check for duplex mismatch only + * if link has autonegotiated at 10/100 half + */ + if ((hw->phy.type == e1000_phy_igp_3 
|| + hw->phy.type == e1000_phy_bm) && + (hw->mac.autoneg == true) && + (adapter->link_speed == SPEED_10 || + adapter->link_speed == SPEED_100) && + (adapter->link_duplex == HALF_DUPLEX)) { + u16 autoneg_exp; + + e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp); + + if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS)) + e_info("Autonegotiated half duplex but" + " link partner cannot autoneg. " + " Try forcing full duplex if " + "link gets many collisions.\n"); + } + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = 0; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = 0; + adapter->tx_timeout_factor = 10; + break; + } + + /* + * workaround: re-program speed mode bit after + * link-up event + */ + if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && + !txb2b) { + u32 tarc0; + tarc0 = er32(TARC(0)); + tarc0 &= ~SPEED_MODE_BIT; + ew32(TARC(0), tarc0); + } + + /* + * disable TSO for pcie and 10/100 speeds, to avoid + * some hardware issues + */ + if (!(adapter->flags & FLAG_TSO_FORCE)) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + e_info("10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + break; + default: + /* oops */ + break; + } + } + + /* + * enable transmits in the hardware, need to do this + * after setting TARC(0) + */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* + * Perform any post-link-up configuration before + * reporting link up. + */ + if (phy->ops.cfg_on_link_up) + phy->ops.cfg_on_link_up(hw); + + rtnetif_carrier_on(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (rtnetif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + /* Link status message must follow this format */ + printk(KERN_INFO "e1000e: %s NIC Link is Down\n", + adapter->netdev->name); + rtnetif_carrier_off(netdev); + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + if (adapter->flags & FLAG_RX_NEEDS_RESTART) + rtdm_schedule_nrt_work(&adapter->reset_task); + } + } + +link_up: + spin_lock(&adapter->stats64_lock); + + mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + mac->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorc = adapter->stats.gorc - adapter->gorc_old; + adapter->gorc_old = adapter->stats.gorc; + adapter->gotc = adapter->stats.gotc - adapter->gotc_old; + adapter->gotc_old = adapter->stats.gotc; + spin_unlock(&adapter->stats64_lock); + + e1000e_update_adaptive(&adapter->hw); + + if (!rtnetif_carrier_ok(netdev) && + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { + /* + * We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
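+ * In this port that means kicking reset_task via
+ * rtdm_schedule_nrt_work() rather than resetting directly from the
+ * watchdog path.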
+ */ + rtdm_schedule_nrt_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (adapter->itr_setting == 4) { + /* + * Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotc + adapter->gorc) / 10000; + u32 dif = (adapter->gotc > adapter->gorc ? + adapter->gotc - adapter->gorc : + adapter->gorc - adapter->gotc) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->msix_entries) + ew32(ICS, adapter->rx_ring->ims_val); + else + ew32(ICS, E1000_ICS_RXDMT0); + + /* flush pending descriptors to memory before detecting Tx hang */ + e1000e_flush_descriptors(adapter); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = 1; + + /* + * With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. Set the appropriate LAA in RAR[0] + */ + if (e1000e_get_laa_state_82571(hw)) + e1000e_rar_set(hw, adapter->hw.mac.addr, 0); + + if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) + e1000e_check_82574_phy_workaround(adapter); + + /* Reset the timer */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +#define E1000_MAX_PER_TXD 8192 +#define E1000_MAX_TXD_PWR 12 + +static int e1000_tx_map(struct e1000_adapter *adapter, + struct rtskb *skb, unsigned int first) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_buffer *buffer_info; + unsigned int offset = 0, size, i; + + i = tx_ring->next_to_use; + + buffer_info = &tx_ring->buffer_info[i]; + size = skb->len; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = rtskb_data_dma_addr(skb, offset); + buffer_info->mapped_as_page = false; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = 1; + tx_ring->buffer_info[i].bytecount = size; + tx_ring->buffer_info[first].next_to_watch = i; + + return 1; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + int tx_flags, int count) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (tx_flags & E1000_TX_FLAGS_CSUM) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_VLAN) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + i = tx_ring->next_to_use; + + do { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + + i++; + if (i == tx_ring->count) + i = 0; + } while (--count > 0); + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* + * Force memory writes to complete before letting h/w + * know there are 
new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->next_to_use = i; + + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(adapter, i); + else + writel(i, adapter->hw.hw_addr + tx_ring->tail); + + /* + * we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, + struct rtskb *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 length, offset; + + if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) + return 0; + + if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) + return 0; + + { + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); + struct udphdr *udp; + + if (ip->protocol != IPPROTO_UDP) + return 0; + + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + if (ntohs(udp->dest) != 67) + return 0; + + offset = (u8 *)udp + 8 - skb->data; + length = skb->len - offset; + return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); + } + + return 0; +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static int e1000_xmit_frame(struct rtskb *skb, struct rtnet_device *netdev) +{ + struct e1000_adapter *adapter = netdev->priv; + struct e1000_ring *tx_ring = adapter->tx_ring; + rtdm_lockctx_t context; + unsigned int first; + unsigned int tx_flags = 0; + int count = 0; + + if (test_bit(__E1000_DOWN, &adapter->state)) { + kfree_rtskb(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + kfree_rtskb(skb); + return NETDEV_TX_OK; + } + + count++; + + count += skb->len; + + if (adapter->hw.mac.tx_pkt_filtering) + e1000_transfer_dhcp_info(adapter, skb); + + rtdm_lock_get_irqsave(&tx_ring->lock, context); + + first = tx_ring->next_to_use; + + if (skb->xmit_stamp) + *skb->xmit_stamp = + cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); + + /* if count is 0 then mapping error has occurred */ + count = e1000_tx_map(adapter, skb, first); + if (count) { + e1000_tx_queue(adapter, tx_flags, count); + rtdm_lock_put_irqrestore(&tx_ring->lock, context); + } else { + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + rtdm_lock_put_irqrestore(&tx_ring->lock, context); + kfree_rtskb(skb); + } + + return NETDEV_TX_OK; +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + + /* don't run the task if already down */ + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RX_RESTART_NOW))) { + e1000e_dump(adapter); + e_err("Reset adapter\n"); + } + e1000e_reinit_locked(adapter); +} + +static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, mac_reg; + u16 phy_reg, wuc_enable; + int retval = 0; + + /* copy MAC RARs to PHY RARs */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + retval = hw->phy.ops.acquire(hw); + if (retval) { + e_err("Could not acquire PHY\n"); + return retval; + } + + /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ + retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + goto out; + + /* copy MAC MTA to PHY MTA - only needed for pchlan */ + for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { + mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); + 
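+ /* each 32-bit MTA entry is mirrored as two 16-bit PHY registers:
+ * the low word goes to BM_MTA(i), the high word to BM_MTA(i) + 1 */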
hw->phy.ops.write_reg_page(hw, BM_MTA(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, + (u16)((mac_reg >> 16) & 0xFFFF)); + } + + /* configure PHY Rx Control register */ + hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); + mac_reg = er32(RCTL); + if (mac_reg & E1000_RCTL_UPE) + phy_reg |= BM_RCTL_UPE; + if (mac_reg & E1000_RCTL_MPE) + phy_reg |= BM_RCTL_MPE; + phy_reg &= ~(BM_RCTL_MO_MASK); + if (mac_reg & E1000_RCTL_MO_3) + phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) + << BM_RCTL_MO_SHIFT); + if (mac_reg & E1000_RCTL_BAM) + phy_reg |= BM_RCTL_BAM; + if (mac_reg & E1000_RCTL_PMCF) + phy_reg |= BM_RCTL_PMCF; + mac_reg = er32(CTRL); + if (mac_reg & E1000_CTRL_RFCE) + phy_reg |= BM_RCTL_RFCE; + hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); + + /* enable PHY wakeup in MAC register */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); + + /* configure and enable PHY wakeup in PHY registers */ + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); + + /* activate PHY wakeup */ + wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; + retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + e_err("Could not set PHY Host Wakeup bit\n"); +out: + hw->phy.ops.release(hw); + + return retval; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct rtnet_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + /* Runtime suspend should only enable wakeup for link changes */ + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + int retval = 0; + + rtnetif_device_detach(netdev); + + if (rtnetif_running(netdev)) { + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + e1000e_down(adapter); + e1000_free_irq(adapter); + } + e1000e_reset_interrupt_capability(adapter); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_multi(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = er32(RCTL); + rctl |= E1000_RCTL_MPE; + ew32(RCTL, rctl); + } + + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC; + if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) + ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + + if (adapter->hw.phy.media_type == e1000_media_type_fiber || + adapter->hw.phy.media_type == + e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + if (adapter->flags & FLAG_IS_ICH) + e1000_suspend_workarounds_ich8lan(&adapter->hw); + + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + /* enable wakeup by the PHY */ + retval = e1000_init_phy_wakeup(adapter, wufc); + if (retval) + return retval; + } else { + /* enable wakeup by the MAC */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PME_EN); + } + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + *enable_wake = !!wufc; + + /* 
make sure adapter isn't asleep if manageability is enabled */ + if ((adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + *enable_wake = true; + + if (adapter->hw.phy.type == e1000_phy_igp_3) + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + + /* + * Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + e1000e_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) +{ + if (sleep && wake) { + pci_prepare_to_sleep(pdev); + return; + } + + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); +} + +static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, + bool wake) +{ + struct rtnet_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev->priv; + + /* + * The pci-e switch on some quad port adapters will report a + * correctable error when the MAC transitions from D0 to D3. To + * prevent this we need to mask off the correctable errors on the + * downstream port of the pci-e switch. + */ + if (adapter->flags & FLAG_IS_QUAD_PORT) { + struct pci_dev *us_dev = pdev->bus->self; + int pos = pci_pcie_cap(us_dev); + u16 devctl; + + pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl); + pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, + (devctl & ~PCI_EXP_DEVCTL_CERE)); + + e1000_power_off(pdev, sleep, wake); + + pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); + } else { + e1000_power_off(pdev, sleep, wake); + } +} + +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + int pos; + u16 reg16; + + /* + * Both device and parent should have the same ASPM setting. + * Disable ASPM in downstream component first and then upstream. + */ + pos = pci_pcie_cap(pdev); + pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); + reg16 &= ~state; + pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16); + + if (!pdev->bus->self) + return; + + pos = pci_pcie_cap(pdev->bus->self); + pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16); + reg16 &= ~state; + pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16); +} + +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + dev_info(&pdev->dev, "Disabling ASPM %s %s\n", + (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", + (state & PCIE_LINK_STATE_L1) ? "L1" : ""); + + __e1000e_disable_aspm(pdev, state); +} + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake = false; + + __e1000_shutdown(pdev, &wake, false); + + if (system_state == SYSTEM_POWER_OFF) + e1000_complete_shutdown(pdev, false, wake); +} + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct rtnet_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev->priv; + + rtnetif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (rtnetif_running(netdev)) + e1000e_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct rtnet_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev->priv; + struct e1000_hw *hw = &adapter->hw; + u16 aspm_disable_flag = 0; + int err; + pci_ers_result_t result; + + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pdev->state_saved = true; + pci_restore_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000e_reset(adapter); + ew32(WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_aer_clear_nonfatal_status(pdev); + + return result; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct rtnet_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev->priv; + + e1000_init_manageability_pt(adapter); + + if (rtnetif_running(netdev)) { + if (e1000e_up(adapter)) { + dev_err(&pdev->dev, + "can't bring device back up after reset\n"); + return; + } + } + + rtnetif_device_attach(netdev); + + /* + * If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. + */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + +} + +static void e1000_print_device_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct rtnet_device *netdev = adapter->netdev; + u32 ret_val; + u8 pba_str[E1000_PBANUM_LENGTH]; + + /* print bus type/speed/width info */ + e_info("(PCI Express:2.5GT/s:%s) %pM\n", + /* bus width */ + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : + "Width x1"), + /* MAC address */ + netdev->dev_addr); + e_info("Intel(R) PRO/%s Network Connection\n", + (hw->phy.type == e1000_phy_ife) ? 
"10/100" : "1000"); + ret_val = e1000_read_pba_string_generic(hw, pba_str, + E1000_PBANUM_LENGTH); + if (ret_val) + strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); + e_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, pba_str); +} + +static void e1000_eeprom_checks(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int ret_val; + u16 buf = 0; + + if (hw->mac.type != e1000_82573) + return; + + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); + if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) { + /* Deep Smart Power Down (DSPD) */ + dev_warn(&adapter->pdev->dev, + "Warning: detected DSPD enabled in EEPROM\n"); + } +} + +static dma_addr_t e1000_map_rtskb(struct rtnet_device *netdev, + struct rtskb *skb) +{ + struct e1000_adapter *adapter = netdev->priv; + struct device *dev = &adapter->pdev->dev; + dma_addr_t addr; + + addr = dma_map_single(dev, skb->buf_start, RTSKB_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, addr)) { + dev_err(dev, "DMA map failed\n"); + return RTSKB_UNMAPPED; + } + return addr; +} + +static void e1000_unmap_rtskb(struct rtnet_device *netdev, + struct rtskb *skb) +{ + struct e1000_adapter *adapter = netdev->priv; + struct device *dev = &adapter->pdev->dev; + + dma_unmap_single(dev, skb->buf_dma_addr, RTSKB_SIZE, + DMA_BIDIRECTIONAL); +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct rtnet_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; + resource_size_t mmio_start, mmio_len; + resource_size_t flash_start, flash_len; + + static int cards_found; + u16 aspm_disable_flag = 0; + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + + if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_selected_regions_exclusive(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), + e1000e_driver_name); + if (err) + goto err_pci_reg; + + /* AER (Advanced Error Reporting) hooks */ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + /* PCI config space info */ + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = rt_alloc_etherdev(sizeof(*adapter), + 2 * RT_E1000E_NUM_RXD + 256); + if (!netdev) + goto err_alloc_etherdev; + + rtdev_alloc_name(netdev, "rteth%d"); + rt_rtdev_connect(netdev, &RTDEV_manager); + netdev->vers = 
RTDEV_VERS_2_0; + netdev->sysbind = &pdev->dev; + + netdev->irq = pdev->irq; + + pci_set_drvdata(pdev, netdev); + adapter = netdev->priv; + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->flags2 = ei->flags2; + adapter->hw.adapter = adapter; + adapter->hw.mac.type = ei->mac; + adapter->max_hw_frame_size = ei->max_hw_frame_size; + adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + err = -EIO; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if ((adapter->flags & FLAG_HAS_FLASH) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) + goto err_flashmap; + } + + /* construct the net_device struct */ + netdev->open = e1000_open; + netdev->stop = e1000_close; + netdev->hard_start_xmit = e1000_xmit_frame; + //netdev->get_stats = e1000_get_stats; + netdev->map_rtskb = e1000_map_rtskb; + netdev->unmap_rtskb = e1000_unmap_rtskb; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found++; + + e1000e_check_options(adapter); + + /* setup adapter struct */ + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + err = ei->get_variants(adapter); + if (err) + goto err_hw_init; + + if ((adapter->flags & FLAG_IS_ICH) && + (adapter->flags & FLAG_READ_ONLY_NVM)) + e1000e_write_protect_nvm_ich8lan(&adapter->hw); + + hw->mac.ops.get_bus_info(&adapter->hw); + + adapter->hw.phy.autoneg_wait_to_complete = 0; + + /* Copper options */ + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = 0; + adapter->hw.phy.ms_type = e1000_ms_hw_default; + } + + if (e1000_check_reset_block(&adapter->hw)) + e_info("PHY reset is blocked due to SOL/IDER session.\n"); + + /* Set initial default active device features */ + netdev->features = (NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM); + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + } + + if (e1000e_enable_mng_pass_thru(&adapter->hw)) + adapter->flags |= FLAG_MNG_PT_ENABLED; + + /* + * before reading the NVM, reset the controller to + * put the device in a known good starting state + */ + adapter->hw.mac.ops.reset_hw(&adapter->hw); + + /* + * systems with ASPM and others may see the checksum fail on the first + * attempt. 
Let's give it a few tries + */ + for (i = 0;; i++) { + if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) + break; + if (i == 2) { + e_err("The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + e1000_eeprom_checks(adapter); + + /* copy the MAC address */ + if (e1000e_read_mac_addr(&adapter->hw)) + e_err("NVM Read Error while reading MAC address\n"); + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) + timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); + timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); +#else /* < 4.14 */ + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = e1000_watchdog; + adapter->watchdog_timer.data = (unsigned long) adapter; + + init_timer(&adapter->phy_info_timer); + adapter->phy_info_timer.function = e1000_update_phy_info; + adapter->phy_info_timer.data = (unsigned long) adapter; +#endif /* < 4.14 */ + + INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); + INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); + INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); + + rtdm_nrtsig_init(&adapter->mod_timer_sig, e1000e_mod_watchdog_timer, + (void*)&adapter->watchdog_timer); + rtdm_nrtsig_init(&adapter->downshift_sig, e1000e_trigger_downshift, + &adapter->downshift_task); + + /* Initialize link parameters. User can change them with ethtool */ + adapter->hw.mac.autoneg = 1; + adapter->fc_autoneg = 1; + adapter->hw.fc.requested_mode = e1000_fc_default; + adapter->hw.fc.current_mode = e1000_fc_default; + adapter->hw.phy.autoneg_advertised = 0x2f; + + /* ring size defaults */ + adapter->rx_ring->count = RT_E1000E_NUM_RXD; + adapter->tx_ring->count = 256; + + /* + * Initial Wake on LAN setting - If APM wake is enabled in + * the EEPROM, enable the ACPI Magic Packet filter + */ + if (adapter->flags & FLAG_APME_IN_WUC) { + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = er32(WUC); + eeprom_apme_mask = E1000_WUC_APME; + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) + adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; + } else if (adapter->flags & FLAG_APME_IN_CTRL3) { + if (adapter->flags & FLAG_APME_CHECK_PORT_B && + (adapter->hw.bus.func == 1)) + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + else + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + } + + /* fetch WoL from EEPROM */ + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* + * now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + if (!(adapter->flags & FLAG_HAS_WOL)) + adapter->eeprom_wol = 0; + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* save off EEPROM version number */ + e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); + + /* reset the hardware with the new settings */ + e1000e_reset(adapter); + + /* + * If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
+	 */
+	if (!(adapter->flags & FLAG_HAS_AMT))
+		e1000e_get_hw_control(adapter);
+
+	strncpy(netdev->name, "rteth%d", sizeof(netdev->name) - 1);
+	err = rt_register_rtnetdev(netdev);
+	if (err)
+		goto err_register;
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	rtnetif_carrier_off(netdev);
+
+	e1000_print_device_info(adapter);
+
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_put_noidle(&pdev->dev);
+
+	return 0;
+
+err_register:
+	rtdm_nrtsig_destroy(&adapter->downshift_sig);
+	rtdm_nrtsig_destroy(&adapter->mod_timer_sig);
+	if (!(adapter->flags & FLAG_HAS_AMT))
+		e1000e_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+err_hw_init:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_sw_init:
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	e1000e_reset_interrupt_capability(adapter);
+err_flashmap:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void e1000_remove(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+	/*
+	 * The timers may be rescheduled, so explicitly prevent them
+	 * from being rescheduled.
+	 */
+	if (!down)
+		set_bit(__E1000_DOWN, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	rtdm_nrtsig_destroy(&adapter->downshift_sig);
+	rtdm_nrtsig_destroy(&adapter->mod_timer_sig);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+	cancel_work_sync(&adapter->downshift_task);
+	cancel_work_sync(&adapter->update_phy_task);
+
+	if (!(netdev->flags & IFF_UP))
+		e1000_power_down_phy(adapter);
+
+	/* Don't lie to e1000_close() down the road. */
+	if (!down)
+		clear_bit(__E1000_DOWN, &adapter->state);
+	rt_unregister_rtnetdev(netdev);
+
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_get_noresume(&pdev->dev);
+
+	/*
+	 * Release control of h/w to f/w. If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+ */ + e1000e_release_hw_control(adapter); + + e1000e_reset_interrupt_capability(adapter); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + rtdev_free(netdev); + + /* AER disable */ + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static const struct pci_device_id e1000_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), + board_80003es2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, + { 
PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, + + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +/* PCI Device API Driver */ +static struct pci_driver e1000_driver = { + .name = e1000e_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("Intel(R) PRO/1000 Network Driver - %s\n", + e1000e_driver_version); + pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); + ret = pci_register_driver(&e1000_driver); + + return ret; +} +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} +module_exit(e1000_exit_module); + + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* e1000_main.c */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c new file mode 100644 index 0000000..22a6f5a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c @@ -0,0 +1,484 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "e1000.h"
+
+/*
+ * This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+#define COPYBREAK_DEFAULT 256
+unsigned int copybreak = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
+
+/*
+ * All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc)					\
+	static int X[E1000_MAX_NIC+1]				\
+		= E1000_PARAM_INIT;				\
+	static unsigned int num_##X;				\
+	module_param_array_named(X, X, int, &num_##X, 0);	\
+	MODULE_PARM_DESC(X, desc);
+
+/*
+ * Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay typically needs to be set to something non-zero
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0 for rtnet
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 0
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
+
+/*
+ * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0 for rtnet
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 0
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
+
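Each E1000_PARAM() option above expands to a per-board array plus a matching num_X counter. As a hypothetical two-port usage example, loading the module with "modprobe e1000e TxIntDelay=0,8" assigns 0 to board 0 and 8 to board 1, makes num_TxIntDelay equal to 2, and leaves every further board at OPTION_UNSET, so e1000e_check_options() below falls back to the compiled-in default for those boards.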
+/*
+ * Receive Interrupt Delay in units of 1.024 microseconds
+ * hardware will likely hang if you set this to anything but zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
+
+/*
+ * Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
+
+/*
+ * Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ *
+ * Default Value: 0 for rtnet
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 0
+#define MAX_ITR 100000
+#define MIN_ITR 100
+
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0 - 2
+ *
+ * Default Value: 2 (MSI-X)
+ */
+E1000_PARAM(IntMode, "Interrupt Mode");
+#define MAX_INTMODE 2
+#define MIN_INTMODE 0
+
+/*
+ * Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/*
+ * Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+/*
+ * Write Protect NVM
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+
+/*
+ * Enable CRC Stripping
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
+	    "the CRC");
+
+struct e1000_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct e1000_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int e1000_validate_option(unsigned int *value,
+				 const struct e1000_option *opt,
+				 struct e1000_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			e_info("%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			e_info("%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			e_info("%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct e1000_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					e_info("%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
+	       opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+/**
+ * e1000e_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/ +void e1000e_check_options(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_notice("Warning: no configuration for board #%i\n", bd); + e_notice("Using defaults for all values\n"); + } + + { /* Transmit Interrupt Delay */ + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY } } + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY } } + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + static struct e1000_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY } } + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + static const struct e1000_option opt = { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY } } + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + static const struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " + __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_info("%s turned off\n", opt.name); + break; + case 1: + e_info("%s set to dynamic mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_info("%s set to dynamic conservative mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_info("%s set to simplified (2000-8000 ints) " + "mode\n", opt.name); + adapter->itr_setting = 4; + break; + default: + /* + * Save the setting, because the dynamic bits + * change itr. + */ + if (e1000_validate_option(&adapter->itr, &opt, + adapter) && + (adapter->itr == 3)) { + /* + * In case of invalid user value, + * default to conservative mode. 
+ */ + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + } else { + /* + * Clear the lower two bits because + * they are used as control. + */ + adapter->itr_setting = + adapter->itr & ~3; + } + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 0; + } + } + { /* Interrupt Mode */ + static struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = "defaulting to 2 (MSI-X)", + .def = E1000E_INT_MODE_MSIX, + .arg = { .r = { .min = MIN_INTMODE, + .max = MAX_INTMODE } } + }; + + if (num_IntMode > bd) { + unsigned int int_mode = IntMode[bd]; + e1000_validate_option(&int_mode, &opt, adapter); + adapter->int_mode = int_mode; + } else { + adapter->int_mode = opt.def; + } + } + { /* Smart Power Down */ + static const struct e1000_option opt = { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) + && spd) + adapter->flags |= FLAG_SMART_POWER_DOWN; + } + } + { /* CRC Stripping */ + static const struct e1000_option opt = { + .type = enable_option, + .name = "CRC Stripping", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_CrcStripping > bd) { + unsigned int crc_stripping = CrcStripping[bd]; + e1000_validate_option(&crc_stripping, &opt, adapter); + if (crc_stripping == OPTION_ENABLED) + adapter->flags2 |= FLAG2_CRC_STRIPPING; + } else { + adapter->flags2 |= FLAG2_CRC_STRIPPING; + } + } + { /* Kumeran Lock Loss Workaround */ + static const struct e1000_option opt = { + .type = enable_option, + .name = "Kumeran Lock Loss Workaround", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_KumeranLockLoss > bd) { + unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; + e1000_validate_option(&kmrn_lock_loss, &opt, adapter); + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + kmrn_lock_loss); + } else { + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + opt.def); + } + } + { /* Write-protect NVM */ + static const struct e1000_option opt = { + .type = enable_option, + .name = "Write-protect NVM", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (adapter->flags & FLAG_IS_ICH) { + if (num_WriteProtectNVM > bd) { + unsigned int write_protect_nvm = WriteProtectNVM[bd]; + e1000_validate_option(&write_protect_nvm, &opt, + adapter); + if (write_protect_nvm) + adapter->flags |= FLAG_READ_ONLY_NVM; + } else { + if (opt.def) + adapter->flags |= FLAG_READ_ONLY_NVM; + } + } + } +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c new file mode 100644 index 0000000..9ec7835 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c @@ -0,0 +1,3385 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2011 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/delay.h> + +#include "e1000.h" + +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set); +static u32 e1000_get_phy_addr_for_hv_page(u32 page); +static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_m88_cable_length_table) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_igp_2_cable_length_table) + +#define BM_PHY_REG_PAGE(offset) \ + ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) +#define BM_PHY_REG_NUM(offset) \ + ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ + (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ + ~MAX_PHY_REG_ADDRESS))) + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 +#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 + +#define HV_MUX_DATA_CTRL PHY_REG(776, 16) +#define 
HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 +#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 + +/** + * e1000e_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + manc = er32(MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : 0; +} + +/** + * e1000e_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000e_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u16 retry_count = 0; + + if (!(phy->ops.read_reg)) + goto out; + + while (retry_count < 2) { + ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + if (phy->id != 0 && phy->id != PHY_REVISION_MASK) + goto out; + + retry_count++; + } +out: + return ret_val; +} + +/** + * e1000e_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000e_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *data = (u16) mdic; + + /* + * Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. 
+	 */
+	if (hw->mac.type == e1000_pch2lan)
+		udelay(100);
+
+	return 0;
+}
+
+/**
+ * e1000e_write_phy_reg_mdic - Write MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		e_dbg("PHY Address %d is out of range\n", offset);
+		return -E1000_ERR_PARAM;
+	}
+
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register. The MAC will take care of interfacing with the
+	 * PHY to transfer the desired data.
+	 */
+	mdic = (((u32)data) |
+		(offset << E1000_MDIC_REG_SHIFT) |
+		(phy->addr << E1000_MDIC_PHY_SHIFT) |
+		(E1000_MDIC_OP_WRITE));
+
+	ew32(MDIC, mdic);
+
+	/*
+	 * Poll the ready bit to see if the MDI write completed
+	 * Increasing the timeout as testing showed failures with
+	 * the lower timeout
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		udelay(50);
+		mdic = er32(MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		e_dbg("MDI Write did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		e_dbg("MDI Error\n");
+		return -E1000_ERR_PHY;
+	}
+
+	/*
+	 * Allow some time after each MDIC transaction to avoid
+	 * reading duplicate data in the next MDIC transaction.
+	 */
+	if (hw->mac.type == e1000_pch2lan)
+		udelay(100);
+
+	return 0;
+}
+
+/**
+ * e1000e_read_phy_reg_m88 - Read m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					   data);
+
+	hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000e_write_phy_reg_m88 - Write m88 PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+	hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
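To make the MDIC packing used above concrete: the register offset lands in the REG field, the PHY address in the PHY field, then the opcode, and the MAC sets E1000_MDIC_READY once the 16-bit data field is valid. A sketch, not part of this patch, assuming the usual e1000 shift definitions (REG shift 16, PHY shift 21; demo_* is hypothetical):

	/* Build the MDIC command word the way e1000e_read_phy_reg_mdic()
	 * does; e.g. PHY_ID1 (offset 2) on the PHY at address 1 yields
	 * (2 << 16) | (1 << 21) | E1000_MDIC_OP_READ.
	 */
	static u32 demo_mdic_read_cmd(u32 phy_addr, u32 reg_offset)
	{
		return (reg_offset << E1000_MDIC_REG_SHIFT) |
		       (phy_addr << E1000_MDIC_PHY_SHIFT) |
		       E1000_MDIC_OP_READ;
	}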
+/**
+ * e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ * @hw: pointer to the HW structure
+ * @page: page to set (shifted left when necessary)
+ *
+ * Sets PHY page required for PHY register access. Assumes semaphore is
+ * already acquired. Note, this function sets phy.addr to 1 so the caller
+ * must set it appropriately (if necessary) after this function returns.
+ */
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+	e_dbg("Setting page 0x%x\n", page);
+
+	hw->phy.addr = 1;
+
+	return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
+ * __e1000e_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+				     bool locked)
+{
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+						    IGP01E1000_PHY_PAGE_SELECT,
+						    (u16)offset);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					   data);
+
+release:
+	if (!locked)
+		hw->phy.ops.release(hw);
+out:
+	return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000e_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+				      bool locked)
+{
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+						    IGP01E1000_PHY_PAGE_SELECT,
+						    (u16)offset);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+release:
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
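A worked note on the paged access above: IGP-family PHYs keep the page in the upper bits of the register offset, so offsets above MAX_PHY_MULTI_PAGE_REG are first written in full to the page-select register, after which the MDIC transaction only sees the five in-page address bits. A sketch, assuming the usual 0x1F value of MAX_PHY_REG_ADDRESS (demo_* is hypothetical, not part of this patch):

	/* Split an IGP register offset the way the helpers above do. */
	static void demo_split_igp_offset(u32 offset, u16 *page_select, u32 *reg)
	{
		*page_select = (u16)offset;		/* goes to IGP01E1000_PHY_PAGE_SELECT */
		*reg = offset & MAX_PHY_REG_ADDRESS;	/* low 5 bits used by the MDIC access */
	}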
+/**
+ * e1000e_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000e_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000e_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then reads the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+				 bool locked)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	kmrnctrlsta = er32(KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000e_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000e_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then writes the data to PHY register
+ * at the offset using the kumeran interface. Release any acquired semaphores
+ * before exiting.
+ **/ +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + s32 ret_val = 0; + + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + if (!locked) + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000e_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); + +out: + return ret_val; +} + +/** + * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* For BM PHY this bit is downshift enable */ + if (phy->type != e1000_phy_bm) + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((phy->type == e1000_phy_m88) && + (phy->revision < E1000_REVISION_4) && + (phy->id != BME1000_E_PHY_ID_R2)) { + /* + * Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == 2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { + /* Set PHY page 0, register 29 to 0x0003 */ + ret_val = e1e_wphy(hw, 29, 0x0003); + if (ret_val) + return ret_val; + + /* Set PHY page 0, register 30 to 0x0000 */ + ret_val = e1e_wphy(hw, 30, 0x0000); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + if (phy->type == e1000_phy_82578) { + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* 82578 PHY - set the downshift count to 1x. */ + phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; + phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. 
+ **/ +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + e_dbg("Error resetting the PHY.\n"); + return ret_val; + } + + /* + * Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* disable lplu d0 during driver init */ + ret_val = e1000_set_d0_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D0\n"); + return ret_val; + } + /* Configure mdi-mdix settings */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* + * when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). 
*/ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* + * Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + e_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* + * Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* + * Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000e_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. 
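+		 *
+		 * For reference, the PAUSE/ASM_DIR advertisement bits this
+		 * switch programs per mode are (a summary of the cases in
+		 * this switch, not additional logic):
+		 *
+		 *	e1000_fc_none:		PAUSE = 0, ASM_DIR = 0
+		 *	e1000_fc_rx_pause:	PAUSE = 1, ASM_DIR = 1
+		 *	e1000_fc_tx_pause:	PAUSE = 0, ASM_DIR = 1
+		 *	e1000_fc_full:		PAUSE = 1, ASM_DIR = 1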
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		return ret_val;
+	}
+
+	ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+	if (ret_val)
+		return ret_val;
+
+	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+
+	return ret_val;
+}
+
+/**
+ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on the autoneg advertisement parameter,
+ * then configures the PHY to advertise the full capability. Sets up the PHY
+ * to autonegotiate and restarts the negotiation process with the link
+ * partner. If autoneg_wait_to_complete is set, waits for autoneg to
+ * complete before exiting.
+ **/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	/*
+	 * Perform some bounds checking on the autoneg advertisement
+	 * parameter.
+	 */
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/*
+	 * If autoneg_advertised is zero, we assume it was not defaulted
+	 * by the calling code so we set to advertise full capability.
+	 */
+	if (phy->autoneg_advertised == 0)
+		phy->autoneg_advertised = phy->autoneg_mask;
+
+	e_dbg("Reconfiguring auto-neg advertisement params\n");
+	ret_val = e1000_phy_setup_autoneg(hw);
+	if (ret_val) {
+		e_dbg("Error Setting up Auto-Negotiation\n");
+		return ret_val;
+	}
+	e_dbg("Restarting Auto-Neg\n");
+
+	/*
+	 * Restart auto-negotiation by setting the Auto Neg Enable bit and
+	 * the Auto Neg Restart bit in the PHY control register.
+	 */
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Does the user want to wait for Auto-Neg to complete here, or
+	 * check at a later time (for example, in a callback routine)?
+	 */
+	if (phy->autoneg_wait_to_complete) {
+		ret_val = e1000_wait_autoneg(hw);
+		if (ret_val) {
+			e_dbg("Error while waiting for "
+			      "autoneg to complete\n");
+			return ret_val;
+		}
+	}
+
+	hw->mac.get_link_status = 1;
+
+	return ret_val;
+}
+
+/**
+ * e1000e_setup_copper_link - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Calls the appropriate function to configure the link for auto-neg or
+ * forced speed and duplex. We then check for link; once link is
+ * established, the collision distance and flow control are configured,
+ * otherwise the failure to establish link is reported via debug output.
+ **/
+s32 e1000e_setup_copper_link(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	bool link;
+
+	if (hw->mac.autoneg) {
+		/*
+		 * Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = e1000_copper_link_autoneg(hw);
+		if (ret_val)
+			return ret_val;
+	} else {
+		/*
+		 * PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
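+		 *
+		 * For example (an illustrative sketch, not extra driver
+		 * logic; ADVERTISE_100_FULL is assumed to be one of the
+		 * forced speed/duplex flags used by this driver), forcing
+		 * 100 Mb/s full duplex means the caller has already set:
+		 *
+		 *	hw->mac.autoneg = 0;
+		 *	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;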
+ */ + e_dbg("Forcing Speed and Duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000e_phy_has_link_generic(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + e_dbg("Valid link established!!!\n"); + e1000e_config_collision_dist(hw); + ret_val = e1000e_config_fc_after_link_up(hw); + } else { + e_dbg("Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* + * Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Reset the phy to commit changes. 
*/
+	ret_val = e1000e_commit_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						      100000, &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link) {
+			if (hw->phy.type != e1000_phy_m88) {
+				e_dbg("Link taking longer than expected.\n");
+			} else {
+				/*
+				 * We didn't get link.
+				 * Reset the DSP and cross our fingers.
+				 */
+				ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+						   0x001d);
+				if (ret_val)
+					return ret_val;
+				ret_val = e1000e_phy_reset_dsp(hw);
+				if (ret_val)
+					return ret_val;
+			}
+		}
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						      100000, &link);
+		if (ret_val)
+			return ret_val;
+	}
+
+	if (hw->phy.type != e1000_phy_m88)
+		return 0;
+
+	ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Resetting the phy means we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock from
+	 * the reset value of 2.5MHz.
+	 */
+	phy_data |= M88E1000_EPSCR_TX_CLK_25;
+	ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+	return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ * @hw: pointer to the HW structure
+ *
+ * Forces the speed and duplex settings of the PHY.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	/* Disable MDI-X support for 10/100 */
+	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IFE_PMC_AUTO_MDIX;
+	data &= ~IFE_PMC_FORCE_MDIX;
+
+	ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	e_dbg("IFE PMC: %X\n", data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw,
+						      PHY_FORCE_LIMIT,
+						      100000,
+						      &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			e_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw,
+						      PHY_FORCE_LIMIT,
+						      100000,
+						      &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take effect.
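+ *
+ * A minimal usage sketch, mirroring the callers in this file:
+ *
+ *	u16 phy_ctrl;
+ *
+ *	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+ *	if (ret_val)
+ *		return ret_val;
+ *	e1000e_phy_force_speed_duplex_setup(hw, &phy_ctrl);
+ *	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);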
+ **/ +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb\n"); + } + + e1000e_config_collision_dist(hw); + + ew32(CTRL, ctrl); +} + +/** + * e1000e_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000e_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 e1000e_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + case e1000_phy_bm: + case e1000_phy_82578: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + return 0; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask); + + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* + * Polarity is determined based on the speed of + * our connection. 
+	 */
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset = IGP01E1000_PHY_PCS_INIT_REG;
+		mask = IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/*
+		 * This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset = IGP01E1000_PHY_PORT_STATUS;
+		mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = e1e_rphy(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined based on the polarity reversal feature being
+ * enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	/*
+	 * Polarity is determined based on the reversal feature being enabled.
+	 */
+	if (phy->polarity_correction) {
+		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+		mask = IFE_PESC_POLARITY_REVERSED;
+	} else {
+		offset = IFE_PHY_SPECIAL_CONTROL;
+		mask = IFE_PSC_FORCE_POLARITY;
+	}
+
+	ret_val = e1e_rphy(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->cable_polarity = (phy_data & mask)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ * e1000_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ **/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+		/* Read PHY_STATUS twice; see e1000e_phy_has_link_generic(). */
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msleep(100);
+	}
+
+	/*
+	 * PHY_AUTO_NEG_LIMIT expiration doesn't guarantee auto-negotiation
+	 * has completed.
+	 */
+	return ret_val;
+}
+
+/**
+ * e1000e_phy_has_link_generic - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+				u32 usec_interval, bool *success)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	for (i = 0; i < iterations; i++) {
+		/*
+		 * Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky. No harm doing
+		 * it across the board.
+		 */
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val) {
+			/*
+			 * If the first read fails, another entity may have
+			 * ownership of the resources, wait and try again to
+			 * see if they have relinquished the resources yet.
+			 */
+			if (usec_interval >= 1000)
+				mdelay(usec_interval/1000);
+			else
+				udelay(usec_interval);
+		}
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			mdelay(usec_interval/1000);
+		else
+			udelay(usec_interval);
+	}
+
+	*success = (i < iterations);
+
+	return ret_val;
+}
+
+/**
+ * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+ * information. The cable length is determined by averaging the minimum and
+ * maximum values to get the "average" cable length. The m88 PHY has five
+ * possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * @hw: pointer to the HW structure
+ *
+ * The automatic gain control (agc) normalizes the amplitude of the
+ * received signal, adjusting for the attenuation produced by the
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
+ * into a lookup table to obtain the approximate cable length
+ * for each channel.
+ **/
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, i, agc_value = 0;
+	u16 cur_agc_index, max_agc_index = 0;
+	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+		IGP02E1000_PHY_AGC_A,
+		IGP02E1000_PHY_AGC_B,
+		IGP02E1000_PHY_AGC_C,
+		IGP02E1000_PHY_AGC_D
+	};
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/*
+		 * Getting bits 15:9, which represent the combination of
+		 * coarse and fine gain values. The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length.
+		 */
+		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+				IGP02E1000_AGC_LENGTH_MASK;
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0))
+			return -E1000_ERR_PHY;
+
+		/*
+		 * Remove min & max AGC values from calculation.
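+		 *
+		 * For example (illustrative numbers): if the four channel
+		 * lookups return 40, 50, 60 and 90 meters, the 40 and 90
+		 * readings are dropped, agc_value = (50 + 60) / 2 = 55, and
+		 * the reported range is 55 +/- IGP02E1000_AGC_RANGE meters.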
+		 */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+		      e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+	/* Calculate cable length with the error range of +/- 10 meters. */
+	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+				(agc_value - IGP02E1000_AGC_RANGE) : 0;
+	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+	return ret_val;
+}
+
+/**
+ * e1000e_get_phy_info_m88 - Retrieve PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Valid only for copper links. Read the PHY status register (sticky read)
+ * to verify that link is up. Read the PHY special control register to
+ * determine the polarity and 10base-T extended distance. Read the PHY
+ * special status register to determine MDI/MDIx and current speed. If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	if (phy->media_type != e1000_media_type_copper) {
+		e_dbg("Phy info is only valid for copper media\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy->polarity_correction = (phy_data &
+				    M88E1000_PSCR_POLARITY_REVERSAL);
+
+	ret_val = e1000_check_polarity_m88(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000e_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length, local and remote receiver.
+ **/
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = e1000_check_polarity_igp(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			return ret_val;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+	if (ret_val)
+		goto out;
+	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+				   ? false : true;
+
+	if (phy->polarity_correction) {
+		ret_val = e1000_check_polarity_ife(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* Polarity is forced */
+		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+	}
+
+	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;
+
+	/* The following parameters are undefined for 10/100 operation. */
+	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+	phy->local_rx = e1000_1000t_rx_status_undefined;
+	phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000e_phy_sw_reset - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register,
+ * setting the reset bit, and writing the control register back to the PHY.
+ **/
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	udelay(1);
+
+	return ret_val;
+}
+
+/**
+ * e1000e_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
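+ *
+ * In brief, the sequence below is (CTRL is the device control register;
+ * the hold time comes from phy->reset_delay_us):
+ *
+ *	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);	assert PHY reset
+ *	udelay(phy->reset_delay_us);		hold reset
+ *	ew32(CTRL, ctrl);			release reset
+ *	udelay(150);				let the PHY settle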
+ **/
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u32 ctrl;
+
+	ret_val = e1000_check_reset_block(hw);
+	if (ret_val)
+		return 0;
+
+	ret_val = phy->ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ctrl = er32(CTRL);
+	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+	e1e_flush();
+
+	udelay(phy->reset_delay_us);
+
+	ew32(CTRL, ctrl);
+	e1e_flush();
+
+	udelay(150);
+
+	phy->ops.release(hw);
+
+	return e1000_get_phy_cfg_done(hw);
+}
+
+/**
+ * e1000e_get_cfg_done - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milliseconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000e_get_cfg_done(struct e1000_hw *hw)
+{
+	mdelay(10);
+	return 0;
+}
+
+/**
+ * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
+{
+	e_dbg("Running IGP 3 PHY init script\n");
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	e1e_wphy(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	e1e_wphy(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	e1e_wphy(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	e1e_wphy(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to Tx amplitude in Gig mode */
+	e1e_wphy(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	e1e_wphy(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	e1e_wphy(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	e1e_wphy(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	e1e_wphy(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	e1e_wphy(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	e1e_wphy(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	e1e_wphy(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	e1e_wphy(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	e1e_wphy(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	e1e_wphy(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	e1e_wphy(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	e1e_wphy(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	e1e_wphy(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	e1e_wphy(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	e1e_wphy(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	e1e_wphy(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	e1e_wphy(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	e1e_wphy(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	e1e_wphy(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	e1e_wphy(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	e1e_wphy(hw, 0x1798, 0xD008);
+	/*
+	 * Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	e1e_wphy(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	e1e_wphy(hw, 0x187A, 0x0800);
+	/*
+	 * Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	e1e_wphy(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	e1e_wphy(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	e1e_wphy(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	e1e_wphy(hw, 0x0000,
0x1340);
+
+	return 0;
+}
+
+/* Internal function pointers */
+
+/**
+ * e1000_get_phy_cfg_done - Generic PHY configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Return success if silicon family did not implement a family specific
+ * get_cfg_done function.
+ **/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.get_cfg_done)
+		return hw->phy.ops.get_cfg_done(hw);
+
+	return 0;
+}
+
+/**
+ * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
+ * @hw: pointer to the HW structure
+ *
+ * When the silicon family has not implemented a forced speed/duplex
+ * function for the PHY, simply return 0.
+ **/
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.force_speed_duplex)
+		return hw->phy.ops.force_speed_duplex(hw);
+
+	return 0;
+}
+
+/**
+ * e1000e_get_phy_type_from_id - Get PHY type from id
+ * @phy_id: phy_id read from the phy
+ *
+ * Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
+{
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	switch (phy_id) {
+	case M88E1000_I_PHY_ID:
+	case M88E1000_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+	case M88E1011_I_PHY_ID:
+		phy_type = e1000_phy_m88;
+		break;
+	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+		phy_type = e1000_phy_igp_2;
+		break;
+	case GG82563_E_PHY_ID:
+		phy_type = e1000_phy_gg82563;
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy_type = e1000_phy_igp_3;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy_type = e1000_phy_ife;
+		break;
+	case BME1000_E_PHY_ID:
+	case BME1000_E_PHY_ID_R2:
+		phy_type = e1000_phy_bm;
+		break;
+	case I82578_E_PHY_ID:
+		phy_type = e1000_phy_82578;
+		break;
+	case I82577_E_PHY_ID:
+		phy_type = e1000_phy_82577;
+		break;
+	case I82579_E_PHY_ID:
+		phy_type = e1000_phy_82579;
+		break;
+	case I217_E_PHY_ID:
+		phy_type = e1000_phy_i217;
+		break;
+	default:
+		phy_type = e1000_phy_unknown;
+		break;
+	}
+	return phy_type;
+}
+
+/**
+ * e1000e_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000e_determine_phy_address(struct e1000_hw *hw)
+{
+	s32 ret_val = -E1000_ERR_PHY_TYPE;
+	u32 phy_addr = 0;
+	u32 i;
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	hw->phy.id = phy_type;
+
+	for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+		hw->phy.addr = phy_addr;
+		i = 0;
+
+		do {
+			e1000e_get_phy_id(hw);
+			phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
+
+			/*
+			 * If phy_type is valid, break - we found our
+			 * PHY address
+			 */
+			if (phy_type != e1000_phy_unknown) {
+				ret_val = 0;
+				goto out;
+			}
+			usleep_range(1000, 2000);
+			i++;
+		} while (i < 10);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
+ * @page: page to access
+ * @reg: register being accessed
+ *
+ * Returns the phy address for the page requested.
+ **/
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
+{
+	u32 phy_addr = 2;
+
+	if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
+		phy_addr = 1;
+
+	return phy_addr;
+}
+
+/**
+ * e1000e_write_phy_reg_bm - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
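+ *
+ * The offset encodes both the page and the register number, a convention
+ * used throughout this file: page = offset >> IGP_PAGE_SHIFT, and the low
+ * bits select the register within that page. For example (illustrative
+ * only), writing register 17 on page 769 would be:
+ *
+ *	e1000e_write_phy_reg_bm(hw, (769 << IGP_PAGE_SHIFT) | 17, data);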
+ **/
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u32 page = offset >> IGP_PAGE_SHIFT;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false, false);
+		goto out;
+	}
+
+	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		u32 page_shift, page_select;
+
+		/*
+		 * Page select is register 31 for phy address 1 and 22 for
+		 * phy address 2 and 3. Page select is shifted only for
+		 * phy address 1.
+		 */
+		if (hw->phy.addr == 1) {
+			page_shift = IGP_PAGE_SHIFT;
+			page_select = IGP01E1000_PHY_PAGE_SELECT;
+		} else {
+			page_shift = 0;
+			page_select = BM_PHY_PAGE_SELECT;
+		}
+
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+						    (page << page_shift));
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_bm - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u32 page = offset >> IGP_PAGE_SHIFT;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true, false);
+		goto out;
+	}
+
+	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		u32 page_shift, page_select;
+
+		/*
+		 * Page select is register 31 for phy address 1 and 22 for
+		 * phy address 2 and 3. Page select is shifted only for
+		 * phy address 1.
+		 */
+		if (hw->phy.addr == 1) {
+			page_shift = IGP_PAGE_SHIFT;
+			page_select = IGP01E1000_PHY_PAGE_SELECT;
+		} else {
+			page_shift = 0;
+			page_select = BM_PHY_PAGE_SELECT;
+		}
+
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+						    (page << page_shift));
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					   data);
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_bm2 - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/ +s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto out; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto out; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_write_phy_reg_bm2 - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto out; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto out; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +out: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers + * @hw: pointer to the HW structure + * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG + * + * Assumes semaphore already acquired and phy_reg points to a valid memory + * address to store contents of the BM_WUC_ENABLE_REG register. + **/ +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + u16 temp; + + /* All page select, port ctrl and wakeup registers use phy address 1 */ + hw->phy.addr = 1; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + e_dbg("Could not set Port Control page\n"); + goto out; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + e_dbg("Could not read PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + goto out; + } + + /* + * Enable both PHY wakeup mode and Wakeup register page writes. + * Prevent a power state change by disabling ME and Host PHY wakeup. 
+	 */
+	temp = *phy_reg;
+	temp |= BM_WUC_ENABLE_BIT;
+	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
+	if (ret_val) {
+		e_dbg("Could not write PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+		goto out;
+	}
+
+	/* Select Host Wakeup Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+
+	/* caller now able to write registers on the Wakeup registers page */
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
+ *
+ * Restore BM_WUC_ENABLE_REG to its original value.
+ *
+ * Assumes semaphore already acquired and *phy_reg is the contents of the
+ * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
+ * caller.
+ **/
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+	s32 ret_val = 0;
+
+	/* Select Port Control Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+	if (ret_val) {
+		e_dbg("Could not set Port Control page\n");
+		goto out;
+	}
+
+	/* Restore 769.17 to its original value */
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
+	if (ret_val)
+		e_dbg("Could not restore PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to read or write
+ * @read: determines if operation is read or write
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Read the PHY register at offset and store the retrieved information in
+ * data, or write data to PHY register at offset. Note the procedure to
+ * access the PHY wakeup registers is different than reading the other PHY
+ * registers. It works as such:
+ * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
+ * 2) Set page to 800 for host (801 if we were manageability)
+ * 3) Write the address using the address opcode (0x11)
+ * 4) Read or write the data using the data opcode (0x12)
+ * 5) Restore 769.17.2 to its original value
+ *
+ * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
+ * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
+ *
+ * Assumes semaphore is already acquired. When page_set==true, assumes
+ * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
+ * is responsible for calls to
+ * e1000_[enable|disable]_phy_wakeup_reg_access_bm()).
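+ *
+ * For example (illustrative only; (page << IGP_PAGE_SHIFT) | reg is the
+ * page/register packing convention assumed here), reading wakeup register
+ * 1 on page 800 with the page not yet set would be:
+ *
+ *	u16 val;
+ *	ret_val = e1000_access_phy_wakeup_reg_bm(hw,
+ *						 (BM_WUC_PAGE << IGP_PAGE_SHIFT) | 1,
+ *						 &val, true, false);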
+ **/
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+					  u16 *data, bool read, bool page_set)
+{
+	s32 ret_val;
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u16 page = BM_PHY_REG_PAGE(offset);
+	u16 phy_reg = 0;
+
+	/* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+	if ((hw->mac.type == e1000_pchlan) &&
+	    (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+		e_dbg("Attempting to access page %d while gig enabled.\n",
+		      page);
+
+	if (!page_set) {
+		/* Enable access to PHY wakeup registers */
+		ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+		if (ret_val) {
+			e_dbg("Could not enable PHY wakeup reg access\n");
+			goto out;
+		}
+	}
+
+	e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+	/* Write the Wakeup register page offset value using opcode 0x11 */
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+	if (ret_val) {
+		e_dbg("Could not write address opcode to page %d\n", page);
+		goto out;
+	}
+
+	if (read) {
+		/* Read the Wakeup register page value using opcode 0x12 */
+		ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+						   data);
+	} else {
+		/* Write the Wakeup register page value using opcode 0x12 */
+		ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+						    *data);
+	}
+
+	if (ret_val) {
+		e_dbg("Could not access PHY reg %d.%d\n", page, reg);
+		goto out;
+	}
+
+	if (!page_set)
+		ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, restore the link to previous
+ * settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	e1e_wphy(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power, either when link is taken down during
+ * a driver unload or when wake on lan is not enabled. The PHY retains its
+ * settings across the power down/up cycle.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	e1e_wphy(hw, PHY_CONTROL, mii_reg);
+	usleep_range(1000, 2000);
+}
+
+/**
+ * e1000e_commit_phy - Soft PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a soft PHY reset on those that apply. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000e_commit_phy(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.commit)
+		return hw->phy.ops.commit(hw);
+
+	return 0;
+}
+
+/**
+ * e1000_set_d0_lplu_state - Sets low power link up state for D0
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (lplu) state is set to the power management level D0
+ * and SmartSpeed is disabled when active is true, else clear lplu for D0
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+	if (hw->phy.ops.set_d0_lplu_state)
+		return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+	return 0;
+}
+
+/**
+ * __e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphore before exiting.
+ **/
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+				   bool locked, bool page_set)
+{
+	s32 ret_val;
+	u16 page = BM_PHY_REG_PAGE(offset);
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+	if (!locked) {
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true, page_set);
+		goto out;
+	}
+
+	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+		ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+							 data, true);
+		goto out;
+	}
+
+	if (!page_set) {
+		if (page == HV_INTC_FC_PAGE_START)
+			page = 0;
+
+		if (reg > MAX_PHY_MULTI_PAGE_REG) {
+			/* Page is shifted left, PHY expects (page x 32) */
+			ret_val = e1000_set_page_igp(hw,
+						     (page << IGP_PAGE_SHIFT));
+
+			hw->phy.addr = phy_addr;
+
+			if (ret_val)
+				goto out;
+		}
+	}
+
+	e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+	      page << IGP_PAGE_SHIFT, reg);
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+					   data);
+out:
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores
+ * the retrieved information in data. Release the acquired semaphore
+ * before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_read_phy_reg_hv_locked - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_read_phy_reg_page_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired and page already set.
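+ *
+ * For reference, the three read entry points map onto
+ * __e1000_read_phy_reg_hv(hw, offset, data, locked, page_set) as:
+ *
+ *	e1000_read_phy_reg_hv()		locked = false, page_set = false
+ *	e1000_read_phy_reg_hv_locked()	locked = true,  page_set = false
+ *	e1000_read_phy_reg_page_hv()	locked = true,  page_set = true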
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * __e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+				    bool locked, bool page_set)
+{
+	s32 ret_val;
+	u16 page = BM_PHY_REG_PAGE(offset);
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+	if (!locked) {
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false, page_set);
+		goto out;
+	}
+
+	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+		ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+							 &data, false);
+		goto out;
+	}
+
+	if (!page_set) {
+		if (page == HV_INTC_FC_PAGE_START)
+			page = 0;
+
+		/*
+		 * Workaround MDIO accesses being disabled after entering IEEE
+		 * Power Down (when bit 11 of the PHY Control register is set)
+		 */
+		if ((hw->phy.type == e1000_phy_82578) &&
+		    (hw->phy.revision >= 1) &&
+		    (hw->phy.addr == 2) &&
+		    ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) {
+			u16 data2 = 0x7EFF;
+			ret_val = e1000_access_phy_debug_regs_hv(hw,
+								 (1 << 6) | 0x3,
+								 &data2, false);
+			if (ret_val)
+				goto out;
+		}
+
+		if (reg > MAX_PHY_MULTI_PAGE_REG) {
+			/* Page is shifted left, PHY expects (page x 32) */
+			ret_val = e1000_set_page_igp(hw,
+						     (page << IGP_PAGE_SHIFT));
+
+			hw->phy.addr = phy_addr;
+
+			if (ret_val)
+				goto out;
+		}
+	}
+
+	e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+	      page << IGP_PAGE_SHIFT, reg);
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+					    data);
+
+out:
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register at the offset.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_write_phy_reg_hv_locked - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_write_phy_reg_page_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired and page already set.
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ * @page: page to be accessed
+ *
+ * Returns the PHY address to use for the given page: 1 for pages at or
+ * above HV_INTC_FC_PAGE_START, 2 otherwise.
+ **/
+static u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+	u32 phy_addr = 2;
+
+	if (page >= HV_INTC_FC_PAGE_START)
+		phy_addr = 1;
+
+	return phy_addr;
+}
+
+/**
+ * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to be read or written
+ * @read: determines if operation is read or write
+ *
+ * Reads or writes the PHY register at offset, storing the retrieved
+ * information in data on reads. Assumes semaphore already acquired. Note
+ * that the procedure to access these regs uses the address port and data
+ * port to read/write. These accesses are done with PHY address 2 and
+ * without using pages.
+ **/
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+					  u16 *data, bool read)
+{
+	s32 ret_val;
+	u32 addr_reg = 0;
+	u32 data_reg = 0;
+
+	/* This takes care of the difference with desktop vs mobile phy */
+	addr_reg = (hw->phy.type == e1000_phy_82578) ?
+		   I82578_ADDR_REG : I82577_ADDR_REG;
+	data_reg = addr_reg + 1;
+
+	/* All operations in this function are phy address 2 */
+	hw->phy.addr = 2;
+
+	/* masking with 0x3F to remove the page from offset */
+	ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+	if (ret_val) {
+		e_dbg("Could not write the Address Offset port register\n");
+		goto out;
+	}
+
+	/* Read or write the data value next */
+	if (read)
+		ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
+	else
+		ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
+
+	if (ret_val) {
+		e_dbg("Could not access the Data port register\n");
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_link_stall_workaround_hv - Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * This function works around a Si bug where the link partner can get
+ * a link up indication before the PHY does. If small packets are sent
+ * by the link partner they can be placed in the packet buffer without
+ * being properly accounted for by the PHY and will stall preventing
+ * further packets from being received. The workaround is to clear the
+ * packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 data;
+
+	if (hw->phy.type != e1000_phy_82578)
+		goto out;
+
+	/* Do not apply the workaround if the PHY is in loopback
+	 * (bit 14 of PHY_CONTROL set)
+	 */
+	e1e_rphy(hw, PHY_CONTROL, &data);
+	if (data & PHY_CONTROL_LB)
+		goto out;
+
+	/* check if link is up and at 1Gbps */
+	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	data &= BM_CS_STATUS_LINK_UP |
+		BM_CS_STATUS_RESOLVED |
+		BM_CS_STATUS_SPEED_MASK;
+
+	if (data != (BM_CS_STATUS_LINK_UP |
+		     BM_CS_STATUS_RESOLVED |
+		     BM_CS_STATUS_SPEED_1000))
+		goto out;
+
+	mdelay(200);
+
+	/* flush the packets in the fifo buffer */
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+			   HV_MUX_DATA_CTRL_FORCE_SPEED);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. + **/ +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); + + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_82577(hw); + if (ret_val) + goto out; + + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, length;
+
+	ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+		 I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+
+	if (length == E1000_CABLE_LENGTH_UNDEFINED)
+		ret_val = -E1000_ERR_PHY;
+
+	phy->cable_length = length;
+
+out:
+	return ret_val;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c
new file mode 100644
index 0000000..1985bef
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c
@@ -0,0 +1,1854 @@
+/* rtnet/drivers/eepro100-rt.c: An Intel i82557-559 Real-Time-Ethernet driver for Linux. */
+/*
+	RTnet porting 2002 by Jan Kiszka <Jan.Kiszka@web.de>
+	Originally written 1996-1999 by Donald Becker.
+
+	The driver also contains updates by different kernel developers
+	(see incomplete list below).
+	Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
+	Please use this email address and the linux-kernel mailing list for bug reports.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
+	It should work with all i82557/558/559 boards.
+
+	Version history:
+	1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
+		Serious fixes for multicast filter list setting, TX timeout routine;
+		RX ring refilling logic; other stuff
+	2000 Feb  Jeff Garzik <jgarzik@mandrakesoft.com>
+		Convert to new PCI driver interface
+	2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
+		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
+	2000 Jul 17  Goutham Rao <goutham.rao@intel.com>
+		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
+
+	2002 May 16  Jan Kiszka <Jan.Kiszka@web.de>
+		Ported to RTnet (RTAI version)
+*/
+
+static const char *version =
+"eepro100-rt.c:1.36-RTnet-0.8 2002-2006 Jan Kiszka <Jan.Kiszka@web.de>\n"
+"eepro100-rt.c: based on eepro100.c 1.36 by D. Becker, A. V. Savochkin and others\n";
+
+/* A few user-configurable values that apply to all boards.
+   The first set is undocumented and spelled per Intel recommendations. */
+
+static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
+static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
+/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
+static int txdmacount = 128;
+static int rxdmacount /* = 0 */;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
+static int multicast_filter_limit = 64;
+
+/* 'options' is used to pass a transceiver override or full-duplex flag
+   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
+static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int local_debug = -1;	/* The debug level */
+
+/* A few values that may be tweaked. */
+/* The ring sizes should be a power of two for efficiency. */
+#define TX_RING_SIZE	32
+#define RX_RING_SIZE	8	/* RX_RING_SIZE*2 rtskbs will be preallocated */
+/* How many slots the multicast filter setup may take.
+   Do not decrease without changing the set_rx_mode() implementation.
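+   With the defaults used here, the arithmetic below works out to:
+       TX_MULTICAST_RESERV = TX_MULTICAST_SIZE * 2         = 4
+       TX_QUEUE_LIMIT      = TX_RING_SIZE(32) - RESERV(4)  = 28
+       TX_QUEUE_UNFULL     = TX_QUEUE_LIMIT(28) - 4        = 24 */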
+#define TX_MULTICAST_SIZE   2
+#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
+/* Actual number of TX packets queued, must be
+   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
+#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
+/* Hysteresis marking queue as no longer full. */
+#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT		(2*HZ)
+/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
+#define PKT_BUF_SZ		VLAN_ETH_FRAME_LEN
+
+#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
+#warning You must compile this file with the correct options!
+#warning See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+
+// *** RTnet ***
+#include <linux/if_vlan.h>
+#include <rtnet_port.h>
+
+#define MAX_UNITS 8
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+// *** RTnet ***
+
+MODULE_AUTHOR("Maintainer: Jan Kiszka <Jan.Kiszka@web.de>");
+MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
+MODULE_LICENSE("GPL");
+module_param_named(debug, local_debug, int, 0444);
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+module_param(txfifo, int, 0444);
+module_param(rxfifo, int, 0444);
+module_param(txdmacount, int, 0444);
+module_param(rxdmacount, int, 0444);
+module_param(max_interrupt_work, int, 0444);
+module_param(multicast_filter_limit, int, 0444);
+MODULE_PARM_DESC(debug, "eepro100 debug level (0-6)");
+MODULE_PARM_DESC(options, "eepro100: Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC(full_duplex, "eepro100 full duplex setting(s) (1)");
+MODULE_PARM_DESC(txfifo, "eepro100 Tx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(rxfifo, "eepro100 Rx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(txdmacount, "eepro100 Tx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(rxdmacount, "eepro100 Rx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(max_interrupt_work, "eepro100 maximum events handled per interrupt");
+MODULE_PARM_DESC(multicast_filter_limit, "eepro100 maximum number of filtered multicast addresses");
+
+#define RUN_AT(x) (jiffies + (x))
+
+// *** RTnet - no power management ***
+#undef pci_set_power_state
+#define pci_set_power_state null_set_power_state
+static inline int null_set_power_state(struct pci_dev *dev, int state)
+{
+	return 0;
+}
+// *** RTnet ***
+
+#define netdevice_start(dev)
+#define netdevice_stop(dev)
+#define netif_set_tx_timeout(dev, tf, tm) \
+	do { \
+		(dev)->tx_timeout = (tf); \
+		(dev)->watchdog_timeo = (tm); \
+	} while(0)
+
+
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+static int speedo_debug = 1;
+#else
+#define speedo_debug 0
+#endif
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
+single-chip fast Ethernet controller for PCI, as used on the Intel
+EtherExpress Pro 100 adapter.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board. The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line. While it's
+possible to share PCI interrupt lines, it negatively impacts performance and
+only recent kernels support it.
+
+III. Driver operation
+
+IIIA. General
+The Speedo3 is very similar to other Intel network chips, that is to say
+"apparently designed on a different planet". This chip retains the complex
+Rx and Tx descriptors and multiple buffer pointers of previous chips, but
+also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
+Tx mode, but in a simplified lower-overhead manner: it associates only a
+single buffer descriptor with each frame descriptor.
+
+Despite the extra space overhead in each receive skbuff, the driver must use
+the simplified Rx buffer mode to assure that only a single data buffer is
+associated with each RxFD. The driver implements this by reserving space
+for the Rx descriptor at the head of each Rx skbuff.
+
+The Speedo-3 has receive and command unit base addresses that are added to
+almost all descriptor pointers. The driver sets these to zero, so that all
+pointer fields are absolute addresses.
+
+The System Control Block (SCB) of some previous Intel chips exists on the
+chip in both PCI I/O and memory space. This driver uses the I/O space
+registers, but might switch to memory mapped mode to better support non-x86
+processors.
+
+IIIB. Transmit structure
+
+The driver must use the complex Tx command+descriptor mode in order to
+have an indirect pointer to the skbuff data section. Each Tx command block
+(TxCB) is associated with two immediately appended Tx Buffer Descriptors
+(TxBDs). A fixed ring of these TxCB+TxBD pairs is kept as part of the
+speedo_private data structure for each adapter instance.
+
+The newer i82558 explicitly supports this structure, and can read the two
+TxBDs in the same PCI burst as the TxCB.
+
+This ring structure is used for all normal transmit packets, but the
+transmit packet descriptors aren't long enough for most non-Tx commands such
+as CmdConfigure. This is complicated by the possibility that the chip has
+already loaded the link address in the previous descriptor. So for these
+commands we convert the next free descriptor on the ring to a NoOp, and point
+that descriptor's link to the complex command.
+
+An additional complexity of these non-transmit commands is that they may be
+added asynchronously to the normal transmit queue, so we disable interrupts
+whenever the Tx descriptor ring is manipulated.
+
+A notable aspect of these special configure commands is that they do
+work with the normal Tx ring entry scavenge method. The Tx ring scavenge
+is done at interrupt time using the 'dirty_tx' index, and checking for the
+command-complete bit. While the setup frames may have the NoOp command on the
+Tx ring marked as complete without having completed the setup command, this
+is not a problem. The tx_ring entry can still be safely reused, as the
+tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
+
+Commands may have bits set, e.g. CmdSuspend, in the command word to either
+suspend or stop the transmit/command unit. This driver always flags the last
+command with CmdSuspend, erases the CmdSuspend in the previous command, and
+then issues a CU_RESUME.
+Note: Watch out for the potential race condition here: imagine
+	erasing the previous suspend
+		the chip processes the previous command
+		the chip processes the final command, and suspends
+	doing the CU_RESUME
+		the chip processes the next-yet-valid post-final-command.
+So blindly sending a CU_RESUME is only safe if we do it immediately after
+erasing the previous CmdSuspend, without the possibility of an
+intervening delay. Thus the resume command is always within the
+interrupts-disabled region. This is a timing dependence, but handling this
+condition in a timing-independent way would considerably complicate the code.
+
+Note: In previous generation Intel chips, restarting the command unit was a
+notoriously slow process. This is presumably no longer true.
+
+IIIC. Receive structure
+
+Because of the bus-master support on the Speedo3 this driver uses the new
+SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
+This scheme allocates full-sized skbuffs as receive buffers. The value
+SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
+trade off the memory wasted by passing the full-sized skbuff to the queue
+layer for all frames vs. the copying cost of copying a frame to a
+correctly-sized skbuff.
+
+For small frames the copying cost is negligible (esp. considering that we
+are pre-loading the cache with immediately useful header information), so we
+allocate a new, minimally-sized skbuff. For large frames the copying cost
+is non-trivial, and the larger copy might flush the cache of useful data, so
+we pass up the skbuff the packet was received into.
+
+IV. Notes
+
+Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
+that stated that I could disclose the information. But I still resent
+having to sign an Intel NDA when I'm helping Intel sell their own product!
+
+*/
+
+static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);
+
+enum pci_flags_bit {
+	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+static inline unsigned int io_inw(unsigned long port)
+{
+	return inw(port);
+}
+static inline void io_outw(unsigned int val, unsigned long port)
+{
+	outw(val, port);
+}
+
+#ifndef USE_IO
+/* Currently alpha headers define in/out macros.
+   Undefine them. 2000/03/30 SAW */
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb(addr) readb((void *)(addr))
+#define inw(addr) readw((void *)(addr))
+#define inl(addr) readl((void *)(addr))
+#define outb(val, addr) writeb(val, (void *)(addr))
+#define outw(val, addr) writew(val, (void *)(addr))
+#define outl(val, addr) writel(val, (void *)(addr))
+#endif
+
+/* How to wait for the command unit to accept a command.
+   Typically this takes 0 ticks.
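+   The plain helper below bounds the spin at roughly 1 ms (1000 rounds of
+   udelay(1)); the RTDM variants poll with rtdm_task_busy_sleep() and a
+   Kconfig-selected iteration limit (CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT)
+   instead, so a wedged command unit cannot stall a real-time caller
+   indefinitely. */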
+static inline void wait_for_cmd_done(long cmd_ioaddr)
+{
+	int wait = 1000;
+	do
+		udelay(1);
+	while (inb(cmd_ioaddr) && --wait >= 0);
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+	if (wait < 0)
+		printk(KERN_ALERT "eepro100: wait_for_cmd_done timeout!\n");
+#endif
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDSTATS
+static inline int rt_wait_for_cmd_done(long cmd_ioaddr, const char *cmd)
+{
+	int wait = CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT;
+	nanosecs_abs_t t0;	/* start of the poll, for command statistics */
+
+	t0 = rtdm_clock_read();
+	while (inb(cmd_ioaddr) != 0) {
+		if (wait-- == 0) {
+			rtdm_printk(KERN_ALERT "eepro100: rt_wait_for_cmd_done(%s) "
+				    "timeout!\n", cmd);
+			return 1;
+		}
+		rtdm_task_busy_sleep(1000);
+	}
+	return 0;
+}
+#else
+static inline int rt_wait_for_cmd_done(long cmd_ioaddr, const char *cmd)
+{
+	int wait = CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT;
+
+	while (inb(cmd_ioaddr) != 0) {
+		if (wait-- == 0)
+			return 1;
+		rtdm_task_busy_sleep(1000);
+	}
+	return 0;
+}
+#endif
+
+/* Offsets to the various registers.
+   All accesses need not be longword aligned. */
+enum speedo_offsets {
+	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
+	SCBPointer = 4,			/* General purpose pointer. */
+	SCBPort = 8,			/* Misc. commands and operands. */
+	SCBflash = 12, SCBeeprom = 14,	/* EEPROM and flash memory control. */
+	SCBCtrlMDI = 16,		/* MDI interface control. */
+	SCBEarlyRx = 20,		/* Early receive byte count. */
+};
+/* Commands that can be put in a command list entry. */
+enum commands {
+	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
+	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
+	CmdDump = 0x60000, CmdDiagnose = 0x70000,
+	CmdSuspend = 0x40000000,	/* Suspend after completion. */
+	CmdIntr = 0x20000000,		/* Interrupt after completion. */
+	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
+};
+/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
+   status bits. Previous driver versions used separate 16 bit fields for
+   commands and statuses. --SAW */
+#if defined(__alpha__)
+# define clear_suspend(cmd)	clear_bit(30, &(cmd)->cmd_status);
+#else
+# if defined(__LITTLE_ENDIAN)
+#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
+# elif defined(__BIG_ENDIAN)
+#  define clear_suspend(cmd)	((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
+# else
+#  error Unsupported byteorder
+# endif
+#endif
+
+enum SCBCmdBits {
+	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
+	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
+	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
+	/* The rest are Rx and Tx commands. */
+	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
+	CUCmdBase=0x0060,	/* CU Base address (set to zero). */
+	CUDumpStats=0x0070,	/* Dump then reset stats counters. */
+	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
+	RxResumeNoResources=0x0007,
+};
+
+enum SCBPort_cmds {
+	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
+};
+
+/* The Speedo3 Rx and Tx frame/buffer descriptors. */
+struct descriptor {			/* A generic descriptor. */
+	s32 cmd_status;			/* All command and status fields. */
+	u32 link;			/* struct descriptor * */
+	unsigned char params[0];
+};
+
+/* The Speedo3 Rx and Tx buffer descriptors. */
+struct RxFD {				/* Receive frame descriptor. */
+	s32 status;
+	u32 link;			/* struct RxFD * */
+	u32 rx_buf_addr;		/* void * */
+	u32 count;
+};
+
+/* Selected elements of the Tx/RxFD.status word.
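+   Note that bit 15 is shared: RxComplete and StatusComplete are both
+   0x8000, and which meaning applies depends on whether the word belongs
+   to an RxFD or a TxFD.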
*/ +enum RxFD_bits { + RxComplete=0x8000, RxOK=0x2000, + RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010, + RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002, + TxUnderrun=0x1000, StatusComplete=0x8000, +}; + +#define CONFIG_DATA_SIZE 22 +struct TxFD { /* Transmit frame descriptor set. */ + s32 status; + u32 link; /* void * */ + u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */ + s32 count; /* # of TBD (=1), Tx start thresh., etc. */ + /* This constitutes two "TBD" entries -- we only use one. */ +#define TX_DESCR_BUF_OFFSET 16 + u32 tx_buf_addr0; /* void *, frame to be transmitted. */ + s32 tx_buf_size0; /* Length of Tx frame. */ + u32 tx_buf_addr1; /* void *, frame to be transmitted. */ + s32 tx_buf_size1; /* Length of Tx frame. */ + /* the structure must have space for at least CONFIG_DATA_SIZE starting + * from tx_desc_addr field */ +}; + +/* Multicast filter setting block. --SAW */ +struct speedo_mc_block { + struct speedo_mc_block *next; + unsigned int tx; + dma_addr_t frame_dma; + unsigned int len; + struct descriptor frame __attribute__ ((__aligned__(16))); +}; + +/* Elements of the dump_statistics block. This block must be lword aligned. */ +struct speedo_stats { + u32 tx_good_frames; + u32 tx_coll16_errs; + u32 tx_late_colls; + u32 tx_underruns; + u32 tx_lost_carrier; + u32 tx_deferred; + u32 tx_one_colls; + u32 tx_multi_colls; + u32 tx_total_colls; + u32 rx_good_frames; + u32 rx_crc_errs; + u32 rx_align_errs; + u32 rx_resource_errs; + u32 rx_overrun_errs; + u32 rx_colls_errs; + u32 rx_runt_errs; + u32 done_marker; +}; + +enum Rx_ring_state_bits { + RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8, +}; + +/* Do not change the position (alignment) of the first few elements! + The later elements are grouped for cache locality. + + Unfortunately, all the positions have been shifted since there. + A new re-alignment is required. 2000/03/06 SAW */ +struct speedo_private { + struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */ + struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */ + + // *** RTnet *** + /* The addresses of a Tx/Rx-in-place packets/buffers. */ + struct rtskb *tx_skbuff[TX_RING_SIZE]; + struct rtskb *rx_skbuff[RX_RING_SIZE]; + // *** RTnet *** + + /* Mapped addresses of the rings. */ + dma_addr_t tx_ring_dma; +#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD)) + dma_addr_t rx_ring_dma[RX_RING_SIZE]; + struct descriptor *last_cmd; /* Last command sent. */ + unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */ + rtdm_lock_t lock; /* Group with Tx control cache line. */ + u32 tx_threshold; /* The value for txdesc.count. */ + struct RxFD *last_rxf; /* Last filled RX buffer. */ + dma_addr_t last_rxf_dma; + unsigned int cur_rx, dirty_rx; /* The next free ring entry */ + long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ + struct net_device_stats stats; + struct speedo_stats *lstats; + dma_addr_t lstats_dma; + int chip_id; + struct pci_dev *pdev; + struct speedo_mc_block *mc_setup_head;/* Multicast setup frame list head. */ + struct speedo_mc_block *mc_setup_tail;/* Multicast setup frame list tail. */ + long in_interrupt; /* Word-aligned rtdev->interrupt */ + unsigned char acpi_pwr; + signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */ + unsigned int tx_full:1; /* The Tx queue is full. */ + unsigned int full_duplex:1; /* Full-duplex operation requested. */ + unsigned int flow_ctrl:1; /* Use 802.3x flow control. 
*/ + unsigned int rx_bug:1; /* Work around receiver hang errata. */ + unsigned char default_port:8; /* Last rtdev->if_port value. */ + unsigned char rx_ring_state; /* RX ring status flags. */ + unsigned short phy[2]; /* PHY media interfaces available. */ + unsigned short advertising; /* Current PHY advertised caps. */ + unsigned short partner; /* Link partner caps. */ + rtdm_irq_t irq_handle; +}; + +/* The parameters for a CmdConfigure operation. + There are so many options that it would be difficult to document each bit. + We mostly use the default or recommended settings. */ +static const char i82558_config_cmd[CONFIG_DATA_SIZE] = { + 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */ + 0, 0x2E, 0, 0x60, 0x08, 0x88, + 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */ + 0x31, 0x05, }; + +/* PHY media interface chips. */ +enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240, + S80C24, I82555, DP83840A=10, }; +#define EE_READ_CMD (6) + +static int eepro100_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent); +static void eepro100_remove_one (struct pci_dev *pdev); + +static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len); +static int mdio_read(long ioaddr, int phy_id, int location); +static int speedo_open(struct rtnet_device *rtdev); +static void speedo_resume(struct rtnet_device *rtdev); +static void speedo_init_rx_ring(struct rtnet_device *rtdev); +static int speedo_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev); +static void speedo_refill_rx_buffers(struct rtnet_device *rtdev, int force); +static int speedo_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp); +static void speedo_tx_buffer_gc(struct rtnet_device *rtdev); +static int speedo_interrupt(rtdm_irq_t *irq_handle); +static int speedo_close(struct rtnet_device *rtdev); +static void set_rx_mode(struct rtnet_device *rtdev); +static void speedo_show_state(struct rtnet_device *rtdev); +static struct net_device_stats *speedo_get_stats(struct rtnet_device *rtdev); + + +static inline void speedo_write_flush(long ioaddr) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)readb((void *)(ioaddr + SCBStatus)); +} + +static int eepro100_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned long ioaddr; + int irq; + int acpi_idle_state = 0, pm; + static int cards_found = -1; + + static int did_version /* = 0 */; /* Already printed version info. 
*/ + if (speedo_debug > 0 && did_version++ == 0) + printk(version); + + // *** RTnet *** + cards_found++; + if (cards[cards_found] == 0) + goto err_out_none; + // *** RTnet *** + + if (!request_region(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1), "eepro100")) { + printk (KERN_ERR "eepro100: cannot reserve I/O ports\n"); + goto err_out_none; + } + if (!request_mem_region(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0), "eepro100")) { + printk (KERN_ERR "eepro100: cannot reserve MMIO region\n"); + goto err_out_free_pio_region; + } + + irq = pdev->irq; +#ifdef USE_IO + ioaddr = pci_resource_start(pdev, 1); + if (speedo_debug > 2) + printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n", + ioaddr, irq); +#else + ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!ioaddr) { + printk(KERN_ERR "eepro100: cannot remap MMIO region %llx @ %llx\n", + (unsigned long long)pci_resource_len(pdev, 0), + (unsigned long long)pci_resource_start(pdev, 0)); + goto err_out_free_mmio_region; + } + if (speedo_debug > 2) + printk("Found Intel i82557 PCI Speedo, MMIO at %#llx, IRQ %d.\n", + (unsigned long long)pci_resource_start(pdev, 0), irq); +#endif + + /* save power state b4 pci_enable_device overwrites it */ + pm = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm) { + u16 pwr_command; + pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command); + acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK; + } + + if (pci_enable_device(pdev)) + goto err_out_free_mmio_region; + + pci_set_master(pdev); + + if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) != 0) + goto err_out_iounmap; + + return 0; + +err_out_iounmap: ; +#ifndef USE_IO + iounmap ((void *)ioaddr); +#endif +err_out_free_mmio_region: + release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); +err_out_free_pio_region: + release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); +err_out_none: + return -ENODEV; +} + +static int speedo_found1(struct pci_dev *pdev, + long ioaddr, int card_idx, int acpi_idle_state) +{ + // *** RTnet *** + struct rtnet_device *rtdev = NULL; + // *** RTnet *** + + struct speedo_private *sp; + const char *product; + int i, option; + u16 eeprom[0x100]; + int size; + void *tx_ring_space; + dma_addr_t tx_ring_dma; + + size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats); + tx_ring_space = dma_alloc_coherent(&pdev->dev, size, &tx_ring_dma, + GFP_ATOMIC); + if (tx_ring_space == NULL) + return -1; + + // *** RTnet *** + rtdev = rt_alloc_etherdev(sizeof(struct speedo_private), + RX_RING_SIZE * 2 + TX_RING_SIZE); + if (rtdev == NULL) { + printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n"); + dma_free_coherent(&pdev->dev, size, tx_ring_space, tx_ring_dma); + return -1; + } + rtdev_alloc_name(rtdev, "rteth%d"); + memset(rtdev->priv, 0, sizeof(struct speedo_private)); + rt_rtdev_connect(rtdev, &RTDEV_manager); + rtdev->vers = RTDEV_VERS_2_0; + rtdev->sysbind = &pdev->dev; + // *** RTnet *** + + if (rtdev->mem_start > 0) + option = rtdev->mem_start; + else if (card_idx >= 0 && options[card_idx] >= 0) + option = options[card_idx]; + else + option = 0; + + /* Read the station address EEPROM before doing the reset. + Nominally his should even be done before accepting the device, but + then we wouldn't have a device name with which to report the error. + The size test is for 6 bit vs. 8 bit address serial EEPROMs. 
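+	   As a sketch of the probe used below: a read command framed for an
+	   8-bit address is issued first, and the data-out bits sampled during
+	   the transaction (masked with 0xffe0000) decide the part size; if
+	   they all read back as ones, a 256-word part is assumed, otherwise
+	   a 64-word part.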
+ */ + { + unsigned long iobase; + int read_cmd, ee_size; + u16 sum; + int j; + + /* Use IO only to avoid postponed writes and satisfy EEPROM timing + requirements. */ + iobase = pci_resource_start(pdev, 1); + if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000) + == 0xffe0000) { + ee_size = 0x100; + read_cmd = EE_READ_CMD << 24; + } else { + ee_size = 0x40; + read_cmd = EE_READ_CMD << 22; + } + + for (j = 0, i = 0, sum = 0; i < ee_size; i++) { + u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27); + eeprom[i] = value; + sum += value; + if (i < 3) { + rtdev->dev_addr[j++] = value; + rtdev->dev_addr[j++] = value >> 8; + } + } + if (sum != 0xBABA) + printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, " + "check settings before activating this device!\n", + rtdev->name, sum); + /* Don't unregister_netdev(dev); as the EEPro may actually be + usable, especially if the MAC address is set later. + On the other hand, it may be unusable if MDI data is corrupted. */ + } + + /* Reset the chip: stop Tx and Rx processes and clear counters. + This takes less than 10usec and will easily finish before the next + action. */ + outl(PortReset, ioaddr + SCBPort); + inl(ioaddr + SCBPort); + udelay(10); + + if (eeprom[3] & 0x0100) + product = "OEM i82557/i82558 10/100 Ethernet"; + else + product = pci_name(pdev); + + printk(KERN_INFO "%s: %s, ", rtdev->name, product); + + for (i = 0; i < 5; i++) + printk("%2.2X:", rtdev->dev_addr[i]); + printk("%2.2X, ", rtdev->dev_addr[i]); +#ifdef USE_IO + printk("I/O at %#3lx, ", ioaddr); +#endif + printk("IRQ %d.\n", pdev->irq); + + outl(PortReset, ioaddr + SCBPort); + inl(ioaddr + SCBPort); + udelay(10); + + /* Return the chip to its original power state. */ + pci_set_power_state(pdev, acpi_idle_state); + + rtdev->base_addr = ioaddr; + rtdev->irq = pdev->irq; + + sp = rtdev->priv; + sp->pdev = pdev; + sp->acpi_pwr = acpi_idle_state; + sp->tx_ring = tx_ring_space; + sp->tx_ring_dma = tx_ring_dma; + sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE); + sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE); + + sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0; + if (card_idx >= 0) { + if (full_duplex[card_idx] >= 0) + sp->full_duplex = full_duplex[card_idx]; + } + sp->default_port = option >= 0 ? (option & 0x0f) : 0; + + sp->phy[0] = eeprom[6]; + sp->phy[1] = eeprom[7]; + sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1; + if (((pdev->device > 0x1030 && (pdev->device < 0x1039))) + || (pdev->device == 0x2449)) { + sp->chip_id = 1; + } + + if (sp->rx_bug) + printk(KERN_ERR " *** Receiver lock-up bug detected ***\n" + KERN_ERR " Your device may not work reliably!\n"); + + // *** RTnet *** + /* The Speedo-specific entries in the device structure. */ + rtdev->open = &speedo_open; + rtdev->hard_start_xmit = &speedo_start_xmit; + rtdev->stop = &speedo_close; + rtdev->hard_header = &rt_eth_header; + rtdev->get_stats = &speedo_get_stats; + //rtdev->do_ioctl = NULL; + + if ( (i=rt_register_rtnetdev(rtdev)) ) + { + dma_free_coherent(&pdev->dev, size, tx_ring_space, tx_ring_dma); + rtdev_free(rtdev); + return i; + } + + pci_set_drvdata (pdev, rtdev); + // *** RTnet *** + + return 0; +} + +/* Serial EEPROM section. + A "bit" grungy, but we work our way through bit-by-bit :->. */ +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */ +#define EE_CS 0x02 /* EEPROM chip select. */ +#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */ +#define EE_DATA_READ 0x08 /* EEPROM chip data out. 
*/ +#define EE_ENB (0x4800 | EE_CS) +#define EE_WRITE_0 0x4802 +#define EE_WRITE_1 0x4806 +#define EE_OFFSET SCBeeprom + +/* The fixes for the code were kindly provided by Dragan Stancevic + <visitor@valinux.com> to strictly follow Intel specifications of EEPROM + access timing. + The publicly available sheet 64486302 (sec. 3.1) specifies 1us access + interval for serial EEPROM. However, it looks like that there is an + additional requirement dictating larger udelay's in the code below. + 2000/05/24 SAW */ +static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len) +{ + unsigned retval = 0; + long ee_addr = ioaddr + SCBeeprom; + + io_outw(EE_ENB, ee_addr); udelay(2); + io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2); + + /* Shift the command bits out. */ + do { + short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0; + io_outw(dataval, ee_addr); udelay(2); + io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2); + retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0); + } while (--cmd_len >= 0); + io_outw(EE_ENB, ee_addr); udelay(2); + + /* Terminate the EEPROM access. */ + io_outw(EE_ENB & ~EE_CS, ee_addr); + return retval; +} + +static int mdio_read(long ioaddr, int phy_id, int location) +{ + int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */ + outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI); + do { + val = inl(ioaddr + SCBCtrlMDI); + if (--boguscnt < 0) { + printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val); + break; + } + } while (! (val & 0x10000000)); + return val & 0xffff; +} + + +static int +speedo_open(struct rtnet_device *rtdev) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + int retval; + + if (speedo_debug > 1) + printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", rtdev->name, rtdev->irq); + + pci_set_power_state(sp->pdev, 0); + + /* Set up the Tx queue early.. */ + sp->cur_tx = 0; + sp->dirty_tx = 0; + sp->last_cmd = 0; + sp->tx_full = 0; + rtdm_lock_init(&sp->lock); + sp->in_interrupt = 0; + + // *** RTnet *** + rt_stack_connect(rtdev, &STACK_manager); + + retval = rtdm_irq_request(&sp->irq_handle, rtdev->irq, + speedo_interrupt, RTDM_IRQTYPE_SHARED, + "rt_eepro100", rtdev); + if (retval) { + return retval; + } + // *** RTnet *** + + rtdev->if_port = sp->default_port; + + speedo_init_rx_ring(rtdev); + + /* Fire up the hardware. */ + outw(SCBMaskAll, ioaddr + SCBCmd); + speedo_write_flush(ioaddr); + speedo_resume(rtdev); + + netdevice_start(rtdev); + rtnetif_start_queue(rtdev); + + /* Setup the chip and configure the multicast list. */ + sp->mc_setup_head = NULL; + sp->mc_setup_tail = NULL; + sp->flow_ctrl = sp->partner = 0; + sp->rx_mode = -1; /* Invalid -> always reset the mode. */ + set_rx_mode(rtdev); + if ((sp->phy[0] & 0x8000) == 0) + sp->advertising = mdio_read(ioaddr, sp->phy[0] & 0x1f, 4); + + if (mdio_read(ioaddr, sp->phy[0] & 0x1f, MII_BMSR) & BMSR_LSTATUS) + rtnetif_carrier_on(rtdev); + else + rtnetif_carrier_off(rtdev); + + if (speedo_debug > 2) { + printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n", + rtdev->name, inw(ioaddr + SCBStatus)); + } + + /* No need to wait for the command unit to accept here. */ + if ((sp->phy[0] & 0x8000) == 0) + mdio_read(ioaddr, sp->phy[0] & 0x1f, 0); + + return 0; +} + +/* Start the chip hardware after a full reset. 
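+   In order, the function below reloads everything the reset wiped out:
+   the CU/RU base registers (forced to zero so all descriptor pointers are
+   physical addresses), the statistics block address, the head of the RX
+   ring, an IASetup command re-announcing the station address, and finally
+   CUStart with early-Rx and flow-control interrupts still masked.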
*/ +static void speedo_resume(struct rtnet_device *rtdev) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + + /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */ + sp->tx_threshold = 0x01208000; + + /* Set the segment registers to '0'. */ + wait_for_cmd_done(ioaddr + SCBCmd); + outl(0, ioaddr + SCBPointer); + /* impose a delay to avoid a bug */ + inl(ioaddr + SCBPointer); + udelay(10); + outb(RxAddrLoad, ioaddr + SCBCmd); + wait_for_cmd_done(ioaddr + SCBCmd); + outb(CUCmdBase, ioaddr + SCBCmd); + + /* Load the statistics block and rx ring addresses. */ + wait_for_cmd_done(ioaddr + SCBCmd); + outl(sp->lstats_dma, ioaddr + SCBPointer); + outb(CUStatsAddr, ioaddr + SCBCmd); + sp->lstats->done_marker = 0; + + if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) { + if (speedo_debug > 2) + printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n", + rtdev->name); + } else { + wait_for_cmd_done(ioaddr + SCBCmd); + outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE], + ioaddr + SCBPointer); + outb(RxStart, ioaddr + SCBCmd); + } + + wait_for_cmd_done(ioaddr + SCBCmd); + outb(CUDumpStats, ioaddr + SCBCmd); + udelay(30); + + /* Fill the first command with our physical address. */ + { + struct descriptor *ias_cmd; + + ias_cmd = + (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE]; + /* Avoid a bug(?!) here by marking the command already completed. */ + ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000); + ias_cmd->link = + cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE)); + memcpy(ias_cmd->params, rtdev->dev_addr, 6); + sp->last_cmd = ias_cmd; + } + + /* Start the chip's Tx process and unmask interrupts. */ + wait_for_cmd_done(ioaddr + SCBCmd); + outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE), + ioaddr + SCBPointer); + /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should + remain masked --Dragan */ + outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd); +} + +static void speedo_show_state(struct rtnet_device *rtdev) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + unsigned int i; + + /* Print a few items for debugging. */ + if (speedo_debug > 0) { + printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n", rtdev->name, + sp->cur_tx, sp->dirty_tx); + for (i = 0; i < TX_RING_SIZE; i++) + printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", rtdev->name, + i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ', + i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ', + i, sp->tx_ring[i].status); + } + printk(KERN_DEBUG "%s: Printing Rx ring" + " (next to receive into %u, dirty index %u).\n", + rtdev->name, sp->cur_rx, sp->dirty_rx); + + for (i = 0; i < RX_RING_SIZE; i++) + printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", rtdev->name, + sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ', + i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ', + i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ', + i, (sp->rx_ringp[i] != NULL) ? + (unsigned)sp->rx_ringp[i]->status : 0); + + { + long ioaddr = rtdev->base_addr; + int phy_num = sp->phy[0] & 0x1f; + for (i = 0; i < 16; i++) { + /* FIXME: what does it mean? 
--SAW */ + if (i == 6) i = 21; + printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n", + rtdev->name, phy_num, i, mdio_read(ioaddr, phy_num, i)); + } + } +} + +static struct net_device_stats *speedo_get_stats(struct rtnet_device *rtdev) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + return &sp->stats; +} + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void +speedo_init_rx_ring(struct rtnet_device *rtdev) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + struct RxFD *rxf, *last_rxf = NULL; + dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */; + int i; + + sp->cur_rx = 0; + + for (i = 0; i < RX_RING_SIZE; i++) { + struct rtskb *skb; + skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ + 2 + sizeof(struct RxFD)); + sp->rx_skbuff[i] = skb; + if (skb == NULL) + break; /* OK. Just initially short of Rx bufs. */ + // *** RTnet *** + rtskb_reserve(skb, 2); /* IP header alignment */ + // *** RTnet *** + rxf = (struct RxFD *)skb->tail; + sp->rx_ringp[i] = rxf; + sp->rx_ring_dma[i] = + dma_map_single(&sp->pdev->dev, rxf, + PKT_BUF_SZ + sizeof(struct RxFD), + DMA_BIDIRECTIONAL); + rtskb_reserve(skb, sizeof(struct RxFD)); + if (last_rxf) { + last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]); + dma_sync_single_for_device(&sp->pdev->dev, last_rxf_dma, + sizeof(struct RxFD), + DMA_TO_DEVICE); + } + last_rxf = rxf; + last_rxf_dma = sp->rx_ring_dma[i]; + rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */ + rxf->link = 0; /* None yet. */ + /* This field unused by i82557. */ + rxf->rx_buf_addr = 0xffffffff; + rxf->count = cpu_to_le32(PKT_BUF_SZ << 16); + dma_sync_single_for_device(&sp->pdev->dev, sp->rx_ring_dma[i], + sizeof(struct RxFD), DMA_TO_DEVICE); + } + sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + /* Mark the last entry as end-of-list. */ + last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */ + dma_sync_single_for_device(&sp->pdev->dev, + sp->rx_ring_dma[RX_RING_SIZE-1], + sizeof(struct RxFD), DMA_TO_DEVICE); + sp->last_rxf = last_rxf; + sp->last_rxf_dma = last_rxf_dma; +} + +static int +speedo_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + int entry; + // *** RTnet *** + rtdm_lockctx_t context; + + /* Prevent interrupts from changing the Tx ring from underneath us. */ + rtdm_lock_get_irqsave(&sp->lock, context); + // *** RTnet *** + + /* Check if there are enough space. */ + if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) { + // *** RTnet *** + rtnetif_stop_queue(rtdev); + sp->tx_full = 1; + + rtdm_lock_put_irqrestore(&sp->lock, context); + + rtdm_printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", rtdev->name); + // *** RTnet *** + + return 1; + } + + /* Calculate the Tx descriptor entry. */ + entry = sp->cur_tx++ % TX_RING_SIZE; + + sp->tx_skbuff[entry] = skb; + sp->tx_ring[entry].status = + cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex); + if (!(entry & ((TX_RING_SIZE>>2)-1))) + sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr); + sp->tx_ring[entry].link = + cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE)); + sp->tx_ring[entry].tx_desc_addr = + cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET); + /* The data region is always in one buffer descriptor. 
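+	   Only tx_buf_addr0/tx_buf_size0 are filled in; the second TBD slot
+	   of the TxFD stays unused for normal transmits (see struct TxFD).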
*/ + sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold); + sp->tx_ring[entry].tx_buf_addr0 = + cpu_to_le32(dma_map_single(&sp->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE)); + sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len); + +// *** RTnet *** +// Disabled to gain shorter worst-case execution times. +// Hope this bug is not relevant for us + + /* Trigger the command unit resume. */ + if (rt_wait_for_cmd_done(ioaddr + SCBCmd, __FUNCTION__) != 0) { + rtdm_lock_put_irqrestore(&sp->lock, context); + + return 1; + } + + /* get and patch time stamp just before the transmission */ + if (skb->xmit_stamp) + *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); +// *** RTnet *** + + clear_suspend(sp->last_cmd); + /* We want the time window between clearing suspend flag on the previous + command and resuming CU to be as small as possible. + Interrupts in between are very undesired. --SAW */ + outb(CUResume, ioaddr + SCBCmd); + sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry]; + + /* Leave room for set_rx_mode(). If there is no more space than reserved + for multicast filter mark the ring as full. */ + if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) { + rtnetif_stop_queue(rtdev); + sp->tx_full = 1; + } + + // *** RTnet *** + rtdm_lock_put_irqrestore(&sp->lock, context); + // *** RTnet *** + + return 0; +} + +static void speedo_tx_buffer_gc(struct rtnet_device *rtdev) +{ + unsigned int dirty_tx; + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + + dirty_tx = sp->dirty_tx; + while ((int)(sp->cur_tx - dirty_tx) > 0) { + int entry = dirty_tx % TX_RING_SIZE; + int status = le32_to_cpu(sp->tx_ring[entry].status); + + if (speedo_debug > 5) + printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n", + entry, status); + if ((status & StatusComplete) == 0) + break; /* It still hasn't been processed. */ + if (status & TxUnderrun) + if (sp->tx_threshold < 0x01e08000) { + if (speedo_debug > 2) + printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n", + rtdev->name); + sp->tx_threshold += 0x00040000; + } + /* Free the original skb. */ + if (sp->tx_skbuff[entry]) { + sp->stats.tx_packets++; /* Count only user packets. */ + sp->stats.tx_bytes += sp->tx_skbuff[entry]->len; + dma_unmap_single(&sp->pdev->dev, + le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0), + sp->tx_skbuff[entry]->len, DMA_TO_DEVICE); + + // *** RTnet *** + dev_kfree_rtskb(sp->tx_skbuff[entry]); + // *** RTnet *** + + sp->tx_skbuff[entry] = 0; + } + dirty_tx++; + } + +// *** RTnet *** +// *** RTnet *** + + sp->dirty_tx = dirty_tx; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static int speedo_interrupt(rtdm_irq_t *irq_handle) +{ + // *** RTnet *** + nanosecs_abs_t time_stamp = rtdm_clock_read(); + struct rtnet_device *rtdev = + rtdm_irq_get_arg(irq_handle, struct rtnet_device); + int packets = 0; + int ret = RTDM_IRQ_NONE; + // *** RTnet *** + + struct speedo_private *sp; + long ioaddr, boguscnt = max_interrupt_work; + unsigned short status; + + + ioaddr = rtdev->base_addr; + sp = (struct speedo_private *)rtdev->priv; + +#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG + /* A lock to prevent simultaneous entry on SMP machines. */ + if (test_and_set_bit(0, (void*)&sp->in_interrupt)) { + rtdm_printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n", + rtdev->name); + sp->in_interrupt = 0; /* Avoid halting machine. 
*/ + return ret; + } +#endif + + do { + status = inw(ioaddr + SCBStatus); + /* Acknowledge all of the current interrupt sources ASAP. */ + /* Will change from 0xfc00 to 0xff00 when we start handling + FCP and ER interrupts --Dragan */ + outw(status & 0xfc00, ioaddr + SCBStatus); + speedo_write_flush(ioaddr); + + if (speedo_debug > 4) + rtdm_printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n", + rtdev->name, status); + + if ((status & 0xfc00) == 0) + break; + + ret = RTDM_IRQ_HANDLED; + + /* Always check if all rx buffers are allocated. --SAW */ + speedo_refill_rx_buffers(rtdev, 0); + + if ((status & 0x5000) || /* Packet received, or Rx error. */ + (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed) + /* Need to gather the postponed packet. */ + speedo_rx(rtdev, &packets, &time_stamp); + + if (status & 0x1000) { + rtdm_lock_get(&sp->lock); + if ((status & 0x003c) == 0x0028) { /* No more Rx buffers. */ + struct RxFD *rxf; + rtdm_printk(KERN_WARNING "%s: card reports no RX buffers.\n", + rtdev->name); + rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]; + if (rxf == NULL) { + if (speedo_debug > 2) + rtdm_printk(KERN_DEBUG + "%s: NULL cur_rx in speedo_interrupt().\n", + rtdev->name); + sp->rx_ring_state |= RrNoMem|RrNoResources; + } else if (rxf == sp->last_rxf) { + if (speedo_debug > 2) + rtdm_printk(KERN_DEBUG + "%s: cur_rx is last in speedo_interrupt().\n", + rtdev->name); + sp->rx_ring_state |= RrNoMem|RrNoResources; + } else + outb(RxResumeNoResources, ioaddr + SCBCmd); + } else if ((status & 0x003c) == 0x0008) { /* No resources. */ + struct RxFD *rxf; + rtdm_printk(KERN_WARNING "%s: card reports no resources.\n", + rtdev->name); + rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]; + if (rxf == NULL) { + if (speedo_debug > 2) + rtdm_printk(KERN_DEBUG + "%s: NULL cur_rx in speedo_interrupt().\n", + rtdev->name); + sp->rx_ring_state |= RrNoMem|RrNoResources; + } else if (rxf == sp->last_rxf) { + if (speedo_debug > 2) + rtdm_printk(KERN_DEBUG + "%s: cur_rx is last in speedo_interrupt().\n", + rtdev->name); + sp->rx_ring_state |= RrNoMem|RrNoResources; + } else { + /* Restart the receiver. */ + outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE], + ioaddr + SCBPointer); + outb(RxStart, ioaddr + SCBCmd); + } + } + sp->stats.rx_errors++; + rtdm_lock_put(&sp->lock); + } + + if ((sp->rx_ring_state&(RrNoMem|RrNoResources)) == RrNoResources) { + rtdm_printk(KERN_WARNING + "%s: restart the receiver after a possible hang.\n", + rtdev->name); + rtdm_lock_get(&sp->lock); + /* Restart the receiver. + I'm not sure if it's always right to restart the receiver + here but I don't know another way to prevent receiver hangs. + 1999/12/25 SAW */ + outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE], + ioaddr + SCBPointer); + outb(RxStart, ioaddr + SCBCmd); + sp->rx_ring_state &= ~RrNoResources; + rtdm_lock_put(&sp->lock); + } + + /* User interrupt, Command/Tx unit interrupt or CU not active. */ + if (status & 0xA400) { + rtdm_lock_get(&sp->lock); + speedo_tx_buffer_gc(rtdev); + if (sp->tx_full + && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) { + /* The ring is no longer full. */ + sp->tx_full = 0; + rtnetif_wake_queue(rtdev); /* Attention: under a spinlock. --SAW */ + } + rtdm_lock_put(&sp->lock); + } + + if (--boguscnt < 0) { + rtdm_printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n", + rtdev->name, status); + /* Clear all interrupt sources. 
*/ + /* Will change from 0xfc00 to 0xff00 when we start handling + FCP and ER interrupts --Dragan */ + outw(0xfc00, ioaddr + SCBStatus); + break; + } + } while (1); + + if (speedo_debug > 3) + rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", + rtdev->name, inw(ioaddr + SCBStatus)); + + clear_bit(0, (void*)&sp->in_interrupt); + if (packets > 0) + rt_mark_stack_mgr(rtdev); + return ret; +} + +static inline struct RxFD *speedo_rx_alloc(struct rtnet_device *rtdev, int entry) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + struct RxFD *rxf; + struct rtskb *skb; + /* Get a fresh skbuff to replace the consumed one. */ + skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ + 2 + sizeof(struct RxFD)); + sp->rx_skbuff[entry] = skb; + if (skb == NULL) { + sp->rx_ringp[entry] = NULL; + return NULL; + } + rtskb_reserve(skb, 2); /* IP header alignment */ + rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail; + sp->rx_ring_dma[entry] = + dma_map_single(&sp->pdev->dev, rxf, + PKT_BUF_SZ + sizeof(struct RxFD), + DMA_FROM_DEVICE); + rtskb_reserve(skb, sizeof(struct RxFD)); + rxf->rx_buf_addr = 0xffffffff; + dma_sync_single_for_device(&sp->pdev->dev, sp->rx_ring_dma[entry], + sizeof(struct RxFD), DMA_TO_DEVICE); + return rxf; +} + +static inline void speedo_rx_link(struct rtnet_device *rtdev, int entry, + struct RxFD *rxf, dma_addr_t rxf_dma) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */ + rxf->link = 0; /* None yet. */ + rxf->count = cpu_to_le32(PKT_BUF_SZ << 16); + sp->last_rxf->link = cpu_to_le32(rxf_dma); + sp->last_rxf->status &= cpu_to_le32(~0xC0000000); + dma_sync_single_for_device(&sp->pdev->dev, sp->last_rxf_dma, + sizeof(struct RxFD), DMA_TO_DEVICE); + sp->last_rxf = rxf; + sp->last_rxf_dma = rxf_dma; +} + +static int speedo_refill_rx_buf(struct rtnet_device *rtdev, int force) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + int entry; + struct RxFD *rxf; + + entry = sp->dirty_rx % RX_RING_SIZE; + if (sp->rx_skbuff[entry] == NULL) { + rxf = speedo_rx_alloc(rtdev, entry); + if (rxf == NULL) { + unsigned int forw; + int forw_entry; + if (speedo_debug > 2 || !(sp->rx_ring_state & RrOOMReported)) { + // *** RTnet *** + rtdm_printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n", + rtdev->name, force); + // *** RTnet *** + sp->rx_ring_state |= RrOOMReported; + } + if (!force) + return -1; /* Better luck next time! */ + /* Borrow an skb from one of next entries. */ + for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++) + if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL) + break; + if (forw == sp->cur_rx) + return -1; + forw_entry = forw % RX_RING_SIZE; + sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry]; + sp->rx_skbuff[forw_entry] = NULL; + rxf = sp->rx_ringp[forw_entry]; + sp->rx_ringp[forw_entry] = NULL; + sp->rx_ringp[entry] = rxf; + } + } else { + rxf = sp->rx_ringp[entry]; + } + speedo_rx_link(rtdev, entry, rxf, sp->rx_ring_dma[entry]); + sp->dirty_rx++; + sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */ + return 0; +} + +static void speedo_refill_rx_buffers(struct rtnet_device *rtdev, int force) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + + /* Refill the RX ring. 
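+	   (cur_rx - dirty_rx) is the number of entries consumed but not yet
+	   replenished; speedo_refill_rx_buf() returns -1 on allocation
+	   failure, which ends the loop early. With force set, the helper
+	   first tries to borrow a buffer from a later ring entry.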
*/ + while ((int)(sp->cur_rx - sp->dirty_rx) > 0 && + speedo_refill_rx_buf(rtdev, force) != -1); +} + +static int +speedo_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + int entry = sp->cur_rx % RX_RING_SIZE; + int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx; + int alloc_ok = 1; + + if (speedo_debug > 4) + rtdm_printk(KERN_DEBUG " In speedo_rx().\n"); + /* If we own the next entry, it's a new packet. Send it up. */ + while (sp->rx_ringp[entry] != NULL) { + int status; + int pkt_len; + + dma_sync_single_for_cpu(&sp->pdev->dev, sp->rx_ring_dma[entry], + sizeof(struct RxFD), DMA_FROM_DEVICE); + status = le32_to_cpu(sp->rx_ringp[entry]->status); + pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff; + + if (!(status & RxComplete)) + break; + + if (--rx_work_limit < 0) + break; + + /* Check for a rare out-of-memory case: the current buffer is + the last buffer allocated in the RX ring. --SAW */ + if (sp->last_rxf == sp->rx_ringp[entry]) { + /* Postpone the packet. It'll be reaped at an interrupt when this + packet is no longer the last packet in the ring. */ + if (speedo_debug > 2) + rtdm_printk(KERN_DEBUG "%s: RX packet postponed!\n", + rtdev->name); + sp->rx_ring_state |= RrPostponed; + break; + } + + if (speedo_debug > 4) + rtdm_printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status, + pkt_len); + if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) { + if (status & RxErrTooBig) + rtdm_printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, " + "status %8.8x!\n", rtdev->name, status); + else if (! (status & RxOK)) { + /* There was a fatal error. This *should* be impossible. */ + sp->stats.rx_errors++; + rtdm_printk(KERN_ERR "%s: Anomalous event in speedo_rx(), " + "status %8.8x.\n", + rtdev->name, status); + } + } else { + struct rtskb *skb; + +// *** RTnet *** + { +// *** RTnet *** + /* Pass up the already-filled skbuff. */ + skb = sp->rx_skbuff[entry]; + if (skb == NULL) { + rtdm_printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n", + rtdev->name); + break; + } + sp->rx_skbuff[entry] = NULL; + rtskb_put(skb, pkt_len); + sp->rx_ringp[entry] = NULL; + dma_unmap_single(&sp->pdev->dev, + sp->rx_ring_dma[entry], + PKT_BUF_SZ + sizeof(struct RxFD), + DMA_FROM_DEVICE); + } + skb->protocol = rt_eth_type_trans(skb, rtdev); + //rtmac + skb->time_stamp = *time_stamp; + //rtmac + rtnetif_rx(skb); + (*packets)++; + sp->stats.rx_packets++; + sp->stats.rx_bytes += pkt_len; + } + entry = (++sp->cur_rx) % RX_RING_SIZE; + sp->rx_ring_state &= ~RrPostponed; + /* Refill the recently taken buffers. + Do it one-by-one to handle traffic bursts better. */ + if (alloc_ok && speedo_refill_rx_buf(rtdev, 0) == -1) + alloc_ok = 0; + } + + /* Try hard to refill the recently taken buffers. 
*/ + speedo_refill_rx_buffers(rtdev, 1); + + sp->last_rx_time = jiffies; + + return 0; +} + +static int +speedo_close(struct rtnet_device *rtdev) +{ + long ioaddr = rtdev->base_addr; + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + int i; + + netdevice_stop(rtdev); + rtnetif_stop_queue(rtdev); + + if (speedo_debug > 1) + printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n", + rtdev->name, inw(ioaddr + SCBStatus)); + + /* Shutdown procedure according to Intel's e100 */ + outl(PortPartialReset, ioaddr + SCBPort); + speedo_write_flush(ioaddr); udelay(20); + + outl(PortReset, ioaddr + SCBPort); + speedo_write_flush(ioaddr); udelay(20); + + outw(SCBMaskAll, ioaddr + SCBCmd); + speedo_write_flush(ioaddr); + + // *** RTnet *** + if ( (i=rtdm_irq_free(&sp->irq_handle))<0 ) + return i; + + rt_stack_disconnect(rtdev); + + // *** RTnet *** + + /* Print a few items for debugging. */ + if (speedo_debug > 3) + speedo_show_state(rtdev); + + /* Free all the skbuffs in the Rx and Tx queues. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct rtskb *skb = sp->rx_skbuff[i]; + sp->rx_skbuff[i] = 0; + /* Clear the Rx descriptors. */ + if (skb) { + dma_unmap_single(&sp->pdev->dev, + sp->rx_ring_dma[i], + PKT_BUF_SZ + sizeof(struct RxFD), + DMA_FROM_DEVICE); + dev_kfree_rtskb(skb); + } + } + + for (i = 0; i < TX_RING_SIZE; i++) { + struct rtskb *skb = sp->tx_skbuff[i]; + sp->tx_skbuff[i] = 0; + /* Clear the Tx descriptors. */ + if (skb) { + dma_unmap_single(&sp->pdev->dev, + le32_to_cpu(sp->tx_ring[i].tx_buf_addr0), + skb->len, DMA_TO_DEVICE); + + // *** RTnet *** + dev_kfree_rtskb(skb); + // *** RTnet *** + } + } + +// *** RTnet *** +// *** RTnet *** + + pci_set_power_state(sp->pdev, 2); + + return 0; +} + + +/* Set or clear the multicast filter for this adaptor. + This is very ugly with Intel chips -- we usually have to execute an + entire configuration command, plus process a multicast command. + This is complicated. We must put a large configuration command and + an arbitrarily-sized multicast command in the transmit list. + To minimize the disruption -- the previous command might have already + loaded the link -- we convert the current command block, normally a Tx + command, into a no-op and link it to the new command. +*/ +static void set_rx_mode(struct rtnet_device *rtdev) +{ + struct speedo_private *sp = (struct speedo_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + struct descriptor *last_cmd; + char new_rx_mode; + //unsigned long flags; + int entry/*, i*/; + + if (rtdev->flags & IFF_PROMISC) { /* Set promiscuous. */ + new_rx_mode = 3; + } else if (rtdev->flags & IFF_ALLMULTI) { + new_rx_mode = 1; + } else + new_rx_mode = 0; + + if (speedo_debug > 3) + printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", rtdev->name, + sp->rx_mode, new_rx_mode); + + if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) { + /* The Tx ring is full -- don't add anything! Hope the mode will be + * set again later. */ + sp->rx_mode = -1; + return; + } + + if (new_rx_mode != sp->rx_mode) { + u8 *config_cmd_data; + + //spin_lock_irqsave(&sp->lock, flags); --- disabled for now as it runs before irq handler is active + entry = sp->cur_tx++ % TX_RING_SIZE; + last_cmd = sp->last_cmd; + sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry]; + + sp->tx_skbuff[entry] = 0; /* Redundant. 
*/ + sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure); + sp->tx_ring[entry].link = + cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE)); + config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr; + /* Construct a full CmdConfig frame. */ + memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE); + config_cmd_data[1] = (txfifo << 4) | rxfifo; + config_cmd_data[4] = rxdmacount; + config_cmd_data[5] = txdmacount + 0x80; + config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0; + /* 0x80 doesn't disable FC 0x84 does. + Disable Flow control since we are not ACK-ing any FC interrupts + for now. --Dragan */ + config_cmd_data[19] = 0x84; + config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0; + config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05; + if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */ + config_cmd_data[15] |= 0x80; + config_cmd_data[8] = 0; + } + /* Trigger the command unit resume. */ + wait_for_cmd_done(ioaddr + SCBCmd); + clear_suspend(last_cmd); + outb(CUResume, ioaddr + SCBCmd); + if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) { + rtnetif_stop_queue(rtdev); + sp->tx_full = 1; + } + //spin_unlock_irqrestore(&sp->lock, flags); + } + + if (new_rx_mode == 0) { + /* The simple case of 0-3 multicast list entries occurs often, and + fits within one tx_ring[] entry. */ + /*struct dev_mc_list *mclist;*/ + u16 *setup_params/*, *eaddrs*/; + + //spin_lock_irqsave(&sp->lock, flags); --- disabled for now as it runs before irq handler is active + entry = sp->cur_tx++ % TX_RING_SIZE; + last_cmd = sp->last_cmd; + sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry]; + + sp->tx_skbuff[entry] = 0; + sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList); + sp->tx_ring[entry].link = + cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE)); + sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */ + setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr; + *setup_params++ = cpu_to_le16(0); /* mc_count */ +// *** RTnet *** +// *** RTnet *** + + wait_for_cmd_done(ioaddr + SCBCmd); + clear_suspend(last_cmd); + /* Immediately trigger the command unit resume. 
*/
+	outb(CUResume, ioaddr + SCBCmd);
+
+	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+		rtnetif_stop_queue(rtdev);
+		sp->tx_full = 1;
+	}
+	//spin_unlock_irqrestore(&sp->lock, flags);
+// *** RTnet ***
+// *** RTnet ***
+	}
+
+	sp->rx_mode = new_rx_mode;
+}
+
+
+static void eepro100_remove_one (struct pci_dev *pdev)
+{
+	// *** RTnet ***
+	struct rtnet_device *rtdev = pci_get_drvdata (pdev);
+
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+	// *** RTnet ***
+
+	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+
+#ifndef USE_IO
+	iounmap((char *)rtdev->base_addr);
+#endif
+
+	dma_free_coherent(
+		&pdev->dev,
+		TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats),
+		sp->tx_ring, sp->tx_ring_dma);
+	pci_disable_device(pdev);
+
+	// *** RTnet ***
+	rtdev_free(rtdev);
+	// *** RTnet ***
+}
+
+static struct pci_device_id eepro100_pci_tbl[] = {
+	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1092, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x27DC, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
+
+static struct pci_driver eepro100_driver = {
+	.name		= "eepro100_rt",
+	.id_table	= eepro100_pci_tbl,
+	.probe		= eepro100_init_one,
+	.remove		= eepro100_remove_one,
+	.suspend	= NULL,
+	.resume		= NULL,
+};
+
+static int __init eepro100_init_module(void)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+	if (local_debug >= 0 && speedo_debug != local_debug)
+		printk(KERN_INFO "eepro100.c: Debug level is %d.\n", local_debug);
+	if (local_debug >= 0)
+		speedo_debug = local_debug;
+#else /* !CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG */
+	local_debug = speedo_debug; /* touch debug variable */
+#endif /* CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG */
+
+	return pci_register_driver(&eepro100_driver);
+}
+
+static void __exit eepro100_cleanup_module(void)
+{
+	pci_unregister_driver(&eepro100_driver);
+}
+
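+/*
+ * Annotation (not part of the original driver): eepro100_pci_tbl[] above is
+ * what pci_register_driver() matches on; PCI_ANY_ID wildcards the
+ * subvendor/subdevice fields, so any function with one of the listed Intel
+ * device IDs binds to this driver.  A hypothetical additional entry would
+ * look like:
+ *
+ *	{ PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
+ */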
+module_init(eepro100_init_module);
+module_exit(eepro100_cleanup_module);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c
new file mode 100644
index 0000000..e6bf2d6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c
@@ -0,0 +1,1536 @@
+/*
+ * eth1394.c -- RTnet Driver for Ethernet emulation over FireWire
+ *		(adapted from Linux1394)
+ *
+ * Copyright (C) 2005 Zhang Yuchen <yuchen623@gmail.com>
+ *
+ * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+#include <net/arp.h>
+
+#define rtos_spinlock_t rtdm_lock_t
+#define nanosecs_abs_t nanosecs_t
+
+#include <rt_eth1394.h>
+
+#include <rtnet_port.h>
+
+#include <ieee1394_types.h>
+#include <ieee1394_core.h>
+#include <ieee1394_transactions.h>
+#include <ieee1394.h>
+#include <highlevel.h>
+#include <iso.h>
+
+#define driver_name "RT-ETH1394"
+
+
+#define ETH1394_PRINT_G(level, fmt, args...) \
+	rtdm_printk(level "%s: " fmt, driver_name, ## args)
+
+#define ETH1394_PRINT(level, dev_name, fmt, args...) \
+	rtdm_printk(level "%s: %s: " fmt, driver_name, dev_name, ## args)
+
+//#define ETH1394_DEBUG 1
+
+#ifdef ETH1394_DEBUG
+#define DEBUGP(fmt, args...) \
+	rtdm_printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
+#else
+#define DEBUGP(fmt, args...)
+#endif
+
+#define TRACE() rtdm_printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
+
+/* Change this to IEEE1394_SPEED_S100 to make testing easier */
+#define ETH1394_SPEED_DEF	0x03 /*IEEE1394_SPEED_MAX*/
+
+/* For now, this needs to be 1500, so that XP works with us */
+#define ETH1394_DATA_LEN	1500/*ETH_DATA_LEN*/
+
+struct fragment_info {
+	struct list_head list;
+	int offset;
+	int len;
+};
+
+struct partial_datagram {
+	struct list_head list;
+	u16 dgl;
+	u16 dg_size;
+	u16 ether_type;
+	struct rtskb *skb;
+	char *pbuf;
+	struct list_head frag_info;
+};
+
+static const u16 eth1394_speedto_maxpayload[] = {
+/*     S100, S200, S400, S800, S1600, S3200 */
+	512, 1024, 2048, 4096,  4096,  4096
+};
+
+static struct hpsb_highlevel eth1394_highlevel;
+
+/* Use common.lf to determine header len */
+static const int hdr_type_len[] = {
+	sizeof (struct eth1394_uf_hdr),
+	sizeof (struct eth1394_ff_hdr),
+	sizeof (struct eth1394_sf_hdr),
+	sizeof (struct eth1394_sf_hdr)
+};
+
+/* The max_partial_datagrams parameter is the maximum number of fragmented
+ * datagrams per node that eth1394 will keep in memory.
Providing an upper + * bound allows us to limit the amount of memory that partial datagrams + * consume in the event that some partial datagrams are never completed. This + * should probably change to a sysctl item or the like if possible. + */ +static int max_partial_datagrams = 25; +module_param(max_partial_datagrams, int, 0444); +MODULE_PARM_DESC(max_partial_datagrams, + "Maximum number of partially received fragmented datagrams " + "(default = 25)."); + + +static int eth1394_header(struct rtskb *skb, struct rtnet_device *dev, + unsigned short type, void *daddr, void *saddr, + unsigned len); + +static int eth1394_write(struct hpsb_host *host,struct hpsb_packet *packet, unsigned int length); + +static inline void purge_partial_datagram(struct list_head *old); +static int eth1394_tx(struct rtskb *skb, struct rtnet_device *dev); +static void eth1394_iso(struct hpsb_iso *iso, void *arg); + +/* Function for incoming 1394 packets */ +static struct hpsb_address_ops eth1394_ops = { + .write = eth1394_write, +}; + +static void eth1394_add_host (struct hpsb_host *host); +static void eth1394_remove_host (struct hpsb_host *host); +static void eth1394_host_reset (struct hpsb_host *host); + +/* Ieee1394 highlevel driver functions */ +static struct hpsb_highlevel eth1394_highlevel = { + .name = driver_name, + .add_host = eth1394_add_host, + .remove_host = eth1394_remove_host, + .host_reset = eth1394_host_reset, +}; + +static void eth1394_iso_shutdown(struct eth1394_priv *priv) +{ + priv->bc_state = ETHER1394_BC_CLOSED; + + if (priv->iso != NULL) { + //~ if (!in_interrupt()) + hpsb_iso_shutdown(priv->iso); + priv->iso = NULL; + } +} + +static int eth1394_init_bc(struct rtnet_device *dev) +{ + struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv; + + /* First time sending? Need a broadcast channel for ARP and for + * listening on */ + if (priv->bc_state == ETHER1394_BC_CHECK) { + quadlet_t bc; + + /* Get the local copy of the broadcast channel and check its + * validity (the IRM should validate it for us) */ + + bc = priv->host->csr.broadcast_channel; + + if ((bc & 0x80000000) != 0x80000000) { //used to be 0xc0000000 + /* broadcast channel not validated yet */ + ETH1394_PRINT(KERN_WARNING, dev->name, + "Error BROADCAST_CHANNEL register valid " + "bit not set, can't send IP traffic\n"); + + eth1394_iso_shutdown(priv); + + return -EAGAIN; + } + if (priv->broadcast_channel != (bc & 0x3f)) { + /* This really shouldn't be possible, but just in case + * the IEEE 1394 spec changes regarding broadcast + * channels in the future. 
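+			 * Annotation (not original driver text): the checks
+			 * above decode the BROADCAST_CHANNEL CSR, which
+			 * roughly looks like
+			 *
+			 *	bit 31      valid bit, set once the IRM has
+			 *	            validated the channel
+			 *	bits 5..0   channel number (normally 31)
+			 *
+			 * hence the bc & 0x80000000 test and the bc & 0x3f
+			 * masking on both sides of this comparison.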
*/ + + eth1394_iso_shutdown(priv); + + //~ if (in_interrupt()) + //~ return -EAGAIN; + + priv->broadcast_channel = bc & 0x3f; + ETH1394_PRINT(KERN_INFO, dev->name, + "Changing to broadcast channel %d...\n", + priv->broadcast_channel); + + priv->iso = hpsb_iso_recv_init(priv->host, 16 * 4096, + 16, priv->broadcast_channel, HPSB_ISO_DMA_PACKET_PER_BUFFER, + 1, eth1394_iso, 0, "eth1394_iso", IEEE1394_PRIORITY_HIGHEST); + + if (priv->iso == NULL) { + ETH1394_PRINT(KERN_ERR, dev->name, + "failed to change broadcast " + "channel\n"); + return -EAGAIN; + } + } + if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0) { + ETH1394_PRINT(KERN_ERR, dev->name, + "Could not start data stream reception\n"); + + eth1394_iso_shutdown(priv); + + return -EAGAIN; + } + priv->bc_state = ETHER1394_BC_OPENED; + } + + return 0; +} + +static int eth1394_open (struct rtnet_device *dev) +{ + struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv; + rtdm_lockctx_t context; + int ret; + + /* Something bad happened, don't even try */ + if (priv->bc_state == ETHER1394_BC_CLOSED) + { + return -EAGAIN; + } + rtdm_lock_get_irqsave(&priv->lock, context); + ret = eth1394_init_bc(dev); + rtdm_lock_put_irqrestore(&priv->lock, context); + + if (ret) + return ret; + rt_stack_connect(dev,&STACK_manager); + rtnetif_start_queue (dev); + return 0; +} + +static int eth1394_stop (struct rtnet_device *dev) +{ + rtnetif_stop_queue (dev); + rt_stack_disconnect(dev); + return 0; +} + +/* Return statistics to the caller */ +static struct net_device_stats *eth1394_stats (struct rtnet_device *dev) +{ + return &(((struct eth1394_priv *)dev->priv)->stats); +} + +static inline void eth1394_register_limits(int nodeid, u16 maxpayload, + unsigned char sspd, + struct eth1394_priv *priv) +{ + + if (nodeid < 0 || nodeid >= ALL_NODES) { + ETH1394_PRINT_G (KERN_ERR, "Cannot register invalid nodeid %d\n", nodeid); + return; + } + + priv->maxpayload[nodeid] = maxpayload; + priv->sspd[nodeid] = sspd; + priv->maxpayload[ALL_NODES] = min(priv->maxpayload[ALL_NODES], maxpayload); + priv->sspd[ALL_NODES] = min(priv->sspd[ALL_NODES], sspd); + + return; +} + + +static void eth1394_reset_priv (struct rtnet_device *dev, int set_mtu) +{ + rtdm_lockctx_t context; + int i; + struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv; + struct hpsb_host *host = priv->host; + int phy_id = NODEID_TO_NODE(host->node_id); + u16 maxpayload = 1 << (host->csr.max_rec + 1); + + rtdm_lock_get_irqsave(&priv->lock, context); + /* Clear the speed/payload/offset tables */ + memset (priv->maxpayload, 0, sizeof (priv->maxpayload)); + memset (priv->sspd, 0, sizeof (priv->sspd)); + + priv->sspd[ALL_NODES] = ETH1394_SPEED_DEF; + priv->maxpayload[ALL_NODES] = eth1394_speedto_maxpayload[priv->sspd[ALL_NODES]]; + + priv->bc_state = ETHER1394_BC_CHECK; + + /* Register our limits now */ + eth1394_register_limits(phy_id, maxpayload, + host->speed_map[(phy_id << 6) + phy_id], priv); + + /* We'll use our maxpayload as the default mtu */ + if (set_mtu) { + dev->mtu = min(ETH1394_DATA_LEN, (int)(priv->maxpayload[phy_id] - + (sizeof(union eth1394_hdr) + ETHER1394_GASP_OVERHEAD))); + + //~ /* Set our hardware address while we're at it */ + //~ *(u64*)dev->dev_addr = guid; + //~ *(u64*)dev->broadcast = ~0x0ULL; + *(u16*)dev->dev_addr = LOCAL_BUS | phy_id; //we directly use FireWire address for our MAC address + *(u16*)dev->broadcast = LOCAL_BUS | ALL_NODES; + } + + rtdm_lock_put_irqrestore(&priv->lock, context); + + for (i = 0; i < ALL_NODES; i++) { + struct list_head *lh, *n; + + 
rtdm_lock_get_irqsave(&priv->pdg[i].lock, context);
+		if (!set_mtu) {
+			list_for_each_safe(lh, n, &priv->pdg[i].list) {
+				//~ purge_partial_datagram(lh);
+			}
+		}
+		INIT_LIST_HEAD(&(priv->pdg[i].list));
+		priv->pdg[i].sz = 0;
+		rtdm_lock_put_irqrestore(&priv->pdg[i].lock, context);
+	}
+
+}
+
+static void eth1394_add_host (struct hpsb_host *host)
+{
+	int i;
+	struct host_info *hi = NULL;
+
+	//*******RTnet********
+	struct rtnet_device *dev = NULL;
+	//
+	struct eth1394_priv *priv;
+
+	/* We should really have our own alloc_hpsbdev() function in
+	 * net_init.c instead of calling the one for ethernet then hijacking
+	 * it for ourselves.  That way we'd be a real networking device. */
+
+	//******RTnet******
+
+	dev = rt_alloc_etherdev(sizeof (struct eth1394_priv),
+				RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (dev == NULL) {
+		ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to allocate "
+				 "etherdevice for IEEE 1394 device\n");
+		goto free_dev;
+	}
+	rtdev_alloc_name(dev, "rteth%d");
+	memset(dev->priv, 0, sizeof(struct eth1394_priv));
+	rt_rtdev_connect(dev, &RTDEV_manager);
+
+	//dev->init = eth1394_init_dev;
+
+	dev->vers = RTDEV_VERS_2_0;
+	dev->open = eth1394_open;
+	dev->hard_start_xmit = eth1394_tx;
+	dev->stop = eth1394_stop;
+	dev->hard_header = eth1394_header;
+	dev->get_stats = eth1394_stats;
+	dev->flags = IFF_BROADCAST | IFF_MULTICAST;
+	dev->addr_len = ETH_ALEN;
+	dev->hard_header_len = ETH_HLEN;
+	dev->type = ARPHRD_IEEE1394;
+
+	//rtdev->do_ioctl = NULL;
+	priv = (struct eth1394_priv *)dev->priv;
+
+	rtdm_lock_init(&priv->lock);
+	priv->host = host;
+
+	for (i = 0; i < ALL_NODES; i++) {
+		rtdm_lock_init(&priv->pdg[i].lock);
+		INIT_LIST_HEAD(&priv->pdg[i].list);
+		priv->pdg[i].sz = 0;
+	}
+
+	hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
+	if (hi == NULL) {
+		ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to create "
+				 "hostinfo for IEEE 1394 device\n");
+		goto free_hi;
+	}
+
+	if(rt_register_rtnetdev(dev))
+	{
+		ETH1394_PRINT (KERN_ERR, dev->name, "Error registering network driver\n");
+		goto free_hi;
+	}
+
+	ETH1394_PRINT (KERN_ERR, dev->name, "IEEE-1394 IPv4 over 1394 Ethernet\n");
+
+	hi->host = host;
+	hi->dev = dev;
+
+	eth1394_reset_priv (dev, 1);
+
+	/* Ignore validity in hopes that it will be set in the future.  It'll
+	 * be checked when the eth device is opened.
*/
+	priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
+
+	priv->iso = hpsb_iso_recv_init(host, (ETHER1394_GASP_BUFFERS * 2 *
+				       2048), // XXX workaround for limitation in rawiso
+				       //(1 << (host->csr.max_rec + 1))),
+				       ETHER1394_GASP_BUFFERS,
+				       priv->broadcast_channel,
+				       HPSB_ISO_DMA_PACKET_PER_BUFFER,
+				       1, eth1394_iso, 0, "eth1394_iso", IEEE1394_PRIORITY_HIGHEST);
+
+
+
+	if (priv->iso == NULL) {
+		ETH1394_PRINT(KERN_ERR, dev->name,
+			      "Could not allocate isochronous receive context "
+			      "for the broadcast channel\n");
+		priv->bc_state = ETHER1394_BC_ERROR;
+		goto unregister_dev;
+	} else {
+		if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0){
+			priv->bc_state = ETHER1394_BC_STOPPED;
+			goto unregister_dev;
+		}
+		else
+			priv->bc_state = ETHER1394_BC_RUNNING;
+	}
+
+	hpsb_register_addrspace(&eth1394_highlevel, host, &eth1394_ops, ETHER1394_REGION_ADDR,
+				ETHER1394_REGION_ADDR_END);
+
+	return;
+
+unregister_dev:
+	rt_unregister_rtnetdev(dev);
+free_hi:
+	hpsb_destroy_hostinfo(&eth1394_highlevel, host);
+free_dev:
+	rtdev_free(dev);
+
+	return;
+}
+
+static void eth1394_remove_host (struct hpsb_host *host)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+
+	if (hi != NULL) {
+		struct eth1394_priv *priv = (struct eth1394_priv *)hi->dev->priv;
+
+		eth1394_iso_shutdown(priv);
+
+		if (hi->dev) {
+			rt_stack_disconnect(hi->dev);
+			rt_unregister_rtnetdev (hi->dev);
+			rtdev_free(hi->dev);
+		}
+	}
+	return;
+}
+
+static void eth1394_host_reset (struct hpsb_host *host)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+	struct rtnet_device *dev;
+
+	/* This can happen for hosts that we don't use */
+	if (hi == NULL)
+		return;
+
+	dev = hi->dev;
+
+	/* Reset our private host data, but not our mtu */
+	rtnetif_stop_queue (dev);
+	eth1394_reset_priv (dev, 1);
+	rtnetif_wake_queue (dev);
+}
+
+
+/******************************************
+ * HW Header net device functions
+ ******************************************/
+/* These functions have been adapted from net/ethernet/eth.c */
+
+
+/* Create a fake MAC header for an arbitrary protocol layer.
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (e.g. unresolved ARP).
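+ *
+ * Annotation (not original driver text): as with eth_header() in mainline
+ * Linux, the return value tells the caller whether the destination was
+ * filled in -- hard_header_len on success, minus hard_header_len when
+ * address resolution is still pending, e.g.
+ *
+ *	if (dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len) < 0)
+ *		resolve the destination (ARP) before transmitting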
*/
+static int eth1394_header(struct rtskb *skb, struct rtnet_device *dev,
+			  unsigned short type, void *daddr, void *saddr,
+			  unsigned len)
+{
+	struct ethhdr *eth = (struct ethhdr *)rtskb_push(skb,ETH_HLEN);
+	memset(eth, 0, sizeof(*eth));
+
+	eth->h_proto = htons(type);
+
+	if (saddr)
+		memcpy(eth->h_source, saddr, sizeof(nodeid_t));
+	else
+		memcpy(eth->h_source, dev->dev_addr, sizeof(nodeid_t));
+
+	if (dev->flags & (IFF_LOOPBACK|IFF_NOARP))
+	{
+		memset(eth->h_dest, 0, dev->addr_len);
+		return(dev->hard_header_len);
+	}
+
+	if (daddr)
+	{
+		memcpy(eth->h_dest,daddr, sizeof(nodeid_t));
+		return dev->hard_header_len;
+	}
+
+	return -dev->hard_header_len;
+
+}
+
+
+/******************************************
+ * Datagram reception code
+ ******************************************/
+
+/* Copied from net/ethernet/eth.c */
+static inline u16 eth1394_type_trans(struct rtskb *skb,
+				     struct rtnet_device *dev)
+{
+	struct ethhdr *eth;
+	unsigned char *rawp;
+
+	skb->mac.raw = skb->data;
+	rtskb_pull (skb, ETH_HLEN);
+	eth = (struct ethhdr*)skb->mac.raw;
+
+	if (*eth->h_dest & 1) {
+		if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len)==0)
+			skb->pkt_type = PACKET_BROADCAST;
+	} else {
+		if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
+			skb->pkt_type = PACKET_OTHERHOST;
+	}
+
+	if (ntohs (eth->h_proto) >= 1536)
+		return eth->h_proto;
+
+	rawp = skb->data;
+
+	if (*(unsigned short *)rawp == 0xFFFF)
+		return htons (ETH_P_802_3);
+
+	return htons (ETH_P_802_2);
+}
+
+/* Parse an encapsulated IP1394 header into an ethernet frame packet.
+ * We also perform ARP translation here, if need be. */
+static inline u16 eth1394_parse_encap(struct rtskb *skb,
+				      struct rtnet_device *dev,
+				      nodeid_t srcid, nodeid_t destid,
+				      u16 ether_type)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	unsigned short ret = 0;
+
+	/* If this is an ARP packet, convert it. First, we want to make
+	 * use of some of the fields, since they tell us a little bit
+	 * about the sending machine. */
+	if (ether_type == __constant_htons (ETH_P_ARP)) {
+		rtdm_lockctx_t context;
+		struct eth1394_arp *arp1394 =
+				(struct eth1394_arp*)((u8 *)skb->data);
+		struct arphdr *arp =
+				(struct arphdr *)((u8 *)skb->data);
+		unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+		u8 max_rec = min(priv->host->csr.max_rec,
+				 (u8)(arp1394->max_rec));
+		int sspd = arp1394->sspd;
+		u16 maxpayload;
+		/* Sanity check.  MacOSX seems to be sending us 131 in this
+		 * field (at least on my Panther G5). Not sure why. */
+		if (sspd > 5 || sspd < 0)
+			sspd = 0;
+
+		maxpayload = min(eth1394_speedto_maxpayload[sspd], (u16)(1 << (max_rec + 1)));
+
+
+
+		/* Update our speed/payload/fifo_offset table */
+		rtdm_lock_get_irqsave(&priv->lock, context);
+		eth1394_register_limits(NODEID_TO_NODE(srcid), maxpayload,
+					arp1394->sspd,
+					priv);
+		rtdm_lock_put_irqrestore(&priv->lock, context);
+
+		/* Now that we're done with the 1394 specific stuff, we'll
+		 * need to alter some of the data.  Believe it or not, all
+		 * that needs to be done is that the sender_IP_address is
+		 * moved, the destination hardware address gets stuffed
+		 * in, and the hardware address length is set to ETH_ALEN.
+		 *
+		 * IMPORTANT: The code below overwrites the 1394-specific
+		 * data needed above, so keep the call to
+		 * eth1394_register_limits() before munging the data for the
+		 * higher level IP stack.
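+		 *
+		 * Annotation (not original driver text): schematically, the
+		 * in-place rewrite below turns the 1394 ARP payload back
+		 * into an Ethernet-style one:
+		 *
+		 *	ar_hln          <- ETH_ALEN (6)
+		 *	sender hw addr  (left in place, now 6 bytes wide)
+		 *	sender IP       <- copied from arp1394->sip
+		 *	target hw addr  <- zeroed for requests, dev_addr for replies
+		 *	target IP       (untouched)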
*/ + + arp->ar_hln = ETH_ALEN; + arp_ptr += arp->ar_hln; /* skip over sender unique id */ + *(u32*)arp_ptr = arp1394->sip; /* move sender IP addr */ + arp_ptr += arp->ar_pln; /* skip over sender IP addr */ + + if (arp->ar_op == 1) + /* just set ARP req target unique ID to 0 */ + memset(arp_ptr, 0, ETH_ALEN); + else + memcpy(arp_ptr, dev->dev_addr, ETH_ALEN); + } + + /* Now add the ethernet header. */ + //no need to add ethernet header now, since we did not get rid of it on the sending side + if (dev->hard_header (skb, dev, __constant_ntohs (ether_type), + &destid, &srcid, skb->len) >= 0) + ret = eth1394_type_trans(skb, dev); + + return ret; +} + +static inline int fragment_overlap(struct list_head *frag_list, int offset, int len) +{ + struct list_head *lh; + struct fragment_info *fi; + + list_for_each(lh, frag_list) { + fi = list_entry(lh, struct fragment_info, list); + + if ( ! ((offset > (fi->offset + fi->len - 1)) || + ((offset + len - 1) < fi->offset))) + return 1; + } + return 0; +} + +static inline struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl) +{ + struct list_head *lh; + struct partial_datagram *pd; + + list_for_each(lh, pdgl) { + pd = list_entry(lh, struct partial_datagram, list); + if (pd->dgl == dgl) + return lh; + } + return NULL; +} + +/* Assumes that new fragment does not overlap any existing fragments */ +static inline int new_fragment(struct list_head *frag_info, int offset, int len) +{ + struct list_head *lh; + struct fragment_info *fi, *fi2, *new; + + list_for_each(lh, frag_info) { + fi = list_entry(lh, struct fragment_info, list); + if ((fi->offset + fi->len) == offset) { + /* The new fragment can be tacked on to the end */ + fi->len += len; + /* Did the new fragment plug a hole? */ + fi2 = list_entry(lh->next, struct fragment_info, list); + if ((fi->offset + fi->len) == fi2->offset) { + /* glue fragments together */ + fi->len += fi2->len; + list_del(lh->next); + kfree(fi2); + } + return 0; + } else if ((offset + len) == fi->offset) { + /* The new fragment can be tacked on to the beginning */ + fi->offset = offset; + fi->len += len; + /* Did the new fragment plug a hole? 
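+			 * Annotation (not original driver text): mirror
+			 * image of the forward merge above, e.g. pieces
+			 * [600,800) and [800,1000) collapse into a single
+			 * [600,1000) entry, so a fully received datagram
+			 * always ends up as one fragment_info spanning
+			 * [0, dg_size), which is what is_datagram_complete()
+			 * tests.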
*/
+			fi2 = list_entry(lh->prev, struct fragment_info, list);
+			if ((fi2->offset + fi2->len) == fi->offset) {
+				/* glue fragments together */
+				fi2->len += fi->len;
+				list_del(lh);
+				kfree(fi);
+			}
+			return 0;
+		} else if (offset > (fi->offset + fi->len)) {
+			break;
+		} else if ((offset + len) < fi->offset) {
+			lh = lh->prev;
+			break;
+		}
+	}
+
+	new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC);
+	if (!new)
+		return -ENOMEM;
+
+	new->offset = offset;
+	new->len = len;
+
+	list_add(&new->list, lh);
+
+	return 0;
+}
+
+static inline int new_partial_datagram(struct rtnet_device *dev,
+				       struct list_head *pdgl, int dgl,
+				       int dg_size, char *frag_buf,
+				       int frag_off, int frag_len)
+{
+	struct partial_datagram *new;
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+
+	new = kmalloc(sizeof(struct partial_datagram), GFP_ATOMIC);
+	if (!new)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&new->frag_info);
+
+	if (new_fragment(&new->frag_info, frag_off, frag_len) < 0) {
+		kfree(new);
+		return -ENOMEM;
+	}
+
+	new->dgl = dgl;
+	new->dg_size = dg_size;
+
+	new->skb = rtnetdev_alloc_rtskb(dev, dg_size + dev->hard_header_len + 15);
+	if (!new->skb) {
+		struct fragment_info *fi = list_entry(new->frag_info.next,
+						      struct fragment_info,
+						      list);
+		kfree(fi);
+		kfree(new);
+		return -ENOMEM;
+	}
+
+	rtskb_reserve(new->skb, (dev->hard_header_len + 15) & ~15);
+	new->pbuf = rtskb_put(new->skb, dg_size);
+	memcpy(new->pbuf + frag_off, frag_buf, frag_len);
+
+	list_add(&new->list, pdgl);
+
+	return 0;
+}
+
+static inline int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
+					  char *frag_buf, int frag_off, int frag_len)
+{
+	struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
+
+	if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0) {
+		return -ENOMEM;
+	}
+
+	memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
+
+	/* Move list entry to beginning of list so that oldest partial
+	 * datagrams percolate to the end of the list */
+	list_del(lh);
+	list_add(lh, pdgl);
+
+	return 0;
+}
+
+static inline void purge_partial_datagram(struct list_head *old)
+{
+	struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
+	struct list_head *lh, *n;
+
+	list_for_each_safe(lh, n, &pd->frag_info) {
+		struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
+		list_del(lh);
+		kfree(fi);
+	}
+	list_del(old);
+	kfree_rtskb(pd->skb);
+	kfree(pd);
+}
+
+static inline int is_datagram_complete(struct list_head *lh, int dg_size)
+{
+	struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
+	struct fragment_info *fi = list_entry(pd->frag_info.next,
+					      struct fragment_info, list);
+
+	return (fi->len == dg_size);
+}
+
+
+
+
+/* Packet reception. We convert the IP1394 encapsulation header to an
+ * ethernet header, and fill it with some of our other fields. This is
+ * an incoming packet from the 1394 bus.  */
+static int eth1394_data_handler(struct rtnet_device *dev, int srcid, int destid,
+				char *buf, int len, nanosecs_abs_t time_stamp)
+{
+	struct rtskb *skb;
+	rtdm_lockctx_t context;
+	struct eth1394_priv *priv;
+	union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
+	u16 ether_type = 0;  /* initialized to clear warning */
+	int hdr_len;
+
+	//~ nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	priv = (struct eth1394_priv *)dev->priv;
+
+	/* First, did we receive a fragmented or unfragmented datagram?
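+	 * Annotation (not original driver text): the lf bits select among
+	 * the four RFC 2734-style encapsulation headers -- UF (whole
+	 * datagram), FF (first fragment, carries ether_type and dg_size)
+	 * and IF/LF (interior/last fragments, carrying fg_off instead) --
+	 * and also index hdr_type_len[] to find how much header to strip.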
*/ + hdr->words.word1 = ntohs(hdr->words.word1); + + hdr_len = hdr_type_len[hdr->common.lf]; + + if (hdr->common.lf == ETH1394_HDR_LF_UF) { + DEBUGP("a single datagram has been received\n"); + /* An unfragmented datagram has been received by the ieee1394 + * bus. Build an skbuff around it so we can pass it to the + * high level network layer. */ + + //~ if(rtpkb_acquire((struct rtpkb*)packet, &priv->skb_pool)){ + //~ HPSB_PRINT (KERN_ERR, "eth1394 rx: low on mem\n"); + //~ priv->stats.rx_dropped++; + //~ return -1; + //~ } + + skb = rtnetdev_alloc_rtskb(dev, len + dev->hard_header_len + 15); + if (!skb) { + ETH1394_PRINT_G(KERN_ERR, "eth1394 rx: low on mem\n"); + priv->stats.rx_dropped++; + return -1; + } + //~ skb = (struct rtskb *)packet;//we can do this, because these two belong to the same common object, rtpkb. + //~ rtpkb_put(skb, len-hdr_len); + //~ skb->data = (u8 *)packet->data + hdr_len; //we jump over the 1394-specific fragment overhead + //~ rtskb_put(skb, ); + rtskb_reserve(skb, (dev->hard_header_len + 15) & ~15);//we reserve the space to put in fake MAC address + memcpy(rtskb_put(skb, len - hdr_len), buf + hdr_len, len - hdr_len); + ether_type = hdr->uf.ether_type; + } else { + /* A datagram fragment has been received, now the fun begins. */ + struct list_head *pdgl, *lh; + struct partial_datagram *pd; + int fg_off; + int fg_len = len - hdr_len; + int dg_size; + int dgl; + int retval; + int sid = NODEID_TO_NODE(srcid); + struct pdg_list *pdg = &(priv->pdg[sid]); + + DEBUGP("a datagram fragment has been received\n"); + hdr->words.word3 = ntohs(hdr->words.word3); + /* The 4th header word is reserved so no need to do ntohs() */ + + if (hdr->common.lf == ETH1394_HDR_LF_FF) { + //first fragment + ether_type = hdr->ff.ether_type; + dgl = hdr->ff.dgl; + dg_size = hdr->ff.dg_size + 1; + fg_off = 0; + } else { + hdr->words.word2 = ntohs(hdr->words.word2); + dgl = hdr->sf.dgl; + dg_size = hdr->sf.dg_size + 1; + fg_off = hdr->sf.fg_off; + } + rtdm_lock_get_irqsave(&pdg->lock, context); + + pdgl = &(pdg->list); + lh = find_partial_datagram(pdgl, dgl); + + if (lh == NULL) { + if (pdg->sz == max_partial_datagrams) { + /* remove the oldest */ + purge_partial_datagram(pdgl->prev); + pdg->sz--; + } + + retval = new_partial_datagram(dev, pdgl, dgl, dg_size, + buf + hdr_len, fg_off, + fg_len); + if (retval < 0) { + rtdm_lock_put_irqrestore(&pdg->lock, context); + goto bad_proto; + } + pdg->sz++; + lh = find_partial_datagram(pdgl, dgl); + } else { + struct partial_datagram *pd; + + pd = list_entry(lh, struct partial_datagram, list); + + if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) { + /* Overlapping fragments, obliterate old + * datagram and start new one. */ + purge_partial_datagram(lh); + retval = new_partial_datagram(dev, pdgl, dgl, + dg_size, + buf + hdr_len, + fg_off, fg_len); + if (retval < 0) { + pdg->sz--; + rtdm_lock_put_irqrestore(&pdg->lock, context); + goto bad_proto; + } + } else { + retval = update_partial_datagram(pdgl, lh, + buf + hdr_len, + fg_off, fg_len); + if (retval < 0) { + /* Couldn't save off fragment anyway + * so might as well obliterate the + * datagram now. 
*/
+					purge_partial_datagram(lh);
+					pdg->sz--;
+					rtdm_lock_put_irqrestore(&pdg->lock, context);
+					goto bad_proto;
+				}
+			} /* fragment overlap */
+		} /* new datagram or add to existing one */
+
+		pd = list_entry(lh, struct partial_datagram, list);
+
+		if (hdr->common.lf == ETH1394_HDR_LF_FF) {
+			pd->ether_type = ether_type;
+		}
+
+		if (is_datagram_complete(lh, dg_size)) {
+			ether_type = pd->ether_type;
+			pdg->sz--;
+			//skb = skb_get(pd->skb);
+			skb = pd->skb;
+			purge_partial_datagram(lh);
+			rtdm_lock_put_irqrestore(&pdg->lock, context);
+		} else {
+			/* Datagram is not complete, we're done for the
+			 * moment. */
+			rtdm_lock_put_irqrestore(&pdg->lock, context);
+			return 0;
+		}
+	} /* unfragmented datagram or fragmented one */
+
+	/* Write metadata, and then pass to the receive level */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* don't check it */
+
+	/* Parse the encapsulation header. This actually does the job of
+	 * converting to an ethernet frame header, as well as ARP
+	 * conversion if needed. ARP conversion is easier in this
+	 * direction, since we are using ethernet as our backend. */
+	skb->protocol = eth1394_parse_encap(skb, dev, srcid, destid,
+					    ether_type);
+
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	if (!skb->protocol) {
+		DEBUG_PRINT("pointer to %s(%s)%d\n",__FILE__,__FUNCTION__,__LINE__);
+		priv->stats.rx_errors++;
+		priv->stats.rx_dropped++;
+		//dev_kfree_skb_any(skb);
+		kfree_rtskb(skb);
+		goto bad_proto;
+	}
+
+	skb->time_stamp = time_stamp;
+	/*if (netif_rx(skb) == NET_RX_DROP) {
+		priv->stats.rx_errors++;
+		priv->stats.rx_dropped++;
+		goto bad_proto;
+	}*/
+	rtnetif_rx(skb);//finally, we deliver the packet
+
+	/* Statistics */
+	priv->stats.rx_packets++;
+	priv->stats.rx_bytes += skb->len;
+	rt_mark_stack_mgr(dev);
+
+bad_proto:
+	if (rtnetif_queue_stopped(dev))
+		rtnetif_wake_queue(dev);
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	//dev->last_rx = jiffies;
+
+	return 0;
+}
+
+
+static int eth1394_write(struct hpsb_host *host, struct hpsb_packet *packet, unsigned int length)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+	int ret;
+
+	if (hi == NULL) {
+		ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
+				host->driver->name);
+		return RCODE_ADDRESS_ERROR;
+	}
+
+	//we need to parse the packet now
+	ret = eth1394_data_handler(hi->dev, packet->header[1]>>16, //source id
+				   packet->header[0]>>16, //dest id
+				   (char *)packet->data, //data
+				   packet->data_size, packet->time_stamp);
+	//we only get the request packet, serve it, but don't free it, since it does not belong to us!
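+	/*
+	 * Annotation (not original driver text): the value returned below
+	 * becomes the IEEE 1394 transaction response code the sender sees --
+	 * RCODE_COMPLETE acknowledges the write, while RCODE_ADDRESS_ERROR
+	 * fails the sender's request.
+	 */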
+
+	if(ret)
+		return RCODE_ADDRESS_ERROR;
+	else
+		return RCODE_COMPLETE;
+}
+
+
+/**
+ * callback function for broadcast channel
+ * called from hpsb_iso_wake()
+ */
+static void eth1394_iso(struct hpsb_iso *iso, void *arg)
+{
+	quadlet_t *data;
+	char *buf;
+	struct rtnet_device *dev;
+	unsigned int len;
+	u32 specifier_id;
+	u16 source_id;
+	int i;
+	int nready;
+
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host);
+	if (hi == NULL) {
+		ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
+				iso->host->driver->name);
+		return;
+	}
+
+	dev = hi->dev;
+
+	nready = hpsb_iso_n_ready(iso);
+	for (i = 0; i < nready; i++) {
+		struct hpsb_iso_packet_info *info =
+			&iso->infos[(iso->first_packet + i) % iso->buf_packets];
+		data = (quadlet_t*) (iso->data_buf.kvirt + info->offset);
+
+		/* skip over GASP header */
+		buf = (char *)data + 8;
+		len = info->len - 8;
+
+		specifier_id = (((be32_to_cpu(data[0]) & 0xffff) << 8) |
+				((be32_to_cpu(data[1]) & 0xff000000) >> 24));
+		source_id = be32_to_cpu(data[0]) >> 16;
+
+		if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) ||
+		    specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
+			/* This packet is not for us */
+			continue;
+		}
+		eth1394_data_handler(dev, source_id, LOCAL_BUS | ALL_NODES,
+				     buf, len, rtdm_clock_read());
+	}
+
+	hpsb_iso_recv_release_packets(iso, i);
+
+	//dev->last_rx = jiffies;
+}
+
+/******************************************
+ * Datagram transmission code
+ ******************************************/
+
+/* Convert a standard ARP packet to 1394 ARP. The first 8 bytes (the entire
+ * arphdr) is the same format as the ip1394 header, so they overlap.  The rest
+ * needs to be munged a bit.  The remainder of the arphdr is formatted based
+ * on hwaddr len and ipaddr len.  We know what they'll be, so it's easy to
+ * judge.
+ *
+ * Now that the EUI is used for the hardware address all we need to do to make
+ * this work for 1394 is to insert 2 quadlets that contain max_rec size,
+ * speed, and unicast FIFO address information between the sender_unique_id
+ * and the IP addresses.
+ */
+
+//we don't need the EUI id now. fifo_hi should contain the bus id and node id.
+//fifo_lo should contain the highest 32 bits of in-node address.
+static inline void eth1394_arp_to_1394arp(struct rtskb *skb,
+					  struct rtnet_device *dev)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)(dev->priv);
+	u16 phy_id = NODEID_TO_NODE(priv->host->node_id);
+
+	struct arphdr *arp = (struct arphdr *)skb->data;
+	unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+	struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
+
+	arp1394->hw_addr_len = 6;
+	arp1394->sip = *(u32*)(arp_ptr + ETH_ALEN);
+	arp1394->max_rec = priv->host->csr.max_rec;
+	arp1394->sspd = priv->sspd[phy_id];
+
+	return;
+}
+
+/* We need to encapsulate the standard header with our own. We use the
+ * ethernet header's proto for our own. */
+static inline unsigned int eth1394_encapsulate_prep(unsigned int max_payload,
+						    int proto,
+						    union eth1394_hdr *hdr,
+						    u16 dg_size, u16 dgl)
+{
+	unsigned int adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
+
+	/* Does it all fit in one packet?
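+	 * Annotation (not original driver text): the return value below is
+	 * plain ceiling division.  Assuming an 8-byte fragment header,
+	 * max_payload = 2048 gives adj_max_payload = 2040, so a 3000-byte
+	 * datagram costs (3000 + 2039) / 2040 = 2 fragments.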
*/ + if (dg_size <= adj_max_payload) { + hdr->uf.lf = ETH1394_HDR_LF_UF; + hdr->uf.ether_type = proto; + } else { + hdr->ff.lf = ETH1394_HDR_LF_FF; + hdr->ff.ether_type = proto; + hdr->ff.dg_size = dg_size - 1; + hdr->ff.dgl = dgl; + adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF]; + } + return((dg_size + (adj_max_payload - 1)) / adj_max_payload); +} + +static inline unsigned int eth1394_encapsulate(struct rtskb *skb, + unsigned int max_payload, + union eth1394_hdr *hdr) +{ + union eth1394_hdr *bufhdr; + int ftype = hdr->common.lf; + int hdrsz = hdr_type_len[ftype]; + unsigned int adj_max_payload = max_payload - hdrsz; + + switch(ftype) { + case ETH1394_HDR_LF_UF: + bufhdr = (union eth1394_hdr *)rtskb_push(skb, hdrsz); + bufhdr->words.word1 = htons(hdr->words.word1); + bufhdr->words.word2 = hdr->words.word2; + break; + + case ETH1394_HDR_LF_FF: + bufhdr = (union eth1394_hdr *)rtskb_push(skb, hdrsz); + bufhdr->words.word1 = htons(hdr->words.word1); + bufhdr->words.word2 = hdr->words.word2; + bufhdr->words.word3 = htons(hdr->words.word3); + bufhdr->words.word4 = 0; + + /* Set frag type here for future interior fragments */ + hdr->common.lf = ETH1394_HDR_LF_IF; + hdr->sf.fg_off = 0; + break; + + default: + hdr->sf.fg_off += adj_max_payload; + bufhdr = (union eth1394_hdr *)rtskb_pull(skb, adj_max_payload); + if (max_payload >= skb->len) + hdr->common.lf = ETH1394_HDR_LF_LF; + bufhdr->words.word1 = htons(hdr->words.word1); + bufhdr->words.word2 = htons(hdr->words.word2); + bufhdr->words.word3 = htons(hdr->words.word3); + bufhdr->words.word4 = 0; + } + + return min(max_payload, skb->len); +} + +//just allocate a hpsb_packet header, without payload. +static inline struct hpsb_packet *eth1394_alloc_common_packet(struct hpsb_host *host, unsigned int priority) +{ + struct hpsb_packet *p; + + p = hpsb_alloc_packet(0,&host->pool, priority); + if (p) { + p->host = host; + p->data = NULL; + p->generation = get_hpsb_generation(host); + p->type = hpsb_async; + } + return p; +} + +//prepare an asynchronous write packet +static inline int eth1394_prep_write_packet(struct hpsb_packet *p, + struct hpsb_host *host, + nodeid_t node, u64 addr, + void * data, int tx_len) +{ + p->node_id = node; + + p->tcode = TCODE_WRITEB; + + p->header[1] = (host->node_id << 16) | (addr >> 32); + p->header[2] = addr & 0xffffffff; + + p->header_size = 16; + p->expect_response = 1; + + if (hpsb_get_tlabel(p)) { + ETH1394_PRINT_G(KERN_ERR, "No more tlabels left while sending " + "to node " NODE_BUS_FMT "\n", NODE_BUS_ARGS(host, node)); + return -1; + } + p->header[0] = (p->node_id << 16) | (p->tlabel << 10) + | (1 << 8) | (TCODE_WRITEB << 4); + + p->header[3] = tx_len << 16; + p->data_size = tx_len + (tx_len % 4 ? 4 - (tx_len % 4) : 0); + p->data = (quadlet_t*)data; + + return 0; +} + +//prepare gasp packet from skb. +static inline void eth1394_prep_gasp_packet(struct hpsb_packet *p, + struct eth1394_priv *priv, + struct rtskb *skb, int length) +{ + p->header_size = 4; + p->tcode = TCODE_STREAM_DATA; + + p->header[0] = (length << 16) | (3 << 14) + | ((priv->broadcast_channel) << 8) + | (TCODE_STREAM_DATA << 4); + p->data_size = length; + p->data = ((quadlet_t*)skb->data) - 2; //we need 64bits for extra spec_id and gasp version. 
+ p->data[0] = cpu_to_be32((priv->host->node_id << 16) | + ETHER1394_GASP_SPECIFIER_ID_HI); + p->data[1] = cpu_to_be32((ETHER1394_GASP_SPECIFIER_ID_LO << 24) | + ETHER1394_GASP_VERSION); + + /* Setting the node id to ALL_NODES (not LOCAL_BUS | ALL_NODES) + * prevents hpsb_send_packet() from setting the speed to an arbitrary + * value based on packet->node_id if packet->node_id is not set. */ + p->node_id = ALL_NODES; + p->speed_code = priv->sspd[ALL_NODES]; +} + + +static inline void eth1394_free_packet(struct hpsb_packet *packet) +{ + if (packet->tcode != TCODE_STREAM_DATA) + hpsb_free_tlabel(packet); + hpsb_free_packet(packet); +} + +static void eth1394_complete_cb(struct hpsb_packet *packet, void *__ptask); + + +/** + * this function does the real calling of hpsb_send_packet + *But before that, it also constructs the FireWire packet according to + * ptask + */ +static int eth1394_send_packet(struct packet_task *ptask, unsigned int tx_len, nanosecs_abs_t *xmit_stamp) +{ + struct eth1394_priv *priv = ptask->priv; + struct hpsb_packet *packet = NULL; + int ret; + + packet = eth1394_alloc_common_packet(priv->host, ptask->priority); + if (!packet) { + ret = -ENOMEM; + return ret; + } + if(xmit_stamp) + packet->xmit_stamp = xmit_stamp; + + if (ptask->tx_type == ETH1394_GASP) { + int length = tx_len + (2 * sizeof(quadlet_t)); //for the extra gasp overhead + + eth1394_prep_gasp_packet(packet, priv, ptask->skb, length); + } else if (eth1394_prep_write_packet(packet, priv->host, + ptask->dest_node, + ptask->addr, ptask->skb->data, + tx_len)) { + hpsb_free_packet(packet); + return -1; + } + + ptask->packet = packet; + hpsb_set_packet_complete_task(ptask->packet, eth1394_complete_cb, + ptask); + + ret = hpsb_send_packet(packet); + if (ret != 0) { + eth1394_free_packet(packet); + } + + return ret; +} + + +/* Task function to be run when a datagram transmission is completed */ +static inline void eth1394_dg_complete(struct packet_task *ptask, int fail) +{ + struct rtskb *skb = ptask->skb; + struct rtnet_device *dev = skb->rtdev; + struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv; + rtdm_lockctx_t context; + + /* Statistics */ + rtdm_lock_get_irqsave(&priv->lock, context); + if (fail) { + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + } else { + priv->stats.tx_bytes += skb->len; + priv->stats.tx_packets++; + } + rtdm_lock_put_irqrestore(&priv->lock, context); + + //dev_kfree_skb_any(skb); + kfree_rtskb(skb); + //~ kmem_cache_free(packet_task_cache, ptask); + //this means this ptask structure has been freed + ptask->packet=NULL; +} + + +/* Callback for when a packet has been sent and the status of that packet is + * known */ +static void eth1394_complete_cb(struct hpsb_packet *packet, void *__ptask) +{ + struct packet_task *ptask = (struct packet_task *)__ptask; + int fail = 0; + + if (packet->tcode != TCODE_STREAM_DATA) + fail = hpsb_packet_success(packet); + + //we have no rights to free packet, since it belongs to RT-FireWire kernel. 
+ //~ eth1394_free_packet(packet); + + ptask->outstanding_pkts--; + if (ptask->outstanding_pkts > 0 && !fail) + { + int tx_len; + + /* Add the encapsulation header to the fragment */ + tx_len = eth1394_encapsulate(ptask->skb, ptask->max_payload, + &ptask->hdr); + if (eth1394_send_packet(ptask, tx_len, NULL)) + eth1394_dg_complete(ptask, 1); + } else { + eth1394_dg_complete(ptask, fail); + } +} + + + +/** + *Transmit a packet (called by kernel) + * this is the dev->hard_start_transmit + */ +static int eth1394_tx (struct rtskb *skb, struct rtnet_device *dev) +{ + + struct ethhdr *eth; + struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv; + int proto; + rtdm_lockctx_t context; + nodeid_t dest_node; + eth1394_tx_type tx_type; + int ret = 0; + unsigned int tx_len; + unsigned int max_payload; + u16 dg_size; + u16 dgl; + + //we try to find the available ptask struct, if failed, we can not send packet + struct packet_task *ptask = NULL; + int i; + for(i=0;i<20;i++){ + if(priv->ptask_list[i].packet == NULL){ + ptask = &priv->ptask_list[i]; + break; + } + } + if(ptask == NULL) + return -EBUSY; + + rtdm_lock_get_irqsave(&priv->lock, context); + if (priv->bc_state == ETHER1394_BC_CLOSED) { + ETH1394_PRINT(KERN_ERR, dev->name, + "Cannot send packet, no broadcast channel available.\n"); + ret = -EAGAIN; + rtdm_lock_put_irqrestore(&priv->lock, context); + goto fail; + } + if ((ret = eth1394_init_bc(dev))) { + rtdm_lock_put_irqrestore(&priv->lock, context); + goto fail; + } + rtdm_lock_put_irqrestore(&priv->lock, context); + //if ((skb = skb_share_check (skb, kmflags)) == NULL) { + // ret = -ENOMEM; + // goto fail; + //} + + /* Get rid of the fake eth1394 header, but save a pointer */ + eth = (struct ethhdr*)skb->data; + rtskb_pull(skb, ETH_HLEN); + //dont get rid of the fake eth1394 header, since we need it on the receiving side + //eth = (struct ethhdr*)skb->data; + + //~ //find the node id via our fake MAC address + //~ ne = hpsb_guid_get_entry(be64_to_cpu(*(u64*)eth->h_dest)); + //~ if (!ne) + //~ dest_node = LOCAL_BUS | ALL_NODES; + //~ else + //~ dest_node = ne->nodeid; + //now it is much easier + dest_node = *(u16*)eth->h_dest; + if(dest_node != 0xffff) + DEBUGP("%s: dest_node is %x\n", __FUNCTION__, dest_node); + + proto = eth->h_proto; + + /* If this is an ARP packet, convert it */ + if (proto == __constant_htons (ETH_P_ARP)) + eth1394_arp_to_1394arp (skb, dev); + + max_payload = priv->maxpayload[NODEID_TO_NODE(dest_node)]; + DEBUGP("%s: max_payload is %d\n", __FUNCTION__, max_payload); + + /* This check should be unnecessary, but we'll keep it for safety for + * a while longer. */ + if (max_payload < 512) { + DEBUGP("max_payload too small: %d (setting to 512)\n", + max_payload); + max_payload = 512; + } + + /* Set the transmission type for the packet. ARP packets and IP + * broadcast packets are sent via GASP. 
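+	 * Annotation (not original driver text): GASP (global asynchronous
+	 * stream packet) frames are streamed on the broadcast channel and
+	 * reach all nodes at once, at the price of ETHER1394_GASP_OVERHEAD
+	 * extra bytes per packet; everything else goes out as a unicast
+	 * block-write request to the peer's ETHER1394_REGION_ADDR window.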
*/
+	if (memcmp(eth->h_dest, dev->broadcast, sizeof(nodeid_t)) == 0 ||
+	    proto == __constant_htons(ETH_P_ARP) ||
+	    (proto == __constant_htons(ETH_P_IP) &&
+	     IN_MULTICAST(__constant_ntohl(skb->nh.iph->daddr)))) {
+		tx_type = ETH1394_GASP;
+		max_payload -= ETHER1394_GASP_OVERHEAD; //we have extra overhead for gasp packet
+	} else {
+		tx_type = ETH1394_WRREQ;
+	}
+
+	dg_size = skb->len;
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	dgl = priv->dgl[NODEID_TO_NODE(dest_node)];
+	if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
+		priv->dgl[NODEID_TO_NODE(dest_node)]++;
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	ptask->hdr.words.word1 = 0;
+	ptask->hdr.words.word2 = 0;
+	ptask->hdr.words.word3 = 0;
+	ptask->hdr.words.word4 = 0;
+	ptask->skb = skb;
+	ptask->priv = priv;
+	ptask->tx_type = tx_type;
+
+	if (tx_type != ETH1394_GASP) {
+		u64 addr;
+
+		/* This test is just temporary until ConfigROM support has
+		 * been added to eth1394.  Until then, we need an ARP packet
+		 * after a bus reset from the current destination node so that
+		 * we can get FIFO information. */
+		//~ if (priv->fifo[NODEID_TO_NODE(dest_node)] == 0ULL) {
+			//~ ret = -EAGAIN;
+			//~ goto fail;
+		//~ }
+
+		//~ rtos_spin_lock_irqsave(&priv->lock, flags);
+		//~ addr = priv->fifo[NODEID_TO_NODE(dest_node)];
+		addr = ETHER1394_REGION_ADDR;
+		//~ rtos_spin_unlock_irqrestore(&priv->lock, flags);
+
+		ptask->addr = addr;
+		ptask->dest_node = dest_node;
+	}
+
+	ptask->tx_type = tx_type;
+	ptask->max_payload = max_payload;
+	ptask->outstanding_pkts = eth1394_encapsulate_prep(max_payload, proto,
+							   &ptask->hdr, dg_size,
+							   dgl);
+
+	/* Add the encapsulation header to the fragment */
+	tx_len = eth1394_encapsulate(skb, max_payload, &ptask->hdr);
+	//dev->trans_start = jiffies;
+	//~ if(skb->xmit_stamp)
+		//~ *skb->xmit_stamp = cpu_to_be64(rtos_get_time() + *skb->xmit_stamp);
+
+
+	if (eth1394_send_packet(ptask, tx_len, skb->xmit_stamp))
+		goto fail;
+
+	rtnetif_wake_queue(dev);
+	return 0;
+fail:
+	if (ptask!=NULL){
+		//~ kmem_cache_free(packet_task_cache, ptask);
+		ptask->packet=NULL;
+		ptask=NULL;
+	}
+
+	if (skb != NULL)
+		dev_kfree_rtskb(skb);
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	priv->stats.tx_dropped++;
+	priv->stats.tx_errors++;
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	if (rtnetif_queue_stopped(dev))
+		rtnetif_wake_queue(dev);
+
+	return 0;  /* returning non-zero causes serious problems */
+}
+
+static int eth1394_init(void)
+{
+	hpsb_register_highlevel(&eth1394_highlevel);
+
+	return 0;
+}
+
+static void eth1394_exit(void)
+{
+	hpsb_unregister_highlevel(&eth1394_highlevel);
+}
+
+module_init(eth1394_init);
+module_exit(eth1394_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c
new file mode 100644
index 0000000..94e0c93
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c
@@ -0,0 +1,2752 @@
+#warning *********************************************************************
+#warning This driver is probably not real-time safe! Under certain conditions
+#warning it can cause interrupt locks of up to 1 second (issue_and_wait). We
+#warning need a rewrite of critical parts, but we are lacking the knowledge
+#warning about the hardware details (e.g. how long does a normal delay take =>
+#warning apply this value and throw an error message on timeouts).
+#warning *********************************************************************
+
+/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux / RTnet. */
+/*
+	RTnet porting 2002 by Mathias Koehrer (mathias_koehrer@yahoo.de)
+	-- Support only for PCI boards, EISA stuff ignored...
+
+	Originally written 1996-1999 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
+	Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
+	and the EtherLink XL 3c900 and 3c905 cards.
+
+	Problem reports and questions should be directed to
+	vortex@scyld.com
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Linux Kernel Additions:
+
+	0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
+	0.99H+lk1.0 - Jeff Garzik <jgarzik@mandrakesoft.com>
+		Remove compatibility defines for kernel versions < 2.2.x.
+		Update for new 2.3.x module interface
+	LK1.1.2 (March 19, 2000)
+	* New PCI interface (jgarzik)
+
+	LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
+	- Merged with 3c575_cb.c
+	- Don't set RxComplete in boomerang interrupt enable reg
+	- spinlock in vortex_timer to protect mdio functions
+	- disable local interrupts around call to vortex_interrupt in
+	  vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
+	- Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
+	- In vortex_start_xmit(), move the lock to _after_ we've altered
+	  vp->cur_tx and vp->tx_full.  This defeats the race between
+	  vortex_start_xmit() and vortex_interrupt which was identified
+	  by Bogdan Costescu.
+	- Merged back support for six new cards from various sources
+	- Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
+	  insertion oops)
+	- Tell it that 3c905C has NWAY for 100bT autoneg
+	- Fix handling of SetStatusEnd in 'Too much work..' code, as
+	  per 2.3.99's 3c575_cb (Dave Hinds).
+	- Split ISR into two for vortex & boomerang
+	- Fix MOD_INC/DEC races
+	- Handle resource allocation failures.
+	- Fix 3CCFE575CT LED polarity
+	- Make tx_interrupt_mitigation the default
+
+	LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
+	- Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
+	- Put vortex_info_tbl into __devinitdata
+	- In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
+	  as in the hardware.
+	- Increased the loop counter in issue_and_wait from 2,000 to 4,000.
+
+	LK1.1.5 28 April 2000, andrewm
+	- Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
+	- Some extra diagnostics
+	- In vortex_error(), reset the Tx on maxCollisions.  Otherwise most
+	  chips usually get a Tx timeout.
+	- Added extra_reset module parm
+	- Replaced some inline timer manip with mod_timer
+	  (François Romieu <Francois.Romieu@nic.fr>)
+	- In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
+	  (this came across from 3c575_cb).
+
+	LK1.1.6 06 Jun 2000, andrewm
+	- Backed out the PPC defines.
+	- Use del_timer_sync(), mod_timer().
+ - Fix wrapped ulong comparison in boomerang_rx() + - Add IS_TORNADO, use it to suppress 3c905C checksum error msg + (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>) + - Replace union wn3_config with BFINS/BFEXT manipulation for + sparc64 (Pete Zaitcev, Peter Jones) + - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex): + do a netif_wake_queue() to better recover from errors. (Anders Pedersen, + Donald Becker) + - Print a warning on out-of-memory (rate limited to 1 per 10 secs) + - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland) + + LK1.1.7 2 Jul 2000 andrewm + - Better handling of shared IRQs + - Reset the transmitter on a Tx reclaim error + - Fixed crash under OOM during vortex_open() (Mark Hemment) + - Fix Rx cessation problem during OOM (help from Mark Hemment) + - The spinlocks around the mdio access were blocking interrupts for 300uS. + Fix all this to use spin_lock_bh() within mdio_read/write + - Only write to TxFreeThreshold if it's a boomerang - other NICs don't + have one. + - Added 802.3x MAC-layer flow control support + + LK1.1.8 13 Aug 2000 andrewm + - Ignore request_region() return value - already reserved if Cardbus. + - Merged some additional Cardbus flags from Don's 0.99Qk + - Some fixes for 3c556 (Fred Maciel) + - Fix for EISA initialisation (Jan Rekorajski) + - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers + - Fixed MII_XCVR_PWR for 3CCFE575CT + - Added INVERT_LED_PWR, used it. + - Backed out the extra_reset stuff + + LK1.1.9 12 Sep 2000 andrewm + - Backed out the tx_reset_resume flags. It was a no-op. + - In vortex_error, don't reset the Tx on txReclaim errors + - In vortex_error, don't reset the Tx on maxCollisions errors. + Hence backed out all the DownListPtr logic here. + - In vortex_error, give Tornado cards a partial TxReset on + maxCollisions (David Hinds). Defined MAX_COLLISION_RESET for this. + - Redid some driver flags and device names based on pcmcia_cs-3.1.20. + - Fixed a bug where, if vp->tx_full is set when the interface + is downed, it remains set when the interface is upped. Bad + things happen. + + LK1.1.10 17 Sep 2000 andrewm + - Added EEPROM_8BIT for 3c555 (Fred Maciel) + - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg) + - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO" + + LK1.1.11 13 Nov 2000 andrewm + - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER + + LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1) + - Call pci_enable_device before we request our IRQ (Tobias Ringstrom) + - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra) + - Added extended issue_and_wait for the 3c905CX. + - Look for an MII on PHY index 24 first (3c905CX oddity). + - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger) + - Don't free skbs we don't own on oom path in vortex_open(). + + LK1.1.13 27 Jan 2001 + - Added explicit `medialock' flag so we can truly + lock the media type down with `options'. + - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>) + - Added and used EEPROM_NORESET for 3c556B PM resumes. + - Fixed leakage of vp->rx_ring. + - Break out separate HAS_HWCKSM device capability flag. + - Kill vp->tx_full (ANK) + - Merge zerocopy fragment handling (ANK?) + + LK1.1.14 15 Feb 2001 + - Enable WOL. Can be turned on with `enable_wol' module option. + - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul) + - If a device's internalconfig register reports it has NWAY, + use it, even if autoselect is enabled. 
+ + LK1.1.15 6 June 2001 akpm + - Prevent double counting of received bytes (Lars Christensen) + - Add ethtool support (jgarzik) + - Add module parm descriptions (Andrzej M. Krzysztofowicz) + - Implemented alloc_etherdev() API + - Special-case the 'Tx error 82' message. + + LK1.1.16 18 July 2001 akpm + - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM + - Lessen verbosity of bootup messages + - Fix WOL - use new PM API functions. + - Use netif_running() instead of vp->open in suspend/resume. + - Don't reset the interface logic on open/close/rmmod. It upsets + autonegotiation, and hence DHCP (from 0.99T). + - Back out EEPROM_NORESET flag because of the above (we do it for all + NICs). + - Correct 3c982 identification string + - Rename wait_for_completion() to issue_and_wait() to avoid completion.h + clash. + + - See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details. + - Also see Documentation/networking/vortex.txt +*/ + +/* + * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation + * as well as other drivers + * + * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k + * due to dead code elimination. There will be some performance benefits from this due to + * elimination of all the tests and reduced cache footprint. + */ + + +#define DRV_NAME "3c59x" +#define DRV_VERSION "LK1.1.16" +#define DRV_RELDATE "19 July 2001" + + + +/* A few values that may be tweaked. */ +/* Keep the ring sizes a power of two for efficiency. */ +#define TX_RING_SIZE 16 +#define RX_RING_SIZE 8 /*** RTnet ***/ +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ + +/* "Knobs" that adjust features and parameters. */ +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1512 effectively disables this feature. */ +/*** RTnet ***/ +/*** RTnet ***/ +/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ +static const int mtu = 1500; +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 32; +/* Tx timeout interval (millisecs) */ +// *** RTnet *** +//static int watchdog = 5000; +// *** RTnet *** + +/* Allow aggregation of Tx interrupts. Saves CPU load at the cost + * of possible Tx stalls if the system is blocking interrupts + * somewhere else. Undefine this to disable. + */ +#define tx_interrupt_mitigation 1 + +/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */ +#define vortex_debug debug +#ifdef VORTEX_DEBUG +static int vortex_debug = VORTEX_DEBUG; +#else +static int vortex_debug = 1; +#endif + +#ifndef __OPTIMIZE__ +#error You must compile this file with the correct options! +#error See the last lines of the source file. +#error You must compile this driver with "-O". +#endif + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/in.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/mii.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/highmem.h> +#include <linux/uaccess.h> +#include <asm/irq.h> /* For NR_IRQS only. 
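+*/
+
+/* Editor's aside (not in the original patch): the "power of two"
+ * ring-size advice above matters because cur_tx/dirty_tx and
+ * cur_rx/dirty_rx below are free-running counters, so an index such as
+ *
+ *	int entry = vp->cur_tx % TX_RING_SIZE;
+ *
+ * compiles down to a single AND (cur_tx & (TX_RING_SIZE - 1)), and the
+ * ring-full test remains the plain subtraction
+ * cur_tx - dirty_tx >= TX_RING_SIZE even across counter wrap. */
+
+/* (remaining includes)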
*/ +#include <asm/bitops.h> +#include <asm/io.h> + +// *** RTnet *** +#include <rtnet_port.h> + +static int cards = INT_MAX; +module_param(cards, int, 0444); +MODULE_PARM_DESC(cards, "number of cards to be supported"); +// *** RTnet *** + +/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. + This is only in the support-all-kernels source code. */ + +#define RUN_AT(x) (jiffies + (x)) + +#include <linux/delay.h> + +// *** RTnet - no power management *** +#undef pci_set_power_state +#define pci_set_power_state null_set_power_state +static inline int null_set_power_state(struct pci_dev *dev, int state) +{ + return 0; +} +// *** RTnet *** + + +static char version[] = + DRV_NAME " for RTnet : Donald Becker and others. www.scyld.com/network/vortex.html\n"; + +MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); +MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver for RTnet " + DRV_VERSION " " DRV_RELDATE); +MODULE_LICENSE("GPL"); + +/* Operational parameter that usually are not changed. */ + +/* The Vortex size is twice that of the original EtherLinkIII series: the + runtime register window, window 1, is now always mapped in. + The Boomerang size is twice as large as the Vortex -- it has additional + bus master control registers. */ +#define VORTEX_TOTAL_SIZE 0x20 +#define BOOMERANG_TOTAL_SIZE 0x40 + +/* Set iff a MII transceiver on any interface requires mdio preamble. + This only set with the original DP83840 on older 3c905 boards, so the extra + code size of a per-interface flag is not worthwhile. */ +static char mii_preamble_required; + +#define PFX DRV_NAME ": " + + + +/* + Theory of Operation + + I. Board Compatibility + + This device driver is designed for the 3Com FastEtherLink and FastEtherLink + XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs + versions of the FastEtherLink cards. The supported product IDs are + 3c590, 3c592, 3c595, 3c597, 3c900, 3c905 + + The related ISA 3c515 is supported with a separate driver, 3c515.c, included + with the kernel source or available from + cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html + + II. Board-specific settings + + PCI bus devices are configured by the system at boot time, so no jumpers + need to be set on the board. The system BIOS should be set to assign the + PCI INTA signal to an otherwise unused system IRQ line. + + The EEPROM settings for media type and forced-full-duplex are observed. + The EEPROM media type should be left at the default "autoselect" unless using + 10base2 or AUI connections which cannot be reliably detected. + + III. Driver operation + + The 3c59x series use an interface that's very similar to the previous 3c5x9 + series. The primary interface is two programmed-I/O FIFOs, with an + alternate single-contiguous-region bus-master transfer (see next). + + The 3c900 "Boomerang" series uses a full-bus-master interface with separate + lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, + DEC Tulip and Intel Speedo3. The first chip version retains a compatible + programmed-I/O interface that has been removed in 'B' and subsequent board + revisions. + + One extension that is advertised in a very large font is that the adapters + are capable of being bus masters. On the Vortex chip this capability was + only for a single contiguous region making it far less useful than the full + bus master capability. 
There is a significant performance impact of taking + an extra interrupt or polling for the completion of each transfer, as well + as difficulty sharing the single transfer engine between the transmit and + receive threads. Using DMA transfers is a win only with large blocks or + with the flawed versions of the Intel Orion motherboard PCI controller. + + The Boomerang chip's full-bus-master interface is useful, and has the + currently-unused advantages over other similar chips that queued transmit + packets may be reordered and receive buffer groups are associated with a + single frame. + + With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme. + Rather than a fixed intermediate receive buffer, this scheme allocates + full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as + the copying breakpoint: it is chosen to trade-off the memory wasted by + passing the full-sized skbuff to the queue layer for all frames vs. the + copying cost of copying a frame to a correctly-sized skbuff. + + IIIC. Synchronization + The driver runs as two independent, single-threaded flows of control. One + is the send-packet routine, which enforces single-threaded use by the + dev->tbusy flag. The other thread is the interrupt handler, which is single + threaded by the hardware and other software. + + IV. Notes + + Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development + 3c590, 3c595, and 3c900 boards. + The name "Vortex" is the internal 3Com project name for the PCI ASIC, and + the EISA version is called "Demon". According to Terry these names come + from rides at the local amusement park. + + The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes! + This driver only supports ethernet packets because of the skbuff allocation + limit of 4K. +*/ + +/* This table drives the PCI probe routines. It's mostly boilerplate in all + of the drivers, and will likely be provided by some future kernel. 
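+*/
+
+/* Editor's illustration (not in the original patch): the RX_COPYBREAK
+ * scheme described above, roughly as it looks on a receive path. The
+ * constant and the helper name are assumptions for this sketch; the
+ * RTnet port below has removed the rx_copybreak tunable altogether.
+ */
+#if 0	/* illustration only */
+#define RX_COPYBREAK 200	/* copy frames shorter than this */
+
+static struct rtskb *rx_copybreak(struct rtnet_device *rtdev,
+				  struct rtskb *ring_skb, int pkt_len)
+{
+	struct rtskb *skb = ring_skb;
+
+	if (pkt_len < RX_COPYBREAK) {
+		/* Small frame: hand up a right-sized copy and leave the
+		 * full-sized buffer in the Rx ring. */
+		skb = rtnetdev_alloc_rtskb(rtdev, pkt_len + 2);
+		if (skb) {
+			rtskb_reserve(skb, 2);	/* align the IP header */
+			memcpy(rtskb_put(skb, pkt_len), ring_skb->data,
+			       pkt_len);
+		}
+	} /* else: large frame, pass the ring buffer itself up; the
+	   * caller must then refill the ring slot. */
+	return skb;
+}
+#endif
+
+/* The probe table itself follows.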
+*/ +enum pci_flags_bit { + PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, + PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3, +}; + +enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8, + EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ + HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, + INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, + EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000 }; + +enum vortex_chips { + CH_3C590 = 0, + CH_3C592, + CH_3C597, + CH_3C595_1, + CH_3C595_2, + + CH_3C595_3, + CH_3C900_1, + CH_3C900_2, + CH_3C900_3, + CH_3C900_4, + + CH_3C900_5, + CH_3C900B_FL, + CH_3C905_1, + CH_3C905_2, + CH_3C905B_1, + + CH_3C905B_2, + CH_3C905B_FX, + CH_3C905C, + CH_3C980, + CH_3C9805, + + CH_3CSOHO100_TX, + CH_3C555, + CH_3C556, + CH_3C556B, + CH_3C575, + + CH_3C575_1, + CH_3CCFE575, + CH_3CCFE575CT, + CH_3CCFE656, + CH_3CCFEM656, + + CH_3CCFEM656_1, + CH_3C450, +}; + + +/* note: this array directly indexed by above enums, and MUST + * be kept in sync with both the enums above, and the PCI device + * table below + */ +static struct vortex_chip_info { + const char *name; + int flags; + int drv_flags; + int io_size; +} vortex_info_tbl[] = { +#define EISA_TBL_OFFSET 0 /* Offset of this entry for vortex_eisa_init */ + {"3c590 Vortex 10Mbps", + PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ + PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ + PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c595 Vortex 100baseTx", + PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c595 Vortex 100baseT4", + PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, + + {"3c595 Vortex 100base-MII", + PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c900 Boomerang 10baseT", + PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, }, + {"3c900 Boomerang 10Mbps Combo", + PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, }, + {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */ + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c900 Cyclone 10Mbps Combo", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + + {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */ + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c900B-FL Cyclone 10base-FL", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c905 Boomerang 100baseTx", + PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, }, + {"3c905 Boomerang 100baseT4", + PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, }, + {"3c905B Cyclone 100baseTx", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, + + {"3c905B Cyclone 10/100/BNC", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c905B-FX Cyclone 100baseFx", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c905C Tornado", + PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c980 Cyclone", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c982 Dual Port Server Cyclone", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + + {"3cSOHO100-TX Hurricane", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c555 Laptop Hurricane", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, }, + {"3c556 Laptop Tornado", + PCI_USES_IO|PCI_USES_MASTER, 
IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR| + HAS_HWCKSM, 128, }, + {"3c556B Laptop Hurricane", + PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| + HAS_HWCKSM, 128, }, + {"3c575 [Megahertz] 10/100 LAN CardBus", + PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, + + {"3c575 Boomerang CardBus", + PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, + {"3CCFE575BT Cyclone CardBus", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT| + INVERT_LED_PWR|HAS_HWCKSM, 128, }, + {"3CCFE575CT Tornado CardBus", + PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, + {"3CCFE656 Cyclone CardBus", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + INVERT_LED_PWR|HAS_HWCKSM, 128, }, + {"3CCFEM656B Cyclone+Winmodem CardBus", + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + INVERT_LED_PWR|HAS_HWCKSM, 128, }, + + {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */ + PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, + {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */ + PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, + {0,}, /* 0 terminated list. */ +}; + + +static struct pci_device_id vortex_pci_tbl[] = { + { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, + { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, + { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, + { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 }, + { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 }, + + { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 }, + { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 }, + { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 }, + { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 }, + { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 }, + + { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 }, + { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL }, + { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 }, + { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 }, + { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 }, + + { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, + { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX }, + { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C }, + { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 }, + { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 }, + + { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX }, + { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 }, + { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 }, + { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B }, + { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 }, + + { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 }, + { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 }, + { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT }, + { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 }, + { 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 }, + + { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 }, + { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 
0, CH_3C450 }, + {0,} /* 0 terminated list. */ +}; +MODULE_DEVICE_TABLE(pci, vortex_pci_tbl); + + +/* Operational definitions. + These are not used by other compilation units and thus are not + exported in a ".h" file. + + First the windows. There are eight register windows, with the command + and status registers available in each. +*/ +#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) +#define EL3_CMD 0x0e +#define EL3_STATUS 0x0e + +/* The top five bits written to EL3_CMD are a command, the lower + 11 bits are the parameter, if applicable. + Note that 11 parameters bits was fine for ethernet, but the new chip + can handle FDDI length frames (~4500 octets) and now parameters count + 32-bit 'Dwords' rather than octets. */ + +enum vortex_cmd { + TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, + RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, + UpStall = 6<<11, UpUnstall = (6<<11)+1, + DownStall = (6<<11)+2, DownUnstall = (6<<11)+3, + RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, + FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, + SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, + SetTxThreshold = 18<<11, SetTxStart = 19<<11, + StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11, + StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,}; + +/* The SetRxFilter command accepts the following classes: */ +enum RxFilter { + RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; + +/* Bits in the general status register. */ +enum vortex_status { + IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004, + TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, + IntReq = 0x0040, StatsFull = 0x0080, + DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10, + DMAInProgress = 1<<11, /* DMA controller is still busy.*/ + CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/ +}; + +/* Register window 1 offsets, the window used in normal operation. + On the Vortex this window is always mapped at offsets 0x10-0x1f. */ +enum Window1 { + TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, + RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B, + TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */ +}; +enum Window0 { + Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */ + Wn0EepromData = 12, /* Window 0: EEPROM results register. */ + IntrStatus=0x0E, /* Valid in all windows. */ +}; +enum Win0_EEPROM_bits { + EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0, + EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ + EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ +}; +/* EEPROM locations. */ +enum eeprom_offset { + PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3, + EtherLink3ID=7, IFXcvrIO=8, IRQLine=9, + NodeAddr01=10, NodeAddr23=11, NodeAddr45=12, + DriverTune=13, Checksum=15}; + +enum Window2 { /* Window 2. */ + Wn2_ResetOptions=12, +}; +enum Window3 { /* Window 3: MAC/config bits. 
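+		(Window 3 also holds the InternalConfig dword that the
+		BFEXT/BFINS macros just below pick apart: RAM size and
+		split, transceiver type, autoselect bit.)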
*/ + Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8, +}; + +#define BFEXT(value, offset, bitcount) \ + ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1)) + +#define BFINS(lhs, rhs, offset, bitcount) \ + (((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \ + (((rhs) & ((1 << (bitcount)) - 1)) << (offset))) + +#define RAM_SIZE(v) BFEXT(v, 0, 3) +#define RAM_WIDTH(v) BFEXT(v, 3, 1) +#define RAM_SPEED(v) BFEXT(v, 4, 2) +#define ROM_SIZE(v) BFEXT(v, 6, 2) +#define RAM_SPLIT(v) BFEXT(v, 16, 2) +#define XCVR(v) BFEXT(v, 20, 4) +#define AUTOSELECT(v) BFEXT(v, 24, 1) + +enum Window4 { /* Window 4: Xcvr/media bits. */ + Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10, +}; +enum Win4_Media_bits { + Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */ + Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */ + Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */ + Media_LnkBeat = 0x0800, +}; +enum Window7 { /* Window 7: Bus Master control. */ + Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12, +}; +/* Boomerang bus master control registers. */ +enum MasterCtrl { + PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c, + TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38, +}; + +/* The Rx and Tx descriptor lists. + Caution Alpha hackers: these types are 32 bits! Note also the 8 byte + alignment contraint on tx_ring[] and rx_ring[]. */ +#define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */ +#define DN_COMPLETE 0x00010000 /* This packet has been downloaded */ +struct boom_rx_desc { + u32 next; /* Last entry points to 0. */ + s32 status; + u32 addr; /* Up to 63 addr/len pairs possible. */ + s32 length; /* Set LAST_FRAG to indicate last pair. */ +}; +/* Values for the Rx status entry. */ +enum rx_desc_status { + RxDComplete=0x00008000, RxDError=0x4000, + /* See boomerang_rx() for actual error bits */ + IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27, + IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31, +}; + +// *** RTnet *** +//#ifdef MAX_SKB_FRAGS +//#define DO_ZEROCOPY 1 +//#else +#define DO_ZEROCOPY 0 +//#endif + +struct boom_tx_desc { + u32 next; /* Last entry points to 0. */ + s32 status; /* bits 0:12 length, others see below. */ +#if DO_ZEROCOPY + struct { + u32 addr; + s32 length; + } frag[1+MAX_SKB_FRAGS]; +#else + u32 addr; + s32 length; +#endif +}; + +/* Values for the Tx status entry. */ +enum tx_desc_status { + CRCDisable=0x2000, TxDComplete=0x8000, + AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000, + TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */ +}; + +/* Chip features we care about in vp->capabilities, read from the EEPROM. */ +enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 }; + +struct vortex_private { + /* The Rx and Tx rings should be quad-word-aligned. */ + struct boom_rx_desc* rx_ring; + struct boom_tx_desc* tx_ring; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + /* The addresses of transmit- and receive-in-place skbuffs. */ + + // *** RTnet *** + struct rtskb *tx_skbuff[TX_RING_SIZE]; + struct rtskb *rx_skbuff[RX_RING_SIZE]; + // *** RTnet *** + + struct rtnet_device *next_module; /* NULL if PCI device */ + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ + struct net_device_stats stats; + struct rtskb *tx_skb; /* Packet being eaten by bus master ctrl. 
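+					"Eaten" means currently owned by the
+					single-shot Vortex DMA engine; the
+					buffer is unmapped and freed again in
+					the DMADone branch of
+					vortex_interrupt().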
*/ + dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ + + /* PCI configuration space information. */ + struct pci_dev *pdev; + char *cb_fn_base; /* CardBus function status addr space. */ + + /* Some values here only for performance evaluation and path-coverage */ + int rx_nocopy, rx_copy, queued_packet, rx_csumhits; + int card_idx; + + /* The remainder are related to chip state, mostly media selection. */ + struct timer_list timer; /* Media selection timer. */ + struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */ + int options; /* User-settable misc. driver options. */ + unsigned int media_override:4, /* Passed-in media type. */ + default_media:4, /* Read from the EEPROM/Wn3_Config. */ + full_duplex:1, force_fd:1, autoselect:1, + bus_master:1, /* Vortex can only do a fragment bus-m. */ + full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */ + flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */ + partner_flow_ctrl:1, /* Partner supports flow control */ + has_nway:1, + enable_wol:1, /* Wake-on-LAN is enabled */ + pm_state_valid:1, /* power_state[] has sane contents */ + open:1, + medialock:1, + must_free_region:1; /* Flag: if zero, Cardbus owns the I/O region */ + int drv_flags; + u16 status_enable; + u16 intr_enable; + u16 available_media; /* From Wn3_Options. */ + u16 capabilities, info1, info2; /* Various, from EEPROM. */ + u16 advertising; /* NWay media advertisement */ + unsigned char phys[2]; /* MII device addresses. */ + u16 deferred; /* Resend these interrupts when we + * bale from the ISR */ + u16 io_size; /* Size of PCI region (for release_region) */ + rtdm_lock_t lock; /* Serialise access to device & its vortex_private */ + spinlock_t mdio_lock; /* Serialise access to mdio hardware */ + u32 power_state[16]; + rtdm_irq_t irq_handle; +}; + +/* The action to take with a media selection timer tick. + Note that we deviate from the 3Com order by checking 10base2 before AUI. +*/ +enum xcvr_types { + XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx, + XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, +}; + +static struct media_table { + char *name; + unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ + mask:8, /* The transceiver-present bit in Wn3_Config.*/ + next:8; /* The media type to try next. */ + int wait; /* Time before we check media status. 
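+					(in jiffies; the table below uses
+					fractions and multiples of HZ).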
*/ +} media_tbl[] = { + { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10}, + { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10}, + { "undefined", 0, 0x80, XCVR_10baseT, 10000}, + { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10}, + { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10}, + { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10}, + { "MII", 0, 0x41, XCVR_10baseT, 3*HZ }, + { "undefined", 0, 0x01, XCVR_10baseT, 10000}, + { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ}, + { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ }, + { "Default", 0, 0xFF, XCVR_10baseT, 10000}, +}; + +static int vortex_probe1(struct pci_dev *pdev, long ioaddr, int irq, + int chip_idx, int card_idx); +static void vortex_up(struct rtnet_device *rtdev); +static void vortex_down(struct rtnet_device *rtdev); +static int vortex_open(struct rtnet_device *rtdev); +static void mdio_sync(long ioaddr, int bits); +static int mdio_read(struct rtnet_device *rtdev, int phy_id, int location); +static void mdio_write(struct rtnet_device *vp, int phy_id, int location, int value); + +// *** RTnet *** +//static void vortex_timer(unsigned long arg); +//static void rx_oom_timer(unsigned long arg); +// *** RTnet *** + +static int vortex_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev); +static int boomerang_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev); +static int vortex_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp); +static int boomerang_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp); +static int vortex_interrupt(rtdm_irq_t *irq_handle); +static int boomerang_interrupt(rtdm_irq_t *irq_handle); +static int vortex_close(struct rtnet_device *rtdev); +static void dump_tx_ring(struct rtnet_device *rtdev); + +static void update_stats(long ioaddr, struct rtnet_device *dev); +static struct net_device_stats *vortex_get_stats(struct rtnet_device *rtdev); + +static void set_rx_mode(struct rtnet_device *rtdev); + +// *** RTnet *** +//static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +//static void vortex_tx_timeout(struct net_device *dev); +// *** RTnet *** + +static void acpi_set_WOL(struct rtnet_device *rtdev); + +/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ +/* Option count limit only -- unlimited interfaces are supported. 
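+*/
+
+/* Editor's sketch (not in the original patch): how the autoselect logic
+ * in vortex_up() further down walks media_tbl[] above -- each entry
+ * names the transceiver-present bit to test against Wn3_Options and the
+ * next type to try. Illustration only:
+ */
+#if 0
+static int pick_media(struct vortex_private *vp, int port)
+{
+	while (!(vp->available_media & media_tbl[port].mask))
+		port = media_tbl[port].next;	/* follow the fallback chain */
+	return port;
+}
+#endif
+
+/* Option arrays follow; the count is limited, the number of interfaces is not: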
*/ +#define MAX_UNITS 8 +static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,}; +static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; + +module_param(debug, int, 0444); +module_param_array(options, int, NULL, 0444); +module_param_array(full_duplex, int, NULL, 0444); +module_param_array(hw_checksums, int, NULL, 0444); +module_param_array(flow_ctrl, int, NULL, 0444); +module_param_array(enable_wol, int, NULL, 0444); +/*** RTnet *** + MODULE_PARM(rx_copybreak, "i"); + *** RTnet ***/ +module_param(max_interrupt_work, int, 0444); +/*** RTnet *** + MODULE_PARM(compaq_ioaddr, "i"); + MODULE_PARM(compaq_irq, "i"); + MODULE_PARM(compaq_device_id, "i"); + MODULE_PARM(watchdog, "i"); + *** RTnet ***/ +MODULE_PARM_DESC(debug, "3c59x debug level (0-6)"); +MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex"); +MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)"); +MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)"); +MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)"); +MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)"); +/*** RTnet *** + MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames"); + *** RTnet ***/ +MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt"); +/*** RTnet *** + MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)"); + MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)"); + MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)"); + MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds"); + *** RTnet ***/ + +/* #define dev_alloc_skb dev_alloc_skb_debug */ + +/* A list of all installed Vortex EISA devices, for removing the driver module. */ +static struct rtnet_device *root_vortex_eisa_dev; + +/* Variables to work-around the Compaq PCI BIOS32 problem. */ +// *** RTnet *** +//static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; +// *** RTnet *** + +static int vortex_cards_found; + +#ifdef CONFIG_PM + +#endif /* CONFIG_PM */ + +/* returns count found (>= 0), or negative on error */ + +/* returns count (>= 0), or negative on error */ +static int vortex_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rc; + + if( vortex_cards_found >= cards ) + return -ENODEV; + + /* wake up and enable device */ + if (pci_enable_device (pdev)) { + rc = -EIO; + } else { + rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq, + ent->driver_data, vortex_cards_found); + if (rc == 0) + vortex_cards_found++; + } + return rc; +} + +/* + * Start up the PCI device which is described by *pdev. + * Return 0 on success. 
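+ *
+ * (Editor's aside, not in the original patch: the per-card 'option'
+ * word parsed below packs several knobs into one int -- bits 0-3 media
+ * type, bit 4 bus mastering, bit 9 full duplex, bit 10 Wake-on-LAN,
+ * bits 14/15 debug-level overrides. A hedged sketch of the decode,
+ * which vortex_probe1() spreads over several tests:
+ *
+ *	vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
+ *	vp->full_duplex    = (option & 0x200) ? 1 : 0;
+ *	vp->bus_master     = (option & 16)    ? 1 : 0;
+ *	vp->enable_wol     = (option & 0x400) ? 1 : 0;)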
+ * + * NOTE: pdev can be NULL, for the case of an EISA driver + */ +static int vortex_probe1(struct pci_dev *pdev, + long ioaddr, int irq, + int chip_idx, int card_idx) +{ + // *** RTnet *** + struct rtnet_device *rtdev = NULL; + // *** RTnet *** + + struct vortex_private *vp; + int option; + unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ + int i, step; + static int printed_version; + int retval, print_info; + struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx]; + const char *print_name; + + + + if (!printed_version) { + printk (version); + printed_version = 1; + } + + print_name = pdev ? pci_name(pdev) : "3c59x"; + + // *** RTnet *** + rtdev = rt_alloc_etherdev(sizeof(*vp), RX_RING_SIZE * 2 + TX_RING_SIZE); + retval = -ENOMEM; + if (!rtdev) { + printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n"); + goto out; + } + rtdev_alloc_name(rtdev, "rteth%d"); + memset(rtdev->priv, 0, sizeof(*vp)); + rt_rtdev_connect(rtdev, &RTDEV_manager); + rtdev->vers = RTDEV_VERS_2_0; + // *** RTnet *** + + vp = rtdev->priv; + + /* The lower four bits are the media type. */ + if (rtdev->mem_start) { + /* + * The 'options' param is passed in as the third arg to the + * LILO 'ether=' argument for non-modular use + */ + option = rtdev->mem_start; + } + else if (card_idx < MAX_UNITS) + option = options[card_idx]; + else + option = -1; + + if (option > 0) { + if (option & 0x8000) + vortex_debug = 7; + if (option & 0x4000) + vortex_debug = 2; + if (option & 0x0400) + vp->enable_wol = 1; + } + + print_info = (vortex_debug > 1); + if (print_info) + printk (KERN_INFO "See Documentation/networking/vortex.txt\n"); + + printk(KERN_INFO "%s: 3Com %s %s at 0x%lx. Vers " DRV_VERSION "\n", + print_name, + pdev ? "PCI" : "EISA", + vci->name, + ioaddr); + + rtdev->base_addr = ioaddr; + rtdev->irq = irq; + rtdev->mtu = mtu; + vp->drv_flags = vci->drv_flags; + vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0; + vp->io_size = vci->io_size; + vp->card_idx = card_idx; + + /* module list only for EISA devices */ + if (pdev == NULL) { + vp->next_module = root_vortex_eisa_dev; + root_vortex_eisa_dev = rtdev; + } + + /* PCI-only startup logic */ + if (pdev) { + /* EISA resources already marked, so only PCI needs to do this here */ + /* Ignore return value, because Cardbus drivers already allocate for us */ + if (!request_region(ioaddr, vci->io_size, print_name)) + printk(KERN_INFO "rt_3c50x: request region failed\n"); + else + vp->must_free_region = 1; + + /* enable bus-mastering if necessary */ + if (vci->flags & PCI_USES_MASTER) + pci_set_master (pdev); + + if (vci->drv_flags & IS_VORTEX) { + u8 pci_latency; + u8 new_latency = 248; + + /* Check the PCI latency value. On the 3c590 series the latency timer + must be set to the maximum value to avoid data corruption that occurs + when the timer expires during a transfer. This bug exists the Vortex + chip only. */ + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); + if (pci_latency < new_latency) { + printk(KERN_INFO "%s: Overriding PCI latency" + " timer (CFLT) setting of %d, new value is %d.\n", + print_name, pci_latency, new_latency); + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency); + } + } + } + + rtdm_lock_init(&vp->lock); + spin_lock_init(&vp->mdio_lock); + vp->pdev = pdev; + + /* Makes sure rings are at least 16 byte aligned. 
*/ + vp->rx_ring = dma_alloc_coherent( + &pdev->dev, + sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + &vp->rx_ring_dma, + GFP_ATOMIC); + retval = -ENOMEM; + if (vp->rx_ring == 0) + goto free_region; + + vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); + vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; + + /* if we are a PCI driver, we store info in pdev->driver_data + * instead of a module list */ + if (pdev) + pci_set_drvdata(pdev, rtdev); + + vp->media_override = 7; + if (option >= 0) { + vp->media_override = ((option & 7) == 2) ? 0 : option & 15; + if (vp->media_override != 7) + vp->medialock = 1; + vp->full_duplex = (option & 0x200) ? 1 : 0; + vp->bus_master = (option & 16) ? 1 : 0; + } + + if (card_idx < MAX_UNITS) { + if (full_duplex[card_idx] > 0) + vp->full_duplex = 1; + if (flow_ctrl[card_idx] > 0) + vp->flow_ctrl = 1; + if (enable_wol[card_idx] > 0) + vp->enable_wol = 1; + } + + vp->force_fd = vp->full_duplex; + vp->options = option; + + /* Read the station address from the EEPROM. */ + EL3WINDOW(0); + { + int base; + + if (vci->drv_flags & EEPROM_8BIT) + base = 0x230; + else if (vci->drv_flags & EEPROM_OFFSET) + base = EEPROM_Read + 0x30; + else + base = EEPROM_Read; + + for (i = 0; i < 0x40; i++) { + int timer; + outw(base + i, ioaddr + Wn0EepromCmd); + /* Pause for at least 162 us. for the read to take place. */ + for (timer = 10; timer >= 0; timer--) { + udelay(162); + if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0) + break; + } + eeprom[i] = inw(ioaddr + Wn0EepromData); + } + } + for (i = 0; i < 0x18; i++) + checksum ^= eeprom[i]; + checksum = (checksum ^ (checksum >> 8)) & 0xff; + if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */ + while (i < 0x21) + checksum ^= eeprom[i++]; + checksum = (checksum ^ (checksum >> 8)) & 0xff; + } + if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) + printk(" ***INVALID CHECKSUM %4.4x*** ", checksum); + + for (i = 0; i < 3; i++) + ((u16 *)rtdev->dev_addr)[i] = htons(eeprom[i + 10]); + if (print_info) { + for (i = 0; i < 6; i++) + printk("%c%2.2x", i ? ':' : ' ', rtdev->dev_addr[i]); + } + EL3WINDOW(2); + for (i = 0; i < 6; i++) + outb(rtdev->dev_addr[i], ioaddr + i); + +#ifdef __sparc__ + if (print_info) + printk(", IRQ %s\n", __irq_itoa(rtdev->irq)); +#else + if (print_info) + printk(", IRQ %d\n", rtdev->irq); + /* Tell them about an invalid IRQ. */ + if (rtdev->irq <= 0 || rtdev->irq >= NR_IRQS) + printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! 
***\n", + rtdev->irq); +#endif + + EL3WINDOW(4); + step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1; + if (print_info) { + printk(KERN_INFO " product code %02x%02x rev %02x.%d date %02d-" + "%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], + step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9); + } + + + if (pdev && vci->drv_flags & HAS_CB_FNS) { + unsigned long fn_st_addr; /* Cardbus function status space */ + unsigned short n; + + fn_st_addr = pci_resource_start (pdev, 2); + if (fn_st_addr) { + vp->cb_fn_base = ioremap(fn_st_addr, 128); + retval = -ENOMEM; + if (!vp->cb_fn_base) + goto free_ring; + } + if (print_info) { + printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n", + print_name, fn_st_addr, vp->cb_fn_base); + } + EL3WINDOW(2); + + n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010; + if (vp->drv_flags & INVERT_LED_PWR) + n |= 0x10; + if (vp->drv_flags & INVERT_MII_PWR) + n |= 0x4000; + outw(n, ioaddr + Wn2_ResetOptions); + } + + /* Extract our information from the EEPROM data. */ + vp->info1 = eeprom[13]; + vp->info2 = eeprom[15]; + vp->capabilities = eeprom[16]; + + if (vp->info1 & 0x8000) { + vp->full_duplex = 1; + if (print_info) + printk(KERN_INFO "Full duplex capable\n"); + } + + { + static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; + unsigned int config; + EL3WINDOW(3); + vp->available_media = inw(ioaddr + Wn3_Options); + if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */ + vp->available_media = 0x40; + config = inl(ioaddr + Wn3_Config); + if (print_info) { + printk(KERN_DEBUG " Internal config register is %4.4x, " + "transceivers %#x.\n", config, inw(ioaddr + Wn3_Options)); + printk(KERN_INFO " %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", + 8 << RAM_SIZE(config), + RAM_WIDTH(config) ? "word" : "byte", + ram_split[RAM_SPLIT(config)], + AUTOSELECT(config) ? "autoselect/" : "", + XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" : + media_tbl[XCVR(config)].name); + } + vp->default_media = XCVR(config); + if (vp->default_media == XCVR_NWAY) + vp->has_nway = 1; + vp->autoselect = AUTOSELECT(config); + } + + if (vp->media_override != 7) { + printk(KERN_INFO "%s: Media override to transceiver type %d (%s).\n", + print_name, vp->media_override, + media_tbl[vp->media_override].name); + rtdev->if_port = vp->media_override; + } else + rtdev->if_port = vp->default_media; + + if (rtdev->if_port == XCVR_MII || rtdev->if_port == XCVR_NWAY) { + int phy, phy_idx = 0; + EL3WINDOW(4); + mii_preamble_required++; + mii_preamble_required++; + mdio_read(rtdev, 24, 1); + for (phy = 0; phy < 32 && phy_idx < 1; phy++) { + int mii_status, phyx; + + /* + * For the 3c905CX we look at index 24 first, because it bogusly + * reports an external PHY at all indices + */ + if (phy == 0) + phyx = 24; + else if (phy <= 24) + phyx = phy - 1; + else + phyx = phy; + mii_status = mdio_read(rtdev, phyx, 1); + if (mii_status && mii_status != 0xffff) { + vp->phys[phy_idx++] = phyx; + if (print_info) { + printk(KERN_INFO " MII transceiver found at address %d," + " status %4x.\n", phyx, mii_status); + } + if ((mii_status & 0x0040) == 0) + mii_preamble_required++; + } + } + mii_preamble_required--; + if (phy_idx == 0) { + printk(KERN_WARNING" ***WARNING*** No MII transceivers found!\n"); + vp->phys[0] = 24; + } else { + vp->advertising = mdio_read(rtdev, vp->phys[0], 4); + if (vp->full_duplex) { + /* Only advertise the FD media types. 
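+				(0x02A0 = 10baseT-HD 0x0020 | 100baseTx-HD
+				0x0080 | 100baseT4 0x0200 in MII register 4,
+				i.e. every non-full-duplex mode this NIC
+				could offer.)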
*/ + vp->advertising &= ~0x02A0; + mdio_write(rtdev, vp->phys[0], 4, vp->advertising); + } + } + } + + if (vp->capabilities & CapBusMaster) { + vp->full_bus_master_tx = 1; + if (print_info) { + printk(KERN_INFO " Enabling bus-master transmits and %s receives.\n", + (vp->info2 & 1) ? "early" : "whole-frame" ); + } + vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2; + vp->bus_master = 0; /* AKPM: vortex only */ + } + + // *** RTnet *** + /* The 3c59x-specific entries in the device structure. */ + rtdev->open = vortex_open; + if (vp->full_bus_master_tx) { + rtdev->hard_start_xmit = boomerang_start_xmit; + /* Actually, it still should work with iommu. */ + rtdev->features |= NETIF_F_SG; + if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) || + (hw_checksums[card_idx] == 1)) { + rtdev->features |= NETIF_F_IP_CSUM; + } + } else { + rtdev->hard_start_xmit = vortex_start_xmit; + } + rtdev->get_stats = vortex_get_stats; + + if (print_info) { + printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n", + print_name, + (rtdev->features & NETIF_F_SG) ? "en":"dis", + (rtdev->features & NETIF_F_IP_CSUM) ? "en":"dis"); + } + + rtdev->stop = vortex_close; + retval = rt_register_rtnetdev(rtdev); + if (retval) { + printk(KERN_ERR "rt_3c59x: rtnet device registration failed %d\n",retval); + goto free_ring; + } + return 0; + + // *** RTnet *** + + free_ring: + dma_free_coherent(&pdev->dev, + sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + vp->rx_ring, + vp->rx_ring_dma); + free_region: + if (vp->must_free_region) + release_region(ioaddr, vci->io_size); + rtdev_free (rtdev); + printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval); + out: + return retval; +} + +static void +issue_and_wait(struct rtnet_device *rtdev, int cmd) +{ + int i; + + outw(cmd, rtdev->base_addr + EL3_CMD); + for (i = 0; i < 2000; i++) { + if (!(inw(rtdev->base_addr + EL3_STATUS) & CmdInProgress)) + return; + } + + /* OK, that didn't work. Do it the slow way. One second */ + for (i = 0; i < 100000; i++) { + if (!(inw(rtdev->base_addr + EL3_STATUS) & CmdInProgress)) { + if (vortex_debug > 1) + rtdm_printk(KERN_INFO "%s: command 0x%04x took %d usecs\n", + rtdev->name, cmd, i * 10); + return; + } + udelay(10); + } + rtdm_printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n", + rtdev->name, cmd, inw(rtdev->base_addr + EL3_STATUS)); +} + +static void +vortex_up(struct rtnet_device *rtdev) +{ + long ioaddr = rtdev->base_addr; + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + unsigned int config; + int i; + + if (vp->pdev && vp->enable_wol) { + pci_set_power_state(vp->pdev, 0); /* Go active */ + pci_restore_state(vp->pdev, vp->power_state); + } + + /* Before initializing select the active media port. */ + EL3WINDOW(3); + config = inl(ioaddr + Wn3_Config); + + if (vp->media_override != 7) { + printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n", + rtdev->name, vp->media_override, + media_tbl[vp->media_override].name); + rtdev->if_port = vp->media_override; + } else if (vp->autoselect) { + if (vp->has_nway) { + if (vortex_debug > 1) + printk(KERN_INFO "%s: using NWAY device table, not %d\n", + rtdev->name, rtdev->if_port); + rtdev->if_port = XCVR_NWAY; + } else { + /* Find first available media type, starting with 100baseTx. */ + rtdev->if_port = XCVR_100baseTx; + while (! 
(vp->available_media & media_tbl[rtdev->if_port].mask)) + rtdev->if_port = media_tbl[rtdev->if_port].next; + if (vortex_debug > 1) + printk(KERN_INFO "%s: first available media type: %s\n", + rtdev->name, media_tbl[rtdev->if_port].name); + } + } else { + rtdev->if_port = vp->default_media; + if (vortex_debug > 1) + printk(KERN_INFO "%s: using default media %s\n", + rtdev->name, media_tbl[rtdev->if_port].name); + } + + init_timer(&vp->timer); + vp->timer.expires = RUN_AT(media_tbl[rtdev->if_port].wait); + vp->timer.data = (unsigned long)rtdev; + // *** RTnet vp->timer.function = vortex_timer; /* timer handler */ + // *** RTnet add_timer(&vp->timer); + + init_timer(&vp->rx_oom_timer); + vp->rx_oom_timer.data = (unsigned long)rtdev; + // **** RTnet *** vp->rx_oom_timer.function = rx_oom_timer; + + if (vortex_debug > 1) + printk(KERN_DEBUG "%s: Initial media type %s.\n", + rtdev->name, media_tbl[rtdev->if_port].name); + + vp->full_duplex = vp->force_fd; + config = BFINS(config, rtdev->if_port, 20, 4); + if (vortex_debug > 6) + printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config); + outl(config, ioaddr + Wn3_Config); + + if (rtdev->if_port == XCVR_MII || rtdev->if_port == XCVR_NWAY) { + int mii_reg1, mii_reg5; + EL3WINDOW(4); + /* Read BMSR (reg1) only to clear old status. */ + mii_reg1 = mdio_read(rtdev, vp->phys[0], 1); + mii_reg5 = mdio_read(rtdev, vp->phys[0], 5); + if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) + ; /* No MII device or no link partner report */ + else if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */ + || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */ + vp->full_duplex = 1; + vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); + if (vortex_debug > 1) + printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x," + " info1 %04x, setting %s-duplex.\n", + rtdev->name, vp->phys[0], + mii_reg1, mii_reg5, + vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half"); + EL3WINDOW(3); + } + + /* Set the full-duplex bit. */ + outw( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | + (rtdev->mtu > 1500 ? 0x40 : 0) | + ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0), + ioaddr + Wn3_MAC_Ctrl); + + if (vortex_debug > 1) { + printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n", + rtdev->name, config); + } + + issue_and_wait(rtdev, TxReset); + /* + * Don't reset the PHY - that upsets autonegotiation during DHCP operations. + */ + issue_and_wait(rtdev, RxReset|0x04); + + outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); + + if (vortex_debug > 1) { + EL3WINDOW(4); + printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n", + rtdev->name, rtdev->irq, inw(ioaddr + Wn4_Media)); + } + + /* Set the station address and mask in window 2 each time opened. */ + EL3WINDOW(2); + for (i = 0; i < 6; i++) + outb(rtdev->dev_addr[i], ioaddr + i); + for (; i < 12; i+=2) + outw(0, ioaddr + i); + + if (vp->cb_fn_base) { + unsigned short n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010; + if (vp->drv_flags & INVERT_LED_PWR) + n |= 0x10; + if (vp->drv_flags & INVERT_MII_PWR) + n |= 0x4000; + outw(n, ioaddr + Wn2_ResetOptions); + } + + if (rtdev->if_port == XCVR_10base2) + /* Start the thinnet transceiver. We should really wait 50ms...*/ + outw(StartCoax, ioaddr + EL3_CMD); + if (rtdev->if_port != XCVR_NWAY) { + EL3WINDOW(4); + outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) | + media_tbl[rtdev->if_port].media_bits, ioaddr + Wn4_Media); + } + + /* Switch to the stats window, and clear all stats by reading. 
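+	   (The window-6 statistics registers are clear-on-read, so the
+	   inb/inw sequence below zeroes the hardware counters by simply
+	   discarding the values.)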
*/ + outw(StatsDisable, ioaddr + EL3_CMD); + EL3WINDOW(6); + for (i = 0; i < 10; i++) + inb(ioaddr + i); + inw(ioaddr + 10); + inw(ioaddr + 12); + /* New: On the Vortex we must also clear the BadSSD counter. */ + EL3WINDOW(4); + inb(ioaddr + 12); + /* ..and on the Boomerang we enable the extra statistics bits. */ + outw(0x0040, ioaddr + Wn4_NetDiag); + + /* Switch to register set 7 for normal use. */ + EL3WINDOW(7); + + if (vp->full_bus_master_rx) { /* Boomerang bus master. */ + vp->cur_rx = vp->dirty_rx = 0; + /* Initialize the RxEarly register as recommended. */ + outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); + outl(0x0020, ioaddr + PktStatus); + outl(vp->rx_ring_dma, ioaddr + UpListPtr); + } + if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ + vp->cur_tx = vp->dirty_tx = 0; + if (vp->drv_flags & IS_BOOMERANG) + outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */ + /* Clear the Rx, Tx rings. */ + for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */ + vp->rx_ring[i].status = 0; + for (i = 0; i < TX_RING_SIZE; i++) + vp->tx_skbuff[i] = 0; + outl(0, ioaddr + DownListPtr); + } + /* Set receiver mode: presumably accept b-case and phys addr only. */ + set_rx_mode(rtdev); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + +// issue_and_wait(dev, SetTxStart|0x07ff); + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete| + (vp->full_bus_master_tx ? DownComplete : TxAvailable) | + (vp->full_bus_master_rx ? UpComplete : RxComplete) | + (vp->bus_master ? DMADone : 0); + vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | + (vp->full_bus_master_rx ? 0 : RxComplete) | + StatsFull | HostError | TxComplete | IntReq + | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete; + outw(vp->status_enable, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. */ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(vp->intr_enable, ioaddr + EL3_CMD); + if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ + writel(0x8000, vp->cb_fn_base + 4); + rtnetif_start_queue (rtdev); +} + +static int +vortex_open(struct rtnet_device *rtdev) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + int i; + int retval; + + // *** RTnet *** + rt_stack_connect(rtdev, &STACK_manager); + + if ((retval = rtdm_irq_request(&vp->irq_handle, rtdev->irq, + (vp->full_bus_master_rx ? boomerang_interrupt : vortex_interrupt), + 0, "rt_3c59x", rtdev))) { + printk(KERN_ERR "%s: Could not reserve IRQ %d\n", rtdev->name, rtdev->irq); + goto out; + } + // *** RTnet *** + + if (vp->full_bus_master_rx) { /* Boomerang bus master. */ + if (vortex_debug > 2) + printk(KERN_DEBUG "%s: Filling in the Rx ring.\n", rtdev->name); + for (i = 0; i < RX_RING_SIZE; i++) { + struct rtskb *skb; // *** RTnet + vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); + vp->rx_ring[i].status = 0; /* Clear complete bit. */ + vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); + skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ); + vp->rx_skbuff[i] = skb; + if (skb == NULL) + break; /* Bad news! 
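+					   Any rtskbs already allocated
+					   are unwound below and -ENOMEM
+					   is returned.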
*/ + // *** RTnet *** + rtskb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(vp->pdev, + skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); + // *** RTnet *** + } + if (i != RX_RING_SIZE) { + int j; + printk(KERN_EMERG "%s: no memory for rx ring\n", rtdev->name); + for (j = 0; j < i; j++) { + if (vp->rx_skbuff[j]) { + dev_kfree_rtskb(vp->rx_skbuff[j]); + vp->rx_skbuff[j] = 0; + } + } + retval = -ENOMEM; + goto out_free_irq; + } + /* Wrap the ring. */ + vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); + } + + vortex_up(rtdev); + return 0; + + out_free_irq: + + // *** RTnet *** + if ( (i=rtdm_irq_free(&vp->irq_handle))<0 ) + return i; + rt_stack_disconnect(rtdev); + // *** RTnet *** + out: + if (vortex_debug > 1) + printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", rtdev->name, retval); + return retval; +} + +/* + * Handle uncommon interrupt sources. This is a separate routine to minimize + * the cache impact. + */ +static void +vortex_error(struct rtnet_device *rtdev, int status, nanosecs_abs_t *time_stamp) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + int do_tx_reset = 0, reset_mask = 0; + unsigned char tx_status = 0; + int packets=0; + + if (vortex_debug > 2) { + rtdm_printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", rtdev->name, status); + } + + if (status & TxComplete) { /* Really "TxError" for us. */ + tx_status = inb(ioaddr + TxStatus); + /* Presumably a tx-timeout. We must merely re-enable. */ + if (vortex_debug > 2 + || (tx_status != 0x88 && vortex_debug > 0)) { + rtdm_printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n", + rtdev->name, tx_status); + if (tx_status == 0x82) { + rtdm_printk(KERN_ERR "Probably a duplex mismatch. See " + "Documentation/networking/vortex.txt\n"); + } + dump_tx_ring(rtdev); + } + if (tx_status & 0x14) vp->stats.tx_fifo_errors++; + if (tx_status & 0x38) vp->stats.tx_aborted_errors++; + outb(0, ioaddr + TxStatus); + if (tx_status & 0x30) { /* txJabber or txUnderrun */ + do_tx_reset = 1; + } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */ + do_tx_reset = 1; + reset_mask = 0x0108; /* Reset interface logic, but not download logic */ + } else { /* Merely re-enable the transmitter. */ + outw(TxEnable, ioaddr + EL3_CMD); + } + } + + if (status & RxEarly) { /* Rx early is unused. */ + vortex_rx(rtdev, &packets, time_stamp); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & StatsFull) { /* Empty statistics. */ + static int DoneDidThat; + if (vortex_debug > 4) + rtdm_printk(KERN_DEBUG "%s: Updating stats.\n", rtdev->name); + // *** RTnet *** update_stats(ioaddr, dev); + /* HACK: Disable statistics as an interrupt source. */ + /* This occurs when we have the wrong media type! */ + if (DoneDidThat == 0 && + inw(ioaddr + EL3_STATUS) & StatsFull) { + rtdm_printk(KERN_WARNING "%s: Updating statistics failed, disabling " + "stats as an interrupt source.\n", rtdev->name); + EL3WINDOW(5); + outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD); + vp->intr_enable &= ~StatsFull; + EL3WINDOW(7); + DoneDidThat++; + } + } + if (status & IntReq) { /* Restore all interrupt sources. 
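+					   (When the ISR exceeds
+					   max_interrupt_work it masks the
+					   noisy sources and parks them in
+					   vp->deferred; IntReq later brings
+					   us here to switch them back on.)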
*/ + outw(vp->status_enable, ioaddr + EL3_CMD); + outw(vp->intr_enable, ioaddr + EL3_CMD); + } + if (status & HostError) { + u16 fifo_diag; + EL3WINDOW(4); + fifo_diag = inw(ioaddr + Wn4_FIFODiag); + rtdm_printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n", + rtdev->name, fifo_diag); + /* Adapter failure requires Tx/Rx reset and reinit. */ + if (vp->full_bus_master_tx) { + int bus_status = inl(ioaddr + PktStatus); + /* 0x80000000 PCI master abort. */ + /* 0x40000000 PCI target abort. */ + if (vortex_debug) + rtdm_printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", rtdev->name, bus_status); + + /* In this case, blow the card away */ + vortex_down(rtdev); + issue_and_wait(rtdev, TotalReset | 0xff); + vortex_up(rtdev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */ + } else if (fifo_diag & 0x0400) + do_tx_reset = 1; + if (fifo_diag & 0x3000) { + /* Reset Rx fifo and upload logic */ + issue_and_wait(rtdev, RxReset|0x07); + /* Set the Rx filter to the current state. */ + set_rx_mode(rtdev); + outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */ + outw(AckIntr | HostError, ioaddr + EL3_CMD); + } + } + + if (do_tx_reset) { + issue_and_wait(rtdev, TxReset|reset_mask); + outw(TxEnable, ioaddr + EL3_CMD); + if (!vp->full_bus_master_tx) + rtnetif_wake_queue(rtdev); + } +} + +static int +vortex_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + rtdm_lockctx_t context; + + /* Put out the doubleword header... */ + outl(skb->len, ioaddr + TX_FIFO); + if (vp->bus_master) { + /* Set the bus-master controller to transfer the packet. */ + int len = (skb->len + 3) & ~3; + outl( vp->tx_skb_dma = pci_map_single(vp->pdev, skb->data, + len, PCI_DMA_TODEVICE), + ioaddr + Wn7_MasterAddr); + outw(len, ioaddr + Wn7_MasterLen); + vp->tx_skb = skb; + + rtdm_lock_irqsave(context); + if (unlikely(skb->xmit_stamp != NULL)) + *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + + *skb->xmit_stamp); + outw(StartDMADown, ioaddr + EL3_CMD); + rtdm_lock_irqrestore(context); + + /* rtnetif_wake_queue() will be called at the DMADone interrupt. */ + } else { + rtdm_printk("rt_3x59x: UNSUPPORTED CODE PATH (device is lacking DMA support)!\n"); + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + dev_kfree_rtskb (skb); + if (inw(ioaddr + TxFree) > 1536) { + rtnetif_start_queue (rtdev); /* AKPM: redundant? */ + } else { + /* Interrupt us when the FIFO has room for max-sized packet. */ + rtnetif_stop_queue(rtdev); + outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); + } + } + + //rtdev->trans_start = jiffies; + + /* Clear the Tx status stack. */ + { + int tx_status; + int i = 32; + + while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) { + if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */ + if (vortex_debug > 2) + printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n", + rtdev->name, tx_status); + if (tx_status & 0x04) vp->stats.tx_fifo_errors++; + if (tx_status & 0x38) vp->stats.tx_aborted_errors++; + if (tx_status & 0x30) { + issue_and_wait(rtdev, TxReset); + } + outw(TxEnable, ioaddr + EL3_CMD); + } + outb(0x00, ioaddr + TxStatus); /* Pop the status stack. 
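+						   TxStatus is a small hardware
+						   stack of per-packet
+						   completion codes; each write
+						   of 0 pops one entry, so this
+						   loop drains it.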
*/ + } + } + return 0; +} + +static int +boomerang_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + /* Calculate the next Tx descriptor entry. */ + int entry = vp->cur_tx % TX_RING_SIZE; + struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; + rtdm_lockctx_t context; + + if (vortex_debug > 6) { + rtdm_printk(KERN_DEBUG "boomerang_start_xmit()\n"); + if (vortex_debug > 3) + rtdm_printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n", + rtdev->name, vp->cur_tx); + } + + if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { + if (vortex_debug > 0) + rtdm_printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n", + rtdev->name); + rtnetif_stop_queue(rtdev); + return 1; + } + + vp->tx_skbuff[entry] = skb; + + vp->tx_ring[entry].next = 0; +#if DO_ZEROCOPY + if (skb->ip_summed != CHECKSUM_HW) + vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); + else + vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum); + + if (!skb_shinfo(skb)->nr_frags) { + { +// int j; +// for (j=0; j<skb->len; j++) +// { +// rtdm_printk("%02x ", skb->data[j]); +// } + + } + vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE)); + vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); + } else { + int i; + + vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE)); + vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + vp->tx_ring[entry].frag[i+1].addr = + cpu_to_le32(pci_map_single(vp->pdev, // *** RTnet: page mapping correct? Or is this code never used? + (void*)page_address(frag->page) + frag->page_offset, + frag->size, PCI_DMA_TODEVICE)); + + if (i == skb_shinfo(skb)->nr_frags-1) + vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG); + else + vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size); + } + } +#else + vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE)); + vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); + vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); +#endif + + // *** RTnet *** + rtdm_irq_disable(&vp->irq_handle); + rtdm_lock_get(&vp->lock); + // *** RTnet *** + + /* Wait for the stall to complete. */ + issue_and_wait(rtdev, DownStall); + + rtdm_lock_irqsave(context); + if (unlikely(skb->xmit_stamp != NULL)) + *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); + + prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)); + if (inl(ioaddr + DownListPtr) == 0) { + outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr); + vp->queued_packet++; + } + + vp->cur_tx++; + if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) { + rtnetif_stop_queue (rtdev); + } else { /* Clear previous interrupt enable. */ +#if defined(tx_interrupt_mitigation) + /* Dubious. If in boomerang_interrupt "faster" cyclone ifdef + * were selected, this would corrupt DN_COMPLETE. No?
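+ * Either way, clearing TxIntrUploaded in prev_entry below drops that descriptor's download-complete interrupt, batching Tx completions instead of taking one per packet.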
+ */ + prev_entry->status &= cpu_to_le32(~TxIntrUploaded); +#endif + } + outw(DownUnstall, ioaddr + EL3_CMD); + rtdm_lock_put_irqrestore(&vp->lock, context); + rtdm_irq_enable(&vp->irq_handle); + //rtdev->trans_start = jiffies; + return 0; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ + +/* + * This is the ISR for the vortex series chips. + * full_bus_master_tx == 0 && full_bus_master_rx == 0 + */ + +static int vortex_interrupt(rtdm_irq_t *irq_handle) +{ + // *** RTnet *** + nanosecs_abs_t time_stamp = rtdm_clock_read(); + struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); + int packets = 0; + // *** RTnet *** + + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr; + int status; + int work_done = max_interrupt_work; + + ioaddr = rtdev->base_addr; + rtdm_lock_get(&vp->lock); + + status = inw(ioaddr + EL3_STATUS); + + if (vortex_debug > 6) + printk("vortex_interrupt(). status=0x%4x\n", status); + + if ((status & IntLatch) == 0) + goto handler_exit; /* No interrupt: shared IRQs cause this */ + + if (status & IntReq) { + status |= vp->deferred; + vp->deferred = 0; + } + + if (status == 0xffff) /* h/w no longer present (hotplug)? */ + goto handler_exit; + + if (vortex_debug > 4) + rtdm_printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n", + rtdev->name, status, inb(ioaddr + Timer)); + + do { + if (vortex_debug > 5) + rtdm_printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n", + rtdev->name, status); + if (status & RxComplete) + vortex_rx(rtdev, &packets, &time_stamp); + + if (status & TxAvailable) { + if (vortex_debug > 5) + rtdm_printk(KERN_DEBUG " TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + rtnetif_wake_queue (rtdev); + } + + if (status & DMADone) { + if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) { + outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ + pci_unmap_single(vp->pdev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); + dev_kfree_rtskb(vp->tx_skb); /* Release the transferred buffer */ + if (inw(ioaddr + TxFree) > 1536) { + /* + * AKPM: FIXME: I don't think we need this. If the queue was stopped due to + * insufficient FIFO room, the TxAvailable test will succeed and call + * rtnetif_wake_queue() + */ + rtnetif_wake_queue(rtdev); + } else { /* Interrupt when FIFO has room for max-sized packet. */ + outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); + rtnetif_stop_queue(rtdev); + } + } + } + /* Check for all uncommon interrupts at once. */ + if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { + if (status == 0xffff) + break; + vortex_error(rtdev, status, &time_stamp); + } + + if (--work_done < 0) { + rtdm_printk(KERN_WARNING "%s: Too much work in interrupt, status " + "%4.4x.\n", rtdev->name, status); + /* Disable all pending interrupts. */ + do { + vp->deferred |= status; + outw(SetStatusEnb | (~vp->deferred & vp->status_enable), + ioaddr + EL3_CMD); + outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); + } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch); + /* The timer will reenable interrupts. */ + mod_timer(&vp->timer, jiffies + 1*HZ); + break; + } + /* Acknowledge the IRQ. 
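Writing AckIntr together with IntReq|IntLatch clears the latched condition before EL3_STATUS is sampled again in the loop condition below.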
*/ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); + + if (vortex_debug > 4) + rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n", + rtdev->name, status); + handler_exit: + rtdm_lock_put(&vp->lock); + if (packets > 0) + rt_mark_stack_mgr(rtdev); + + return RTDM_IRQ_HANDLED; +} + +/* + * This is the ISR for the boomerang series chips. + * full_bus_master_tx == 1 && full_bus_master_rx == 1 + */ + +static int boomerang_interrupt(rtdm_irq_t *irq_handle) +{ + // *** RTnet *** + nanosecs_abs_t time_stamp = rtdm_clock_read(); + struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); + int packets = 0; + // *** RTnet *** + + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr; + int status; + int work_done = max_interrupt_work; + + ioaddr = rtdev->base_addr; + + /* + * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout + * and boomerang_start_xmit + */ + rtdm_lock_get(&vp->lock); + + status = inw(ioaddr + EL3_STATUS); + + if (vortex_debug > 6) + rtdm_printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status); + + if ((status & IntLatch) == 0) + goto handler_exit; /* No interrupt: shared IRQs can cause this */ + + if (status == 0xffff) { /* h/w no longer present (hotplug)? */ + if (vortex_debug > 1) + rtdm_printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n"); + goto handler_exit; + } + + if (status & IntReq) { + status |= vp->deferred; + vp->deferred = 0; + } + + if (vortex_debug > 4) + rtdm_printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n", + rtdev->name, status, inb(ioaddr + Timer)); + do { + if (vortex_debug > 5) + rtdm_printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n", + rtdev->name, status); + if (status & UpComplete) { + outw(AckIntr | UpComplete, ioaddr + EL3_CMD); + if (vortex_debug > 5) + rtdm_printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n"); + boomerang_rx(rtdev, &packets, &time_stamp); + } + + if (status & DownComplete) { + unsigned int dirty_tx = vp->dirty_tx; + + outw(AckIntr | DownComplete, ioaddr + EL3_CMD); + while (vp->cur_tx - dirty_tx > 0) { + int entry = dirty_tx % TX_RING_SIZE; + if (inl(ioaddr + DownListPtr) == + vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)) + break; /* It still hasn't been processed. */ + + if (vp->tx_skbuff[entry]) { + struct rtskb *skb = vp->tx_skbuff[entry]; +#if DO_ZEROCOPY + int i; + for (i=0; i<=skb_shinfo(skb)->nr_frags; i++) + pci_unmap_single(vp->pdev, + le32_to_cpu(vp->tx_ring[entry].frag[i].addr), + le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, + PCI_DMA_TODEVICE); +#else + pci_unmap_single(vp->pdev, + le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); +#endif + dev_kfree_rtskb(skb); + vp->tx_skbuff[entry] = 0; + } else { + rtdm_printk(KERN_DEBUG "boomerang_interrupt: no skb!\n"); + } + /* vp->stats.tx_packets++; Counted below. */ + dirty_tx++; + } + vp->dirty_tx = dirty_tx; + if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) { + if (vortex_debug > 6) + rtdm_printk(KERN_DEBUG "boomerang_interrupt: wake queue\n"); + rtnetif_wake_queue (rtdev); + } + } + + /* Check for all uncommon interrupts at once. */ + if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) + vortex_error(rtdev, status, &time_stamp); + + if (--work_done < 0) { + rtdm_printk(KERN_WARNING "%s: Too much work in interrupt, status " + "%4.4x.\n", rtdev->name, status); + /* Disable all pending interrupts. 
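They are accumulated in vp->deferred and masked out of the status enable below; the standard timer then re-arms them from a safe context.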
*/ + do { + vp->deferred |= status; + outw(SetStatusEnb | (~vp->deferred & vp->status_enable), + ioaddr + EL3_CMD); + outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); + } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch); + /* The timer will reenable interrupts. */ + mod_timer(&vp->timer, jiffies + 1*HZ); + break; + } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ + writel(0x8000, vp->cb_fn_base + 4); + + } while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch); + + if (vortex_debug > 4) + rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n", + rtdev->name, status); + handler_exit: + rtdm_lock_put(&vp->lock); + if (packets > 0) + rt_mark_stack_mgr(rtdev); + + return RTDM_IRQ_HANDLED; +} + +static int vortex_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + int i; + short rx_status; + + if (vortex_debug > 5) + printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); + while ((rx_status = inw(ioaddr + RxStatus)) > 0) { + if (rx_status & 0x4000) { /* Error, update stats. */ + unsigned char rx_error = inb(ioaddr + RxErrors); + if (vortex_debug > 2) + printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); + vp->stats.rx_errors++; + if (rx_error & 0x01) vp->stats.rx_over_errors++; + if (rx_error & 0x02) vp->stats.rx_length_errors++; + if (rx_error & 0x04) vp->stats.rx_frame_errors++; + if (rx_error & 0x08) vp->stats.rx_crc_errors++; + if (rx_error & 0x10) vp->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + int pkt_len = rx_status & 0x1fff; + struct rtskb *skb; + + skb = rtnetdev_alloc_rtskb(rtdev, pkt_len + 5); + if (vortex_debug > 4) + printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + rtskb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + /* 'skb_put()' points to the start of sk_buff data area. */ + if (vp->bus_master && + ! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) { + dma_addr_t dma = pci_map_single(vp->pdev, + rtskb_put(skb, pkt_len), + pkt_len, PCI_DMA_FROMDEVICE); + outl(dma, ioaddr + Wn7_MasterAddr); + outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); + outw(StartDMAUp, ioaddr + EL3_CMD); + while (inw(ioaddr + Wn7_MasterStatus) & 0x8000) + ; + pci_unmap_single(vp->pdev, dma, pkt_len, PCI_DMA_FROMDEVICE); + } else { + insl(ioaddr + RX_FIFO, rtskb_put(skb, pkt_len), + (pkt_len + 3) >> 2); + } + outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ + skb->protocol = rt_eth_type_trans(skb, rtdev); + skb->time_stamp = *time_stamp; + rtnetif_rx(skb); + //rtdev->last_rx = jiffies; + vp->stats.rx_packets++; + (*packets)++; + + /* Wait a limited time to go to next packet. */ + for (i = 200; i >= 0; i--) + if ( ! 
(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + continue; + } else if (vortex_debug > 0) + printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of " + "size %d.\n", rtdev->name, pkt_len); + } + vp->stats.rx_dropped++; + issue_and_wait(rtdev, RxDiscard); + } + + return 0; +} + +static int +boomerang_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + int entry = vp->cur_rx % RX_RING_SIZE; + long ioaddr = rtdev->base_addr; + int rx_status; + int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; + + + if (vortex_debug > 5) + rtdm_printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", inw(ioaddr+EL3_STATUS)); + + while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){ + if (--rx_work_limit < 0) + break; + if (rx_status & RxDError) { /* Error, update stats. */ + unsigned char rx_error = rx_status >> 16; + if (vortex_debug > 2) + rtdm_printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); + vp->stats.rx_errors++; + if (rx_error & 0x01) vp->stats.rx_over_errors++; + if (rx_error & 0x02) vp->stats.rx_length_errors++; + if (rx_error & 0x04) vp->stats.rx_frame_errors++; + if (rx_error & 0x08) vp->stats.rx_crc_errors++; + if (rx_error & 0x10) vp->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + int pkt_len = rx_status & 0x1fff; + struct rtskb *skb; + dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); + + if (vortex_debug > 4) + rtdm_printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + + /* Check if the packet is long enough to just accept without + copying to a properly sized skbuff. */ + { +/*** RTnet ***/ + /* Pass up the skbuff already on the Rx ring. */ + skb = vp->rx_skbuff[entry]; + vp->rx_skbuff[entry] = NULL; + rtskb_put(skb, pkt_len); + pci_unmap_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + vp->rx_nocopy++; + } + skb->protocol = rt_eth_type_trans(skb, rtdev); + skb->time_stamp = *time_stamp; + { /* Use hardware checksum info. */ + int csum_bits = rx_status & 0xee000000; + if (csum_bits && + (csum_bits == (IPChksumValid | TCPChksumValid) || + csum_bits == (IPChksumValid | UDPChksumValid))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + vp->rx_csumhits++; + } + } + rtnetif_rx(skb); + //rtdev->last_rx = jiffies; + vp->stats.rx_packets++; + (*packets)++; + } + entry = (++vp->cur_rx) % RX_RING_SIZE; + } + /* Refill the Rx ring buffers. */ + for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { + struct rtskb *skb; + entry = vp->dirty_rx % RX_RING_SIZE; + if (vp->rx_skbuff[entry] == NULL) { + skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ); + if (skb == NULL) { + static unsigned long last_jif; + if ((jiffies - last_jif) > 10 * HZ) { + rtdm_printk(KERN_WARNING "%s: memory shortage\n", rtdev->name); + last_jif = jiffies; + } + if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) + { + // *** RTnet *** mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); + ; + } + break; /* Bad news! */ + } + rtskb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, + skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); + vp->rx_skbuff[entry] = skb; + } + vp->rx_ring[entry].status = 0; /* Clear complete bit. */ + outw(UpUnstall, ioaddr + EL3_CMD); + } + return 0; +} + +/* + * If we've hit a total OOM refilling the Rx ring we poll once a second + * for some memory. Otherwise there is no way to restart the rx process. 
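+ * Note that this RTnet port comments out the rx_oom_timer re-arm above, so recovery effectively waits for the rtskb pool to be refilled.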
+ */ +static void +vortex_down(struct rtnet_device *rtdev) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + + rtnetif_stop_queue (rtdev); + + del_timer_sync(&vp->rx_oom_timer); + del_timer_sync(&vp->timer); + + /* Turn off statistics ASAP. We update vp->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + if (rtdev->if_port == XCVR_10base2) + /* Turn off thinnet power. Green! */ + outw(StopCoax, ioaddr + EL3_CMD); + + outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); + + // *** RTnet *** update_stats(ioaddr, dev); + if (vp->full_bus_master_rx) + outl(0, ioaddr + UpListPtr); + if (vp->full_bus_master_tx) + outl(0, ioaddr + DownListPtr); + + if (vp->pdev && vp->enable_wol) { + pci_save_state(vp->pdev, vp->power_state); + acpi_set_WOL(rtdev); + } +} + +static int +vortex_close(struct rtnet_device *rtdev) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + int i; + + // rtnet_device is always present after vortex_open was called. + //if (netif_device_present(dev)) + // vortex_down(dev); + vortex_down(rtdev); + + if (vortex_debug > 1) { + printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n", + rtdev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus)); + printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d" + " tx_queued %d Rx pre-checksummed %d.\n", + rtdev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits); + } + +#if DO_ZEROCOPY + if ( vp->rx_csumhits && + ((vp->drv_flags & HAS_HWCKSM) == 0) && + (hw_checksums[vp->card_idx] == -1)) { + printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", rtdev->name); + printk(KERN_WARNING "Please see http://www.uow.edu.au/~andrewm/zerocopy.html\n"); + } +#endif + + // *** RTnet *** + if ( (i=rtdm_irq_free(&vp->irq_handle))<0 ) + return i; + + rt_stack_disconnect(rtdev); + + // *** RTnet *** + + if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ + for (i = 0; i < RX_RING_SIZE; i++) + if (vp->rx_skbuff[i]) { + pci_unmap_single( vp->pdev, le32_to_cpu(vp->rx_ring[i].addr), + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + dev_kfree_rtskb(vp->rx_skbuff[i]); + vp->rx_skbuff[i] = 0; + } + } + if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ + for (i = 0; i < TX_RING_SIZE; i++) { + if (vp->tx_skbuff[i]) { + struct rtskb *skb = vp->tx_skbuff[i]; +#if DO_ZEROCOPY + int k; + + for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) + pci_unmap_single(vp->pdev, + le32_to_cpu(vp->tx_ring[i].frag[k].addr), + le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, + PCI_DMA_TODEVICE); +#else + pci_unmap_single(vp->pdev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); +#endif + dev_kfree_rtskb(skb); + vp->tx_skbuff[i] = 0; + } + } + } + + return 0; +} + +static void +dump_tx_ring(struct rtnet_device *rtdev) +{ + if (vortex_debug > 0) { + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + + if (vp->full_bus_master_tx) { + int i; + int stalled = inl(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */ + + rtdm_printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n", + vp->full_bus_master_tx, + vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE, + vp->cur_tx, vp->cur_tx % TX_RING_SIZE); + rtdm_printk(KERN_ERR " Transmit list %8.8x vs. 
%p.\n", + inl(ioaddr + DownListPtr), + &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); + issue_and_wait(rtdev, DownStall); + for (i = 0; i < TX_RING_SIZE; i++) { + rtdm_printk(KERN_ERR " %d: @%p length %8.8x status %8.8x\n", i, + &vp->tx_ring[i], +#if DO_ZEROCOPY + le32_to_cpu(vp->tx_ring[i].frag[0].length), +#else + le32_to_cpu(vp->tx_ring[i].length), +#endif + le32_to_cpu(vp->tx_ring[i].status)); + } + if (!stalled) + outw(DownUnstall, ioaddr + EL3_CMD); + } + } +} + +static struct net_device_stats *vortex_get_stats(struct rtnet_device *rtdev) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + rtdm_lockctx_t flags; + + if (rtnetif_device_present(rtdev)) { /* AKPM: Used to be netif_running */ + rtdm_lock_get_irqsave (&vp->lock, flags); + update_stats(rtdev->base_addr, rtdev); + rtdm_lock_put_irqrestore (&vp->lock, flags); + } + return &vp->stats; +} + +/* Update statistics. + Unlike with the EL3 we need not worry about interrupts changing + the window setting from underneath us, but we must still guard + against a race condition with a StatsUpdate interrupt updating the + table. This is done by checking that the ASM (!) code generated uses + atomic updates with '+='. +*/ +static void update_stats(long ioaddr, struct rtnet_device *rtdev) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + int old_window = inw(ioaddr + EL3_CMD); + + if (old_window == 0xffff) /* Chip suspended or ejected. */ + return; + /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ + /* Switch to the stats window, and read everything. */ + EL3WINDOW(6); + vp->stats.tx_carrier_errors += inb(ioaddr + 0); + vp->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. */ inb(ioaddr + 2); + vp->stats.collisions += inb(ioaddr + 3); + vp->stats.tx_window_errors += inb(ioaddr + 4); + vp->stats.rx_fifo_errors += inb(ioaddr + 5); + vp->stats.tx_packets += inb(ioaddr + 6); + vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4; + /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */ + /* Tx deferrals */ inb(ioaddr + 8); + /* Don't bother with register 9, an extension of registers 6&7. + If we do use the 6&7 values the atomic update assumption above + is invalid. */ + vp->stats.rx_bytes += inw(ioaddr + 10); + vp->stats.tx_bytes += inw(ioaddr + 12); + /* New: On the Vortex we must also clear the BadSSD counter. */ + EL3WINDOW(4); + inb(ioaddr + 12); + + { + u8 up = inb(ioaddr + 13); + vp->stats.rx_bytes += (up & 0x0f) << 16; + vp->stats.tx_bytes += (up & 0xf0) << 12; + } + + EL3WINDOW(old_window >> 13); + return; +} + +/* Pre-Cyclone chips have no documented multicast filter, so the only + multicast setting is to receive all multicast frames. At least + the chip has a very clean way to set the mode, unlike many others. */ +static void set_rx_mode(struct rtnet_device *rtdev) +{ + long ioaddr = rtdev->base_addr; + int new_mode; + + if (rtdev->flags & IFF_PROMISC) { + if (vortex_debug > 0) + printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", rtdev->name); + new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; + } else if (rtdev->flags & IFF_ALLMULTI) { + new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; + } else + new_mode = SetRxFilter | RxStation | RxBroadcast; + + outw(new_mode, ioaddr + EL3_CMD); +} + +/* MII transceiver control section. + Read and write the MII registers using software-generated serial + MDIO protocol. See the MII specifications or DP83840A data sheet + for details. 
*/ + +/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually + met by back-to-back PCI I/O cycles, but we insert a delay to avoid + "overclocking" issues. */ +#define mdio_delay() inl(mdio_addr) + +#define MDIO_SHIFT_CLK 0x01 +#define MDIO_DIR_WRITE 0x04 +#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE) +#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE) +#define MDIO_DATA_READ 0x02 +#define MDIO_ENB_IN 0x00 + +/* Generate the preamble required for initial synchronization and + a few older transceivers. */ +static void mdio_sync(long ioaddr, int bits) +{ + long mdio_addr = ioaddr + Wn4_PhysicalMgmt; + + /* Establish sync by sending at least 32 logic ones. */ + while (-- bits >= 0) { + outw(MDIO_DATA_WRITE1, mdio_addr); + mdio_delay(); + outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } +} + +static int mdio_read(struct rtnet_device *rtdev, int phy_id, int location) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + int i; + long ioaddr = rtdev->base_addr; + int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; + unsigned int retval = 0; + long mdio_addr = ioaddr + Wn4_PhysicalMgmt; + + spin_lock_bh(&vp->mdio_lock); + + if (mii_preamble_required) + mdio_sync(ioaddr, 32); + + /* Shift the read command bits out. */ + for (i = 14; i >= 0; i--) { + int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; + outw(dataval, mdio_addr); + mdio_delay(); + outw(dataval | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + /* Read the two transition, 16 data, and wire-idle bits. */ + for (i = 19; i > 0; i--) { + outw(MDIO_ENB_IN, mdio_addr); + mdio_delay(); + retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0); + outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + spin_unlock_bh(&vp->mdio_lock); + return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff; +} + +static void mdio_write(struct rtnet_device *rtdev, int phy_id, int location, int value) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; + long mdio_addr = ioaddr + Wn4_PhysicalMgmt; + int i; + + spin_lock_bh(&vp->mdio_lock); + + if (mii_preamble_required) + mdio_sync(ioaddr, 32); + + /* Shift the command bits out. */ + for (i = 31; i >= 0; i--) { + int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; + outw(dataval, mdio_addr); + mdio_delay(); + outw(dataval | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + /* Leave the interface idle. */ + for (i = 1; i >= 0; i--) { + outw(MDIO_ENB_IN, mdio_addr); + mdio_delay(); + outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + spin_unlock_bh(&vp->mdio_lock); + return; +} + +/* ACPI: Advanced Configuration and Power Interface. */ +/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */ +static void acpi_set_WOL(struct rtnet_device *rtdev) +{ + struct vortex_private *vp = (struct vortex_private *)rtdev->priv; + long ioaddr = rtdev->base_addr; + + /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */ + EL3WINDOW(7); + outw(2, ioaddr + 0x0c); + /* The RxFilter must accept the WOL frames. */ + outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); + outw(RxEnable, ioaddr + EL3_CMD); + + /* Change the power state to D3; RxEnable doesn't take effect. 
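Per the 1/2/4 encoding noted above, the value 2 written to window 7 selects Magic Packet wake-up, and the filter just programmed keeps accepting those frames while in D3.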
*/ + pci_enable_wake(vp->pdev, 0, 1); + pci_set_power_state(vp->pdev, 3); +} + + +static void vortex_remove_one (struct pci_dev *pdev) +{ + struct vortex_private *vp; + // *** RTnet *** + struct rtnet_device *rtdev = pci_get_drvdata (pdev); + + + + if (!rtdev) { + printk(KERN_ERR "vortex_remove_one called for EISA device!\n"); + BUG(); + } + + vp = rtdev->priv; + + /* AKPM: FIXME: we should have + * if (vp->cb_fn_base) iounmap(vp->cb_fn_base); + * here + */ + rt_unregister_rtnetdev(rtdev); + /* Should really use issue_and_wait() here */ + outw(TotalReset|0x14, rtdev->base_addr + EL3_CMD); + + if (vp->pdev && vp->enable_wol) { + pci_set_power_state(vp->pdev, 0); /* Go active */ + if (vp->pm_state_valid) + pci_restore_state(vp->pdev, vp->power_state); + } + + dma_free_coherent(&pdev->dev, + sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + vp->rx_ring, + vp->rx_ring_dma); + if (vp->must_free_region) + release_region(rtdev->base_addr, vp->io_size); + // *** RTnet *** + rtdev_free(rtdev); + // *** RTnet *** +} + + +static struct pci_driver vortex_driver = { + .name = "3c59x_rt", + .probe = vortex_init_one, + .remove = vortex_remove_one, + .id_table = vortex_pci_tbl, +#ifdef CONFIG_PM + .suspend = NULL, + .resume = NULL, +#endif +}; + + +static int vortex_have_pci; + + +static int __init vortex_init (void) +{ + int pci_rc; + + pci_rc = pci_register_driver(&vortex_driver); + + if (pci_rc == 0) + vortex_have_pci = 1; + + return (vortex_have_pci) ? 0 : -ENODEV; +} + + +static void __exit vortex_cleanup (void) +{ + if (vortex_have_pci) + pci_unregister_driver (&vortex_driver); +} + +module_init(vortex_init); +module_exit(vortex_cleanup); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig new file mode 100644 index 0000000..4620c94 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig @@ -0,0 +1,17 @@ +config XENO_DRIVERS_NET_EXP_DRIVERS + depends on XENO_DRIVERS_NET && PCI + bool "Experimental Drivers" + +if XENO_DRIVERS_NET_EXP_DRIVERS + +config XENO_DRIVERS_NET_DRV_3C59X + depends on PCI + tristate "3Com 59x" + +config XENO_DRIVERS_NET_DRV_E1000_NEW + depends on PCI + tristate "New Intel(R) PRO/1000 (Gigabit)" + +source "drivers/xenomai/net/drivers/experimental/rt2500/Kconfig" + +endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile new file mode 100644 index 0000000..eddd29d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile @@ -0,0 +1,9 @@ +ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include + +obj-$(CONFIG_XENO_DRIVERS_NET_DRV_RT2500) += rt2500/ + +obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000_NEW) += e1000/ + +obj-$(CONFIG_XENO_DRIVERS_NET_DRV_3C59X) += rt_3c59x.o + +rt_3c59x-y := 3c59x.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile new file mode 100644 index 0000000..be144c4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile @@ -0,0 +1,19 @@ +ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include + +obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000_NEW) += rt_e1000_new.o + +rt_e1000_new-y := \ + e1000_80003es2lan.o \ + e1000_82540.o \ + e1000_82541.o \ + e1000_82542.o \ + e1000_82543.o \ + e1000_82571.o \ + e1000_api.o \ +
e1000_ich8lan.o \ + e1000_mac.o \ + e1000_main.o \ + e1000_manage.o \ + e1000_nvm.o \ + e1000_param.o \ + e1000_phy.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h new file mode 100644 index 0000000..8b9830b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h @@ -0,0 +1,425 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include "kcompat.h" + +#include "e1000_api.h" + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#define E1000_DBG(args...) + +#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args) + +#define PFX "e1000: " +#define DPRINTK(nlevel, klevel, fmt, args...) 
\ + (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ + __FUNCTION__ , ## args)) + +#define E1000_MAX_INTR 10 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 80 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 + +#define E1000_MIN_RXD 80 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +#ifdef CONFIG_E1000_MQ +#define E1000_MAX_TX_QUEUES 4 +#endif + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Early Receive defines */ +#define E1000_ERT_2048 0x100 + +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#ifdef NETIF_F_HW_VLAN_TX +#define E1000_MNG_VLAN_NONE -1 +#endif +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1 + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct e1000_buffer { + struct rtskb *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; +}; + +struct e1000_rx_buffer { + struct rtskb *skb; + dma_addr_t dma; + struct page *page; +}; + +#ifdef CONFIG_E1000_MQ +struct e1000_queue_stats { + u64 packets; + u64 bytes; +}; +#endif + +struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; +struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; }; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + +#ifdef CONFIG_E1000_MQ + /* for tx ring cleanup - needed for multiqueue */ + spinlock_t tx_queue_lock; +#endif + rtdm_lock_t tx_lock; + u16 tdh; + u16 tdt; +#ifdef CONFIG_E1000_MQ + struct e1000_queue_stats tx_stats; +#endif + bool last_tx_tso; +}; + +struct e1000_rx_ring { + struct e1000_adapter *adapter; /* back link */ + /* pointer to 
the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; +#ifdef CONFIG_E1000_NAPI + struct napi_struct napi; +#endif + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_page; + struct e1000_ps_page_dma *ps_page_dma; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + int cpu; + + u16 rdh; + u16 rdt; +#ifdef CONFIG_E1000_MQ + struct e1000_queue_stats rx_stats; +#endif +}; + +#define E1000_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +#ifdef SIOCGMIIPHY +/* PHY register snapshot values */ +struct e1000_phy_regs { + u16 bmcr; /* basic mode control register */ + u16 bmsr; /* basic mode status register */ + u16 advertise; /* auto-negotiation advertisement */ + u16 lpa; /* link partner ability register */ + u16 expansion; /* auto-negotiation expansion reg */ + u16 ctrl1000; /* 1000BASE-T control register */ + u16 stat1000; /* 1000BASE-T status register */ + u16 estatus; /* extended status register */ +}; +#endif + +/* board specific private data structure */ + +struct e1000_adapter { +#ifdef NETIF_F_HW_VLAN_TX + struct vlan_group *vlgrp; + u16 mng_vlan_id; +#endif + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + rtdm_lock_t stats_lock; +#ifdef CONFIG_E1000_NAPI + spinlock_t tx_queue_lock; +#endif + atomic_t irq_sem; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + bool fc_autoneg; + +#ifdef ETHTOOL_PHYS_ID + struct timer_list blink_timer; + unsigned long led_status; +#endif + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ +#ifdef CONFIG_E1000_MQ + struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */ +#endif + unsigned int restart_queue; + unsigned long tx_queue_len; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotc; + u64 gotc_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + + /* RX */ +#ifdef CONFIG_E1000_NAPI + bool (*clean_rx) (struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +#else + bool (*clean_rx) (struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + nanosecs_abs_t *time_stamp); +#endif + void (*alloc_rx_buf) (struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + 
struct e1000_rx_ring *rx_ring; /* One per active queue */ +#ifdef CONFIG_E1000_NAPI + //struct napi_struct napi; +#endif + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + unsigned int rx_ps_pages; + u32 gorc; + u64 gorc_old; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + + + /* OS defined structs */ + struct rtnet_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + + rtdm_irq_t irq_handle; + char data_received; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + +#ifdef SIOCGMIIPHY + /* Snapshot of PHY registers */ + struct e1000_phy_regs phy_regs; +#endif + +#ifdef ETHTOOL_TEST + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; +#endif + + + int msg_enable; + /* to not mess up cache alignment, always add to the bottom */ + unsigned long state; + u32 eeprom_wol; + + u32 *config_space; + + /* hardware capability, feature, and workaround flags */ + unsigned int flags; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; +}; + +#define E1000_FLAG_HAS_SMBUS (1 << 0) +#define E1000_FLAG_HAS_MANC2H (1 << 1) +#define E1000_FLAG_HAS_MSI (1 << 2) +#define E1000_FLAG_MSI_ENABLED (1 << 3) +#define E1000_FLAG_HAS_INTR_MODERATION (1 << 4) +#define E1000_FLAG_RX_NEEDS_RESTART (1 << 5) +#define E1000_FLAG_BAD_TX_CARRIER_STATS_FD (1 << 6) +#define E1000_FLAG_INT_ASSERT_AUTO_MASK (1 << 7) +#define E1000_FLAG_QUAD_PORT_A (1 << 8) +#define E1000_FLAG_SMART_POWER_DOWN (1 << 9) +#ifdef NETIF_F_TSO +#define E1000_FLAG_HAS_TSO (1 << 10) +#ifdef NETIF_F_TSO6 +#define E1000_FLAG_HAS_TSO6 (1 << 11) +#endif +#define E1000_FLAG_TSO_FORCE (1 << 12) +#endif +#define E1000_FLAG_RX_RESTART_NOW (1 << 13) + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN +}; + +extern char e1000_driver_name[]; +extern const char e1000_driver_version[]; + +extern void e1000_power_up_phy(struct e1000_hw *hw); + +extern void e1000_set_ethtool_ops(struct net_device *netdev); +extern void e1000_check_options(struct e1000_adapter *adapter); + +extern int e1000_up(struct e1000_adapter *adapter); +extern void e1000_down(struct e1000_adapter *adapter); +extern void e1000_reinit_locked(struct e1000_adapter *adapter); +extern void e1000_reset(struct e1000_adapter *adapter); +extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx); +extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +extern void e1000_update_stats(struct e1000_adapter *adapter); +#ifdef ETHTOOL_OPS_COMPAT +extern int ethtool_ioctl(struct ifreq *ifr); +#endif + +#endif /* _E1000_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c new file mode 100644 index 0000000..2ef70d6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c @@ -0,0 +1,1401 @@ 
+/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_80003es2lan + */ + +#include "e1000_api.h" +#include "e1000_80003es2lan.h" + +static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw); +static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw); +static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw); +static void e1000_release_phy_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw); +static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw); +static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, + u16 *data); +static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, + u16 data); +static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw); +static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw); +static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); + +/* + * A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". 
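+ * For example, an index of 1 selects the 60 (entry 1) to 115 (entry 6) range from the table below.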
+ */ +static const u16 e1000_gg82563_cable_length_table[] = + { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; +#define GG82563_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_gg82563_cable_length_table) / \ + sizeof(e1000_gg82563_cable_length_table[0])) + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_80003es2lan"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } else { + func->power_up_phy = e1000_power_up_phy_copper; + func->power_down_phy = e1000_power_down_phy_copper_80003es2lan; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + func->acquire_phy = e1000_acquire_phy_80003es2lan; + func->check_polarity = e1000_check_polarity_m88; + func->check_reset_block = e1000_check_reset_block_generic; + func->commit_phy = e1000_phy_sw_reset_generic; + func->get_cfg_done = e1000_get_cfg_done_80003es2lan; + func->get_phy_info = e1000_get_phy_info_m88; + func->release_phy = e1000_release_phy_80003es2lan; + func->reset_phy = e1000_phy_hw_reset_generic; + func->set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + + func->force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan; + func->get_cable_length = e1000_get_cable_length_80003es2lan; + func->read_phy_reg = e1000_read_phy_reg_gg82563_80003es2lan; + func->write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_functions *func = &hw->func; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_80003es2lan"); + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. 
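+ * For instance, assuming the usual NVM_WORD_SIZE_BASE_SHIFT of 6, a field value of 2 yields 1 << 8 = 256 words; the cap of 14 below bounds the EEPROM at 1 << 14 = 16K words.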
+ */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + + /* Function Pointers */ + func->acquire_nvm = e1000_acquire_nvm_80003es2lan; + func->read_nvm = e1000_read_nvm_eerd; + func->release_nvm = e1000_release_nvm_80003es2lan; + func->update_nvm = e1000_update_nvm_checksum_generic; + func->valid_led_default = e1000_valid_led_default_generic; + func->validate_nvm = e1000_validate_nvm_checksum_generic; + func->write_nvm = e1000_write_nvm_80003es2lan; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_mac_params_80003es2lan"); + + /* Set media type */ + switch (hw->device_id) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = TRUE; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? TRUE : FALSE; + + /* Function pointers */ + + /* bus type/speed/width */ + func->get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + func->reset_hw = e1000_reset_hw_80003es2lan; + /* hw initialization */ + func->init_hw = e1000_init_hw_80003es2lan; + /* link setup */ + func->setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + func->setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_setup_copper_link_80003es2lan + : e1000_setup_fiber_serdes_link_generic; + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + func->check_for_link = e1000_check_for_copper_link_generic; + break; + case e1000_media_type_fiber: + func->check_for_link = e1000_check_for_fiber_link_generic; + break; + case e1000_media_type_internal_serdes: + func->check_for_link = e1000_check_for_serdes_link_generic; + break; + default: + ret_val = -E1000_ERR_CONFIG; + goto out; + break; + } + /* check management mode */ + func->check_mng_mode = e1000_check_mng_mode_generic; + /* multicast address update */ + func->update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + func->write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + func->clear_vfta = e1000_clear_vfta_generic; + /* setting MTA */ + func->mta_set = e1000_mta_set_generic; + /* read mac address */ + func->read_mac_addr = e1000_read_mac_addr_80003es2lan; + /* blink LED */ + func->blink_led = e1000_blink_led_generic; + /* setup LED */ + func->setup_led = e1000_setup_led_generic; + /* cleanup LED */ + func->cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + func->led_on = e1000_led_on_generic; + func->led_off = e1000_led_off_generic; + /* remove device */ + func->remove_device = e1000_remove_device_generic; + /* clear hardware counters */ + func->clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan; + /* link info */ + func->get_link_up_info = e1000_get_link_up_info_80003es2lan; + +out: + return ret_val; +} + +/** + * e1000_init_function_pointers_80003es2lan - Init ESB2 func ptrs. + * @hw: pointer to the HW structure + * + * The only function explicitly called by the api module to initialize + * all function pointers and parameters. + **/ +void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_80003es2lan"); + + hw->func.init_mac_params = e1000_init_mac_params_80003es2lan; + hw->func.init_nvm_params = e1000_init_nvm_params_80003es2lan; + hw->func.init_phy_params = e1000_init_phy_params_80003es2lan; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_acquire_phy_80003es2lan"); + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + mask |= E1000_SWFW_CSR_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + DEBUGFUNC("e1000_release_phy_80003es2lan"); + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + mask |= E1000_SWFW_CSR_SM; + + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. This is a function + * pointer entry point called by the api module. 
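+ * The SW/FW sync lock is taken first; if the generic EECD semaphore cannot then be obtained, the sync lock is released again before returning.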
+ **/ +static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_80003es2lan"); + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + ret_val = e1000_acquire_nvm_generic(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. This is a + * function pointer entry point called by the api module. + **/ +static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_80003es2lan"); + + e1000_release_nvm_generic(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; + + DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_generic(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_80003es2lan"); + + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS); + /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. This is a function pointer entry + * point called by the api module. 
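+ * Registers at or above GG82563_MIN_ALT_REG go through the alternate page-select register, and each page-select write is verified by a delayed read-back because the MDIC ready bit can assert early.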
+ **/ +static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + DEBUGFUNC("e1000_read_phy_reg_gg82563_80003es2lan"); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + goto out; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* + * Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + goto out; + } + + /* + * The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + usec_delay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + ret_val = -E1000_ERR_PHY; + e1000_release_phy_80003es2lan(hw); + goto out; + } + + usec_delay(200); + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + usec_delay(200); + e1000_release_phy_80003es2lan(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. This is a function pointer entry + * point called by the api module. + **/ +static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + DEBUGFUNC("e1000_write_phy_reg_gg82563_80003es2lan"); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + goto out; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* + * Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + goto out; + } + + + /* + * The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + usec_delay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + ret_val = -E1000_ERR_PHY; + e1000_release_phy_80003es2lan(hw); + goto out; + } + + usec_delay(200); + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + usec_delay(200); + e1000_release_phy_80003es2lan(hw); + +out: + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. This is a function + * pointer entry point called by the api module. 
+ **/ +static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + DEBUGFUNC("e1000_write_nvm_80003es2lan"); + + return e1000_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. + **/ +static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + s32 ret_val = E1000_SUCCESS; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_80003es2lan"); + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) { + DEBUGOUT("MNG configuration cycle has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_80003es2lan"); + + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + DEBUGOUT1("GG82563 PSCR: %X\n", phy_data); + + ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= MII_CR_RESET; + + ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + usec_delay(1); + + if (hw->phy.autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link " + "on GG82563 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + + if (!link) { + /* + * We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + goto out; + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* + * Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* + * In addition, we must re-enable CRS on Tx for both half and full + * duplex. 
+ */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. + **/ +static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + DEBUGFUNC("e1000_get_cable_length_80003es2lan"); + + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + goto out; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index+5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_80003es2lan"); + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, + speed, + duplex); + if (ret_val) + goto out; + if (*speed == SPEED_1000) + ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); + else + ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, + *duplex); + } else { + ret_val = e1000_get_speed_and_duplex_fiber_serdes_generic(hw, + speed, + duplex); + } + +out: + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl, icr; + s32 ret_val; + + DEBUGFUNC("e1000_reset_hw_80003es2lan"); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) { + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + } + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + goto out; + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + icr = E1000_READ_REG(hw, E1000_ICR); + + ret_val = e1000_check_alt_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. + * This is a function pointer entry point called by the api module. 
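+ *
+ * Rough bring-up order assumed here (sketch only; the exact sequence is
+ * driven by the api module):
+ *
+ *	e1000_init_function_pointers_80003es2lan(hw);
+ *	hw->func.init_mac_params(hw);
+ *	hw->func.reset_hw(hw);		(e1000_reset_hw_80003es2lan)
+ *	hw->func.init_hw(hw);		(this function)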
+ **/ +static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_init_hw_80003es2lan"); + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = e1000_id_led_init_generic(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + e1000_clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data); + + /* ...for both queues. */ + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data); + + /* Enable retransmit on late collisions */ + reg_data = E1000_READ_REG(hw, E1000_TCTL); + reg_data |= E1000_TCTL_RTLC; + E1000_WRITE_REG(hw, E1000_TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = E1000_READ_REG(hw, E1000_TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = E1000_READ_REG(hw, E1000_TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + E1000_WRITE_REG(hw, E1000_TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
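+ *
+ * Every bit below is applied with a read-modify-write so that unrelated
+ * bits in the same register are preserved, e.g. (pattern sketch):
+ *
+ *	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+ *	reg |= (1 << 22);
+ *	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);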
+ **/ +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_initialize_hw_bits_80003es2lan"); + + if (hw->mac.disable_hw_init_bits) + goto out; + + /* Transmit Descriptor Control 0 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = E1000_READ_REG(hw, E1000_TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->phy.media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = E1000_READ_REG(hw, E1000_TARC(1)); + if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + E1000_WRITE_REG(hw, E1000_TARC(1), reg); + +out: + return; +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl_ext; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan"); + + if (!phy->reset_disable) { + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, + &data); + if (ret_val) + goto out; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, + data); + if (ret_val) + goto out; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + goto out; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + goto out; + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_commit(hw); + if (ret_val) { + DEBUGOUT("Error Resetting the PHY\n"); + goto out; + } + + } + + /* Bypass Rx and Tx FIFO's */ + ret_val = e1000_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, + E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + if (ret_val) + goto out; + + ret_val = e1000_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, + &data); + if (ret_val) + goto out; + data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; + ret_val = e1000_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, + data); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + goto out; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = 
e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, data);
+	if (ret_val)
+		goto out;
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Do not init these registers when the HW is in IAMT mode, since the
+	 * firmware will have already initialized them.  We only initialize
+	 * them if the HW is not in IAMT mode.
+	 */
+	if (!(e1000_check_mng_mode(hw))) {
+		/* Enable Electrical Idle on the PHY */
+		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+		ret_val = e1000_write_phy_reg(hw,
+					      GG82563_PHY_PWR_MGMT_CTRL,
+					      data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw,
+					     GG82563_PHY_KMRN_MODE_CTRL,
+					     &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+		ret_val = e1000_write_phy_reg(hw,
+					      GG82563_PHY_KMRN_MODE_CTRL,
+					      data);
+
+		if (ret_val)
+			goto out;
+	}
+
+	/*
+	 * Workaround: Disable padding in Kumeran interface in the MAC
+	 * and in the PHY to avoid CRC errors.
+	 */
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	data |= GG82563_ICR_DIS_PADDING;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL, data);
+	if (ret_val)
+		goto out;
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	DEBUGFUNC("e1000_setup_copper_link_80003es2lan");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each
+	 * iteration and increase the max iterations when
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_read_kmrn_reg(hw,
+				      E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+				      &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+	ret_val = e1000_write_kmrn_reg(hw,
+				       E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+				       reg_data);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
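+ *
+ * The Kumeran mode control register is read twice per iteration until
+ * two consecutive reads agree, bounded by GG82563_MAX_KMRN_RETRY, since
+ * a single read may return unstable data; idiom sketch:
+ *
+ *	do {
+ *		read(GG82563_PHY_KMRN_MODE_CTRL, &a);
+ *		read(GG82563_PHY_KMRN_MODE_CTRL, &b);
+ *		i++;
+ *	} while ((a != b) && (i < GG82563_MAX_KMRN_RETRY));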
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u32 tipg;
+	u32 i = 0;
+	u16 reg_data, reg_data2;
+
+	DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+	ret_val = e1000_write_kmrn_reg(hw,
+				       E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+				       reg_data);
+	if (ret_val)
+		goto out;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = E1000_READ_REG(hw, E1000_TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+	do {
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+					     &reg_data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+					     &reg_data2);
+		if (ret_val)
+			goto out;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	if (duplex == HALF_DUPLEX)
+		reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+	else
+		reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 reg_data, reg_data2;
+	u32 tipg;
+	u32 i = 0;
+
+	DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+	ret_val = e1000_write_kmrn_reg(hw,
+				       E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+				       reg_data);
+	if (ret_val)
+		goto out;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = E1000_READ_REG(hw, E1000_TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+	do {
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+					     &reg_data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+					     &reg_data2);
+		if (ret_val)
+			goto out;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_mac_addr_80003es2lan");
+	if (e1000_check_alt_mac_addr_generic(hw))
+		ret_val = e1000_read_mac_addr_generic(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the hardware counters by reading the counter registers.
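+ *
+ * The statistics registers are clear-on-read, so a discarded read is
+ * enough to zero each one; "temp" is declared volatile only so the
+ * compiler cannot elide the otherwise-unused reads, e.g.:
+ *
+ *	volatile u32 temp;
+ *	temp = E1000_READ_REG(hw, E1000_PRC64);	(cleared by the read)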
+ **/ +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + volatile u32 temp; + + DEBUGFUNC("e1000_clear_hw_cntrs_80003es2lan"); + + e1000_clear_hw_cntrs_base_generic(hw); + + temp = E1000_READ_REG(hw, E1000_PRC64); + temp = E1000_READ_REG(hw, E1000_PRC127); + temp = E1000_READ_REG(hw, E1000_PRC255); + temp = E1000_READ_REG(hw, E1000_PRC511); + temp = E1000_READ_REG(hw, E1000_PRC1023); + temp = E1000_READ_REG(hw, E1000_PRC1522); + temp = E1000_READ_REG(hw, E1000_PTC64); + temp = E1000_READ_REG(hw, E1000_PTC127); + temp = E1000_READ_REG(hw, E1000_PTC255); + temp = E1000_READ_REG(hw, E1000_PTC511); + temp = E1000_READ_REG(hw, E1000_PTC1023); + temp = E1000_READ_REG(hw, E1000_PTC1522); + + temp = E1000_READ_REG(hw, E1000_ALGNERRC); + temp = E1000_READ_REG(hw, E1000_RXERRC); + temp = E1000_READ_REG(hw, E1000_TNCRS); + temp = E1000_READ_REG(hw, E1000_CEXTERR); + temp = E1000_READ_REG(hw, E1000_TSCTC); + temp = E1000_READ_REG(hw, E1000_TSCTFC); + + temp = E1000_READ_REG(hw, E1000_MGTPRC); + temp = E1000_READ_REG(hw, E1000_MGTPDC); + temp = E1000_READ_REG(hw, E1000_MGTPTC); + + temp = E1000_READ_REG(hw, E1000_IAC); + temp = E1000_READ_REG(hw, E1000_ICRXOC); + + temp = E1000_READ_REG(hw, E1000_ICRXPTC); + temp = E1000_READ_REG(hw, E1000_ICRXATC); + temp = E1000_READ_REG(hw, E1000_ICTXPTC); + temp = E1000_READ_REG(hw, E1000_ICTXATC); + temp = E1000_READ_REG(hw, E1000_ICTXQEC); + temp = E1000_READ_REG(hw, E1000_ICTXQMTC); + temp = E1000_READ_REG(hw, E1000_ICRXDMTC); +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h new file mode 100644 index 0000000..ec84d27 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h @@ -0,0 +1,95 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_80003ES2LAN_H_
+#define _E1000_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL	0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL	0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL	0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE	0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS	0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS	0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING	0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT	0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT	0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE		0x2000
+
+#define E1000_TCTL_EXT_GCEX_MASK	0x000FFC00 /* Gigabit Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN	0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN	0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN	0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE	0x0002 /* 1=Reversal Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK	0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI		0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX	0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO	0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG		0x2000
+						/* 1=Reverse Auto-Negotiation */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK		0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5		0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25		0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_2_5	0x0006
+#define GG82563_MSCR_TX_CLK_1000MBPS_25		0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX		0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+/*
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-100M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH		0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER		0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY			0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE	0x0001
+						/* 1=Enable SERDES Electrical Idle */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING			0x0010 /* Disable Padding */
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c
new file mode 100644
index 0000000..6e6e1f0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c
@@ -0,0 +1,680 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_82540 + * e1000_82545 + * e1000_82546 + * e1000_82545_rev_3 + * e1000_82546_rev_3 + */ + +#include "e1000_api.h" + +static s32 e1000_init_phy_params_82540(struct e1000_hw *hw); +static s32 e1000_init_nvm_params_82540(struct e1000_hw *hw); +static s32 e1000_init_mac_params_82540(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw); +static s32 e1000_init_hw_82540(struct e1000_hw *hw); +static s32 e1000_reset_hw_82540(struct e1000_hw *hw); +static s32 e1000_set_phy_mode_82540(struct e1000_hw *hw); +static s32 e1000_set_vco_speed_82540(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82540(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82540(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_82540 - Init PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_phy_params_82540(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 10000; + phy->type = e1000_phy_m88; + + /* Function Pointers */ + func->check_polarity = e1000_check_polarity_m88; + func->commit_phy = e1000_phy_sw_reset_generic; + func->force_speed_duplex = e1000_phy_force_speed_duplex_m88; + func->get_cable_length = e1000_get_cable_length_m88; + func->get_cfg_done = e1000_get_cfg_done_generic; + func->read_phy_reg = e1000_read_phy_reg_m88; + func->reset_phy = e1000_phy_hw_reset_generic; + func->write_phy_reg = e1000_write_phy_reg_m88; + func->get_phy_info = e1000_get_phy_info_m88; + func->power_up_phy = e1000_power_up_phy_copper; + func->power_down_phy = e1000_power_down_phy_copper_82540; + + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (phy->id == M88E1011_I_PHY_ID) + break; + fallthrough; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82540 - Init NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. 
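+ *
+ * Unless an override is set, the Microwire EEPROM geometry is decoded
+ * from the EECD size bit (sketch of the decode performed below):
+ *
+ *	eecd & E1000_EECD_SIZE ?  8 address bits, 256 words
+ *	                       :  6 address bits,  64 words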
+ **/ +static s32 e1000_init_nvm_params_82540(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_functions *func = &hw->func; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_init_nvm_params_82540"); + + nvm->type = e1000_nvm_eeprom_microwire; + nvm->delay_usec = 50; + nvm->opcode_bits = 3; + switch (nvm->override) { + case e1000_nvm_override_microwire_large: + nvm->address_bits = 8; + nvm->word_size = 256; + break; + case e1000_nvm_override_microwire_small: + nvm->address_bits = 6; + nvm->word_size = 64; + break; + default: + nvm->address_bits = eecd & E1000_EECD_SIZE ? 8 : 6; + nvm->word_size = eecd & E1000_EECD_SIZE ? 256 : 64; + break; + } + + /* Function Pointers */ + func->acquire_nvm = e1000_acquire_nvm_generic; + func->read_nvm = e1000_read_nvm_microwire; + func->release_nvm = e1000_release_nvm_generic; + func->update_nvm = e1000_update_nvm_checksum_generic; + func->valid_led_default = e1000_valid_led_default_generic; + func->validate_nvm = e1000_validate_nvm_checksum_generic; + func->write_nvm = e1000_write_nvm_microwire; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82540 - Init MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_mac_params_82540(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_mac_params_82540"); + + /* Set media type */ + switch (hw->device_id) { + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + + /* Function pointers */ + + /* bus type/speed/width */ + func->get_bus_info = e1000_get_bus_info_pci_generic; + /* reset */ + func->reset_hw = e1000_reset_hw_82540; + /* hw initialization */ + func->init_hw = e1000_init_hw_82540; + /* link setup */ + func->setup_link = e1000_setup_link_generic; + /* physical interface setup */ + func->setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_setup_copper_link_82540 + : e1000_setup_fiber_serdes_link_82540; + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + func->check_for_link = e1000_check_for_copper_link_generic; + break; + case e1000_media_type_fiber: + func->check_for_link = e1000_check_for_fiber_link_generic; + break; + case e1000_media_type_internal_serdes: + func->check_for_link = e1000_check_for_serdes_link_generic; + break; + default: + ret_val = -E1000_ERR_CONFIG; + goto out; + break; + } + /* link info */ + func->get_link_up_info = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_get_speed_and_duplex_copper_generic + : e1000_get_speed_and_duplex_fiber_serdes_generic; + /* multicast address update */ + func->update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + func->write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + func->clear_vfta = e1000_clear_vfta_generic; + /* setting MTA */ + func->mta_set = e1000_mta_set_generic; + /* setup LED */ + func->setup_led = e1000_setup_led_generic; + /* cleanup LED */ + func->cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + func->led_on = e1000_led_on_generic; + func->led_off = e1000_led_off_generic; + /* clear hardware counters */ + func->clear_hw_cntrs = e1000_clear_hw_cntrs_82540; + +out: + return ret_val; +} + +/** + * e1000_init_function_pointers_82540 - Init func ptrs. + * @hw: pointer to the HW structure + * + * The only function explicitly called by the api module to initialize + * all function pointers and parameters. + **/ +void e1000_init_function_pointers_82540(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82540"); + + hw->func.init_mac_params = e1000_init_mac_params_82540; + hw->func.init_nvm_params = e1000_init_nvm_params_82540; + hw->func.init_phy_params = e1000_init_phy_params_82540; +} + +/** + * e1000_reset_hw_82540 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_reset_hw_82540(struct e1000_hw *hw) +{ + u32 ctrl, icr, manc; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_reset_hw_82540"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* + * Delay to allow any outstanding PCI transactions to complete + * before resetting the device. + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to 82540/82545/82546 MAC\n"); + switch (hw->mac.type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + E1000_WRITE_REG(hw, E1000_CTRL_DUP, ctrl | E1000_CTRL_RST); + break; + default: + /* + * These controllers can't ack the 64-bit write when + * issuing the reset, so we use IO-mapping as a + * workaround to issue the reset. + */ + E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + break; + } + + /* Wait for EEPROM reload */ + msec_delay(5); + + /* Disable HW ARPs on ASF enabled adapters */ + manc = E1000_READ_REG(hw, E1000_MANC); + manc &= ~E1000_MANC_ARP_EN; + E1000_WRITE_REG(hw, E1000_MANC, manc); + + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + icr = E1000_READ_REG(hw, E1000_ICR); + + return ret_val; +} + +/** + * e1000_init_hw_82540 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a + * function pointer entry point called by the api module. 
+ **/ +static s32 e1000_init_hw_82540(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txdctl, ctrl_ext; + s32 ret_val = E1000_SUCCESS; + u16 i; + + DEBUGFUNC("e1000_init_hw_82540"); + + /* Initialize identification LED */ + ret_val = e1000_id_led_init_generic(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + if (mac->type < e1000_82545_rev_3) + E1000_WRITE_REG(hw, E1000_VET, 0); + + e1000_clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + /* + * Avoid back to back register writes by adding the register + * read (flush). This is to protect against some strange + * bridge configurations that may issue Memory Write Block + * (MWB) to our register space. The *_rev_3 hardware at + * least doesn't respond correctly to every other dword in an + * MWB to our register space. + */ + E1000_WRITE_FLUSH(hw); + } + + if (mac->type < e1000_82545_rev_3) + e1000_pcix_mmrbc_workaround_generic(hw); + + /* Setup link and flow control */ + ret_val = e1000_setup_link(hw); + + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82540(hw); + + if ((hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER) || + (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)) { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* + * Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_setup_copper_link_82540 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). This is a function + * pointer entry point called by the api module. 
+ **/
+static s32 e1000_setup_copper_link_82540(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_setup_copper_link_82540");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	ret_val = e1000_set_phy_mode_82540(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.type == e1000_82545_rev_3 ||
+	    hw->mac.type == e1000_82546_rev_3) {
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &data);
+		if (ret_val)
+			goto out;
+		data |= 0x00000008;
+		ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, data);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_copper_link_setup_m88(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_82540 - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Set the output amplitude to the value in the EEPROM and adjust the VCO
+ * speed to improve Bit Error Rate (BER) performance.  Configures collision
+ * distance and flow control for fiber and serdes links.  Upon successful
+ * setup, poll for link.  This is a function pointer entry point called by
+ * the api module.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_fiber_serdes_link_82540");
+
+	switch (mac->type) {
+	case e1000_82545_rev_3:
+	case e1000_82546_rev_3:
+		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+			/*
+			 * If we're on serdes media, adjust the output
+			 * amplitude to value set in the EEPROM.
+			 */
+			ret_val = e1000_adjust_serdes_amplitude_82540(hw);
+			if (ret_val)
+				goto out;
+		}
+		/* Adjust VCO speed to improve BER performance */
+		ret_val = e1000_set_vco_speed_82540(hw);
+		if (ret_val)
+			goto out;
+		fallthrough;
+	default:
+		break;
+	}
+
+	ret_val = e1000_setup_fiber_serdes_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_adjust_serdes_amplitude_82540 - Adjust amplitude based on EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Adjust the SERDES output amplitude based on the EEPROM settings.
+ **/
+static s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 nvm_data;
+
+	DEBUGFUNC("e1000_adjust_serdes_amplitude_82540");
+
+	ret_val = e1000_read_nvm(hw, NVM_SERDES_AMPLITUDE, 1, &nvm_data);
+	if (ret_val)
+		goto out;
+
+	if (nvm_data != NVM_RESERVED_WORD) {
+		/* Adjust serdes output amplitude only. */
+		nvm_data &= NVM_SERDES_AMPLITUDE_MASK;
+		ret_val = e1000_write_phy_reg(hw,
+					      M88E1000_PHY_EXT_CTRL,
+					      nvm_data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_set_vco_speed_82540 - Set VCO speed for better performance
+ * @hw: pointer to the HW structure
+ *
+ * Set the VCO speed to improve Bit Error Rate (BER) performance.
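+ *
+ * The M88 PHY multiplexes its upper registers through a page-select
+ * register, so the sequence below saves the current page, touches
+ * register 30 on pages 5 and 4, and restores the page before returning;
+ * pattern sketch:
+ *
+ *	read(M88E1000_PHY_PAGE_SELECT, &default_page);
+ *	write(M88E1000_PHY_PAGE_SELECT, page);
+ *	(read-modify-write M88E1000_PHY_GEN_CONTROL)
+ *	write(M88E1000_PHY_PAGE_SELECT, default_page);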
+ **/ +static s32 e1000_set_vco_speed_82540(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 default_page = 0; + u16 phy_data; + + DEBUGFUNC("e1000_set_vco_speed_82540"); + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = e1000_read_phy_reg(hw, + M88E1000_PHY_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + default_page); + +out: + return ret_val; +} + +/** + * e1000_set_phy_mode_82540 - Set PHY to class A mode + * @hw: pointer to the HW structure + * + * Sets the PHY to class A mode and assumes the following operations will + * follow to enable the new class mode: + * 1. Do a PHY soft reset. + * 2. Restart auto-negotiation or force link. + **/ +static s32 e1000_set_phy_mode_82540(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 nvm_data; + + DEBUGFUNC("e1000_set_phy_mode_82540"); + + if (hw->mac.type != e1000_82545_rev_3) + goto out; + + ret_val = e1000_read_nvm(hw, NVM_PHY_CLASS_WORD, 1, &nvm_data); + if (ret_val) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + if ((nvm_data != NVM_RESERVED_WORD) && (nvm_data & NVM_PHY_CLASS_A)) { + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) { + ret_val = -E1000_ERR_PHY; + goto out; + } + ret_val = e1000_write_phy_reg(hw, + M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->reset_disable = FALSE; + } + +out: + return ret_val; +} + +/** + * e1000_power_down_phy_copper_82540 - Remove link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82540(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN)) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82540 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
+ **/ +static void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw) +{ + volatile u32 temp; + + DEBUGFUNC("e1000_clear_hw_cntrs_82540"); + + e1000_clear_hw_cntrs_base_generic(hw); + + temp = E1000_READ_REG(hw, E1000_PRC64); + temp = E1000_READ_REG(hw, E1000_PRC127); + temp = E1000_READ_REG(hw, E1000_PRC255); + temp = E1000_READ_REG(hw, E1000_PRC511); + temp = E1000_READ_REG(hw, E1000_PRC1023); + temp = E1000_READ_REG(hw, E1000_PRC1522); + temp = E1000_READ_REG(hw, E1000_PTC64); + temp = E1000_READ_REG(hw, E1000_PTC127); + temp = E1000_READ_REG(hw, E1000_PTC255); + temp = E1000_READ_REG(hw, E1000_PTC511); + temp = E1000_READ_REG(hw, E1000_PTC1023); + temp = E1000_READ_REG(hw, E1000_PTC1522); + + temp = E1000_READ_REG(hw, E1000_ALGNERRC); + temp = E1000_READ_REG(hw, E1000_RXERRC); + temp = E1000_READ_REG(hw, E1000_TNCRS); + temp = E1000_READ_REG(hw, E1000_CEXTERR); + temp = E1000_READ_REG(hw, E1000_TSCTC); + temp = E1000_READ_REG(hw, E1000_TSCTFC); + + temp = E1000_READ_REG(hw, E1000_MGTPRC); + temp = E1000_READ_REG(hw, E1000_MGTPDC); + temp = E1000_READ_REG(hw, E1000_MGTPTC); +} + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c new file mode 100644 index 0000000..a0d5c88 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c @@ -0,0 +1,1328 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_82541 + * e1000_82547 + * e1000_82541_rev_2 + * e1000_82547_rev_2 + */ + +#include "e1000_api.h" +#include "e1000_82541.h" + +static s32 e1000_init_phy_params_82541(struct e1000_hw *hw); +static s32 e1000_init_nvm_params_82541(struct e1000_hw *hw); +static s32 e1000_init_mac_params_82541(struct e1000_hw *hw); +static s32 e1000_reset_hw_82541(struct e1000_hw *hw); +static s32 e1000_init_hw_82541(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82541(struct e1000_hw *hw); +static s32 e1000_check_for_link_82541(struct e1000_hw *hw); +static s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw); +static s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, + bool active); +static s32 e1000_setup_led_82541(struct e1000_hw *hw); +static s32 e1000_cleanup_led_82541(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, + bool link_up); +static s32 e1000_phy_init_script_82541(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82541(struct e1000_hw *hw); + +static const u16 e1000_igp_cable_length_table[] = + { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_cable_length_table) / \ + sizeof(e1000_igp_cable_length_table[0])) + +struct e1000_dev_spec_82541 { + e1000_dsp_config dsp_config; + e1000_ffe_config ffe_config; + u16 spd_default; + bool phy_init_script; +}; + +/** + * e1000_init_phy_params_82541 - Init PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. 
+ **/ +static s32 e1000_init_phy_params_82541(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_82541"); + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 10000; + phy->type = e1000_phy_igp; + + /* Function Pointers */ + func->check_polarity = e1000_check_polarity_igp; + func->force_speed_duplex = e1000_phy_force_speed_duplex_igp; + func->get_cable_length = e1000_get_cable_length_igp_82541; + func->get_cfg_done = e1000_get_cfg_done_generic; + func->get_phy_info = e1000_get_phy_info_igp; + func->read_phy_reg = e1000_read_phy_reg_igp; + func->reset_phy = e1000_phy_hw_reset_82541; + func->set_d3_lplu_state = e1000_set_d3_lplu_state_82541; + func->write_phy_reg = e1000_write_phy_reg_igp; + func->power_up_phy = e1000_power_up_phy_copper; + func->power_down_phy = e1000_power_down_phy_copper_82541; + + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + + /* Verify phy id */ + if (phy->id != IGP01E1000_I_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82541 - Init NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_nvm_params_82541(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82541"); + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->type = e1000_nvm_eeprom_spi; + eecd |= E1000_EECD_ADDR_BITS; + break; + case e1000_nvm_override_spi_small: + nvm->type = e1000_nvm_eeprom_spi; + eecd &= ~E1000_EECD_ADDR_BITS; + break; + case e1000_nvm_override_microwire_large: + nvm->type = e1000_nvm_eeprom_microwire; + eecd |= E1000_EECD_SIZE; + break; + case e1000_nvm_override_microwire_small: + nvm->type = e1000_nvm_eeprom_microwire; + eecd &= ~E1000_EECD_SIZE; + break; + default: + nvm->type = eecd & E1000_EECD_TYPE + ? e1000_nvm_eeprom_spi + : e1000_nvm_eeprom_microwire; + break; + } + + if (nvm->type == e1000_nvm_eeprom_spi) { + nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) + ? 16 : 8; + nvm->delay_usec = 1; + nvm->opcode_bits = 8; + nvm->page_size = (eecd & E1000_EECD_ADDR_BITS) + ? 32 : 8; + + /* Function Pointers */ + func->acquire_nvm = e1000_acquire_nvm_generic; + func->read_nvm = e1000_read_nvm_spi; + func->release_nvm = e1000_release_nvm_generic; + func->update_nvm = e1000_update_nvm_checksum_generic; + func->valid_led_default = e1000_valid_led_default_generic; + func->validate_nvm = e1000_validate_nvm_checksum_generic; + func->write_nvm = e1000_write_nvm_spi; + + /* + * nvm->word_size must be discovered after the pointers + * are set so we can verify the size from the nvm image + * itself. Temporarily set it to a dummy value so the + * read will work. + */ + nvm->word_size = 64; + ret_val = e1000_read_nvm(hw, NVM_CFG, 1, &size); + if (ret_val) + goto out; + size = (size & NVM_SIZE_MASK) >> NVM_SIZE_SHIFT; + /* + * if size != 0, it can be added to a constant and become + * the left-shift value to set the word_size. Otherwise, + * word_size stays at 64. + */ + if (size) { + size += NVM_WORD_SIZE_BASE_SHIFT_82541; + nvm->word_size = 1 << size; + } + } else { + nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) + ? 
8 : 6; + nvm->delay_usec = 50; + nvm->opcode_bits = 3; + nvm->word_size = (eecd & E1000_EECD_ADDR_BITS) + ? 256 : 64; + + /* Function Pointers */ + func->acquire_nvm = e1000_acquire_nvm_generic; + func->read_nvm = e1000_read_nvm_microwire; + func->release_nvm = e1000_release_nvm_generic; + func->update_nvm = e1000_update_nvm_checksum_generic; + func->valid_led_default = e1000_valid_led_default_generic; + func->validate_nvm = e1000_validate_nvm_checksum_generic; + func->write_nvm = e1000_write_nvm_microwire; + } + +out: + return ret_val; +} + +/** + * e1000_init_mac_params_82541 - Init MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_mac_params_82541(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_functions *func = &hw->func; + s32 ret_val; + + DEBUGFUNC("e1000_init_mac_params_82541"); + + /* Set media type */ + hw->phy.media_type = e1000_media_type_copper; + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = TRUE; + + /* Function Pointers */ + + /* bus type/speed/width */ + func->get_bus_info = e1000_get_bus_info_pci_generic; + /* reset */ + func->reset_hw = e1000_reset_hw_82541; + /* hw initialization */ + func->init_hw = e1000_init_hw_82541; + /* link setup */ + func->setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + func->setup_physical_interface = e1000_setup_copper_link_82541; + /* check for link */ + func->check_for_link = e1000_check_for_link_82541; + /* link info */ + func->get_link_up_info = e1000_get_link_up_info_82541; + /* multicast address update */ + func->update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + func->write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + func->clear_vfta = e1000_clear_vfta_generic; + /* setting MTA */ + func->mta_set = e1000_mta_set_generic; + /* setup LED */ + func->setup_led = e1000_setup_led_82541; + /* cleanup LED */ + func->cleanup_led = e1000_cleanup_led_82541; + /* turn on/off LED */ + func->led_on = e1000_led_on_generic; + func->led_off = e1000_led_off_generic; + /* remove device */ + func->remove_device = e1000_remove_device_generic; + /* clear hardware counters */ + func->clear_hw_cntrs = e1000_clear_hw_cntrs_82541; + + hw->dev_spec_size = sizeof(struct e1000_dev_spec_82541); + + /* Device-specific structure allocation */ + ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size); + + return ret_val; +} + +/** + * e1000_init_function_pointers_82541 - Init func ptrs. + * @hw: pointer to the HW structure + * + * The only function explicitly called by the api module to initialize + * all function pointers and parameters. + **/ +void e1000_init_function_pointers_82541(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82541"); + + hw->func.init_mac_params = e1000_init_mac_params_82541; + hw->func.init_nvm_params = e1000_init_nvm_params_82541; + hw->func.init_phy_params = e1000_init_phy_params_82541; +} + +/** + * e1000_reset_hw_82541 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. 
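+ *
+ * The reset follows the shape shared by these parts: mask interrupts,
+ * stop Rx/Tx and flush, delay for pending PCI transactions, pulse
+ * E1000_CTRL_RST (via IO space where the 64-bit write cannot be acked),
+ * wait for the NVM reload, then clear any pending ICR events.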
+ **/ +static s32 e1000_reset_hw_82541(struct e1000_hw *hw) +{ + u32 ledctl, ctrl, icr, manc; + + DEBUGFUNC("e1000_reset_hw_82541"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* + * Delay to allow any outstanding PCI transactions to complete + * before resetting the device. + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Must reset the Phy before resetting the MAC */ + if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST)); + msec_delay(5); + } + + DEBUGOUT("Issuing a global reset to 82541/82547 MAC\n"); + switch (hw->mac.type) { + case e1000_82541: + case e1000_82541_rev_2: + /* + * These controllers can't ack the 64-bit write when + * issuing the reset, so we use IO-mapping as a + * workaround to issue the reset. + */ + E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + break; + default: + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + break; + } + + /* Wait for NVM reload */ + msec_delay(20); + + /* Disable HW ARPs on ASF enabled adapters */ + manc = E1000_READ_REG(hw, E1000_MANC); + manc &= ~E1000_MANC_ARP_EN; + E1000_WRITE_REG(hw, E1000_MANC, manc); + + if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { + e1000_phy_init_script_82541(hw); + + /* Configure activity LED after Phy reset */ + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + ledctl &= IGP_ACTIVITY_LED_MASK; + ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } + + /* Once again, mask the interrupts */ + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); + + /* Clear any pending interrupt events. */ + icr = E1000_READ_REG(hw, E1000_ICR); + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw_82541 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_init_hw_82541(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, txdctl; + s32 ret_val; + + DEBUGFUNC("e1000_init_hw_82541"); + + /* Initialize identification LED */ + ret_val = e1000_id_led_init_generic(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + e1000_clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + /* + * Avoid back to back register writes by adding the register + * read (flush). This is to protect against some strange + * bridge configurations that may issue Memory Write Block + * (MWB) to our register space. + */ + E1000_WRITE_FLUSH(hw); + } + + /* Setup link and flow control */ + ret_val = e1000_setup_link(hw); + + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); + + /* + * Clear all of the statistics registers (clear on read). 
It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82541(hw); + + return ret_val; +} + +/** + * e1000_get_link_up_info_82541 - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_get_link_up_info_82541"); + + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); + if (ret_val) + goto out; + + if (!phy->speed_downgraded) + goto out; + + /* + * IGP01 PHY may advertise full duplex operation after speed + * downgrade even if it is operating at half duplex. + * Here we set the duplex settings to match the duplex in the + * link partner's capabilities. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &data); + if (ret_val) + goto out; + + if (!(data & NWAY_ER_LP_NWAY_CAPS)) { + *duplex = HALF_DUPLEX; + } else { + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &data); + if (ret_val) + goto out; + + if (*speed == SPEED_100) { + if (!(data & NWAY_LPAR_100TX_FD_CAPS)) + *duplex = HALF_DUPLEX; + } else if (*speed == SPEED_10) { + if (!(data & NWAY_LPAR_10T_FD_CAPS)) + *duplex = HALF_DUPLEX; + } + } + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_82541 - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ledctl; + + DEBUGFUNC("e1000_phy_hw_reset_82541"); + + ret_val = e1000_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + e1000_phy_init_script_82541(hw); + + if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + ledctl &= IGP_ACTIVITY_LED_MASK; + ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } + +out: + return ret_val; +} + +/** + * e1000_setup_copper_link_82541 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link; once link is established, calls + * to configure collision distance and flow control are made. If link is + * not established, we return -E1000_ERR_PHY (-2). This is a function + * pointer entry point called by the api module.
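+ * + * As a roadmap for the code below: the MAC is first told to follow the + * PHY (SLU set, forced speed/duplex cleared), early 82541/82547 parts + * are forced to MDI with their DSP configuration disabled, the generic + * IGP copper setup and the activity-LED programming follow, and the + * link is finally brought up by e1000_setup_copper_link_generic().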
+ **/ +static s32 e1000_setup_copper_link_82541(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_dev_spec_82541 *dev_spec; + s32 ret_val; + u32 ctrl, ledctl; + + DEBUGFUNC("e1000_setup_copper_link_82541"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + hw->phy.reset_disable = FALSE; + + dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec; + + /* Earlier revs of the IGP phy require us to force MDI. */ + if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) { + dev_spec->dsp_config = e1000_dsp_config_disabled; + phy->mdix = 1; + } else { + dev_spec->dsp_config = e1000_dsp_config_enabled; + } + + ret_val = e1000_copper_link_setup_igp(hw); + if (ret_val) + goto out; + + if (hw->mac.autoneg) { + if (dev_spec->ffe_config == e1000_ffe_config_active) + dev_spec->ffe_config = e1000_ffe_config_enabled; + } + + /* Configure activity LED after Phy reset */ + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + ledctl &= IGP_ACTIVITY_LED_MASK; + ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + + ret_val = e1000_setup_copper_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_check_for_link_82541 - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by the api module. + **/ +static s32 e1000_check_for_link_82541(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_link_82541"); + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + ret_val = e1000_config_dsp_after_link_change_82541(hw, FALSE); + goto out; /* No link detected */ + } + + mac->get_link_status = FALSE; + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = e1000_config_dsp_after_link_change_82541(hw, TRUE); + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000_config_collision_dist_generic(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + } + +out: + return ret_val; +} + +/** + * e1000_config_dsp_after_link_change_82541 - Config DSP after link + * @hw: pointer to the HW structure + * @link_up: boolean flag for link up status + * + * Returns E1000_ERR_PHY when reading or writing the PHY fails, otherwise + * returns E1000_SUCCESS. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, + bool link_up) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_dev_spec_82541 *dev_spec; + s32 ret_val; + u32 idle_errs = 0; + u16 phy_data, phy_saved_data, speed, duplex, i; + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = + {IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D}; + + DEBUGFUNC("e1000_config_dsp_after_link_change_82541"); + + dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + goto out; + } + + if (speed != SPEED_1000) { + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = e1000_get_cable_length(hw); + if (ret_val) + goto out; + + if ((dev_spec->dsp_config == e1000_dsp_config_enabled) && + phy->min_cable_length >= 50) { + + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, + dsp_reg_array[i], + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, + dsp_reg_array[i], + phy_data); + if (ret_val) + goto out; + } + dev_spec->dsp_config = e1000_dsp_config_activated; + } + + if ((dev_spec->ffe_config != e1000_ffe_config_enabled) || + (phy->min_cable_length >= 50)) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + usec_delay(1000); + ret_val = e1000_read_phy_reg(hw, + PHY_1000T_STATUS, + &phy_data); + if (ret_val) + goto out; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + dev_spec->ffe_config = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + goto out; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } else { + if (dev_spec->dsp_config == e1000_dsp_config_activated) { + /* + * Save off the current value of register 0x2F5B + * to be restored at the end of the routines.
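+ * (Register 0x2F5B has no symbolic define in this driver; judging by + * the surrounding comments it gates the PHY transmitter. Writing + * 0x0003 below quiesces the transmitter while the DSP coefficients are + * rewritten, and restoring the saved value re-enables it.)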
+ */ + ret_val = e1000_read_phy_reg(hw, + 0x2F5B, + &phy_saved_data); + if (ret_val) + goto out; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + if (ret_val) + goto out; + + msec_delay_irq(20); + + ret_val = e1000_write_phy_reg(hw, + 0x0000, + IGP01E1000_IEEE_FORCE_GIG); + if (ret_val) + goto out; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, + dsp_reg_array[i], + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = e1000_write_phy_reg(hw, + dsp_reg_array[i], + phy_data); + if (ret_val) + goto out; + } + + ret_val = e1000_write_phy_reg(hw, + 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + goto out; + + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = e1000_write_phy_reg(hw, + 0x2F5B, + phy_saved_data); + if (ret_val) + goto out; + + dev_spec->dsp_config = e1000_dsp_config_enabled; + } + + if (dev_spec->ffe_config != e1000_ffe_config_active) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* + * Save off the current value of register 0x2F5B + * to be restored at the end of the routines. + */ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + if (ret_val) + goto out; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + if (ret_val) + goto out; + + msec_delay_irq(20); + + ret_val = e1000_write_phy_reg(hw, + 0x0000, + IGP01E1000_IEEE_FORCE_GIG); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg(hw, + 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + goto out; + + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + goto out; + + dev_spec->ffe_config = e1000_ffe_config_enabled; + } + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_igp_82541 - Determine cable length for igp PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain values, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. This is a function pointer entry point called by the + * api module.
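+ * + * A worked example with made-up register values: if the four AGC reads + * shift down to 20, 25, 60 and 30, their sum (135) is below + * IGP01E1000_PHY_CHANNEL_NUM * 50, so the minimum (20) is dropped and + * the rest averaged: (135 - 20) / 3 = 38. Entry 38 of the cable length + * table, widened by +/- IGP01E1000_AGC_RANGE, then gives the reported + * min/max lengths, and their midpoint the typical length.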
+ **/ +static s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 i, data; + u16 cur_agc_value, agc_value = 0; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = + {IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D}; + + DEBUGFUNC("e1000_get_cable_length_igp_82541"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &data); + if (ret_val) + goto out; + + cur_agc_value = data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Bounds checking */ + if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || + (cur_agc_value == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + agc_value += cur_agc_value; + + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * 50) { + agc_value -= min_agc_value; + /* Average the three remaining channels for the length. */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Average the channels for the length. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] > + IGP01E1000_AGC_RANGE) + ? (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) + : 0; + phy->max_cable_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_set_d3_lplu_state_82541 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by the + * api module. + **/ +static s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_82541"); + + switch (hw->mac.type) { + case e1000_82541_rev_2: + case e1000_82547_rev_2: + break; + default: + ret_val = e1000_set_d3_lplu_state_generic(hw, active); + goto out; + break; + } + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, data); + if (ret_val) + goto out; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * e1000_setup_led_82541 - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by the api module. + **/ +static s32 e1000_setup_led_82541(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82541 *dev_spec; + s32 ret_val; + + DEBUGFUNC("e1000_setup_led_82541"); + + dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec; + + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_GMII_FIFO, + &dev_spec->spd_default); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_GMII_FIFO, + (u16)(dev_spec->spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + +out: + return ret_val; +} + +/** + * e1000_cleanup_led_82541 - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. This is a function pointer + * entry point called by the api module. + **/ +static s32 e1000_cleanup_led_82541(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82541 *dev_spec; + s32 ret_val; + + DEBUGFUNC("e1000_cleanup_led_82541"); + + dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_GMII_FIFO, + dev_spec->spd_default); + if (ret_val) + goto out; + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + +out: + return ret_val; +} + +/** + * e1000_phy_init_script_82541 - Initialize GbE PHY + * @hw: pointer to the HW structure + * + * Initializes the IGP PHY. + **/ +static s32 e1000_phy_init_script_82541(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82541 *dev_spec; + u32 ret_val; + u16 phy_saved_data; + + DEBUGFUNC("e1000_phy_init_script_82541"); + + dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec; + + if (!dev_spec->phy_init_script) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* Delay after phy reset to enable NVM configuration to load */ + msec_delay(20); + + /* + * Save off the current value of register 0x2F5B to be restored at + * the end of this routine. 
+ */ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disable the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + msec_delay(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + + msec_delay(5); + + switch (hw->mac.type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + + msec_delay(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac.type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + +out: + return ret_val; +} + +/** + * e1000_init_script_state_82541 - Enable/Disable PHY init script + * @hw: pointer to the HW structure + * @state: boolean value used to enable/disable PHY init script + * + * Allows the driver to enable/disable the PHY init script, if the PHY is an + * IGP PHY. This is a function pointer entry point called by the api module. + **/ +void e1000_init_script_state_82541(struct e1000_hw *hw, bool state) +{ + struct e1000_dev_spec_82541 *dev_spec; + + DEBUGFUNC("e1000_init_script_state_82541"); + + if (hw->phy.type != e1000_phy_igp) { + DEBUGOUT("Initialization script not necessary.\n"); + goto out; + } + + dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec; + + if (!dev_spec) { + DEBUGOUT("dev_spec pointer is set to NULL.\n"); + goto out; + } + + dev_spec->phy_init_script = state; + +out: + return; +} + +/** + * e1000_power_down_phy_copper_82541 - Remove link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or when wake on LAN is not enabled, remove the link.
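+ * The PHY is left powered whenever the management interface is active + * (E1000_MANC_SMBUS_EN set), since manageability traffic still needs + * the copper link.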
+ **/ +static void e1000_power_down_phy_copper_82541(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN)) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82541 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw) +{ + volatile u32 temp; + + DEBUGFUNC("e1000_clear_hw_cntrs_82541"); + + e1000_clear_hw_cntrs_base_generic(hw); + + temp = E1000_READ_REG(hw, E1000_PRC64); + temp = E1000_READ_REG(hw, E1000_PRC127); + temp = E1000_READ_REG(hw, E1000_PRC255); + temp = E1000_READ_REG(hw, E1000_PRC511); + temp = E1000_READ_REG(hw, E1000_PRC1023); + temp = E1000_READ_REG(hw, E1000_PRC1522); + temp = E1000_READ_REG(hw, E1000_PTC64); + temp = E1000_READ_REG(hw, E1000_PTC127); + temp = E1000_READ_REG(hw, E1000_PTC255); + temp = E1000_READ_REG(hw, E1000_PTC511); + temp = E1000_READ_REG(hw, E1000_PTC1023); + temp = E1000_READ_REG(hw, E1000_PTC1522); + + temp = E1000_READ_REG(hw, E1000_ALGNERRC); + temp = E1000_READ_REG(hw, E1000_RXERRC); + temp = E1000_READ_REG(hw, E1000_TNCRS); + temp = E1000_READ_REG(hw, E1000_CEXTERR); + temp = E1000_READ_REG(hw, E1000_TSCTC); + temp = E1000_READ_REG(hw, E1000_TSCTFC); + + temp = E1000_READ_REG(hw, E1000_MGTPRC); + temp = E1000_READ_REG(hw, E1000_MGTPDC); + temp = E1000_READ_REG(hw, E1000_MGTPTC); +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h new file mode 100644 index 0000000..8588606 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h @@ -0,0 +1,84 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_82541_H_ +#define _E1000_82541_H_ + +#define NVM_WORD_SIZE_BASE_SHIFT_82541 (NVM_WORD_SIZE_BASE_SHIFT + 1) + +#define IGP01E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 +#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_DSP_RESET 0x1F33 + +#define IGP01E1000_PHY_DSP_FFE 0x1F35 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A + +#define IGP01E1000_IEEE_FORCE_GIG 0x0140 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 +#define IGP01E1000_AGC_RANGE 10 + +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c new file mode 100644 index 0000000..55fd6d6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c @@ -0,0 +1,543 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_82542 (rev 1 & 2) + */ + +#include "e1000_api.h" + +static s32 e1000_init_phy_params_82542(struct e1000_hw *hw); +static s32 e1000_init_nvm_params_82542(struct e1000_hw *hw); +static s32 e1000_init_mac_params_82542(struct e1000_hw *hw); +static s32 e1000_get_bus_info_82542(struct e1000_hw *hw); +static s32 e1000_reset_hw_82542(struct e1000_hw *hw); +static s32 e1000_init_hw_82542(struct e1000_hw *hw); +static s32 e1000_setup_link_82542(struct e1000_hw *hw); +static s32 e1000_led_on_82542(struct e1000_hw *hw); +static s32 e1000_led_off_82542(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw); + +struct e1000_dev_spec_82542 { + bool dma_fairness; +}; + +/** + * e1000_init_phy_params_82542 - Init PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_phy_params_82542(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_82542"); + + phy->type = e1000_phy_none; + + return ret_val; +} + +/** + * e1000_init_nvm_params_82542 - Init NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_nvm_params_82542(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_functions *func = &hw->func; + + DEBUGFUNC("e1000_init_nvm_params_82542"); + + nvm->address_bits = 6; + nvm->delay_usec = 50; + nvm->opcode_bits = 3; + nvm->type = e1000_nvm_eeprom_microwire; + nvm->word_size = 64; + + /* Function Pointers */ + func->read_nvm = e1000_read_nvm_microwire; + func->release_nvm = e1000_stop_nvm; + func->write_nvm = e1000_write_nvm_microwire; + func->update_nvm = e1000_update_nvm_checksum_generic; + func->validate_nvm = e1000_validate_nvm_checksum_generic; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82542 - Init MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. 
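+ * The 82542 is a fiber-only, plain-PCI part with no manageable PHY, so + * the media type is hard-coded to fiber below and the generic + * fiber/serdes helpers are used for link setup and link checking.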
+ **/ +static s32 e1000_init_mac_params_82542(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_mac_params_82542"); + + /* Set media type */ + hw->phy.media_type = e1000_media_type_fiber; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + + /* Function pointers */ + + /* bus type/speed/width */ + func->get_bus_info = e1000_get_bus_info_82542; + /* reset */ + func->reset_hw = e1000_reset_hw_82542; + /* hw initialization */ + func->init_hw = e1000_init_hw_82542; + /* link setup */ + func->setup_link = e1000_setup_link_82542; + /* phy/fiber/serdes setup */ + func->setup_physical_interface = e1000_setup_fiber_serdes_link_generic; + /* check for link */ + func->check_for_link = e1000_check_for_fiber_link_generic; + /* multicast address update */ + func->update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + func->write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + func->clear_vfta = e1000_clear_vfta_generic; + /* setting MTA */ + func->mta_set = e1000_mta_set_generic; + /* turn on/off LED */ + func->led_on = e1000_led_on_82542; + func->led_off = e1000_led_off_82542; + /* remove device */ + func->remove_device = e1000_remove_device_generic; + /* clear hardware counters */ + func->clear_hw_cntrs = e1000_clear_hw_cntrs_82542; + /* link info */ + func->get_link_up_info = e1000_get_speed_and_duplex_fiber_serdes_generic; + + hw->dev_spec_size = sizeof(struct e1000_dev_spec_82542); + + /* Device-specific structure allocation */ + ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size); + + return ret_val; +} + +/** + * e1000_init_function_pointers_82542 - Init func ptrs. + * @hw: pointer to the HW structure + * + * The only function explicitly called by the api module to initialize + * all function pointers and parameters. + **/ +void e1000_init_function_pointers_82542(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82542"); + + hw->func.init_mac_params = e1000_init_mac_params_82542; + hw->func.init_nvm_params = e1000_init_nvm_params_82542; + hw->func.init_phy_params = e1000_init_phy_params_82542; +} + +/** + * e1000_get_bus_info_82542 - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This obtains information about the HW bus to which the + * adapter is attached and stores it in the hw structure. This is a function + * pointer entry point called by the api module. + **/ +static s32 e1000_get_bus_info_82542(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_get_bus_info_82542"); + + hw->bus.type = e1000_bus_type_pci; + hw->bus.speed = e1000_bus_speed_unknown; + hw->bus.width = e1000_bus_width_unknown; + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_82542 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module.
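+ * + * 82542-specific details handled below: on revision 2 parts, + * memory-write-invalidate (MWI) is turned off for the duration of the + * reset and restored afterwards from the saved PCI command word, and + * the NVM is reloaded explicitly via e1000_reload_nvm() followed by a + * 2 ms settle delay.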
+ **/ +static s32 e1000_reset_hw_82542(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val = E1000_SUCCESS; + u32 ctrl, icr; + + DEBUGFUNC("e1000_reset_hw_82542"); + + if (hw->revision_id == E1000_REVISION_2) { + DEBUGOUT("Disabling MWI on 82542 rev 2\n"); + e1000_pci_clear_mwi(hw); + } + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + /* + * Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to 82542/82543 MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + e1000_reload_nvm(hw); + msec_delay(2); + + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + icr = E1000_READ_REG(hw, E1000_ICR); + + if (hw->revision_id == E1000_REVISION_2) { + if (bus->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return ret_val; +} + +/** + * e1000_init_hw_82542 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_init_hw_82542(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82542 *dev_spec; + s32 ret_val = E1000_SUCCESS; + u32 ctrl; + u16 i; + + DEBUGFUNC("e1000_init_hw_82542"); + + dev_spec = (struct e1000_dev_spec_82542 *)hw->dev_spec; + + /* Disabling VLAN filtering */ + E1000_WRITE_REG(hw, E1000_VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->revision_id == E1000_REVISION_2) { + DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(hw); + msec_delay(5); + } + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->revision_id == E1000_REVISION_2) { + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* + * Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. + */ + if (dev_spec->dma_fairness) { + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR); + } + + /* Setup link and flow control */ + ret_val = e1000_setup_link_82542(hw); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82542(hw); + + return ret_val; +} + +/** + * e1000_setup_link_82542 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. 
Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. This is a function + * pointer entry point called by the api module. + **/ +static s32 e1000_setup_link_82542(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_link_82542"); + + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + goto out; + + hw->fc.type &= ~e1000_fc_tx_pause; + + if (mac->report_tx_early == 1) + hw->fc.type &= ~e1000_fc_rx_pause; + + /* + * We want to save off the original Flow Control configuration just in + * case we get disconnected and then reconnected into a different hub + * or switch with different Flow Control capabilities. + */ + hw->fc.original_type = hw->fc.type; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type); + + /* Call the necessary subroutine to configure the link. */ + ret_val = func->setup_physical_interface(hw); + if (ret_val) + goto out; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing Flow Control address, type and timer regs\n"); + + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + ret_val = e1000_set_fc_watermarks_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_led_on_82542 - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by the api module. + **/ +static s32 e1000_led_on_82542(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_on_82542"); + + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_82542 - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by the api module. + **/ +static s32 e1000_led_off_82542(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_off_82542"); + + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_translate_register_82542 - Translate the proper register offset + * @reg: e1000 register to be read + * + * Registers in 82542 are located at different offsets than other adapters + * even though they function in the same manner. This function takes in + * the name of the register to read and returns the correct offset for + * 82542 silicon. + **/ +u32 e1000_translate_register_82542(u32 reg) +{ + /* + * Some of the 82542 registers are located at different + * offsets than they are in newer adapters. + * Despite the difference in location, the registers + * function in the same manner.
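+ * For example, per the table below an access to E1000_RDTR must be + * steered to offset 0x00108 on this part, and E1000_MTA to 0x00200.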
+ */ + switch (reg) { + case E1000_RA: + reg = 0x00040; + break; + case E1000_RDTR: + reg = 0x00108; + break; + case E1000_RDBAL(0): + reg = 0x00110; + break; + case E1000_RDBAH(0): + reg = 0x00114; + break; + case E1000_RDLEN(0): + reg = 0x00118; + break; + case E1000_RDH(0): + reg = 0x00120; + break; + case E1000_RDT(0): + reg = 0x00128; + break; + case E1000_RDBAL(1): + reg = 0x00138; + break; + case E1000_RDBAH(1): + reg = 0x0013C; + break; + case E1000_RDLEN(1): + reg = 0x00140; + break; + case E1000_RDH(1): + reg = 0x00148; + break; + case E1000_RDT(1): + reg = 0x00150; + break; + case E1000_FCRTH: + reg = 0x00160; + break; + case E1000_FCRTL: + reg = 0x00168; + break; + case E1000_MTA: + reg = 0x00200; + break; + case E1000_TDBAL(0): + reg = 0x00420; + break; + case E1000_TDBAH(0): + reg = 0x00424; + break; + case E1000_TDLEN(0): + reg = 0x00428; + break; + case E1000_TDH(0): + reg = 0x00430; + break; + case E1000_TDT(0): + reg = 0x00438; + break; + case E1000_TIDV: + reg = 0x00440; + break; + case E1000_VFTA: + reg = 0x00600; + break; + case E1000_TDFH: + reg = 0x08010; + break; + case E1000_TDFT: + reg = 0x08018; + break; + default: + break; + } + + return reg; +} + +/** + * e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw) +{ + volatile u32 temp; + + DEBUGFUNC("e1000_clear_hw_cntrs_82542"); + + e1000_clear_hw_cntrs_base_generic(hw); + + temp = E1000_READ_REG(hw, E1000_PRC64); + temp = E1000_READ_REG(hw, E1000_PRC127); + temp = E1000_READ_REG(hw, E1000_PRC255); + temp = E1000_READ_REG(hw, E1000_PRC511); + temp = E1000_READ_REG(hw, E1000_PRC1023); + temp = E1000_READ_REG(hw, E1000_PRC1522); + temp = E1000_READ_REG(hw, E1000_PTC64); + temp = E1000_READ_REG(hw, E1000_PTC127); + temp = E1000_READ_REG(hw, E1000_PTC255); + temp = E1000_READ_REG(hw, E1000_PTC511); + temp = E1000_READ_REG(hw, E1000_PTC1023); + temp = E1000_READ_REG(hw, E1000_PTC1522); +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c new file mode 100644 index 0000000..5ff9a58 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c @@ -0,0 +1,1654 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_82543 + * e1000_82544 + */ + +#include "e1000_api.h" +#include "e1000_82543.h" + +static s32 e1000_init_phy_params_82543(struct e1000_hw *hw); +static s32 e1000_init_nvm_params_82543(struct e1000_hw *hw); +static s32 e1000_init_mac_params_82543(struct e1000_hw *hw); +static s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, + u16 data); +static s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw); +static s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw); +static s32 e1000_reset_hw_82543(struct e1000_hw *hw); +static s32 e1000_init_hw_82543(struct e1000_hw *hw); +static s32 e1000_setup_link_82543(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82543(struct e1000_hw *hw); +static s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw); +static s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw); +static s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw); +static s32 e1000_led_on_82543(struct e1000_hw *hw); +static s32 e1000_led_off_82543(struct e1000_hw *hw); +static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, + u32 value); +static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value); +static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw); +static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw); +static void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl); +static s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw); +static void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl); +static u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw); +static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data, + u16 count); +static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw); +static void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state); + +struct e1000_dev_spec_82543 { + u32 tbi_compatibility; + bool dma_fairness; + bool init_phy_disabled; +}; + +/** + * e1000_init_phy_params_82543 - Init PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_phy_params_82543(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_82543"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } else { + func->power_up_phy = e1000_power_up_phy_copper; + func->power_down_phy = e1000_power_down_phy_copper; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 10000; + phy->type = e1000_phy_m88; + + /* Function Pointers */ + func->check_polarity = e1000_check_polarity_m88; + func->commit_phy = e1000_phy_sw_reset_generic; + func->force_speed_duplex = e1000_phy_force_speed_duplex_82543; + func->get_cable_length = e1000_get_cable_length_m88; + func->get_cfg_done = e1000_get_cfg_done_generic; + func->read_phy_reg = (hw->mac.type == e1000_82543) + ? e1000_read_phy_reg_82543 + : e1000_read_phy_reg_m88; + func->reset_phy = (hw->mac.type == e1000_82543) + ? e1000_phy_hw_reset_82543 + : e1000_phy_hw_reset_generic; + func->write_phy_reg = (hw->mac.type == e1000_82543) + ? 
e1000_write_phy_reg_82543 + : e1000_write_phy_reg_m88; + func->get_phy_info = e1000_get_phy_info_m88; + + /* + * The external PHY of the 82543 can be in a funky state. + * Resetting helps us read the PHY registers for acquiring + * the PHY ID. + */ + if (!e1000_init_phy_disabled_82543(hw)) { + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + DEBUGOUT("Resetting PHY during init failed.\n"); + goto out; + } + msec_delay(20); + } + + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82543: + if (phy->id != M88E1000_E_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + break; + case e1000_82544: + if (phy->id != M88E1000_I_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82543 - Init NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_nvm_params_82543(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_functions *func = &hw->func; + + DEBUGFUNC("e1000_init_nvm_params_82543"); + + nvm->type = e1000_nvm_eeprom_microwire; + nvm->word_size = 64; + nvm->delay_usec = 50; + nvm->address_bits = 6; + nvm->opcode_bits = 3; + + /* Function Pointers */ + func->read_nvm = e1000_read_nvm_microwire; + func->update_nvm = e1000_update_nvm_checksum_generic; + func->valid_led_default = e1000_valid_led_default_generic; + func->validate_nvm = e1000_validate_nvm_checksum_generic; + func->write_nvm = e1000_write_nvm_microwire; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82543 - Init MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_mac_params_82543(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_functions *func = &hw->func; + s32 ret_val; + + DEBUGFUNC("e1000_init_mac_params_82543"); + + /* Set media type */ + switch (hw->device_id) { + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82544EI_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + + /* Function pointers */ + + /* bus type/speed/width */ + func->get_bus_info = e1000_get_bus_info_pci_generic; + /* reset */ + func->reset_hw = e1000_reset_hw_82543; + /* hw initialization */ + func->init_hw = e1000_init_hw_82543; + /* link setup */ + func->setup_link = e1000_setup_link_82543; + /* physical interface setup */ + func->setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_setup_copper_link_82543 + : e1000_setup_fiber_link_82543; + /* check for link */ + func->check_for_link = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_check_for_copper_link_82543 + : e1000_check_for_fiber_link_82543; + /* link info */ + func->get_link_up_info = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_get_speed_and_duplex_copper_generic + : e1000_get_speed_and_duplex_fiber_serdes_generic; + /* multicast address update */ + func->update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + func->write_vfta = e1000_write_vfta_82543; + /* clearing VFTA */ + func->clear_vfta = e1000_clear_vfta_generic; + /* setting MTA */ + func->mta_set = e1000_mta_set_82543; + /* turn on/off LED */ + func->led_on = e1000_led_on_82543; + func->led_off = e1000_led_off_82543; + /* remove device */ + func->remove_device = e1000_remove_device_generic; + /* clear hardware counters */ + func->clear_hw_cntrs = e1000_clear_hw_cntrs_82543; + + hw->dev_spec_size = sizeof(struct e1000_dev_spec_82543); + + /* Device-specific structure allocation */ + ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size); + if (ret_val) + goto out; + + /* Set tbi compatibility */ + if ((hw->mac.type != e1000_82543) || + (hw->phy.media_type == e1000_media_type_fiber)) + e1000_set_tbi_compatibility_82543(hw, FALSE); + +out: + return ret_val; +} + +/** + * e1000_init_function_pointers_82543 - Init func ptrs. + * @hw: pointer to the HW structure + * + * The only function explicitly called by the api module to initialize + * all function pointers and parameters. + **/ +void e1000_init_function_pointers_82543(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82543"); + + hw->func.init_mac_params = e1000_init_mac_params_82543; + hw->func.init_nvm_params = e1000_init_nvm_params_82543; + hw->func.init_phy_params = e1000_init_phy_params_82543; +} + +/** + * e1000_tbi_compatibility_enabled_82543 - Returns TBI compat status + * @hw: pointer to the HW structure + * + * Returns the current status of 10-bit Interface (TBI) compatibility + * (enabled/disabled). + **/ +static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82543 *dev_spec; + bool state = FALSE; + + DEBUGFUNC("e1000_tbi_compatibility_enabled_82543"); + + if (hw->mac.type != e1000_82543) { + DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); + goto out; + } + + dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec; + + if (!dev_spec) { + DEBUGOUT("dev_spec pointer is set to NULL.\n"); + goto out; + } + + state = (dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED) + ? TRUE : FALSE; + +out: + return state; +} + +/** + * e1000_set_tbi_compatibility_82543 - Set TBI compatibility + * @hw: pointer to the HW structure + * @state: enable/disable TBI compatibility + * + * Enables or disables 10-bit Interface (TBI) compatibility. + **/ +void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, bool state) +{ + struct e1000_dev_spec_82543 *dev_spec; + + DEBUGFUNC("e1000_set_tbi_compatibility_82543"); + + if (hw->mac.type != e1000_82543) { + DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); + goto out; + } + + dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec; + + if (!dev_spec) { + DEBUGOUT("dev_spec pointer is set to NULL.\n"); + goto out; + } + + if (state) + dev_spec->tbi_compatibility |= TBI_COMPAT_ENABLED; + else + dev_spec->tbi_compatibility &= ~TBI_COMPAT_ENABLED; + +out: + return; +} + +/** + * e1000_tbi_sbp_enabled_82543 - Returns TBI SBP status + * @hw: pointer to the HW structure + * + * Returns the current status of 10-bit Interface (TBI) store bad packet (SBP) + * (enabled/disabled).
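+ * + * Both the compatibility workaround and SBP are tracked as bit flags + * (TBI_COMPAT_ENABLED, TBI_SBP_ENABLED) in dev_spec->tbi_compatibility; + * the set routine below only ever sets SBP while the compatibility + * workaround itself is enabled.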
+ **/
+bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw)
+{
+        struct e1000_dev_spec_82543 *dev_spec;
+        bool state = FALSE;
+
+        DEBUGFUNC("e1000_tbi_sbp_enabled_82543");
+
+        if (hw->mac.type != e1000_82543) {
+                DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+                goto out;
+        }
+
+        dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+        if (!dev_spec) {
+                DEBUGOUT("dev_spec pointer is set to NULL.\n");
+                goto out;
+        }
+
+        state = (dev_spec->tbi_compatibility & TBI_SBP_ENABLED)
+                ? TRUE : FALSE;
+
+out:
+        return state;
+}
+
+/**
+ * e1000_set_tbi_sbp_82543 - Set TBI SBP
+ * @hw: pointer to the HW structure
+ * @state: enable/disable TBI store bad packet
+ *
+ * Enables or disables 10-bit Interface (TBI) store bad packet (SBP).
+ **/
+static void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state)
+{
+        struct e1000_dev_spec_82543 *dev_spec;
+
+        DEBUGFUNC("e1000_set_tbi_sbp_82543");
+
+        dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+        if (state && e1000_tbi_compatibility_enabled_82543(hw))
+                dev_spec->tbi_compatibility |= TBI_SBP_ENABLED;
+        else
+                dev_spec->tbi_compatibility &= ~TBI_SBP_ENABLED;
+
+        return;
+}
+
+/**
+ * e1000_init_phy_disabled_82543 - Returns init PHY status
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current status of whether PHY initialization is disabled.
+ * True if PHY initialization is disabled, else false.
+ **/
+static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw)
+{
+        struct e1000_dev_spec_82543 *dev_spec;
+        bool ret_val;
+
+        DEBUGFUNC("e1000_init_phy_disabled_82543");
+
+        if (hw->mac.type != e1000_82543) {
+                ret_val = FALSE;
+                goto out;
+        }
+
+        dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+        if (!dev_spec) {
+                DEBUGOUT("dev_spec pointer is set to NULL.\n");
+                ret_val = FALSE;
+                goto out;
+        }
+
+        ret_val = dev_spec->init_phy_disabled;
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled
+ * @hw: pointer to the HW structure
+ * @stats: Struct containing statistic register values
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
+ * @max_frame_size: The maximum frame size
+ *
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ **/
+void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
+                                  struct e1000_hw_stats *stats, u32 frame_len,
+                                  u8 *mac_addr, u32 max_frame_size)
+{
+        if (!(e1000_tbi_sbp_enabled_82543(hw)))
+                goto out;
+
+        /* First adjust the frame length. */
+        frame_len--;
+        /*
+         * We need to adjust the statistics counters, since the hardware
+         * counters overcount this packet as a CRC error and undercount
+         * the packet as a good packet
+         */
+        /* This packet should not be counted as a CRC error. */
+        stats->crcerrs--;
+        /* This packet does count as a Good Packet Received. */
+        stats->gprc++;
+
+        /* Adjust the Good Octets received counters */
+        stats->gorc += frame_len;
+
+        /*
+         * Is this a broadcast or multicast? Check broadcast first,
+         * since the test for a multicast frame will test positive on
+         * a broadcast frame.
+         */
+        if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff))
+                /* Broadcast packet */
+                stats->bprc++;
+        else if (*mac_addr & 0x01)
+                /* Multicast packet */
+                stats->mprc++;
+
+        /*
+         * In this case, the hardware has overcounted the number of
+         * oversize frames.
+         */
+        if ((frame_len == max_frame_size) && (stats->roc > 0))
+                stats->roc--;
+
+        /*
+         * Adjust the bin counters when the extra byte put the frame in the
+         * wrong bin.
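+         * For illustration (hypothetical numbers, not from the spec): a
+         * frame the hardware binned as 65 bytes is really 64 bytes once
+         * the phantom byte is dropped, so prc64 is incremented and prc127
+         * decremented below.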
+         * Remember that the frame_len was adjusted above.
+         */
+        if (frame_len == 64) {
+                stats->prc64++;
+                stats->prc127--;
+        } else if (frame_len == 127) {
+                stats->prc127++;
+                stats->prc255--;
+        } else if (frame_len == 255) {
+                stats->prc255++;
+                stats->prc511--;
+        } else if (frame_len == 511) {
+                stats->prc511++;
+                stats->prc1023--;
+        } else if (frame_len == 1023) {
+                stats->prc1023++;
+                stats->prc1522--;
+        } else if (frame_len == 1522) {
+                stats->prc1522++;
+        }
+
+out:
+        return;
+}
+
+/**
+ * e1000_read_phy_reg_82543 - Read PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY at offset and stores the information read to data.
+ **/
+static s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+        u32 mdic;
+        s32 ret_val = E1000_SUCCESS;
+
+        DEBUGFUNC("e1000_read_phy_reg_82543");
+
+        if (offset > MAX_PHY_REG_ADDRESS) {
+                DEBUGOUT1("PHY Address %d is out of range\n", offset);
+                ret_val = -E1000_ERR_PARAM;
+                goto out;
+        }
+
+        /*
+         * We must first send a preamble through the MDIO pin to signal the
+         * beginning of an MII instruction. This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /*
+         * Now combine the next few fields that are required for a read
+         * operation. We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine five different times. The format
+         * of an MII read instruction consists of a shift out of 14 bits and
+         * is defined as follows:
+         *         <Preamble><SOF><Op Code><Phy Addr><Offset>
+         * followed by a shift in of 18 bits. The first two bits shifted in
+         * are TurnAround bits used to avoid contention on the MDIO pin when a
+         * READ operation is performed. These two bits are thrown away,
+         * followed by a shift in of 16 bits which contains the desired data.
+         */
+        mdic = (offset | (hw->phy.addr << 5) |
+                (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+        e1000_shift_out_mdi_bits_82543(hw, mdic, 14);
+
+        /*
+         * Now that we've shifted out the read command to the MII, we need to
+         * "shift in" the 16-bit value (18 total bits) of the requested PHY
+         * register.
+         */
+        *data = e1000_shift_in_mdi_bits_82543(hw);
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_82543 - Write PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be written
+ * @data: data to be written at offset
+ *
+ * Writes data to the PHY at offset.
+ **/
+static s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data)
+{
+        u32 mdic;
+        s32 ret_val = E1000_SUCCESS;
+
+        DEBUGFUNC("e1000_write_phy_reg_82543");
+
+        if (offset > MAX_PHY_REG_ADDRESS) {
+                DEBUGOUT1("PHY Address %d is out of range\n", offset);
+                ret_val = -E1000_ERR_PARAM;
+                goto out;
+        }
+
+        /*
+         * We'll need to use the SW defined pins to shift the write command
+         * out to the PHY. We first send a preamble to the PHY to signal the
+         * beginning of the MII instruction. This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /*
+         * Now combine the remaining required fields that will indicate a
+         * write operation. We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine for each field in the command. The
+         * format of an MII write instruction is as follows:
+         *         <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
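+         * For illustration only (hypothetical register and value): writing
+         * 0x1234 to offset 0 of the PHY at address 1 gives an upper command
+         * word of (PHY_SOF << 14) | (PHY_OP_WRITE << 12) | (1 << 7) |
+         * (0 << 2) | PHY_TURNAROUND = 0x5082, so the full 32-bit frame
+         * shifted out below is 0x50821234.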
+         */
+        mdic = ((PHY_TURNAROUND) | (offset << 2) | (hw->phy.addr << 7) |
+                (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+        mdic <<= 16;
+        mdic |= (u32) data;
+
+        e1000_shift_out_mdi_bits_82543(hw, mdic, 32);
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_raise_mdi_clk_82543 - Raise Management Data Input clock
+ * @hw: pointer to the HW structure
+ * @ctrl: pointer to the control register
+ *
+ * Raise the management data input clock by setting the MDC bit in the control
+ * register.
+ **/
+static void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+        /*
+         * Raise the clock input to the Management Data Clock (by setting the
+         * MDC bit), and then delay a sufficient amount of time.
+         */
+        E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl | E1000_CTRL_MDC));
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(10);
+}
+
+/**
+ * e1000_lower_mdi_clk_82543 - Lower Management Data Input clock
+ * @hw: pointer to the HW structure
+ * @ctrl: pointer to the control register
+ *
+ * Lower the management data input clock by clearing the MDC bit in the
+ * control register.
+ **/
+static void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+        /*
+         * Lower the clock input to the Management Data Clock (by clearing the
+         * MDC bit), and then delay a sufficient amount of time.
+         */
+        E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl & ~E1000_CTRL_MDC));
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(10);
+}
+
+/**
+ * e1000_shift_out_mdi_bits_82543 - Shift data bits out to the PHY
+ * @hw: pointer to the HW structure
+ * @data: data to send to the PHY
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the PHY. So, the value in the
+ * "data" parameter will be shifted out to the PHY one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+                                           u16 count)
+{
+        u32 ctrl, mask;
+
+        /*
+         * We need to shift "count" number of bits out to the PHY. So, the
+         * value in the "data" parameter will be shifted out to the PHY one
+         * bit at a time. In order to do this, "data" must be broken down
+         * into bits.
+         */
+        mask = 0x01;
+        mask <<= (count - 1);
+
+        ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+        /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+        ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+        while (mask) {
+                /*
+                 * A "1" is shifted out to the PHY by setting the MDIO bit to
+                 * "1" and then raising and lowering the Management Data Clock.
+                 * A "0" is shifted out to the PHY by setting the MDIO bit to
+                 * "0" and then raising and lowering the clock.
+                 */
+                if (data & mask)
+                        ctrl |= E1000_CTRL_MDIO;
+                else
+                        ctrl &= ~E1000_CTRL_MDIO;
+
+                E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+                E1000_WRITE_FLUSH(hw);
+
+                usec_delay(10);
+
+                e1000_raise_mdi_clk_82543(hw, &ctrl);
+                e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+                mask >>= 1;
+        }
+}
+
+/**
+ * e1000_shift_in_mdi_bits_82543 - Shift data bits in from the PHY
+ * @hw: pointer to the HW structure
+ *
+ * In order to read a register from the PHY, we need to shift 18 bits
+ * in from the PHY. Bits are "shifted in" by raising the clock input to
+ * the PHY (setting the MDC bit), and then reading the value of the data out
+ * MDIO bit.
+ **/
+static u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw)
+{
+        u32 ctrl;
+        u16 data = 0;
+        u8 i;
+
+        /*
+         * In order to read a register from the PHY, we need to shift in a
+         * total of 18 bits from the PHY.
+         * The first two bit (turnaround)
+         * times are used to avoid contention on the MDIO pin when a read
+         * operation is performed. These two bits are ignored by us and
+         * thrown away. Bits are "shifted in" by raising the input to the
+         * Management Data Clock (setting the MDC bit) and then reading the
+         * value of the MDIO bit.
+         */
+        ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+        /*
+         * Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+         * input.
+         */
+        ctrl &= ~E1000_CTRL_MDIO_DIR;
+        ctrl &= ~E1000_CTRL_MDIO;
+
+        E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+        E1000_WRITE_FLUSH(hw);
+
+        /*
+         * Raise and lower the clock before reading in the data. This accounts
+         * for the turnaround bits. The first clock occurred when we clocked
+         * out the last bit of the Register Address.
+         */
+        e1000_raise_mdi_clk_82543(hw, &ctrl);
+        e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+        for (data = 0, i = 0; i < 16; i++) {
+                data <<= 1;
+                e1000_raise_mdi_clk_82543(hw, &ctrl);
+                ctrl = E1000_READ_REG(hw, E1000_CTRL);
+                /* Check to see if we shifted in a "1". */
+                if (ctrl & E1000_CTRL_MDIO)
+                        data |= 1;
+                e1000_lower_mdi_clk_82543(hw, &ctrl);
+        }
+
+        e1000_raise_mdi_clk_82543(hw, &ctrl);
+        e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+        return data;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82543 - Force speed/duplex for PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the function to force speed and duplex for the m88 PHY, and
+ * if the PHY is not auto-negotiating and the speed is forced to 10Mbit,
+ * then calls the function for the polarity reversal workaround.
+ **/
+static s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw)
+{
+        s32 ret_val;
+
+        DEBUGFUNC("e1000_phy_force_speed_duplex_82543");
+
+        ret_val = e1000_phy_force_speed_duplex_m88(hw);
+        if (ret_val)
+                goto out;
+
+        if (!hw->mac.autoneg &&
+            (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED))
+                ret_val = e1000_polarity_reversal_workaround_82543(hw);
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_polarity_reversal_workaround_82543 - Workaround polarity reversal
+ * @hw: pointer to the HW structure
+ *
+ * When forcing link to 10 Full or 10 Half, the PHY can reverse the polarity
+ * inadvertently. To work around the issue, we disable the transmitter on
+ * the PHY until we have established the link partner's link parameters.
+ **/
+static s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw)
+{
+        s32 ret_val;
+        u16 mii_status_reg;
+        u16 i;
+        bool link;
+
+        /* Polarity reversal workaround for forced 10F/10H links. */
+
+        /* Disable the transmitter on the PHY */
+
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+        if (ret_val)
+                goto out;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+        if (ret_val)
+                goto out;
+
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+        if (ret_val)
+                goto out;
+
+        /*
+         * This loop will early-out if the NO link condition has been met.
+         * In other words, DO NOT use e1000_phy_has_link_generic() here.
+         */
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+                /*
+                 * Read the MII Status Register and wait for Link Status bit
+                 * to be clear.
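+                 * The register is read twice below, presumably because the
+                 * link status bit is latched: the first read returns the
+                 * stale latched value and the second the live one (an
+                 * assumption based on standard MII latch-low behavior).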
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msec_delay_irq(100); + } + + /* Recommended delay time after link has been lost */ + msec_delay_irq(1000); + + /* Now we will re-enable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + goto out; + msec_delay_irq(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + goto out; + msec_delay_irq(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + goto out; + msec_delay_irq(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + goto out; + + /* + * Read the MII Status Register and wait for Link Status bit + * to be set. + */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_TIME, 100000, &link); + if (ret_val) + goto out; + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_82543 - PHY hardware reset + * @hw: pointer to the HW structure + * + * Sets the PHY_RESET_DIR bit in the extended device control register + * to put the PHY into a reset and waits for completion. Once the reset + * has been accomplished, clear the PHY_RESET_DIR bit to take the PHY out + * of reset. This is a function pointer entry point called by the api module. + **/ +static s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw) +{ + struct e1000_functions *func = &hw->func; + u32 ctrl_ext; + s32 ret_val; + + DEBUGFUNC("e1000_phy_hw_reset_82543"); + + /* + * Read the Extended Device Control Register, assert the PHY_RESET_DIR + * bit to put the PHY into reset... + */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* ...then take it out of reset. */ + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + + usec_delay(150); + + ret_val = func->get_cfg_done(hw); + + return ret_val; +} + +/** + * e1000_reset_hw_82543 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_reset_hw_82543(struct e1000_hw *hw) +{ + u32 ctrl, icr; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_reset_hw_82543"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + e1000_set_tbi_sbp_82543(hw, FALSE); + + /* + * Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to 82543/82544 MAC\n"); + if (hw->mac.type == e1000_82543) { + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + } else { + /* + * The 82544 can't ACK the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround. 
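+                 * (E1000_WRITE_REG_IO presumably routes the write through
+                 * the device's I/O-mapped BAR rather than MMIO; an
+                 * assumption based on the macro name.)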
+ */ + E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + } + + /* + * After MAC reset, force reload of NVM to restore power-on + * settings to device. + */ + e1000_reload_nvm(hw); + msec_delay(2); + + /* Masking off and clearing any pending interrupts */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + icr = E1000_READ_REG(hw, E1000_ICR); + + return ret_val; +} + +/** + * e1000_init_hw_82543 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 e1000_init_hw_82543(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82543 *dev_spec; + u32 ctrl; + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_init_hw_82543"); + + dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec; + + if (!dev_spec) { + DEBUGOUT("dev_spec pointer is set to NULL.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* Disabling VLAN filtering */ + E1000_WRITE_REG(hw, E1000_VET, 0); + e1000_clear_vfta(hw); + + /* Setup the receive address. */ + e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + E1000_WRITE_FLUSH(hw); + } + + /* + * Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. + */ + if (hw->mac.type == e1000_82543 && dev_spec->dma_fairness) { + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR); + } + + e1000_pcix_mmrbc_workaround_generic(hw); + + /* Setup link and flow control */ + ret_val = e1000_setup_link(hw); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82543(hw); + +out: + return ret_val; +} + +/** + * e1000_setup_link_82543 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Read the EEPROM to determine the initial polarity value and write the + * extended device control register with the information before calling + * the generic setup link function, which does the following: + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_82543(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_setup_link_82543"); + + /* + * Take the 4 bits from NVM word 0xF that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before phy setup. 
+ */ + if (hw->mac.type == e1000_82543) { + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + ctrl_ext = ((data & NVM_WORD0F_SWPDIO_EXT_MASK) << + NVM_SWDPIO_EXT_SHIFT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + } + + ret_val = e1000_setup_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_setup_copper_link_82543 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 e1000_setup_copper_link_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_setup_copper_link_82543"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL) | E1000_CTRL_SLU; + /* + * With 82543, we need to force speed and duplex on the MAC + * equal to what the PHY speed and duplex configuration is. + * In addition, we need to perform a hardware reset on the + * PHY to take it out of reset. + */ + if (hw->mac.type == e1000_82543) { + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + goto out; + hw->phy.reset_disable = FALSE; + } else { + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + } + + /* Set MDI/MDI-X, Polarity Reversal, and downshift settings */ + ret_val = e1000_copper_link_setup_m88(hw); + if (ret_val) + goto out; + + if (hw->mac.autoneg) { + /* + * Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* + * PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = e1000_phy_force_speed_duplex_82543(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000_phy_has_link_generic(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + goto out; + + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + /* Config the MAC and PHY after link is up */ + if (hw->mac.type == e1000_82544) { + e1000_config_collision_dist_generic(hw); + } else { + ret_val = e1000_config_mac_to_phy_82543(hw); + if (ret_val) + goto out; + } + ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * e1000_setup_fiber_link_82543 - Setup link for fiber + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber links. Upon + * successful setup, poll for link. + **/ +static s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_fiber_link_82543"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + e1000_config_collision_dist_generic(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + goto out; + + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* + * For these adapters, the SW defineable pin 1 is cleared when the + * optics detect a signal. 
If we have a signal, then poll for a
+         * "Link-Up" indication.
+         */
+        if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+                ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+        } else {
+                DEBUGOUT("No signal detected\n");
+        }
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_check_for_copper_link_82543 - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks the phy for link, if link exists, do the following:
+ *   - check for downshift
+ *   - do polarity workaround (if necessary)
+ *   - configure collision distance
+ *   - configure flow control after link up
+ *   - configure tbi compatibility
+ **/
+static s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw)
+{
+        struct e1000_mac_info *mac = &hw->mac;
+        u32 icr, rctl;
+        s32 ret_val;
+        u16 speed, duplex;
+        bool link;
+
+        DEBUGFUNC("e1000_check_for_copper_link_82543");
+
+        if (!mac->get_link_status) {
+                ret_val = E1000_SUCCESS;
+                goto out;
+        }
+
+        ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+        if (ret_val)
+                goto out;
+
+        if (!link)
+                goto out; /* No link detected */
+
+        mac->get_link_status = FALSE;
+
+        e1000_check_downshift_generic(hw);
+
+        /*
+         * If we are forcing speed/duplex, then we can return since
+         * we have already determined whether we have link or not.
+         */
+        if (!mac->autoneg) {
+                /*
+                 * If speed and duplex are forced to 10H or 10F, then we will
+                 * implement the polarity reversal workaround. We disable
+                 * interrupts first, and upon returning, place the device's
+                 * interrupt state to its previous value except for the link
+                 * status change interrupt which will happen due to the
+                 * execution of this workaround.
+                 */
+                if (mac->forced_speed_duplex & E1000_ALL_10_SPEED) {
+                        E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+                        ret_val = e1000_polarity_reversal_workaround_82543(hw);
+                        icr = E1000_READ_REG(hw, E1000_ICR);
+                        E1000_WRITE_REG(hw, E1000_ICS, (icr & ~E1000_ICS_LSC));
+                        E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+                }
+
+                ret_val = -E1000_ERR_CONFIG;
+                goto out;
+        }
+
+        /*
+         * We have an M88E1000 PHY and Auto-Neg is enabled. If we
+         * have Si on board that is 82544 or newer, Auto
+         * Speed Detection takes care of MAC speed/duplex
+         * configuration. So we only need to configure Collision
+         * Distance in the MAC. Otherwise, we need to force
+         * speed/duplex on the MAC to the current PHY speed/duplex
+         * settings.
+         */
+        if (mac->type == e1000_82544)
+                e1000_config_collision_dist_generic(hw);
+        else {
+                ret_val = e1000_config_mac_to_phy_82543(hw);
+                if (ret_val) {
+                        DEBUGOUT("Error configuring MAC to PHY settings\n");
+                        goto out;
+                }
+        }
+
+        /*
+         * Configure Flow Control now that Auto-Neg has completed.
+         * First, we need to restore the desired flow control
+         * settings because we may have had to re-autoneg with a
+         * different link partner.
+         */
+        ret_val = e1000_config_fc_after_link_up_generic(hw);
+        if (ret_val) {
+                DEBUGOUT("Error configuring flow control\n");
+        }
+
+        /*
+         * At this point we know that we are on copper and we have
+         * auto-negotiated link. These are conditions for checking the link
+         * partner capability register. We use the link speed to determine if
+         * TBI compatibility needs to be turned on or off. If the link is not
+         * at gigabit speed, then TBI compatibility is not needed. If we are
+         * at gigabit speed, we turn on TBI compatibility.
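+         * (A TBI link partner is by definition a 1000BASE-X device, so the
+         * workaround is only meaningful at gigabit speed.)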
+         */
+        if (e1000_tbi_compatibility_enabled_82543(hw)) {
+                ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+                if (ret_val) {
+                        DEBUGOUT("Error getting link speed and duplex\n");
+                        return ret_val;
+                }
+                if (speed != SPEED_1000) {
+                        /*
+                         * If link speed is not set to gigabit speed,
+                         * we do not need to enable TBI compatibility.
+                         */
+                        if (e1000_tbi_sbp_enabled_82543(hw)) {
+                                /*
+                                 * If we previously were in this mode,
+                                 * turn it off.
+                                 */
+                                e1000_set_tbi_sbp_82543(hw, FALSE);
+                                rctl = E1000_READ_REG(hw, E1000_RCTL);
+                                rctl &= ~E1000_RCTL_SBP;
+                                E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+                        }
+                } else {
+                        /*
+                         * If TBI compatibility was previously off,
+                         * turn it on. For compatibility with a TBI link
+                         * partner, we will store bad packets. Some
+                         * frames have an additional byte on the end and
+                         * will look like CRC errors to the hardware.
+                         */
+                        if (!e1000_tbi_sbp_enabled_82543(hw)) {
+                                e1000_set_tbi_sbp_82543(hw, TRUE);
+                                rctl = E1000_READ_REG(hw, E1000_RCTL);
+                                rctl |= E1000_RCTL_SBP;
+                                E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+                        }
+                }
+        }
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_check_for_fiber_link_82543 - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+static s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw)
+{
+        struct e1000_mac_info *mac = &hw->mac;
+        u32 rxcw, ctrl, status;
+        s32 ret_val = E1000_SUCCESS;
+
+        DEBUGFUNC("e1000_check_for_fiber_link_82543");
+
+        ctrl = E1000_READ_REG(hw, E1000_CTRL);
+        status = E1000_READ_REG(hw, E1000_STATUS);
+        rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+        /*
+         * If we don't have link (auto-negotiation failed or link partner
+         * cannot auto-negotiate), the cable is plugged in (we have signal),
+         * and our link partner is not trying to auto-negotiate with us (we
+         * are receiving idles or data), we need to force link up. We also
+         * need to give auto-negotiation time to complete, in case the cable
+         * was just plugged in. The autoneg_failed flag does this.
+         */
+        /* (ctrl & E1000_CTRL_SWDPIN1) == 0 == have signal */
+        if ((!(ctrl & E1000_CTRL_SWDPIN1)) &&
+            (!(status & E1000_STATUS_LU)) &&
+            (!(rxcw & E1000_RXCW_C))) {
+                if (mac->autoneg_failed == 0) {
+                        mac->autoneg_failed = 1;
+                        ret_val = 0;
+                        goto out;
+                }
+                DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+                /* Disable auto-negotiation in the TXCW register */
+                E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+                /* Force link-up and also force full-duplex. */
+                ctrl = E1000_READ_REG(hw, E1000_CTRL);
+                ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+                E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+                /* Configure Flow Control after forcing link up. */
+                ret_val = e1000_config_fc_after_link_up_generic(hw);
+                if (ret_val) {
+                        DEBUGOUT("Error configuring flow control\n");
+                        goto out;
+                }
+        } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+                /*
+                 * If we are forcing link and we are receiving /C/ ordered
+                 * sets, re-enable auto-negotiation in the TXCW register
+                 * and disable forced link in the Device Control register
+                 * in an attempt to auto-negotiate with our link partner.
+                 */
+                DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+                E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+                E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+                mac->serdes_has_link = TRUE;
+        }
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_config_mac_to_phy_82543 - Configure MAC to PHY settings
+ * @hw: pointer to the HW structure
+ *
+ * For the 82543 silicon, we need to set the MAC to match the settings
+ * of the PHY, even if the PHY is auto-negotiating.
+ **/
+static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw)
+{
+        u32 ctrl;
+        s32 ret_val;
+        u16 phy_data;
+
+        DEBUGFUNC("e1000_config_mac_to_phy_82543");
+
+        /* Set the bits to force speed and duplex */
+        ctrl = E1000_READ_REG(hw, E1000_CTRL);
+        ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+        ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+        /*
+         * Set up duplex in the Device Control and Transmit Control
+         * registers depending on negotiated values.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+        if (ret_val)
+                goto out;
+
+        ctrl &= ~E1000_CTRL_FD;
+        if (phy_data & M88E1000_PSSR_DPLX)
+                ctrl |= E1000_CTRL_FD;
+
+        e1000_config_collision_dist_generic(hw);
+
+        /*
+         * Set up speed in the Device Control register depending on
+         * negotiated values.
+         */
+        if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+                ctrl |= E1000_CTRL_SPD_1000;
+        else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+                ctrl |= E1000_CTRL_SPD_100;
+
+        E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_write_vfta_82543 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: the 32-bit offset at which to write the value.
+ * @value: the 32-bit value to write at location offset.
+ *
+ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ * table.
+ **/
+static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value)
+{
+        u32 temp;
+
+        DEBUGFUNC("e1000_write_vfta_82543");
+
+        if ((hw->mac.type == e1000_82544) && (offset & 1)) {
+                temp = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset - 1);
+                E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+                E1000_WRITE_FLUSH(hw);
+                E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset - 1, temp);
+                E1000_WRITE_FLUSH(hw);
+        } else {
+                e1000_write_vfta_generic(hw, offset, value);
+        }
+}
+
+/**
+ * e1000_mta_set_82543 - Set multicast filter table address
+ * @hw: pointer to the HW structure
+ * @hash_value: determines the MTA register and bit to set
+ *
+ * The multicast table address is a register array of 32-bit registers.
+ * The hash_value is used to determine what register the bit is in, the
+ * current value is read, the new bit is OR'd in and the new value is
+ * written back into the register.
+ **/
+static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value)
+{
+        u32 hash_bit, hash_reg, mta, temp;
+
+        DEBUGFUNC("e1000_mta_set_82543");
+
+        hash_reg = (hash_value >> 5);
+
+        /*
+         * If we are on an 82544 and we are trying to write an odd offset
+         * in the MTA, save off the previous entry before writing and
+         * restore the old value after writing.
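+         * (Presumably the same 82544 write-ordering erratum that
+         * e1000_write_vfta_82543() above works around for odd VFTA
+         * offsets.)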
+ */ + if ((hw->mac.type == e1000_82544) && (hash_reg & 1)) { + hash_reg &= (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); + mta |= (1 << hash_bit); + temp = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg - 1); + + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg - 1, temp); + E1000_WRITE_FLUSH(hw); + } else { + e1000_mta_set_generic(hw, hash_value); + } +} + +/** + * e1000_led_on_82543 - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by the api module. + **/ +static s32 e1000_led_on_82543(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_on_82543"); + + if (hw->mac.type == e1000_82544 && + hw->phy.media_type == e1000_media_type_copper) { + /* Clear SW-defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Fiber 82544 and all 82543 use this method */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_82543 - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by the api module. + **/ +static s32 e1000_led_off_82543(struct e1000_hw *hw) +{ + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGFUNC("e1000_led_off_82543"); + + if (hw->mac.type == e1000_82544 && + hw->phy.media_type == e1000_media_type_copper) { + /* Set SW-defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs_82543 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
+ **/ +static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw) +{ + volatile u32 temp; + + DEBUGFUNC("e1000_clear_hw_cntrs_82543"); + + e1000_clear_hw_cntrs_base_generic(hw); + + temp = E1000_READ_REG(hw, E1000_PRC64); + temp = E1000_READ_REG(hw, E1000_PRC127); + temp = E1000_READ_REG(hw, E1000_PRC255); + temp = E1000_READ_REG(hw, E1000_PRC511); + temp = E1000_READ_REG(hw, E1000_PRC1023); + temp = E1000_READ_REG(hw, E1000_PRC1522); + temp = E1000_READ_REG(hw, E1000_PTC64); + temp = E1000_READ_REG(hw, E1000_PTC127); + temp = E1000_READ_REG(hw, E1000_PTC255); + temp = E1000_READ_REG(hw, E1000_PTC511); + temp = E1000_READ_REG(hw, E1000_PTC1023); + temp = E1000_READ_REG(hw, E1000_PTC1522); + + temp = E1000_READ_REG(hw, E1000_ALGNERRC); + temp = E1000_READ_REG(hw, E1000_RXERRC); + temp = E1000_READ_REG(hw, E1000_TNCRS); + temp = E1000_READ_REG(hw, E1000_CEXTERR); + temp = E1000_READ_REG(hw, E1000_TSCTC); + temp = E1000_READ_REG(hw, E1000_TSCTFC); +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h new file mode 100644 index 0000000..6e6fe82 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h @@ -0,0 +1,44 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_82543_H_ +#define _E1000_82543_H_ + +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_PREAMBLE_SIZE 32 +#define PHY_SOF 0x1 +#define PHY_OP_READ 0x2 +#define PHY_OP_WRITE 0x1 +#define PHY_TURNAROUND 0x2 + +#define TBI_COMPAT_ENABLED 0x1 /* Global "knob" for the workaround */ +/* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */ +#define TBI_SBP_ENABLED 0x2 + + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c new file mode 100644 index 0000000..af32a34 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c @@ -0,0 +1,1430 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_82571 + * e1000_82572 + * e1000_82573 + * e1000_82574 + */ + +#include "e1000_api.h" +#include "e1000_82571.h" + +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw); +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw); +static s32 e1000_init_mac_params_82571(struct e1000_hw *hw); +static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw); +static void e1000_release_nvm_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw); +static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw); +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, + bool active); +static s32 e1000_reset_hw_82571(struct e1000_hw *hw); +static s32 e1000_init_hw_82571(struct e1000_hw *hw); +static void e1000_clear_vfta_82571(struct e1000_hw *hw); +static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count); +static s32 e1000_setup_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data); +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw); +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); + +struct e1000_dev_spec_82571 { + bool laa_is_present; +}; + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. 
+ **/ +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_phy_params_82571"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + func->acquire_phy = e1000_get_hw_semaphore_82571; + func->check_polarity = e1000_check_polarity_igp; + func->check_reset_block = e1000_check_reset_block_generic; + func->release_phy = e1000_put_hw_semaphore_82571; + func->reset_phy = e1000_phy_hw_reset_generic; + func->set_d0_lplu_state = e1000_set_d0_lplu_state_82571; + func->set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + func->power_up_phy = e1000_power_up_phy_copper; + func->power_down_phy = e1000_power_down_phy_copper_82571; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + func->get_cfg_done = e1000_get_cfg_done_82571; + func->get_phy_info = e1000_get_phy_info_igp; + func->force_speed_duplex = e1000_phy_force_speed_duplex_igp; + func->get_cable_length = e1000_get_cable_length_igp_2; + func->read_phy_reg = e1000_read_phy_reg_igp; + func->write_phy_reg = e1000_write_phy_reg_igp; + + /* This uses above function pointers */ + ret_val = e1000_get_phy_id_82571(hw); + + /* Verify PHY ID */ + if (phy->id != IGP01E1000_I_PHY_ID) { + ret_val = -E1000_ERR_PHY; + goto out; + } + break; + case e1000_82573: + phy->type = e1000_phy_m88; + func->get_cfg_done = e1000_get_cfg_done_generic; + func->get_phy_info = e1000_get_phy_info_m88; + func->commit_phy = e1000_phy_sw_reset_generic; + func->force_speed_duplex = e1000_phy_force_speed_duplex_m88; + func->get_cable_length = e1000_get_cable_length_m88; + func->read_phy_reg = e1000_read_phy_reg_m88; + func->write_phy_reg = e1000_write_phy_reg_m88; + + /* This uses above function pointers */ + ret_val = e1000_get_phy_id_82571(hw); + + /* Verify PHY ID */ + if (phy->id != M88E1111_I_PHY_ID) { + ret_val = -E1000_ERR_PHY; + DEBUGOUT1("PHY ID unknown: type = 0x%08x\n", phy->id); + goto out; + } + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_functions *func = &hw->func; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82571"); + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* + * Autonomous Flash update bit must be cleared due + * to Flash update issue. 
+ */ + eecd &= ~E1000_EECD_AUPDEN; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + break; + } + fallthrough; + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + break; + } + + /* Function Pointers */ + func->acquire_nvm = e1000_acquire_nvm_82571; + func->read_nvm = (hw->mac.type == e1000_82573) + ? e1000_read_nvm_eerd + : e1000_read_nvm_spi; + func->release_nvm = e1000_release_nvm_82571; + func->update_nvm = e1000_update_nvm_checksum_82571; + func->validate_nvm = e1000_validate_nvm_checksum_82571; + func->valid_led_default = e1000_valid_led_default_82571; + func->write_nvm = e1000_write_nvm_82571; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_mac_params_82571"); + + /* Set media type */ + switch (hw->device_id) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82572EI_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = TRUE; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? TRUE : FALSE; + + /* Function pointers */ + + /* bus type/speed/width */ + func->get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + func->reset_hw = e1000_reset_hw_82571; + /* hw initialization */ + func->init_hw = e1000_init_hw_82571; + /* link setup */ + func->setup_link = e1000_setup_link_82571; + /* physical interface link setup */ + func->setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_setup_copper_link_82571 + : e1000_setup_fiber_serdes_link_82571; + /* check for link */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + func->check_for_link = e1000_check_for_copper_link_generic; + break; + case e1000_media_type_fiber: + func->check_for_link = e1000_check_for_fiber_link_generic; + break; + case e1000_media_type_internal_serdes: + func->check_for_link = e1000_check_for_serdes_link_generic; + break; + default: + ret_val = -E1000_ERR_CONFIG; + goto out; + break; + } + /* check management mode */ + func->check_mng_mode = e1000_check_mng_mode_generic; + /* multicast address update */ + func->update_mc_addr_list = e1000_update_mc_addr_list_82571; + /* writing VFTA */ + func->write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + func->clear_vfta = e1000_clear_vfta_82571; + /* setting MTA */ + func->mta_set = e1000_mta_set_generic; + /* read mac address */ + func->read_mac_addr = e1000_read_mac_addr_82571; + /* blink LED */ + func->blink_led = e1000_blink_led_generic; + /* setup LED */ + func->setup_led = e1000_setup_led_generic; + /* cleanup LED */ + func->cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + func->led_on = e1000_led_on_generic; + func->led_off = e1000_led_off_generic; + /* remove device */ + func->remove_device = e1000_remove_device_generic; + /* clear hardware counters */ + func->clear_hw_cntrs = e1000_clear_hw_cntrs_82571; + /* link info */ + func->get_link_up_info = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_get_speed_and_duplex_copper_generic + : e1000_get_speed_and_duplex_fiber_serdes_generic; + + hw->dev_spec_size = sizeof(struct e1000_dev_spec_82571); + + /* Device-specific structure allocation */ + ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size); + +out: + return ret_val; +} + +/** + * e1000_init_function_pointers_82571 - Init func ptrs. + * @hw: pointer to the HW structure + * + * The only function explicitly called by the api module to initialize + * all function pointers and parameters. + **/ +void e1000_init_function_pointers_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82571"); + + hw->func.init_mac_params = e1000_init_mac_params_82571; + hw->func.init_nvm_params = e1000_init_nvm_params_82571; + hw->func.init_phy_params = e1000_init_phy_params_82571; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_get_phy_id_82571"); + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
+                 */
+                phy->id = IGP01E1000_I_PHY_ID;
+                break;
+        case e1000_82573:
+                ret_val = e1000_get_phy_id(hw);
+                break;
+        default:
+                ret_val = -E1000_ERR_PHY;
+                break;
+        }
+
+        return ret_val;
+}
+
+/**
+ * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+        u32 swsm;
+        s32 ret_val = E1000_SUCCESS;
+        s32 timeout = hw->nvm.word_size + 1;
+        s32 i = 0;
+
+        DEBUGFUNC("e1000_get_hw_semaphore_82571");
+
+        /* Get the FW semaphore. */
+        for (i = 0; i < timeout; i++) {
+                swsm = E1000_READ_REG(hw, E1000_SWSM);
+                E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+                /* Semaphore acquired if bit latched */
+                if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+                        break;
+
+                usec_delay(50);
+        }
+
+        if (i == timeout) {
+                /* Release semaphores */
+                e1000_put_hw_semaphore_generic(hw);
+                DEBUGOUT("Driver can't access the NVM\n");
+                ret_val = -E1000_ERR_NVM;
+                goto out;
+        }
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ **/
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+        u32 swsm;
+
+        DEBUGFUNC("e1000_put_hw_semaphore_82571");
+
+        swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+        swsm &= ~E1000_SWSM_SWESMBI;
+
+        E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
+
+/**
+ * e1000_acquire_nvm_82571 - Request for access to the EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * To gain access to the EEPROM, first we must obtain a hardware semaphore.
+ * Then for non-82573 hardware, set the EEPROM access request bit and wait
+ * for EEPROM access grant bit. If the access grant bit is not set, release
+ * hardware semaphore.
+ **/
+static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
+{
+        s32 ret_val;
+
+        DEBUGFUNC("e1000_acquire_nvm_82571");
+
+        ret_val = e1000_get_hw_semaphore_82571(hw);
+        if (ret_val)
+                goto out;
+
+        if (hw->mac.type != e1000_82573)
+                ret_val = e1000_acquire_nvm_generic(hw);
+
+        if (ret_val)
+                e1000_put_hw_semaphore_82571(hw);
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_release_nvm_82571 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+static void e1000_release_nvm_82571(struct e1000_hw *hw)
+{
+        DEBUGFUNC("e1000_release_nvm_82571");
+
+        e1000_release_nvm_generic(hw);
+        e1000_put_hw_semaphore_82571(hw);
+}
+
+/**
+ * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * For non-82573 silicon, write data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
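+ * A typical caller therefore pairs the two operations, sketched here with
+ * wrapper names assumed to come from the api module:
+ *         e1000_write_nvm(hw, offset, 1, &word);
+ *         e1000_update_nvm_checksum(hw);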
+ **/
+static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
+                                 u16 *data)
+{
+        s32 ret_val = E1000_SUCCESS;
+
+        DEBUGFUNC("e1000_write_nvm_82571");
+
+        switch (hw->mac.type) {
+        case e1000_82573:
+                ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
+                break;
+        case e1000_82571:
+        case e1000_82572:
+                ret_val = e1000_write_nvm_spi(hw, offset, words, data);
+                break;
+        default:
+                ret_val = -E1000_ERR_NVM;
+                break;
+        }
+
+        return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_82571 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
+{
+        u32 eecd;
+        s32 ret_val;
+        u16 i;
+
+        DEBUGFUNC("e1000_update_nvm_checksum_82571");
+
+        ret_val = e1000_update_nvm_checksum_generic(hw);
+        if (ret_val)
+                goto out;
+
+        /*
+         * If our nvm is an EEPROM, then we're done;
+         * otherwise, commit the checksum to the flash NVM.
+         */
+        if (hw->nvm.type != e1000_nvm_flash_hw)
+                goto out;
+
+        /* Check for pending operations. */
+        for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+                msec_delay(1);
+                if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD) == 0)
+                        break;
+        }
+
+        if (i == E1000_FLASH_UPDATES) {
+                ret_val = -E1000_ERR_NVM;
+                goto out;
+        }
+
+        /* Reset the firmware if using STM opcode. */
+        if ((E1000_READ_REG(hw, E1000_FLOP) & 0xFF00) == E1000_STM_OPCODE) {
+                /*
+                 * The enabling of and the actual reset must be done
+                 * in two write cycles.
+                 */
+                E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET_ENABLE);
+                E1000_WRITE_FLUSH(hw);
+                E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET);
+        }
+
+        /* Commit the write to flash */
+        eecd = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD;
+        E1000_WRITE_REG(hw, E1000_EECD, eecd);
+
+        for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+                msec_delay(1);
+                if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD) == 0)
+                        break;
+        }
+
+        if (i == E1000_FLASH_UPDATES) {
+                ret_val = -E1000_ERR_NVM;
+                goto out;
+        }
+
+out:
+        return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
+{
+        DEBUGFUNC("e1000_validate_nvm_checksum_82571");
+
+        if (hw->nvm.type == e1000_nvm_flash_hw)
+                e1000_fix_nvm_checksum_82571(hw);
+
+        return e1000_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * After checking for invalid values, poll the EEPROM to ensure the previous
+ * command has completed before trying to write the next word. After the
+ * write, poll for completion.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/ +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eewr = 0; + s32 ret_val = 0; + + DEBUGFUNC("e1000_write_nvm_eewr_82571"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = (data[i] << E1000_NVM_RW_REG_DATA) | + ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START; + + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + E1000_WRITE_REG(hw, E1000_EEWR, eewr); + + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + +out: + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. + **/ +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_get_cfg_done_82571"); + + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0) + break; + msec_delay(1); + timeout--; + } + if (!timeout) { + DEBUGOUT("MNG configuration cycle has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82571"); + + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1000_write_phy_reg(hw, + IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1000_write_phy_reg(hw, + IGP02E1000_PHY_POWER_MGMT, + data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl, extcnf_ctrl, ctrl_ext, icr; + s32 ret_val; + u16 i = 0; + + DEBUGFUNC("e1000_reset_hw_82571"); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) { + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + } + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* + * Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. + */ + if (hw->mac.type == e1000_82573) { + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + do { + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + msec_delay(2); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + } + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + if (hw->nvm.type == e1000_nvm_flash_hw) { + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + } + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + goto out; + + /* + * Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + if (hw->mac.type == e1000_82573) + msec_delay(25); + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + icr = E1000_READ_REG(hw, E1000_ICR); + + if (!(e1000_check_alt_mac_addr_generic(hw))) + e1000_set_laa_state_82571(hw, TRUE); + +out: + return ret_val; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
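+ * + * Bring-up order sketch (error handling elided); note that link and flow control are configured from within this routine via e1000_setup_link(): + * + * e1000_reset_hw(hw); + * e1000_init_hw(hw);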
+ **/ +static s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + DEBUGFUNC("e1000_init_hw_82571"); + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = e1000_id_led_init_generic(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + e1000_clear_vfta(hw); + + /* Setup the receive address. */ + /* + * If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000_get_laa_state_82571(hw)) + rar_count--; + e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data); + + /* ...for both queues. */ + if (mac->type != e1000_82573) { + reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data); + } else { + e1000_enable_tx_pkt_filtering(hw); + reg_data = E1000_READ_REG(hw, E1000_GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + E1000_WRITE_REG(hw, E1000_GCR, reg_data); + } + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
+ **/ +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_initialize_hw_bits_82571"); + + if (hw->mac.disable_hw_init_bits) + goto out; + + /* Transmit Descriptor Control 0 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = E1000_READ_REG(hw, E1000_TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + break; + default: + break; + } + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = E1000_READ_REG(hw, E1000_TARC(1)); + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg &= ~((1 << 29) | (1 << 30)); + reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + E1000_WRITE_REG(hw, E1000_TARC(1), reg); + break; + default: + break; + } + + /* Device Control */ + if (hw->mac.type == e1000_82573) { + reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~(1 << 29); + E1000_WRITE_REG(hw, E1000_CTRL, reg); + } + + /* Extended Device Control */ + if (hw->mac.type == e1000_82573) { + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~(1 << 23); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + } + +out: + return; +} + +/** + * e1000_clear_vfta_82571 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +static void e1000_clear_vfta_82571(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + DEBUGFUNC("e1000_clear_vfta_82571"); + + if (hw->mac.type == e1000_82573) { + if (hw->mng_cookie.vlan_id != 0) { + /* + * The VFTA is a 4096b bit-field, each identifying + * a single VLAN ID. The following operations + * determine which 32b entry (i.e. offset) into the + * array we want to set the VLAN ID (i.e. bit) of + * the manageability unit. + */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* + * If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of + * the manageability unit. + */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_update_mc_addr_list_82571 - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. 
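+ * + * Call sketch through the public wrapper (mc_list is a hypothetical packed list of two 6-byte addresses, with RAR 1 as the first free receive address register): + * + * e1000_update_mc_addr_list(hw, mc_list, 2, 1, hw->mac.rar_entry_count);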
+ **/ +static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + DEBUGFUNC("e1000_update_mc_addr_list_82571"); + + if (e1000_get_laa_state_82571(hw)) + rar_count--; + + e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count, + rar_used_count, rar_count); +} + +/** + * e1000_setup_link_82571 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_setup_link_82571"); + + /* + * 82573 does not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->mac.type == e1000_82573) + hw->fc.type = e1000_fc_full; + + return e1000_setup_link_generic(hw); +} + +/** + * e1000_setup_copper_link_82571 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we + * check for link; once link is established, calls are made to configure + * collision distance and flow control. + **/ +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl, led_ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_copper_link_82571"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + ret_val = e1000_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000_copper_link_setup_igp(hw); + /* Setup activity LED */ + led_ctrl = E1000_READ_REG(hw, E1000_LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, E1000_LEDCTL, led_ctrl); + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = e1000_setup_copper_link_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. + **/ +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_setup_fiber_serdes_link_82571"); + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* + * If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twiddling their thumbs + * if another tool failed to take it out of loopback mode. + */ + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000_setup_fiber_serdes_link_generic(hw); +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration.
+ **/ +static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_82571"); + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (hw->mac.type == e1000_82573 && + *data == ID_LED_RESERVED_F746) + *data = ID_LED_DEFAULT_82573; + else if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; +out: + return ret_val; +} + +/** + * e1000_get_laa_state_82571 - Get locally administered address state + * @hw: pointer to the HW structure + * + * Retrieve and return the current locally administered address state. + **/ +bool e1000_get_laa_state_82571(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82571 *dev_spec; + bool state = FALSE; + + DEBUGFUNC("e1000_get_laa_state_82571"); + + if (hw->mac.type != e1000_82571) + goto out; + + dev_spec = (struct e1000_dev_spec_82571 *)hw->dev_spec; + + state = dev_spec->laa_is_present; + +out: + return state; +} + +/** + * e1000_set_laa_state_82571 - Set locally administered address state + * @hw: pointer to the HW structure + * @state: enable/disable locally administered address + * + * Enable/Disable the current locally administered address state. + **/ +void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state) +{ + struct e1000_dev_spec_82571 *dev_spec; + + DEBUGFUNC("e1000_set_laa_state_82571"); + + if (hw->mac.type != e1000_82571) + goto out; + + dev_spec = (struct e1000_dev_spec_82571 *)hw->dev_spec; + + dev_spec->laa_is_present = state; + + /* If workaround is activated... */ + if (state) { + /* + * Hold a copy of the LAA in RAR[14]. This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed, the actual LAA is in one of the RARs and no + * incoming packets directed to this port are dropped. + * Eventually the LAA will be in RAR[0] and RAR[14]. + */ + e1000_rar_set_generic(hw, hw->mac.addr, + hw->mac.rar_entry_count - 1); + } + +out: + return; +} + +/** + * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum + * @hw: pointer to the HW structure + * + * Verifies that the EEPROM has completed the update. After updating the + * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If + * the checksum fix is not implemented, we need to set the bit and update + * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, + * we need to return bad checksum. + **/ +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_fix_nvm_checksum_82571"); + + if (nvm->type != e1000_nvm_flash_hw) + goto out; + + /* + * Check bit 4 of word 10h. If it is 0, firmware is done updating + * 10h-12h. Checksum may need to be fixed. + */ + ret_val = e1000_read_nvm(hw, 0x10, 1, &data); + if (ret_val) + goto out; + + if (!(data & 0x10)) { + /* + * Read 0x23 and check bit 15. This bit is a 1 + * when the checksum has already been fixed. If + * the checksum is still wrong and this bit is a + * 1, we need to return bad checksum. Otherwise, + * we need to set this bit to a 1 and update the + * checksum.
+ */ + ret_val = e1000_read_nvm(hw, 0x23, 1, &data); + if (ret_val) + goto out; + + if (!(data & 0x8000)) { + data |= 0x8000; + ret_val = e1000_write_nvm(hw, 0x23, 1, &data); + if (ret_val) + goto out; + ret_val = e1000_update_nvm_checksum(hw); + } + } + +out: + return ret_val; +} + +/** + * e1000_read_mac_addr_82571 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_mac_addr_82571"); + if (e1000_check_alt_mac_addr_generic(hw)) + ret_val = e1000_read_mac_addr_generic(hw); + + return ret_val; +} + +/** + * e1000_power_down_phy_copper_82571 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, to turn off link during a + * driver unload, or when wake on LAN is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + volatile u32 temp; + + DEBUGFUNC("e1000_clear_hw_cntrs_82571"); + + e1000_clear_hw_cntrs_base_generic(hw); + temp = E1000_READ_REG(hw, E1000_PRC64); + temp = E1000_READ_REG(hw, E1000_PRC127); + temp = E1000_READ_REG(hw, E1000_PRC255); + temp = E1000_READ_REG(hw, E1000_PRC511); + temp = E1000_READ_REG(hw, E1000_PRC1023); + temp = E1000_READ_REG(hw, E1000_PRC1522); + temp = E1000_READ_REG(hw, E1000_PTC64); + temp = E1000_READ_REG(hw, E1000_PTC127); + temp = E1000_READ_REG(hw, E1000_PTC255); + temp = E1000_READ_REG(hw, E1000_PTC511); + temp = E1000_READ_REG(hw, E1000_PTC1023); + temp = E1000_READ_REG(hw, E1000_PTC1522); + + temp = E1000_READ_REG(hw, E1000_ALGNERRC); + temp = E1000_READ_REG(hw, E1000_RXERRC); + temp = E1000_READ_REG(hw, E1000_TNCRS); + temp = E1000_READ_REG(hw, E1000_CEXTERR); + temp = E1000_READ_REG(hw, E1000_TSCTC); + temp = E1000_READ_REG(hw, E1000_TSCTFC); + + temp = E1000_READ_REG(hw, E1000_MGTPRC); + temp = E1000_READ_REG(hw, E1000_MGTPDC); + temp = E1000_READ_REG(hw, E1000_MGTPTC); + + temp = E1000_READ_REG(hw, E1000_IAC); + temp = E1000_READ_REG(hw, E1000_ICRXOC); + + temp = E1000_READ_REG(hw, E1000_ICRXPTC); + temp = E1000_READ_REG(hw, E1000_ICRXATC); + temp = E1000_READ_REG(hw, E1000_ICTXPTC); + temp = E1000_READ_REG(hw, E1000_ICTXATC); + temp = E1000_READ_REG(hw, E1000_ICTXQEC); + temp = E1000_READ_REG(hw, E1000_ICTXQMTC); + temp = E1000_READ_REG(hw, E1000_ICRXDMTC); +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h new file mode 100644 index 0000000..75ea2a2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h @@ -0,0 +1,40 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation.
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_82571_H_ +#define _E1000_82571_H_ + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c new file mode 100644 index 0000000..f434bf2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c @@ -0,0 +1,1164 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" +#include "e1000_mac.h" +#include "e1000_nvm.h" +#include "e1000_phy.h" + +/** + * e1000_init_mac_params - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the MAC + * set of functions. Called by drivers or by e1000_setup_init_funcs. 
+ **/ +s32 e1000_init_mac_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->func.init_mac_params) { + ret_val = hw->func.init_mac_params(hw); + if (ret_val) { + DEBUGOUT("MAC Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mac.init_mac_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the NVM + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_nvm_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->func.init_nvm_params) { + ret_val = hw->func.init_nvm_params(hw); + if (ret_val) { + DEBUGOUT("NVM Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("nvm.init_nvm_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_phy_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->func.init_phy_params) { + ret_val = hw->func.init_phy_params(hw); + if (ret_val) { + DEBUGOUT("PHY Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("phy.init_phy_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * device ID stored in the hw structure. + * MUST BE FIRST FUNCTION CALLED (explicitly or through + * e1000_setup_init_funcs()). 
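+ * + * Probe-time sketch (pdev is a hypothetical PCI device handle, error handling elided); e1000_setup_init_funcs() invokes this function first: + * + * hw->device_id = pdev->device; + * e1000_setup_init_funcs(hw, TRUE);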
+ **/ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + mac->type = e1000_82542; + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + mac->type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + mac->type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + mac->type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + mac->type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + mac->type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + mac->type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + mac->type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + mac->type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + mac->type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + mac->type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + mac->type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_82571EB_COPPER: + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + mac->type = e1000_82571; + break; + case E1000_DEV_ID_82572EI: + case E1000_DEV_ID_82572EI_COPPER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82572EI_SERDES: + mac->type = e1000_82572; + break; + case E1000_DEV_ID_82573E: + case E1000_DEV_ID_82573E_IAMT: + case E1000_DEV_ID_82573L: + mac->type = e1000_82573; + break; + case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + case E1000_DEV_ID_80003ES2LAN_COPPER_SPT: + case E1000_DEV_ID_80003ES2LAN_SERDES_SPT: + mac->type = e1000_80003es2lan; + break; + case E1000_DEV_ID_ICH8_IFE: + case E1000_DEV_ID_ICH8_IFE_GT: + case E1000_DEV_ID_ICH8_IFE_G: + case E1000_DEV_ID_ICH8_IGP_M: + case E1000_DEV_ID_ICH8_IGP_M_AMT: + case E1000_DEV_ID_ICH8_IGP_AMT: + case E1000_DEV_ID_ICH8_IGP_C: + mac->type = e1000_ich8lan; + break; + case E1000_DEV_ID_ICH9_IFE: + case E1000_DEV_ID_ICH9_IFE_GT: + case E1000_DEV_ID_ICH9_IFE_G: + case E1000_DEV_ID_ICH9_IGP_AMT: + case E1000_DEV_ID_ICH9_IGP_C: + mac->type = e1000_ich9lan; + break; + default: + /* Should never have loaded on this device */ + ret_val = -E1000_ERR_MAC_INIT; + break; + } + + return ret_val; +} + +/** + * e1000_setup_init_funcs - Initializes function pointers + * @hw: pointer to the HW structure + * @init_device: TRUE will initialize the rest of the function pointers 
+ * getting the device ready for use. FALSE will only set + * MAC type and the function pointers for the other init + * functions. Passing FALSE will not generate any hardware + * reads or writes. + * + * This function must be called by a driver in order to use the rest + * of the 'shared' code files. Called by drivers only. + **/ +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) +{ + s32 ret_val; + + /* Can't do much good without knowing the MAC type. */ + ret_val = e1000_set_mac_type(hw); + if (ret_val) { + DEBUGOUT("ERROR: MAC type could not be set properly.\n"); + goto out; + } + + if (!hw->hw_addr) { + DEBUGOUT("ERROR: Registers not mapped\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Init some generic function pointers that are currently all pointing + * to generic implementations. We do this first, allowing a driver + * module to override it afterwards. + */ + hw->func.config_collision_dist = e1000_config_collision_dist_generic; + hw->func.rar_set = e1000_rar_set_generic; + hw->func.validate_mdi_setting = e1000_validate_mdi_setting_generic; + hw->func.mng_host_if_write = e1000_mng_host_if_write_generic; + hw->func.mng_write_cmd_header = e1000_mng_write_cmd_header_generic; + hw->func.mng_enable_host_if = e1000_mng_enable_host_if_generic; + hw->func.wait_autoneg = e1000_wait_autoneg_generic; + hw->func.reload_nvm = e1000_reload_nvm_generic; + + /* + * Set up the init function pointers. These are functions within the + * adapter family file that set up function pointers for the rest of + * the functions in that family. + */ + switch (hw->mac.type) { + case e1000_82542: + e1000_init_function_pointers_82542(hw); + break; + case e1000_82543: + case e1000_82544: + e1000_init_function_pointers_82543(hw); + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + e1000_init_function_pointers_82540(hw); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + e1000_init_function_pointers_82541(hw); + break; + case e1000_82571: + case e1000_82572: + case e1000_82573: + e1000_init_function_pointers_82571(hw); + break; + case e1000_80003es2lan: + e1000_init_function_pointers_80003es2lan(hw); + break; + case e1000_ich8lan: + case e1000_ich9lan: + e1000_init_function_pointers_ich8lan(hw); + break; + default: + DEBUGOUT("Hardware not supported\n"); + ret_val = -E1000_ERR_CONFIG; + break; + } + + /* + * Initialize the rest of the function pointers. These require some + * register reads/writes in some cases. + */ + if (!(ret_val) && init_device) { + ret_val = e1000_init_mac_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_nvm_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_phy_params(hw); + if (ret_val) + goto out; + + } + +out: + return ret_val; +} + +/** + * e1000_remove_device - Free device specific structure + * @hw: pointer to the HW structure + * + * If a device specific structure was allocated, this function will + * free it. This is a function pointer entry point called by drivers. + **/ +void e1000_remove_device(struct e1000_hw *hw) +{ + if (hw->func.remove_device) + hw->func.remove_device(hw); +} + +/** + * e1000_get_bus_info - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This obtains information about the HW bus to which the + * adapter is attached and stores it in the hw structure. This is a + * function pointer entry point called by drivers.
+ **/ +s32 e1000_get_bus_info(struct e1000_hw *hw) +{ + if (hw->func.get_bus_info) + return hw->func.get_bus_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * This clears the VLAN filter table on the adapter. This is a function + * pointer entry point called by drivers. + **/ +void e1000_clear_vfta(struct e1000_hw *hw) +{ + if (hw->func.clear_vfta) + hw->func.clear_vfta(hw); +} + +/** + * e1000_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset at which to write the value. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. This is a function pointer entry point called by drivers. + **/ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + if (hw->func.write_vfta) + hw->func.write_vfta(hw, offset, value); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. Currently no func pointer + * exists and all implementations are handled in the generic version of this + * function. + **/ +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 rar_used_count, + u32 rar_count) +{ + if (hw->func.update_mc_addr_list) + hw->func.update_mc_addr_list(hw, + mc_addr_list, + mc_addr_count, + rar_used_count, + rar_count); +} + +/** + * e1000_force_mac_fc - Force MAC flow control + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Currently no func pointer exists + * and all implementations are handled in the generic version of this + * function. + **/ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + return e1000_force_mac_fc_generic(hw); +} + +/** + * e1000_check_for_link - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + if (hw->func.check_for_link) + return hw->func.check_for_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_check_mng_mode - Check management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point called by drivers. + **/ +bool e1000_check_mng_mode(struct e1000_hw *hw) +{ + if (hw->func.check_mng_mode) + return hw->func.check_mng_mode(hw); + + return FALSE; +} + +/** + * e1000_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface.
+ **/ +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + return e1000_mng_write_dhcp_info_generic(hw, buffer, length); +} + +/** + * e1000_reset_hw - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + if (hw->func.reset_hw) + return hw->func.reset_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_init_hw - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + if (hw->func.init_hw) + return hw->func.init_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_link - Configures link and flow control + * @hw: pointer to the HW structure + * + * This configures link and flow control settings for the adapter. This + * is a function pointer entry point called by drivers. While modules can + * also call this, they probably call their own version of this function. + **/ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + if (hw->func.setup_link) + return hw->func.setup_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_get_speed_and_duplex - Returns current speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to a 16-bit value to store the speed + * @duplex: pointer to a 16-bit value to store the duplex. + * + * This returns the speed and duplex of the adapter in the two 'out' + * variables passed in. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + if (hw->func.get_link_up_info) + return hw->func.get_link_up_info(hw, speed, duplex); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_led - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + if (hw->func.setup_led) + return hw->func.setup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores SW controllable LED + * @hw: pointer to the HW structure + * + * This restores the SW controllable LED to the value saved off by + * e1000_setup_led. This is a function pointer entry point called by drivers. + **/ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + if (hw->func.cleanup_led) + return hw->func.cleanup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_blink_led - Blink SW controllable LED + * @hw: pointer to the HW structure + * + * This starts the adapter LED blinking. Request the LED to be setup first + * and cleaned up after. This is a function pointer entry point called by + * drivers. + **/ +s32 e1000_blink_led(struct e1000_hw *hw) +{ + if (hw->func.blink_led) + return hw->func.blink_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by drivers. 
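+ * + * Identify-port sketch using the LED hooks; the LED must be set up first and cleaned up afterwards: + * + * e1000_setup_led(hw); + * e1000_led_on(hw); + * e1000_led_off(hw); + * e1000_cleanup_led(hw);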
+ **/ +s32 e1000_led_on(struct e1000_hw *hw) +{ + if (hw->func.led_on) + return hw->func.led_on(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_off(struct e1000_hw *hw) +{ + if (hw->func.led_off) + return hw->func.led_off(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive - Reset adaptive IFS + * @hw: pointer to the HW structure + * + * Resets the adaptive IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + e1000_reset_adaptive_generic(hw); +} + +/** + * e1000_update_adaptive - Update adaptive IFS + * @hw: pointer to the HW structure + * + * Updates adapter IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + return; /* TODO: adaptive IFS updating is currently disabled; the generic call below is never reached */ + e1000_update_adaptive_generic(hw); +} + +/** + * e1000_disable_pcie_master - Disable PCI-Express master access + * @hw: pointer to the HW structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. Currently no func pointer exists and all implementations are + * handled in the generic version of this function. + **/ +s32 e1000_disable_pcie_master(struct e1000_hw *hw) +{ + return e1000_disable_pcie_master_generic(hw); +} + +/** + * e1000_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + if (hw->func.config_collision_dist) + hw->func.config_collision_dist(hw); +} + +/** + * e1000_rar_set - Sets a receive address register + * @hw: pointer to the HW structure + * @addr: address to set the RAR to + * @index: the RAR to set + * + * Sets a Receive Address Register (RAR) to the specified address. + **/ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + if (hw->func.rar_set) + hw->func.rar_set(hw, addr, index); +} + +/** + * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state + * @hw: pointer to the HW structure + * + * Ensures that the MDI/MDIX SW state is valid. + **/ +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (hw->func.validate_mdi_setting) + return hw->func.validate_mdi_setting(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_mta_set - Sets multicast table bit + * @hw: pointer to the HW structure + * @hash_value: Multicast hash value. + * + * This sets the bit in the multicast table corresponding to the + * hash value. This is a function pointer entry point called by drivers. + **/ +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + if (hw->func.mta_set) + hw->func.mta_set(hw, hash_value); +} + +/** + * e1000_hash_mc_addr - Determines address location in multicast table + * @hw: pointer to the HW structure + * @mc_addr: Multicast address to hash. + * + * This hashes an address to determine its location in the multicast + * table. Currently no func pointer exists and all implementations + * are handled in the generic version of this function.
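+ * + * Sketch of feeding the hash into the multicast table (mc_addr is a hypothetical 6-byte multicast address): + * + * u32 hash = e1000_hash_mc_addr(hw, mc_addr); + * e1000_mta_set(hw, hash);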
+ **/ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + return e1000_hash_mc_addr_generic(hw, mc_addr); +} + +/** + * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + return e1000_enable_tx_pkt_filtering_generic(hw); +} + +/** + * e1000_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the given offset on the host + * interface. It also handles alignment so the writes are done in the most + * efficient way, and accumulates the running sum of the data in the *sum + * parameter. + **/ +s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length, + u16 offset, u8 *sum) +{ + if (hw->func.mng_host_if_write) + return hw->func.mng_host_if_write(hw, buffer, length, offset, + sum); + + return E1000_NOT_IMPLEMENTED; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after performing the checksum calculation. + **/ +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + if (hw->func.mng_write_cmd_header) + return hw->func.mng_write_cmd_header(hw, hdr); + + return E1000_NOT_IMPLEMENTED; +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy-waits + * if the previous command has not completed. + **/ +s32 e1000_mng_enable_host_if(struct e1000_hw * hw) +{ + if (hw->func.mng_enable_host_if) + return hw->func.mng_enable_host_if(hw); + + return E1000_NOT_IMPLEMENTED; +} + +/** + * e1000_wait_autoneg - Waits for autonegotiation completion + * @hw: pointer to the HW structure + * + * Waits for autoneg to complete. Dispatches through the wait_autoneg hook, + * which is initialized to the generic implementation. + **/ +s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + if (hw->func.wait_autoneg) + return hw->func.wait_autoneg(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_check_reset_block - Verifies PHY can be reset + * @hw: pointer to the HW structure + * + * Checks if the PHY is in a state that can be reset or if manageability + * has it tied up. This is a function pointer entry point called by drivers. + **/ +s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + if (hw->func.check_reset_block) + return hw->func.check_reset_block(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg - Reads PHY register + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the buffer to store the 16-bit read. + * + * Reads the PHY register and returns the value in data. + * This is a function pointer entry point called by drivers.
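+ * + * Link-check sketch (PHY_STATUS and MII_SR_LINK_STATUS are the usual e1000 register/bit names, assumed visible to the caller): + * + * u16 phy_data; + * bool link_up; + * + * if (!e1000_read_phy_reg(hw, PHY_STATUS, &phy_data)) + * link_up = !!(phy_data & MII_SR_LINK_STATUS);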
+ **/ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->func.read_phy_reg) + return hw->func.read_phy_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - Writes PHY register + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->func.write_phy_reg) + return hw->func.write_phy_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg - Reads register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the location to store the 16-bit value read. + * + * Reads a register out of the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return e1000_read_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_write_kmrn_reg - Writes register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes a register to the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return e1000_write_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_get_cable_length - Retrieves cable length estimation + * @hw: pointer to the HW structure + * + * This function estimates the cable length and stores them in + * hw->phy.min_length and hw->phy.max_length. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + if (hw->func.get_cable_length) + return hw->func.get_cable_length(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info - Retrieves PHY information from registers + * @hw: pointer to the HW structure + * + * This function gets some information from various PHY registers and + * populates hw->phy values with it. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + if (hw->func.get_phy_info) + return hw->func.get_phy_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - Hard PHY reset + * @hw: pointer to the HW structure + * + * Performs a hard PHY reset. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + if (hw->func.reset_phy) + return hw->func.reset_phy(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_commit - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. 
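+ * + * Typical pattern sketch: commit after reconfiguring PHY registers so the new settings take effect on parts that need the soft reset (M88E1000_PHY_SPEC_CTRL and phy_data are illustrative only): + * + * e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + * e1000_phy_commit(hw);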
+ **/ +s32 e1000_phy_commit(struct e1000_hw *hw) +{ + if (hw->func.commit_phy) + return hw->func.commit_phy(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->func.set_d0_lplu_state) + return hw->func.set_d0_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->func.set_d3_lplu_state) + return hw->func.set_d3_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - Reads MAC address + * @hw: pointer to the HW structure + * + * Reads the MAC address out of the adapter and stores it in the HW structure. + * Uses the adapter-specific hook when one is set, otherwise falls back to + * the generic implementation. + **/ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->func.read_mac_addr) + return hw->func.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_read_pba_num - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num) +{ + return e1000_read_pba_num_generic(hw, pba_num); +} + +/** + * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Validates the NVM checksum is correct. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->func.validate_nvm) + return hw->func.validate_nvm(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Updates the NVM checksum. This is a function pointer entry point called + * by drivers.
+ **/ +s32 e1000_update_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->func.update_nvm) + return hw->func.update_nvm(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void e1000_reload_nvm(struct e1000_hw *hw) +{ + if (hw->func.reload_nvm) + hw->func.reload_nvm(hw); +} + +/** + * e1000_read_nvm - Reads NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to read + * @data: pointer to the properly sized buffer for the data. + * + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->func.read_nvm) + return hw->func.read_nvm(hw, offset, words, data); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_write_nvm - Writes to NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to write to + * @words: number of 16-bit words to write + * @data: pointer to the properly sized buffer for the data. + * + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->func.write_nvm) + return hw->func.write_nvm(hw, offset, words, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg - Writes 8bit Control register + * @hw: pointer to the HW structure + * @reg: 32bit register offset + * @offset: the register to write + * @data: the value to write. + * + * Writes the 8-bit control register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data) +{ + return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); +} + +/** + * e1000_power_up_phy - Restores link in case of PHY power down + * @hw: pointer to the HW structure + * + * The PHY may be powered down to save power, to turn off link when the + * driver is unloaded, or when wake on LAN is not enabled (among others). + **/ +void e1000_power_up_phy(struct e1000_hw *hw) +{ + if (hw->func.power_up_phy) + hw->func.power_up_phy(hw); + + e1000_setup_link(hw); +} + +/** + * e1000_power_down_phy - Power down PHY + * @hw: pointer to the HW structure + * + * The PHY may be powered down to save power, to turn off link when the + * driver is unloaded, or when wake on LAN is not enabled (among others). + **/ +void e1000_power_down_phy(struct e1000_hw *hw) +{ + if (hw->func.power_down_phy) + hw->func.power_down_phy(hw); +} + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h new file mode 100644 index 0000000..4c646c8 --- --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h @@ -0,0 +1,166 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation.
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_API_H_ +#define _E1000_API_H_ + +#include "e1000_hw.h" + +extern void e1000_init_function_pointers_82542(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82543(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82540(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82571(struct e1000_hw *hw); +extern void e1000_init_function_pointers_82541(struct e1000_hw *hw); +extern void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw); +extern void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw); + +s32 e1000_set_mac_type(struct e1000_hw *hw); +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); +s32 e1000_init_mac_params(struct e1000_hw *hw); +s32 e1000_init_nvm_params(struct e1000_hw *hw); +s32 e1000_init_phy_params(struct e1000_hw *hw); +void e1000_remove_device(struct e1000_hw *hw); +s32 e1000_get_bus_info(struct e1000_hw *hw); +void e1000_clear_vfta(struct e1000_hw *hw); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 e1000_force_mac_fc(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000_disable_pcie_master(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); +void e1000_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count); +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_check_reset_block(struct e1000_hw *hw); +s32 e1000_blink_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +s32 e1000_get_cable_length(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); +s32 e1000_get_phy_info(struct e1000_hw *hw); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_commit(struct e1000_hw *hw); +void e1000_power_up_phy(struct e1000_hw *hw); +void e1000_power_down_phy(struct e1000_hw *hw); +s32 e1000_read_mac_addr(struct 
e1000_hw *hw); +s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num); +void e1000_reload_nvm(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_wait_autoneg(struct e1000_hw *hw); +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +bool e1000_check_mng_mode(struct e1000_hw *hw); +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if(struct e1000_hw *hw); +s32 e1000_mng_host_if_write(struct e1000_hw *hw, + u8 *buffer, u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info(struct e1000_hw * hw, + u8 *buffer, u16 length); +void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, u8 *mac_addr, + u32 max_frame_size); +void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, + bool state); +bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw); +u32 e1000_translate_register_82542(u32 reg); +void e1000_init_script_state_82541(struct e1000_hw *hw, bool state); +bool e1000_get_laa_state_82571(struct e1000_hw *hw); +void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state); +void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); + + +/* + * TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the Rx descriptor with EOP set + * error = the 8 bit error field of the Rx descriptor with EOP set + * length = the sum of all the length fields of the Rx descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = TRUE; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = FALSE; + * } + * ... + */ + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \ + (e1000_tbi_sbp_enabled_82543(a) && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? 
\ + (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= (max_frame_size + 1))) : \ + (((length) > min_frame_size) && \ + ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1))))) + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h new file mode 100644 index 0000000..37f3511 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h @@ -0,0 +1,1397 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_LSCWE 0x00000010 /* Link Status wake up enable */ +#define E1000_WUC_LSCWO 0x00000020 /* Link Status wake up override */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup 
filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC +#define E1000_WUS_ARP E1000_WUFC_ARP +#define E1000_WUS_IPV4 E1000_WUFC_IPV4 +#define E1000_WUS_IPV6 E1000_WUFC_IPV6 +#define E1000_WUS_FLX0 E1000_WUFC_FLX0 +#define E1000_WUS_FLX1 E1000_WUFC_FLX1 +#define E1000_WUS_FLX2 E1000_WUFC_FLX2 +#define E1000_WUS_FLX3 E1000_WUFC_FLX3 +#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +/* Reserved (bits 4,5) in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +/* IAME enable bit (27) was 
removed in >= 82575 */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_REG_ADDR 0x00FF0000 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_PHY_ADDR 0x07000000 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_RESET 0x10000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_INTERRUPT_ENA 0x40000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) +
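E1000_RXD_ERR_FRAME_ERR_MASK above gathers every error bit that should cause a completed frame to be dropped. A sketch of the intended Rx-path test on a legacy descriptor; the function name is hypothetical, and only the E1000_RXD_* bits come from this header:

/*
 * Sketch: accept a legacy Rx descriptor only when it has been written
 * back, closes the frame, and carries no frame errors. Illustrative.
 */
static bool rxd_frame_ok_sketch(u8 status, u8 errors)
{
        if (!(status & E1000_RXD_STAT_DD))
                return false;   /* hardware has not written it back yet */

        if (!(status & E1000_RXD_STAT_EOP))
                return false;   /* not the last descriptor of the frame */

        return (errors & E1000_RXD_ERR_FRAME_ERR_MASK) == 0;
}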
+#define E1000_MRQC_ENABLE_MASK 0x00000007 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RMCP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RMCP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +/* Enable Neighbor Discovery Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 +/* Enable IP address filtering */ +#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1
*/ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* + * Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_CSR_SM 0x8 + +/* FACTPS Definitions */ +#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode.
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_10 0 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_LOW_LINK_LATCH 0x40 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_PCS_LCTL_AN_SGMII_BYPASS 0x80000 +#define E1000_PCS_LCTL_AN_SGMII_TRIGGER 0x100000 +#define E1000_PCS_LCTL_FAST_LINK_TIMER 0x1000000 +#define E1000_PCS_LCTL_LINK_OK_FIX 0x2000000 +#define E1000_PCS_LCTL_CRS_ON_NI 0x4000000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_10 0 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 +#define E1000_PCS_LSTS_AN_PAGE_RX 0x20000 +#define E1000_PCS_LSTS_AN_TIMED_OUT 0x40000 +#define E1000_PCS_LSTS_AN_REMOTE_FAULT 0x80000 +#define E1000_PCS_LSTS_AN_ERROR_RWS 0x100000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66 MHz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define PHY_FORCE_TIME 20 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define
E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_SHIFT 8 /* POPTS shift */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +/* Extended desc bits for Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define 
E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_12K 0x000C /* 12KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB */ +#define E1000_PBA_64K 0x0040 /* 64KB */ + +#define E1000_PBS_16K E1000_PBA_16K +#define E1000_PBS_24K E1000_PBA_24K + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit is asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ +#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + +/* + * This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* + * This defines the bits that are set in the Interrupt Mask + * Set/Read Register.
Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Extended Interrupt Cause Set */ +#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of descriptors still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. 
However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto 
negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* New page received from LP */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel detection fault */ +
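These MII control/status bits are what the auto-negotiation helpers poll. A sketch of the expected wait loop, assuming e1000_read_phy_reg() from e1000_api.h, PHY_AUTO_NEG_LIMIT from the loop limits above, and the PHY_STATUS register offset defined just below; the function name and the 100 ms poll interval are illustrative:

/*
 * Sketch: poll for auto-negotiation completion, giving up after
 * PHY_AUTO_NEG_LIMIT attempts. Illustrative only.
 */
static s32 wait_autoneg_sketch(struct e1000_hw *hw)
{
        u16 i, status;

        for (i = 0; i < PHY_AUTO_NEG_LIMIT; i++) {
                if (e1000_read_phy_reg(hw, PHY_STATUS, &status) != E1000_SUCCESS)
                        return -E1000_ERR_PHY;
                if (status & MII_SR_AUTONEG_COMPLETE)
                        return E1000_SUCCESS;
                msleep(100);    /* negotiation can take seconds */
        }

        return -E1000_ERR_PHY;  /* timed out; the caller decides severity */
}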
+/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */ +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define
E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT 22
+
+#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */
+#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES 2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_VERSION 0x0005
+#define NVM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
+#define NVM_PHY_CLASS_WORD 0x0007
+#define NVM_INIT_CONTROL1_REG 0x000A
+#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
+#define NVM_INIT_3GIO_3 0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A 0x0024
+#define NVM_CFG 0x0012
+#define NVM_FLASH_VERSION 0x0032
+#define NVM_ALT_MAC_ADDR_PTR 0x0037
+#define NVM_CHECKSUM_REG 0x003F
+
+#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK 0x3000
+#define NVM_WORD0F_PAUSE 0x1000
+#define NVM_WORD0F_ASM_DIR 0x2000
+#define NVM_WORD0F_ANE 0x0800
+#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0
+#define NVM_WORD0F_LPLU 0x0001
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK 0x000C
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA.
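+ * The image is generated so that the checksum word at NVM_CHECKSUM_REG
+ * (word 0x3F) holds NVM_SUM minus the 16-bit sum of words 0x00..0x3E;
+ * a verifier can therefore just add up words 0x00..0x3F and compare
+ * the truncated result with NVM_SUM (cf. e1000_eeprom_test() in
+ * e1000_ethtool.c in this patch).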
+ */
+#define NVM_SUM 0xBABA
+
+#define NVM_MAC_ADDR_OFFSET 0
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_RESERVED_WORD 0xFFFF
+#define NVM_PHY_CLASS_A 0x8000
+#define NVM_SERDES_AMPLITUDE_MASK 0x000F
+#define NVM_SIZE_MASK 0x1C00
+#define NVM_SIZE_SHIFT 10
+#define NVM_WORD_SIZE_BASE_SHIFT 6
+#define NVM_SWDPIO_EXT_SHIFT 4
+
+/* NVM Commands - Microwire */
+#define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */
+#define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */
+#define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */
+#define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+#define NVM_WRDI_OPCODE_SPI 0x04 /* NVM reset Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+#define NVM_WRSR_OPCODE_SPI 0x01 /* NVM write Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI 0x01
+#define NVM_STATUS_WEN_SPI 0x02
+#define NVM_STATUS_BP0_SPI 0x04
+#define NVM_STATUS_BP1_SPI 0x08
+#define NVM_STATUS_WPEN_SPI 0x80
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIX_COMMAND_REGISTER 0xE6
+#define PCIX_STATUS_REGISTER_LO 0xE8
+#define PCIX_STATUS_REGISTER_HI 0xEA
+#define PCI_HEADER_TYPE_REGISTER 0x0E
+#define PCIE_LINK_STATUS 0x12
+
+#define PCIX_COMMAND_MMRBC_MASK 0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT 0x2
+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
+#define PCIX_STATUS_HI_MMRBC_4K 0x3
+#define PCIX_STATUS_HI_MMRBC_2K 0x2
+#define PCIX_STATUS_LO_FUNC_MASK 0x7
+#define PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define PCIE_LINK_WIDTH_MASK 0x3F0
+#define PCIE_LINK_WIDTH_SHIFT 4
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
+
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs.
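+ * The 32-bit ID is assembled from the two IEEE ID registers (PHY_ID1
+ * in the upper half, PHY_ID2 in the lower); the low four revision bits
+ * are masked off with PHY_REVISION_MASK above before comparing (see
+ * M88E1011_I_REV_4 below).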
*/ +/* + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define M88_VENDOR 0x0141 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080 +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* + * 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ 
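+/*
+ * Note: the TX_CLK selection in this register reverts to 2.5 MHz
+ * whenever the PHY is reset; e1000_phy_reset_clk_and_crs() in
+ * e1000_ethtool.c re-forces M88E1000_EPSCR_TX_CLK_25 (and re-asserts
+ * CRS on transmit) after each software reset for that reason.
+ */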
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +/* + * 1 = Lost lock detect enabled. + * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* + * Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_SPEC_STATUS \ + GG82563_REG(0, 17) /* PHY Specific Status */ +#define GG82563_PHY_INT_ENABLE \ + GG82563_REG(0, 18) /* Interrupt Enable */ +#define GG82563_PHY_SPEC_STATUS_2 \ + GG82563_REG(0, 19) /* PHY Specific Status 2 */ +#define GG82563_PHY_RX_ERR_CNTR \ + GG82563_REG(0, 21) /* Receive Error Counter */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ +#define GG82563_PHY_TEST_CLK_CTRL \ + GG82563_REG(0, 30) /* Test Clock Control (use reg. 
29 to select) */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL_2 \ + GG82563_REG(2, 26) /* MAC Specific Control 2 */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PORT_RESET \ + GG82563_REG(193, 17) /* Port Reset */ +#define GG82563_PHY_REVISION_ID \ + GG82563_REG(193, 18) /* Revision ID */ +#define GG82563_PHY_DEVICE_ID \ + GG82563_REG(193, 19) /* Device ID */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ +#define GG82563_PHY_RATE_ADAPT_CTRL \ + GG82563_REG(193, 25) /* Rate Adaptation Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \ + GG82563_REG(194, 16) /* FIFO's Control/Status */ +#define GG82563_PHY_KMRN_CTRL \ + GG82563_REG(194, 17) /* Control */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ +#define GG82563_PHY_KMRN_DIAGNOSTIC \ + GG82563_REG(194, 19) /* Diagnostic */ +#define GG82563_PHY_ACK_TIMEOUTS \ + GG82563_REG(194, 20) /* Acknowledge Timeouts */ +#define GG82563_PHY_ADV_ABILITY \ + GG82563_REG(194, 21) /* Advertised Ability */ +#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \ + GG82563_REG(194, 23) /* Link Partner Advertised Ability */ +#define GG82563_PHY_ADV_NEXT_PAGE \ + GG82563_REG(194, 24) /* Advertised Next Page */ +#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \ + GG82563_REG(194, 25) /* Link Partner Advertised Next page */ +#define GG82563_PHY_KMRN_MISC \ + GG82563_REG(194, 26) /* Misc. */ + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c new file mode 100644 index 0000000..a249f17 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c @@ -0,0 +1,2207 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for e1000 */ + +#include <linux/netdevice.h> + +#ifdef SIOCETHTOOL +#include <linux/ethtool.h> + +#include "e1000.h" +#include "e1000_82541.h" +#ifdef NETIF_F_HW_VLAN_TX +#include <linux/if_vlan.h> +#endif + +#ifdef ETHTOOL_OPS_COMPAT +#include "kcompat_ethtool.c" +#endif + +#ifdef ETHTOOL_GSTATS +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \ + offsetof(struct e1000_adapter, m) +static const struct e1000_stats e1000_gstrings_stats[] = { + { "rx_packets", E1000_STAT(stats.gprc) }, + { "tx_packets", E1000_STAT(stats.gptc) }, + { "rx_bytes", E1000_STAT(stats.gorc) }, + { "tx_bytes", E1000_STAT(stats.gotc) }, + { "rx_broadcast", E1000_STAT(stats.bprc) }, + { "tx_broadcast", E1000_STAT(stats.bptc) }, + { "rx_multicast", E1000_STAT(stats.mprc) }, + { "tx_multicast", E1000_STAT(stats.mptc) }, + { "rx_errors", E1000_STAT(net_stats.rx_errors) }, + { "tx_errors", E1000_STAT(net_stats.tx_errors) }, + { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, + { "multicast", E1000_STAT(stats.mprc) }, + { "collisions", E1000_STAT(stats.colc) }, + { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) }, + { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, + { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, + { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, + { "rx_missed_errors", E1000_STAT(stats.mpc) }, + { "tx_aborted_errors", E1000_STAT(stats.ecol) }, + { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, + { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, + { "tx_window_errors", E1000_STAT(stats.latecol) }, + { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, + { "tx_deferred_ok", E1000_STAT(stats.dc) }, + { "tx_single_coll_ok", E1000_STAT(stats.scc) }, + { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, + { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, + { "tx_restart_queue", E1000_STAT(restart_queue) }, + { "rx_long_length_errors", E1000_STAT(stats.roc) }, + { "rx_short_length_errors", E1000_STAT(stats.ruc) }, + { "rx_align_errors", E1000_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorc) }, + { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "rx_header_split", E1000_STAT(rx_hdr_split) }, + { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, + { "tx_smbus", E1000_STAT(stats.mgptc) }, + { "rx_smbus", E1000_STAT(stats.mgprc) }, + { "dropped_smbus", E1000_STAT(stats.mgpdc) }, +}; + +#ifdef CONFIG_E1000_MQ +#define E1000_QUEUE_STATS_LEN \ + ((((((struct e1000_adapter *)netdev->priv)->num_rx_queues > 1) ? 
\ + ((struct e1000_adapter *)netdev->priv)->num_rx_queues : 0 ) + \ + (((((struct e1000_adapter *)netdev->priv)->num_tx_queues > 1) ? \ + ((struct e1000_adapter *)netdev->priv)->num_tx_queues : 0 ))) * \ + (sizeof(struct e1000_queue_stats) / sizeof(u64))) +#else +#define E1000_QUEUE_STATS_LEN 0 +#endif +#define E1000_GLOBAL_STATS_LEN \ + sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) +#endif /* ETHTOOL_GSTATS */ +#ifdef ETHTOOL_TEST +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN +#endif /* ETHTOOL_TEST */ + +static int e1000_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status; + + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->phy.type == e1000_phy_ife) + ecmd->supported &= ~SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + + if (hw->mac.type == e1000_82543) + ecmd->transceiver = XCVR_EXTERNAL; + else + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + + if (hw->mac.type >= e1000_82545) + ecmd->transceiver = XCVR_INTERNAL; + else + ecmd->transceiver = XCVR_EXTERNAL; + } + + status = E1000_READ_REG(&adapter->hw, E1000_STATUS); + + if (status & E1000_STATUS_LU) { + + if ((status & E1000_STATUS_SPEED_1000) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + ecmd->speed = SPEED_100; + else + ecmd->speed = SPEED_10; + + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + return 0; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed */ + if (e1000_check_reset_block(hw)) { + DPRINTK(DRV, ERR, "Cannot change link characteristics " + "when SoL/IDER is active.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) + hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + ecmd->advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.original_type = e1000_fc_default; + } else { + if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.type == e1000_fc_rx_pause) + pause->rx_pause = 1; + else if (hw->fc.type == e1000_fc_tx_pause) + pause->tx_pause = 1; + else if (hw->fc.type == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc.type = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.type = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.type = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.type = e1000_fc_none; + + hw->fc.original_type = hw->fc.type; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.type = e1000_fc_default; + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else { + retval = ((hw->phy.media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return retval; +} + +static u32 e1000_get_rx_csum(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->rx_csum; +} + +static int e1000_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->rx_csum = data; + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + return 0; +} + +static u32 e1000_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_HW_CSUM) != 0; +} + +static int e1000_set_tx_csum(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac.type < e1000_82543) { + if (!data) + return -EINVAL; + return 0; + } + + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +#ifdef NETIF_F_TSO +static int e1000_set_tso(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + struct net_device *v_netdev; + if (!(adapter->flags & E1000_FLAG_HAS_TSO)) + return data ? -EINVAL : 0; + + if (data) { + netdev->features |= NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + if (adapter->flags & E1000_FLAG_HAS_TSO6) + netdev->features |= NETIF_F_TSO6; +#endif + } else { + netdev->features &= ~NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + if (adapter->flags & E1000_FLAG_HAS_TSO6) + netdev->features &= ~NETIF_F_TSO6; +#endif +#ifdef NETIF_F_HW_VLAN_TX + /* disable TSO on all VLANs if they're present */ + if (!adapter->vlgrp) + goto tso_out; + for (i = 0; i < VLAN_N_VID; i++) { + v_netdev = vlan_group_get_device(adapter->vlgrp, i); + if (!v_netdev) + continue; + + v_netdev->features &= ~NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + if (adapter->flags & E1000_FLAG_HAS_TSO6) + v_netdev->features &= ~NETIF_F_TSO6; +#endif + vlan_group_set_device(adapter->vlgrp, i, v_netdev); + } +#endif + } + +tso_out: + DPRINTK(PROBE, INFO, "TSO is %s\n", data ? 
"Enabled" : "Disabled"); + adapter->flags |= E1000_FLAG_TSO_FORCE; + return 0; +} +#endif /* NETIF_F_TSO */ + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL); + regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS); + + regs_buff[2] = E1000_READ_REG(hw, E1000_RCTL); + regs_buff[3] = E1000_READ_REG(hw, E1000_RDLEN(0)); + regs_buff[4] = E1000_READ_REG(hw, E1000_RDH(0)); + regs_buff[5] = E1000_READ_REG(hw, E1000_RDT(0)); + regs_buff[6] = E1000_READ_REG(hw, E1000_RDTR); + + regs_buff[7] = E1000_READ_REG(hw, E1000_TCTL); + regs_buff[8] = E1000_READ_REG(hw, E1000_TDLEN(0)); + regs_buff[9] = E1000_READ_REG(hw, E1000_TDH(0)); + regs_buff[10] = E1000_READ_REG(hw, E1000_TDT(0)); + regs_buff[11] = E1000_READ_REG(hw, E1000_TIDV); + + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy.type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ 
IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac.type >= e1000_82540 && + hw->mac.type < e1000_82571 && + hw->phy.media_type == e1000_media_type_copper) { + regs_buff[26] = E1000_READ_REG(hw, E1000_MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) + ret_val = e1000_read_nvm(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) + if ((ret_val = e1000_read_nvm(hw, first_word + i, 1, + &eeprom_buff[i]))) + break; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, 
eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_nvm(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for 82573 controllers */ + if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || + (hw->mac.type == e1000_82573))) + e1000_update_nvm_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; + u16 eeprom_data; + + strncpy(drvinfo->driver, e1000_driver_name, 32); + strncpy(drvinfo->version, e1000_driver_version, 32); + + /* EEPROM image version # is reported as firmware version # for + * 8257{1|2|3} controllers */ + e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data); + switch (adapter->hw.mac.type) { + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_80003es2lan: + case e1000_ich8lan: + case e1000_ich9lan: + sprintf(firmware_version, "%d.%d-%d", + (eeprom_data & 0xF000) >> 12, + (eeprom_data & 0x0FF0) >> 4, + eeprom_data & 0x000F); + break; + default: + sprintf(firmware_version, "N/A"); + } + + strncpy(drvinfo->fw_version, firmware_version, 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_stats = E1000_STATS_LEN; + drvinfo->testinfo_len = E1000_TEST_LEN; + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + e1000_mac_type mac_type = adapter->hw.mac.type; + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? 
E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + e1000_mac_type mac_type = adapter->hw.mac.type; + struct e1000_tx_ring *tx_ring, *tx_old; + struct e1000_rx_ring *rx_ring, *rx_old; + int i, err, tx_ring_size, rx_ring_size; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; + rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + tx_ring = kzalloc(tx_ring_size, GFP_KERNEL); + if (!tx_ring) + goto err_alloc_tx; + /* use a memcpy to save any previously configured + * items like napi structs from having to be + * reinitialized */ + memcpy(tx_ring, tx_old, tx_ring_size); + + rx_ring = kzalloc(rx_ring_size, GFP_KERNEL); + if (!rx_ring) + goto err_alloc_rx; + memcpy(rx_ring, rx_old, rx_ring_size); + + adapter->tx_ring = tx_ring; + adapter->rx_ring = rx_ring; + + rx_ring->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); + rx_ring->count = min(rx_ring->count,(u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); + + tx_ring->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); + tx_ring->count = min(tx_ring->count,(u32)(mac_type < e1000_82544 ? 
+			E1000_MAX_TXD : E1000_MAX_82544_TXD));
+	tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	/* overwrite the counts with the new values */
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		tx_ring[i].count = tx_ring->count;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rx_ring[i].count = rx_ring->count;
+
+	if (netif_running(adapter->netdev)) {
+		/* Try to get new resources before deleting old */
+		if ((err = e1000_setup_all_rx_resources(adapter)))
+			goto err_setup_rx;
+		if ((err = e1000_setup_all_tx_resources(adapter)))
+			goto err_setup_tx;
+
+		/* restore the old in order to free it,
+		 * then add in the new */
+		adapter->rx_ring = rx_old;
+		adapter->tx_ring = tx_old;
+		e1000_free_all_rx_resources(adapter);
+		e1000_free_all_tx_resources(adapter);
+		kfree(tx_old);
+		kfree(rx_old);
+		adapter->rx_ring = rx_ring;
+		adapter->tx_ring = tx_ring;
+		if ((err = e1000_up(adapter)))
+			goto err_setup;
+	}
+
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return 0;
+err_setup_tx:
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	adapter->rx_ring = rx_old;
+	adapter->tx_ring = tx_old;
+	kfree(rx_ring);
+err_alloc_rx:
+	kfree(tx_ring);
+err_alloc_tx:
+	e1000_up(adapter);
+err_setup:
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return err;
+}
+
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+			     int reg, int offset, u32 mask, u32 write)
+{
+	u32 pat, val;
+	static const u32 test[] =
+		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+	for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
+		E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
+				      (test[pat] & write));
+		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
+		if (val != (test[pat] & write & mask)) {
+			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "
+				"0x%08X expected 0x%08X\n",
+				E1000_REGISTER(&adapter->hw, reg) + offset,
+				val, (test[pat] & write & mask));
+			*data = E1000_REGISTER(&adapter->hw, reg);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
+			      int reg, u32 mask, u32 write)
+{
+	u32 val;
+	E1000_WRITE_REG(&adapter->hw, reg, write & mask);
+	val = E1000_READ_REG(&adapter->hw, reg);
+	if ((write & mask) != (val & mask)) {
+		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "
+			"expected 0x%08X\n", reg, (val & mask), (write & mask));
+		*data = E1000_REGISTER(&adapter->hw, reg);
+		return 1;
+	}
+	return 0;
+}
+#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
+	do { \
+		if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
+			return 1; \
+	} while (0)
+#define REG_PATTERN_TEST(reg, mask, write) \
+	REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+	do { \
+		if (reg_set_and_check(adapter, data, reg, mask, write)) \
+			return 1; \
+	} while (0)
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	u32 value, before, after;
+	u32 i, toggle;
+
+	/* The status register is Read Only, so a write should fail.
+	 * Some bits that get toggled are ignored.
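+	 * The helpers above do the actual probing: reg_pattern_test()
+	 * writes 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000 and 0xFFFFFFFF
+	 * (masked by "write") and verifies each masked read-back, while
+	 * reg_set_and_check() performs a single write/read/compare under
+	 * "mask".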
+ */ + switch (mac->type) { + /* there are several bits on newer hardware that are r/w */ + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + toggle = 0x7FFFF3FF; + break; + case e1000_82573: + case e1000_ich8lan: + case e1000_ich9lan: + toggle = 0x7FFFF033; + break; + default: + toggle = 0xFFFFF833; + break; + } + + before = E1000_READ_REG(&adapter->hw, E1000_STATUS); + value = (E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle); + E1000_WRITE_REG(&adapter->hw, E1000_STATUS, toggle); + after = E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle; + if (value != after) { + DPRINTK(DRV, ERR, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + E1000_WRITE_REG(&adapter->hw, E1000_STATUS, before); + + if ((mac->type != e1000_ich8lan) && + (mac->type != e1000_ich9lan)) { + REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); + } + + REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); + + before = (((mac->type == e1000_ich8lan) || + (mac->type == e1000_ich9lan)) ? 
0x06C3B33E : 0x06DFB3FE); + REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); + + if (mac->type >= e1000_82543) { + + REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); + if ((mac->type != e1000_ich8lan) && + (mac->type != e1000_ich9lan)) + REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); + for (i = 0; i < mac->rar_entry_count; i++) { + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), + 0x8003FFFF, 0xFFFFFFFF); + } + + } else { + + REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFF000, 0xFFFFFFFF); + + } + + for (i = 0; i < mac->mta_reg_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16) NVM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->test_icr |= E1000_READ_REG(&adapter->hw, E1000_ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i=0, shared_int = TRUE; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet */ + /* Hook up test interrupt handler just for this test */ + if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = FALSE; + else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + DPRINTK(HW, INFO, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + + if (((adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) && i == 8) + continue; + + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + E1000_WRITE_REG(&adapter->hw, E1000_IMC, mask); + E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. 
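+		 * (E1000_IMS unmasks the interrupt source; writing the same
+		 * bit to E1000_ICS sets its cause bit, which should raise
+		 * the interrupt at once and latch it into adapter->test_icr
+		 * through the test handler installed above.)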
+ */ + adapter->test_icr = 0; + E1000_WRITE_REG(&adapter->hw, E1000_IMS, mask); + E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + E1000_WRITE_REG(&adapter->hw, E1000_IMC, + ~mask & 0x00007FFF); + E1000_WRITE_REG(&adapter->hw, E1000_ICS, + ~mask & 0x00007FFF); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (tx_ring->desc && tx_ring->buffer_info) { + for (i = 0; i < tx_ring->count; i++) { + if (tx_ring->buffer_info[i].dma) + pci_unmap_single(pdev, tx_ring->buffer_info[i].dma, + tx_ring->buffer_info[i].length, + PCI_DMA_TODEVICE); + if (tx_ring->buffer_info[i].skb) + dev_kfree_skb(tx_ring->buffer_info[i].skb); + } + } + + if (rx_ring->desc && rx_ring->buffer_info) { + for (i = 0; i < rx_ring->count; i++) { + if (rx_ring->buffer_info[i].dma) + pci_unmap_single(pdev, rx_ring->buffer_info[i].dma, + E1000_RXBUFFER_2048, + PCI_DMA_FROMDEVICE); + if (rx_ring->buffer_info[i].skb) + dev_kfree_skb(rx_ring->buffer_info[i].skb); + } + } + + if (tx_ring->desc) { + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; + } + if (rx_ring->desc) { + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + + kfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + kfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + return; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!tx_ring->count) + tx_ring->count = E1000_DEFAULT_TXD; + + if (!(tx_ring->buffer_info = kcalloc(tx_ring->count, + sizeof(struct e1000_buffer), + GFP_KERNEL))) { + ret_val = 1; + goto err_nomem; + } + + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + if (!(tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_ATOMIC))) { + ret_val = 2; + goto err_nomem; + } + tx_ring->next_to_use = tx_ring->next_to_clean = 0; + + E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0), + ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); + E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), ((u64) tx_ring->dma >> 32)); + E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0), + tx_ring->count * sizeof(struct e1000_tx_desc)); + E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0); + E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0); + E1000_WRITE_REG(&adapter->hw, E1000_TCTL, + E1000_TCTL_MULR | + E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < 
tx_ring->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct sk_buff *skb; + unsigned int size = 1024; + + if (!(skb = alloc_skb(size, GFP_KERNEL))) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].length = skb->len; + tx_ring->buffer_info[i].dma = + pci_map_single(pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS); + if (adapter->hw.mac.type < e1000_82543) + tx_desc->lower.data |= E1000_TXD_CMD_RPS; + else + tx_desc->lower.data |= E1000_TXD_CMD_RS; + + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rx_ring->count) + rx_ring->count = E1000_DEFAULT_RXD; + + if (!(rx_ring->buffer_info = kcalloc(rx_ring->count, + sizeof(struct e1000_rx_buffer), + GFP_KERNEL))) { + ret_val = 4; + goto err_nomem; + } + + rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); + if (!(rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_ATOMIC))) { + ret_val = 5; + goto err_nomem; + } + rx_ring->next_to_use = rx_ring->next_to_clean = 0; + + rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); + E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0), + ((u64) rx_ring->dma & 0xFFFFFFFF)); + E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0), ((u64) rx_ring->dma >> 32)); + E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0), rx_ring->size); + E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0); + E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); + + for (i = 0; i < rx_ring->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct sk_buff *skb; + + if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, + GFP_KERNEL))) { + ret_val = 6; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rx_ring->buffer_info[i].skb = skb; + rx_ring->buffer_info[i].dma = + pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048, + PCI_DMA_FROMDEVICE); + rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(&adapter->hw, 29, 0x001F); + e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC); + e1000_write_phy_reg(&adapter->hw, 29, 0x001A); + e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. + */ + e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(&adapter->hw, + M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. 
This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(&adapter->hw, + M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_commit(&adapter->hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(&adapter->hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + adapter->hw.mac.autoneg = FALSE; + + if (adapter->hw.phy.type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(&adapter->hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x8140); + } else if (adapter->hw.phy.type == e1000_phy_gg82563) + e1000_write_phy_reg(&adapter->hw, + GG82563_PHY_KMRN_MODE_CTRL, + 0x1CC); + + ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL); + + if (adapter->hw.phy.type == e1000_phy_ife) { + /* force 100, set loopback */ + e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x6100); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
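+		 * With the PHY forced into loopback there is no negotiated
+		 * result to pick up, so FRCSPD/FRCDPX make the MAC take its
+		 * speed and duplex from CTRL rather than from the PHY.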
*/ + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_100 |/* Force Speed to 100 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + } else { + /* force 1000, set loopback */ + e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + } + + if (adapter->hw.phy.media_type == e1000_media_type_copper && + adapter->hw.phy.type == e1000_phy_m88) { + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + } else { + /* Set the ILOS bit on the fiber Nic if half duplex link is + * detected. */ + stat_reg = E1000_READ_REG(&adapter->hw, E1000_STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (adapter->hw.phy.type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + int link = 0; + + /* special requirements for 82571/82572 fiber adapters */ + + /* jump through hoops to make sure link is up because serdes + * link is hardwired up */ + ctrl |= E1000_CTRL_SLU; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* disable autoneg */ + ctrl = E1000_READ_REG(hw, E1000_TXCW); + ctrl &= ~(1 << 31); + E1000_WRITE_REG(hw, E1000_TXCW, ctrl); + + link = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); + + if (!link) { + /* set invert loss of signal */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_ILOS; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + } + + /* special write to serdes control register to enable SerDes analog + * loopback */ +#define E1000_SERDES_LB_ON 0x410 + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SERDES_LB_ON); + msleep(10); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + u16 phy_reg = 0; + u16 count = 0; + + switch (adapter->hw.mac.type) { + case e1000_82543: + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_80003es2lan: + case e1000_ich8lan: + case e1000_ich9lan: + return e1000_integrated_phy_loopback(adapter); + break; + + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
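+		 * Bit 14 is MII_CR_LOOPBACK in the PHY definitions earlier
+		 * in this patch, so no device-specific setup is needed for
+		 * these parts.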
+ */ + e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_reg); + return 0; + break; + } + + return 8; +} + +/* only call this for fiber/serdes connections to es2lan */ +static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrlext = E1000_READ_REG(hw, E1000_CTRL_EXT); + u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* save CTRL_EXT to restore later, reuse an empty variable (unused + on mac_type 80003es2lan) */ + adapter->tx_fifo_head = ctrlext; + + /* clear the serdes mode bits, putting the device into mac loopback */ + ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrlext); + + /* force speed to 1000/FD, link up */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | + E1000_CTRL_SPD_1000 | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* set mac loopback */ + ctrl = E1000_READ_REG(hw, E1000_RCTL); + ctrl |= E1000_RCTL_LBM_MAC; + E1000_WRITE_REG(hw, E1000_RCTL, ctrl); + + /* set testing mode parameters (no need to reset later) */ +#define KMRNCTRLSTA_OPMODE (0x1F << 16) +#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, + (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); + + return 0; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + switch (hw->mac.type) { + case e1000_80003es2lan: + return e1000_set_es2lan_mac_loopback(adapter); + break; + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + break; + case e1000_82571: + case e1000_82572: + return e1000_set_82571_fiber_loopback(adapter); + break; + default: + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + return 0; + } + } else if (hw->phy.media_type == e1000_media_type_copper) + return e1000_set_phy_loopback(adapter); + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + switch (hw->mac.type) { + case e1000_80003es2lan: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + /* restore CTRL_EXT, stealing space from tx_fifo_head */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, adapter->tx_fifo_head); + adapter->tx_fifo_head = 0; + } + fallthrough; + case e1000_82571: + case e1000_82572: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { +#define E1000_SERDES_LB_OFF 0x400 + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SERDES_LB_OFF); + msleep(10); + break; + } + fallthrough; + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->mac.autoneg = TRUE; + if (hw->phy.type == e1000_phy_gg82563) + e1000_write_phy_reg(hw, + GG82563_PHY_KMRN_MODE_CTRL, + 0x180); + e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + 
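+			/*
+			 * The write-back and commit below mirror the setup
+			 * paths: per the comment in
+			 * e1000_nonintegrated_phy_loopback(), e1000_phy_commit()
+			 * performs a software reset, so the cleared loopback
+			 * bit is actually latched by the PHY.
+			 */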
e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg); + e1000_phy_commit(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) { + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val=0; + unsigned long time; + + E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rx_ring->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, + 1024); + pci_dma_sync_single_for_device(pdev, + tx_ring->buffer_info[k].dma, + tx_ring->buffer_info[k].length, + PCI_DMA_TODEVICE); + if (unlikely(++k == tx_ring->count)) k = 0; + } + E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), k); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + pci_dma_sync_single_for_cpu(pdev, + rx_ring->buffer_info[l].dma, + E1000_RXBUFFER_2048, + PCI_DMA_FROMDEVICE); + + ret_val = e1000_check_lbtest_frame( + rx_ring->buffer_info[l].skb, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rx_ring->count)) l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while (good_cnt < 64 && jiffies < (time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (jiffies >= (time + 20)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + /* PHY loopback cannot be performed if SoL/IDER + * sessions are active */ + if (e1000_check_reset_block(&adapter->hw)) { + DPRINTK(DRV, ERR, "Cannot do PHY loopback test " + "when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + if ((*data = e1000_setup_desc_rings(adapter))) + goto out; + if ((*data = e1000_setup_loopback_test(adapter))) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = 0; + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + adapter->hw.mac.serdes_has_link = FALSE; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes */ + do { + 
e1000_check_for_link(&adapter->hw); + if (adapter->hw.mac.serdes_has_link == TRUE) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(&adapter->hw); + if (adapter->hw.mac.autoneg) + msleep(4000); + + if (!(E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) { + *data = 1; + } + } + return *data; +} + +static int e1000_diag_test_count(struct net_device *netdev) +{ + return E1000_TEST_LEN; +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex, autoneg; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + DPRINTK(HW, INFO, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + e1000_power_up_phy(&adapter->hw); + e1000_setup_link(&adapter->hw); + } + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = TRUE; + e1000_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = FALSE; + + clear_bit(__E1000_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + DPRINTK(HW, INFO, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + case 
E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_COPPER: + /* Wake events not supported on port B */ + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!(adapter->flags & E1000_FLAG_QUAD_PORT_A)) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled */ + if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware */ + if (e1000_wol_exclusion(adapter, wol)) + return; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + DPRINTK(DRV, ERR, "Interface does not support " + "directed (unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + + return; +} + +static int e1000_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + DPRINTK(DRV, ERR, "Interface does not support " + "directed (unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + return 0; +} + +/* toggle LED 4 times per second = 2 "blinks" per second */ +#define E1000_ID_INTERVAL (HZ/4) + +/* bit defines for adapter->led_status */ +#define E1000_LED_ON 0 + +static void e1000_led_blink_callback(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) + e1000_led_off(&adapter->hw); + else + e1000_led_on(&adapter->hw); + + mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); +} + +static int e1000_phys_id(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!data) + data = INT_MAX; + + if (adapter->hw.mac.type < e1000_82571) { + if (!adapter->blink_timer.function) { + init_timer(&adapter->blink_timer); + adapter->blink_timer.function = e1000_led_blink_callback; + adapter->blink_timer.data = (unsigned long) adapter; + } + e1000_setup_led(&adapter->hw); + mod_timer(&adapter->blink_timer, jiffies); + msleep_interruptible(data * 1000); + del_timer_sync(&adapter->blink_timer); + } else if (adapter->hw.phy.type == e1000_phy_ife) { + if (!adapter->blink_timer.function) { + init_timer(&adapter->blink_timer); + adapter->blink_timer.function = e1000_led_blink_callback; + adapter->blink_timer.data = (unsigned long) adapter; + } + mod_timer(&adapter->blink_timer, jiffies); + msleep_interruptible(data * 1000); + del_timer_sync(&adapter->blink_timer); + e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0); + } else { + e1000_blink_led(&adapter->hw); + msleep_interruptible(data * 1000); + } + + e1000_led_off(&adapter->hw); + clear_bit(E1000_LED_ON, &adapter->led_status); + e1000_cleanup_led(&adapter->hw); + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (!(adapter->flags & E1000_FLAG_HAS_INTR_MODERATION)) + return -ENOTSUPP; + + if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + E1000_WRITE_REG(&adapter->hw, E1000_ITR, + 1000000000 / (adapter->itr * 256)); + else + E1000_WRITE_REG(&adapter->hw, E1000_ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct 
net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static int e1000_get_stats_count(struct net_device *netdev) +{ + return E1000_STATS_LEN; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_E1000_MQ + u64 *queue_stat; + int stat_count = sizeof(struct e1000_queue_stats) / sizeof(u64); + int j, k; +#endif + int i; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +#ifdef CONFIG_E1000_MQ + if (adapter->num_tx_queues > 1) { + for (j = 0; j < adapter->num_tx_queues; j++) { + queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + i += k; + } + } + if (adapter->num_rx_queues > 1) { + for (j = 0; j < adapter->num_rx_queues; j++) { + queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + i += k; + } + } +#endif +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ +#ifdef CONFIG_E1000_MQ + struct e1000_adapter *adapter = netdev_priv(netdev); +#endif + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e1000_gstrings_test, + E1000_TEST_LEN*ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } +#ifdef CONFIG_E1000_MQ + if (adapter->num_tx_queues > 1) { + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + } + if (adapter->num_rx_queues > 1) { + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + } +#endif +/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .get_rx_csum = e1000_get_rx_csum, + .set_rx_csum = e1000_set_rx_csum, + .get_tx_csum = e1000_get_tx_csum, + .set_tx_csum = e1000_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = e1000_set_tso, +#endif + .self_test_count = e1000_diag_test_count, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .phys_id = e1000_phys_id, + .get_stats_count = e1000_get_stats_count, + 
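+	/*
+	 * .self_test_count and .get_stats_count are the older ethtool
+	 * string-count hooks; later kernels fold both into a single
+	 * .get_sset_count() callback.
+	 */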
.get_ethtool_stats = e1000_get_ethtool_stats, +#ifdef ETHTOOL_GPERMADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); +} +#endif /* SIOCETHTOOL */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h new file mode 100644 index 0000000..9a94200 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h @@ -0,0 +1,711 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82547GI 0x1075 +#define 
E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_IGP_C 0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 + +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 + +typedef enum { + e1000_undefined = 0, + e1000_82542, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_82571, + e1000_82572, + e1000_82573, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +} e1000_mac_type; + +typedef enum { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_eeprom_microwire, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +} e1000_nvm_type; + +typedef enum { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, + e1000_nvm_override_microwire_small, + e1000_nvm_override_microwire_large +} e1000_nvm_override; + +typedef enum { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, +} e1000_phy_type; + +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +} e1000_bus_type; + +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +} e1000_bus_speed; + +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +} e1000_fc_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +/* Receive Descriptor */ +struct e1000_rx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + u16 length; /* Length of data DMAed into data buffer */ + u16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + u16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + u64 buffer_addr; + u64 reserved; + } read; + struct { + struct { + u32 mrq; /* Multiple Rx Queues */ + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + u64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + u32 mrq; /* Multiple Rx Queues */ + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length0; /* length of buffer 0 */ + u16 vlan; /* VLAN tag */ + } middle; + struct { + u16 
header_status; + u16 length[3]; /* length of buffers 1-3 */ + } upper; + u64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + u16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + u32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + u16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + u32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + u16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + u32 cmd_and_length; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + u16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + u64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + u16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + 
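+/*
+ * Layout note: the legacy receive and transmit descriptors defined
+ * earlier in this header are 16 bytes each, as the hardware DMA engine
+ * expects: e1000_rx_desc is 8 (buffer_addr) + 2 (length) + 2 (csum) +
+ * 1 (status) + 1 (errors) + 2 (special) = 16, and e1000_tx_desc is
+ * 8 + 4 (lower) + 4 (upper) = 16. A driver .c file could assert this
+ * at build time, e.g. (hypothetical check, not part of this header):
+ *
+ *	BUILD_BUG_ON(sizeof(struct e1000_rx_desc) != 16);
+ *	BUILD_BUG_ON(sizeof(struct e1000_tx_desc) != 16);
+ */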
+#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_manage.h" + +struct e1000_functions { + /* Function pointers for the MAC. */ + s32 (*init_mac_params)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *hw); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, + u32); + void (*remove_device)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*mta_set)(struct e1000_hw *, u32); + void (*config_collision_dist)(struct e1000_hw*); + void (*rar_set)(struct e1000_hw*, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw*); + s32 (*validate_mdi_setting)(struct e1000_hw*); + s32 (*mng_host_if_write)(struct e1000_hw*, u8*, u16, u16, u8*); + s32 (*mng_write_cmd_header)(struct e1000_hw *hw, + struct e1000_host_mng_command_header*); + s32 (*mng_enable_host_if)(struct e1000_hw*); + s32 (*wait_autoneg)(struct e1000_hw*); + + /* Function pointers for the PHY. */ + s32 (*init_phy_params)(struct e1000_hw *); + s32 (*acquire_phy)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit_phy)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); + void (*release_phy)(struct e1000_hw *); + s32 (*reset_phy)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); + void (*power_up_phy)(struct e1000_hw *); + void (*power_down_phy)(struct e1000_hw *); + + /* Function pointers for the NVM. 
*/ + s32 (*init_nvm_params)(struct e1000_hw *); + s32 (*acquire_nvm)(struct e1000_hw *); + s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); + void (*release_nvm)(struct e1000_hw *); + void (*reload_nvm)(struct e1000_hw *); + s32 (*update_nvm)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate_nvm)(struct e1000_hw *); + s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + u8 addr[6]; + u8 perm_addr[6]; + + e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_av; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; +}; + +struct e1000_phy_info { + e1000_phy_type type; + + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; + e1000_ms_type ms_type; + e1000_ms_type original_ms_type; + e1000_rev_polarity cable_polarity; + e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + e1000_nvm_type type; + e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + e1000_bus_type type; + e1000_bus_speed speed; + e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + e1000_fc_type type; /* Type of flow control */ + e1000_fc_type original_type; +}; + +struct e1000_hw { + void *back; + void *dev_spec; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_functions func; + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + u32 dev_spec_size; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* These functions must be implemented by drivers */ +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +s32 e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size); +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_free_dev_spec_struct(struct e1000_hw *hw); +void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); + +#endif diff --git 
a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c new file mode 100644 index 0000000..c341584 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c @@ -0,0 +1,2582 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_ich8lan + * e1000_ich9lan + */ + +#include "e1000_api.h" +#include "e1000_ich8lan.h" + +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw); +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw); +static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw); +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw); +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw); +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw); +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw); +static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw); +static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, + bool active); +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, + bool active); +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw); +static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, + u16 *data); +static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw); +static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw); +static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw); +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +static s32 
e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
+static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
+static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 size, u16* data);
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
+					 u32 offset, u16 *data);
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+						u32 offset, u8 byte);
+static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
+					  u32 offset, u8 data);
+static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					  u8 size, u16 data);
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+	struct ich8_hsfsts {
+		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
+		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
+		u16 dael       :1; /* bit 2 Direct Access error Log */
+		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
+		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
+		u16 reserved1  :2; /* bit 7:6 Reserved */
+		u16 reserved2  :6; /* bit 13:8 Reserved */
+		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
+	} hsf_status;
+	u16 regval;
+};
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+	struct ich8_hsflctl {
+		u16 flcgo      :1; /* 0 Flash Cycle Go */
+		u16 flcycle    :2; /* 2:1 Flash Cycle */
+		u16 reserved   :5; /* 7:3 Reserved */
+		u16 fldbcount  :2; /* 9:8 Flash Data Byte Count */
+		u16 flockdn    :6; /* 15:10 Reserved */
+	} hsf_ctrl;
+	u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+	struct ich8_flracc {
+		u32 grra  :8; /* 0:7 GbE region Read Access */
+		u32 grwa  :8; /* 8:15 GbE region Write Access */
+		u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
+		u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
+	} hsf_flregacc;
+	u16 regval;
+};
+
+struct e1000_shadow_ram {
+	u16 value;
+	bool modified;
+};
+
+struct e1000_dev_spec_ich8lan {
+	bool kmrn_lock_loss_workaround_enabled;
+	struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
+};
+
+/**
+ * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize family-specific PHY parameters and function pointers.
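+ *
+ * Note on the identification loop below: e1000_get_phy_id() is
+ * retried with a 1 ms delay, up to 100 times (roughly 100 ms in
+ * total), since the PHY may not answer immediately after reset.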
+ **/ +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + u16 i = 0; + + DEBUGFUNC("e1000_init_phy_params_ich8lan"); + + phy->addr = 1; + phy->reset_delay_us = 100; + + func->acquire_phy = e1000_acquire_swflag_ich8lan; + func->check_polarity = e1000_check_polarity_ife_ich8lan; + func->check_reset_block = e1000_check_reset_block_ich8lan; + func->force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan; + func->get_cable_length = e1000_get_cable_length_igp_2; + func->get_cfg_done = e1000_get_cfg_done_ich8lan; + func->get_phy_info = e1000_get_phy_info_ich8lan; + func->read_phy_reg = e1000_read_phy_reg_igp; + func->release_phy = e1000_release_swflag_ich8lan; + func->reset_phy = e1000_phy_hw_reset_ich8lan; + func->set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan; + func->set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan; + func->write_phy_reg = e1000_write_phy_reg_igp; + func->power_up_phy = e1000_power_up_phy_copper; + func->power_down_phy = e1000_power_down_phy_copper_ich8lan; + + + phy->id = 0; + while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + msec_delay(1); + ret_val = e1000_get_phy_id(hw); + if (ret_val) + goto out; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. + **/ +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_functions *func = &hw->func; + struct e1000_dev_spec_ich8lan *dev_spec; + u32 gfpreg, sector_base_addr, sector_end_addr; + s32 ret_val = E1000_SUCCESS; + u16 i; + + DEBUGFUNC("e1000_init_nvm_params_ich8lan"); + + /* Can't read flash registers if the register set isn't mapped. */ + if (!hw->flash_address) { + DEBUGOUT("ERROR: Flash registers not mapped\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + nvm->type = e1000_nvm_flash_sw; + + gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG); + + /* + * sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + + /* + * find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. 
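+	 *
+	 * Worked example with made-up register contents: if gfpreg
+	 * yielded sector_base_addr = 0 and sector_end_addr = 16, the
+	 * region would span 16 sectors of 4096 bytes (64 KiB);
+	 * halving for the two banks leaves 32 KiB, and dividing by
+	 * sizeof(u16) gives 16384 words per bank.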
+ */ + nvm->flash_bank_size = (sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT; + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + + nvm->word_size = E1000_SHADOW_RAM_WORDS; + + dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec; + + if (!dev_spec) { + DEBUGOUT("dev_spec pointer is set to NULL.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = FALSE; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + /* Function Pointers */ + func->acquire_nvm = e1000_acquire_swflag_ich8lan; + func->read_nvm = e1000_read_nvm_ich8lan; + func->release_nvm = e1000_release_swflag_ich8lan; + func->update_nvm = e1000_update_nvm_checksum_ich8lan; + func->valid_led_default = e1000_valid_led_default_ich8lan; + func->validate_nvm = e1000_validate_nvm_checksum_ich8lan; + func->write_nvm = e1000_write_nvm_ich8lan; + +out: + return ret_val; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. + **/ +static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_init_mac_params_ich8lan"); + + /* Set media type function pointer */ + hw->phy.media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = TRUE; + /* Set if manageability features are enabled. 
*/ + mac->arc_subsystem_valid = TRUE; + + /* Function pointers */ + + /* bus type/speed/width */ + func->get_bus_info = e1000_get_bus_info_ich8lan; + /* reset */ + func->reset_hw = e1000_reset_hw_ich8lan; + /* hw initialization */ + func->init_hw = e1000_init_hw_ich8lan; + /* link setup */ + func->setup_link = e1000_setup_link_ich8lan; + /* physical interface setup */ + func->setup_physical_interface = e1000_setup_copper_link_ich8lan; + /* check for link */ + func->check_for_link = e1000_check_for_copper_link_generic; + /* check management mode */ + func->check_mng_mode = e1000_check_mng_mode_ich8lan; + /* link info */ + func->get_link_up_info = e1000_get_link_up_info_ich8lan; + /* multicast address update */ + func->update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* setting MTA */ + func->mta_set = e1000_mta_set_generic; + /* blink LED */ + func->blink_led = e1000_blink_led_generic; + /* setup LED */ + func->setup_led = e1000_setup_led_generic; + /* cleanup LED */ + func->cleanup_led = e1000_cleanup_led_ich8lan; + /* turn on/off LED */ + func->led_on = e1000_led_on_ich8lan; + func->led_off = e1000_led_off_ich8lan; + /* remove device */ + func->remove_device = e1000_remove_device_generic; + /* clear hardware counters */ + func->clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan; + + hw->dev_spec_size = sizeof(struct e1000_dev_spec_ich8lan); + + /* Device-specific structure allocation */ + ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size); + if (ret_val) + goto out; + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE); + + +out: + return ret_val; +} + +/** + * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific function pointers for PHY, MAC, and NVM. + **/ +void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_ich8lan"); + + hw->func.init_mac_params = e1000_init_mac_params_ich8lan; + hw->func.init_nvm_params = e1000_init_nvm_params_ich8lan; + hw->func.init_phy_params = e1000_init_phy_params_ich8lan; +} + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing NVM and PHY + * operations. This is a function pointer entry point only called by + * read/write routines for the PHY and NVM parts. + **/ +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_acquire_swflag_ich8lan"); + + while (timeout) { + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + msec_delay_irq(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT("FW or HW has locked the resource for too long.\n"); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing NVM and PHY operations. 
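+ *
+ * The flag is the E1000_EXTCNF_CTRL_SWFLAG bit taken by the acquire
+ * routine above; the intended pairing around any NVM or PHY access
+ * looks like this (illustrative sketch, inferred from the function
+ * pointers wired up in the init routines):
+ *
+ *	if (e1000_acquire_swflag_ich8lan(hw) == E1000_SUCCESS) {
+ *		... NVM or PHY register access ...
+ *		e1000_release_swflag_ich8lan(hw);
+ *	}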
+ * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + DEBUGFUNC("e1000_release_swflag_ich8lan"); + + extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); + + return; +} + +/** + * e1000_check_mng_mode_ich8lan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + DEBUGFUNC("e1000_check_mng_mode_ich8lan"); + + fwsm = E1000_READ_REG(hw, E1000_FWSM); + + return ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + DEBUGFUNC("e1000_check_reset_block_ich8lan"); + + fwsm = E1000_READ_REG(hw, E1000_FWSM); + + return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS + : E1000_BLK_PHY_RESET; +} + +/** + * e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_ich8lan"); + + if (phy->type != e1000_phy_ife) { + ret_val = e1000_phy_force_speed_duplex_igp(hw); + goto out; + } + + ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &data); + + ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, data); + if (ret_val) + goto out; + + /* Disable MDI-X support for 10/100 */ + ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + goto out; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + goto out; + + DEBUGOUT1("IFE PMC: %X\n", data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) { + DEBUGOUT("Link taking longer than expected.\n"); + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY + * This is a function pointer entry point called by drivers + * or other shared routines. 
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+	s32 ret_val;
+	u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
+
+	ret_val = e1000_phy_hw_reset_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Initialize the PHY from the NVM on ICH platforms.  This
+	 * is needed due to an issue where the NVM configuration is
+	 * not properly autoloaded after power transitions.
+	 * Therefore, after each PHY reset, we will load the
+	 * configuration data out of the NVM manually.
+	 */
+	if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
+		/* Check if SW needs to configure the PHY */
+		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_M))
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+		else
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+
+		data = E1000_READ_REG(hw, E1000_FEXTNVM);
+		if (!(data & sw_cfg_mask))
+			goto out;
+
+		/* Wait for basic configuration to complete before proceeding */
+		do {
+			data = E1000_READ_REG(hw, E1000_STATUS);
+			data &= E1000_STATUS_LAN_INIT_DONE;
+			usec_delay(100);
+		} while ((!data) && --loop);
+
+		/*
+		 * If basic configuration is incomplete before the above loop
+		 * count reaches 0, loading the configuration from NVM will
+		 * leave the PHY in a bad state possibly resulting in no link.
+		 */
+		if (loop == 0) {
+			DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
+		}
+
+		/* Clear the Init Done bit for the next init event */
+		data = E1000_READ_REG(hw, E1000_STATUS);
+		data &= ~E1000_STATUS_LAN_INIT_DONE;
+		E1000_WRITE_REG(hw, E1000_STATUS, data);
+
+		/*
+		 * Make sure HW does not configure LCD from PHY
+		 * extended configuration before SW configuration
+		 */
+		data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+			goto out;
+
+		cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
+		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+		if (!cnf_size)
+			goto out;
+
+		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+		/*
+		 * Configure LCD from extended configuration
+		 * region.
+		 */
+
+		/* cnf_base_addr is in DWORD */
+		word_addr = (u16)(cnf_base_addr << 1);
+
+		for (i = 0; i < cnf_size; i++) {
+			ret_val = e1000_read_nvm(hw,
+						 (word_addr + i * 2),
+						 1,
+						 &reg_data);
+			if (ret_val)
+				goto out;
+
+			ret_val = e1000_read_nvm(hw,
+						 (word_addr + i * 2 + 1),
+						 1,
+						 &reg_addr);
+			if (ret_val)
+				goto out;
+
+			/* Save off the PHY page for future writes. */
+			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+				phy_page = reg_data;
+				continue;
+			}
+
+			reg_addr |= phy_page;
+
+			ret_val = e1000_write_phy_reg(hw,
+						      (u32)reg_addr,
+						      reg_data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info
+ * @hw: pointer to the HW structure
+ *
+ * Wrapper for calling the get_phy_info routines for the appropriate phy type.
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = -E1000_ERR_PHY_TYPE;
+
+	DEBUGFUNC("e1000_get_phy_info_ich8lan");
+
+	switch (hw->phy.type) {
+	case e1000_phy_ife:
+		ret_val = e1000_get_phy_info_ife_ich8lan(hw);
+		break;
+	case e1000_phy_igp_3:
+		ret_val = e1000_get_phy_info_igp(hw);
+		break;
+	default:
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates the "phy" structure with various feature states.
+ * This function is only called by other family-specific
+ * routines.
+ **/
+static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_ife_ich8lan");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+	if (ret_val)
+		goto out;
+	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+				   ? FALSE : TRUE;
+
+	if (phy->polarity_correction) {
+		ret_val = e1000_check_polarity_ife_ich8lan(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* Polarity is forced */
+		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE;
+
+	/* The following parameters are undefined for 10/100 operation. */
+	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+	phy->local_rx = e1000_1000t_rx_status_undefined;
+	phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined based on whether the polarity reversal feature
+ * is enabled.  This function is only called by other family-specific
+ * routines.
+ **/
+static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	DEBUGFUNC("e1000_check_polarity_ife_ich8lan");
+
+	/*
+	 * Polarity is determined based on the reversal feature
+	 * being enabled.
+	 */
+	if (phy->polarity_correction) {
+		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+		mask = IFE_PESC_POLARITY_REVERSED;
+	} else {
+		offset = IFE_PHY_SPECIAL_CONTROL;
+		mask = IFE_PSC_FORCE_POLARITY;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->cable_polarity = (phy_data & mask)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: TRUE to enable LPLU, FALSE to disable
+ *
+ * Sets the LPLU D0 state according to the active flag.  When
+ * activating LPLU this function also disables smart speed
+ * and vice versa.  LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
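+ *
+ * Illustrative call sketch (not part of the original flow; assumes an
+ * initialized @hw):
+ *
+ *	ret_val = e1000_set_d0_lplu_state_ich8lan(hw, TRUE);  // LPLU on, SmartSpeed off
+ *	// later, when full performance is needed again:
+ *	ret_val = e1000_set_d0_lplu_state_ich8lan(hw, FALSE); // LPLU off, SmartSpeed per phy->smart_speed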
+ **/ +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, + bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan"); + + if (phy->type == e1000_phy_ife) + goto out; + + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + /* + * Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3)) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, + bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan"); + + phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); + + /* + * Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3)) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 + * @hw: pointer to the HW structure + * @bank: pointer to the variable that returns the active bank + * + * Reads signature byte from the NVM using the flash access registers. + **/ +static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) +{ + s32 ret_val = E1000_SUCCESS; + if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_SEC1VAL) + *bank = 1; + else + *bank = 0; + + return ret_val; +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. + **/ +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec; + u32 act_offset; + s32 ret_val = E1000_SUCCESS; + u32 bank = 0; + u16 i, word; + + DEBUGFUNC("e1000_read_nvm_ich8lan"); + + dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec; + + if (!dev_spec) { + DEBUGOUT("dev_spec pointer is set to NULL.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = e1000_acquire_nvm(hw); + if (ret_val) + goto out; + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val != E1000_SUCCESS) + goto out; + + act_offset = (bank) ? 
nvm->flash_bank_size : 0;
+	act_offset += offset;
+
+	for (i = 0; i < words; i++) {
+		if ((dev_spec->shadow_ram) &&
+		    (dev_spec->shadow_ram[offset+i].modified)) {
+			data[i] = dev_spec->shadow_ram[offset+i].value;
+		} else {
+			ret_val = e1000_read_flash_word_ich8lan(hw,
+								act_offset + i,
+								&word);
+			if (ret_val)
+				break;
+			data[i] = word;
+		}
+	}
+
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_flash_cycle_init_ich8lan - Initialize flash
+ * @hw: pointer to the HW structure
+ *
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ **/
+static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+{
+	union ich8_hws_flash_status hsfsts;
+	s32 ret_val = -E1000_ERR_NVM;
+	s32 i = 0;
+
+	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
+
+	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+	/* Check if the flash descriptor is valid */
+	if (hsfsts.hsf_status.fldesvalid == 0) {
+		DEBUGOUT("Flash descriptor invalid.  "
+			 "SW Sequencing must be used.");
+		goto out;
+	}
+
+	/* Clear FCERR and DAEL in hw status by writing 1 */
+	hsfsts.hsf_status.flcerr = 1;
+	hsfsts.hsf_status.dael = 1;
+
+	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	/*
+	 * Either we should have a hardware SPI cycle-in-progress bit
+	 * to check against in order to start a new cycle, or the FDONE
+	 * bit should be changed in the hardware so that it is 1 after
+	 * a hardware reset, which can then be used as an indication
+	 * whether a cycle is in progress or has been completed.
+	 */
+
+	if (hsfsts.hsf_status.flcinprog == 0) {
+		/*
+		 * There is no cycle running at present,
+		 * so we can start a cycle.
+		 * Begin by setting Flash Cycle Done.
+		 */
+		hsfsts.hsf_status.flcdone = 1;
+		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+		ret_val = E1000_SUCCESS;
+	} else {
+		/*
+		 * Otherwise poll for some time so the current
+		 * cycle has a chance to end before giving up.
+		 */
+		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+							       ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcinprog == 0) {
+				ret_val = E1000_SUCCESS;
+				break;
+			}
+			usec_delay(1);
+		}
+		if (ret_val == E1000_SUCCESS) {
+			/*
+			 * The previous cycle ended in time;
+			 * now set the Flash Cycle Done.
+			 */
+			hsfsts.hsf_status.flcdone = 1;
+			E1000_WRITE_FLASH_REG16(hw,
+						ICH_FLASH_HSFSTS,
+						hsfsts.regval);
+		} else {
+			DEBUGOUT("Flash controller busy, cannot get access");
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
+ * @hw: pointer to the HW structure
+ * @timeout: maximum time to wait for completion
+ *
+ * This function starts a flash cycle and waits for its completion.
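+ *
+ * Sketch of the sequence the read/write/erase paths in this file follow
+ * (illustrative summary, not additional code):
+ *
+ *	ret_val = e1000_flash_cycle_init_ich8lan(hw);  // clear FCERR/DAEL, claim the cycle
+ *	// program HSFCTL (byte count, cycle type) and FADDR
+ *	ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_READ_COMMAND_TIMEOUT);
+ *	// on success, data is read from or written via FDATA0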
+ **/ +static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + u32 i = 0; + + DEBUGFUNC("e1000_flash_cycle_ich8lan"); + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone == 1) + break; + usec_delay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_flash_word_ich8lan"); + + if (!data) { + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Must convert offset into bytes. */ + offset <<= 1; + + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data); + +out: + return ret_val; +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16* data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + DEBUGFUNC("e1000_read_flash_data_ich8lan"); + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + goto out; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + usec_delay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != E1000_SUCCESS) + break; + + hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); + + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* + * Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (ret_val == E1000_SUCCESS) { + flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0); + if (size == 1) { + *data = (u8)(flash_data & 0x000000FF); + } else if (size == 2) { + *data = (u16)(flash_data & 0x0000FFFF); + } + break; + } else { + /* + * If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. 
+			 */
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+							       ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1) {
+				/* Repeat for some time before giving up. */
+				continue;
+			} else if (hsfsts.hsf_status.flcdone == 0) {
+				DEBUGOUT("Timeout error - flash cycle "
+					 "did not complete.");
+				break;
+			}
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_write_nvm_ich8lan - Write word(s) to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to write.
+ * @words: Size of data to write in words
+ * @data: Pointer to the word(s) to write at offset.
+ *
+ * Stages the word(s) in the shadow RAM; they are committed to the flash
+ * when the NVM checksum is updated.
+ **/
+static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("e1000_write_nvm_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	for (i = 0; i < words; i++) {
+		dev_spec->shadow_ram[offset+i].modified = TRUE;
+		dev_spec->shadow_ram[offset+i].value = data[i];
+	}
+
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram.  The changes in the shadow
+ * ram are then committed to the EEPROM by processing one bank at a time,
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	ret_val = e1000_update_nvm_checksum_generic(hw);
+	if (ret_val)
+		goto out;
+
+	if (nvm->type != e1000_nvm_flash_sw)
+		goto out;
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * We're writing to the opposite bank, so if we're on bank 1,
+	 * write to bank 0, etc.
+	 * We also need to erase the segment that is going to be written.
+	 */
+	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val != E1000_SUCCESS)
+		goto out;
+
+	if (bank == 0) {
+		new_bank_offset = nvm->flash_bank_size;
+		old_bank_offset = 0;
+		e1000_erase_flash_bank_ich8lan(hw, 1);
+	} else {
+		old_bank_offset = nvm->flash_bank_size;
+		new_bank_offset = 0;
+		e1000_erase_flash_bank_ich8lan(hw, 0);
+	}
+
+	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+		/*
+		 * Determine whether to write the value stored
+		 * in the other NVM bank or a modified value stored
+		 * in the shadow RAM
+		 */
+		if (dev_spec->shadow_ram[i].modified) {
+			data = dev_spec->shadow_ram[i].value;
+		} else {
+			e1000_read_flash_word_ich8lan(hw,
+						      i + old_bank_offset,
+						      &data);
+		}
+
+		/*
+		 * If the word is 0x13, then make sure the signature bits
+		 * (15:14) are 11b until the commit has completed.
+		 * This will allow us to write 10b, which indicates the
+		 * signature is valid.  We want to do this after the write
+		 * has completed so that we don't mark the segment valid
+		 * while the write is still in progress.
+		 */
+		if (i == E1000_ICH_NVM_SIG_WORD)
+			data |= E1000_ICH_NVM_SIG_MASK;
+
+		/* Convert offset to bytes. */
+		act_offset = (i + new_bank_offset) << 1;
+
+		usec_delay(100);
+		/* Write the bytes to the new bank. */
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+							       act_offset,
+							       (u8)data);
+		if (ret_val)
+			break;
+
+		usec_delay(100);
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+							       act_offset + 1,
+							       (u8)(data >> 8));
+		if (ret_val)
+			break;
+	}
+
+	/*
+	 * Don't bother writing the segment valid bits if sector
+	 * programming failed.
+	 */
+	if (ret_val) {
+		DEBUGOUT("Flash commit failed.\n");
+		e1000_release_nvm(hw);
+		goto out;
+	}
+
+	/*
+	 * Finally, validate the new segment by setting bits 15:14
+	 * to 10b in word 0x13; this can be done without an erase
+	 * because these bits are 11b to start with and we only need
+	 * to change bit 14 to 0b.
+	 */
+	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+	e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+	data &= 0xBFFF;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+						       act_offset * 2 + 1,
+						       (u8)(data >> 8));
+	if (ret_val) {
+		e1000_release_nvm(hw);
+		goto out;
+	}
+
+	/*
+	 * And invalidate the previously valid segment by setting the
+	 * high byte of its signature word (0x13) to 0b.  This can be
+	 * done without an erase because a flash erase sets all bits
+	 * to 1's; we can write 1's to 0's without an erase.
+	 */
+	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+	if (ret_val) {
+		e1000_release_nvm(hw);
+		goto out;
+	}
+
+	/* Great!  Everything worked, we can now clear the cached entries. */
+	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+		dev_spec->shadow_ram[i].modified = FALSE;
+		dev_spec->shadow_ram[i].value = 0xFFFF;
+	}
+
+	e1000_release_nvm(hw);
+
+	/*
+	 * Reload the EEPROM, or else modifications will not appear
+	 * until after the next adapter reset.
+	 */
+	e1000_reload_nvm(hw);
+	msec_delay(10);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Check to see if the checksum needs to be fixed by reading bit 6 in
+ * word 0x19.  If the bit is 0, then the EEPROM has been modified but
+ * the checksum was not recalculated, in which case we need to calculate
+ * the checksum and set bit 6.
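+ *
+ * Equivalent sketch of the fix-up performed below:
+ *
+ *	e1000_read_nvm(hw, 0x19, 1, &data);
+ *	if ((data & 0x40) == 0) {
+ *		data |= 0x40;                   // mark checksum as calculated
+ *		e1000_write_nvm(hw, 0x19, 1, &data);
+ *		e1000_update_nvm_checksum(hw);  // recompute and store checksum
+ *	}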
+ **/
+static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
+
+	/*
+	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
+	 * needs to be fixed.  This bit is an indication that the NVM
+	 * was prepared by OEM software and did not calculate the
+	 * checksum...a likely scenario.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & 0x40) == 0) {
+		data |= 0x40;
+		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000_update_nvm_checksum(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_validate_nvm_checksum_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte/word to write.
+ * @size: Size of data to write, 1=byte 2=word
+ * @data: The byte(s) to write to the NVM.
+ *
+ * Writes one or two bytes to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					  u8 size, u16 data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+	u8 count = 0;
+
+	DEBUGFUNC("e1000_write_flash_data_ich8lan");
+
+	if (size < 1 || size > 2 || data > size * 0xff ||
+	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		goto out;
+
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			    hw->nvm.flash_base_addr;
+
+	do {
+		usec_delay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val != E1000_SUCCESS)
+			break;
+
+		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+		if (size == 1)
+			flash_data = (u32)data & 0x00FF;
+		else
+			flash_data = (u32)data;
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
+
+		/*
+		 * Check if FCERR is set to 1; if so, clear it and try
+		 * the whole sequence a few more times, else we're done.
+		 */
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+						    ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+		if (ret_val == E1000_SUCCESS) {
+			break;
+		} else {
+			/*
+			 * If we're here, then things are most likely
+			 * completely hosed, but if the error condition
+			 * is detected, it won't hurt to give it another
+			 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+			 */
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+							       ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1) {
+				/* Repeat for some time before giving up. */
+				continue;
+			} else if (hsfsts.hsf_status.flcdone == 0) {
+				DEBUGOUT("Timeout error - flash cycle "
+					 "did not complete.");
+				break;
+			}
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The index of the byte to write.
+ * @data: The byte to write to the NVM.
+ *
+ * Writes a single byte to the NVM using the flash access registers.
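+ *
+ * Callers that need robustness against transient flash-cycle errors go
+ * through the retry helper below instead (sketch, as used by the NVM
+ * commit path in this file):
+ *
+ *	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, (u8)data);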
+ **/ +static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + DEBUGFUNC("e1000_write_flash_byte_ich8lan"); + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan"); + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (ret_val == E1000_SUCCESS) + goto out; + + for (program_retries = 0; program_retries < 100; program_retries++) { + DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset); + usec_delay(100); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (ret_val == E1000_SUCCESS) + break; + } + if (program_retries == 100) { + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. + **/ +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val = E1000_SUCCESS; + s32 count = 0; + s32 j, iteration, sector_size; + + DEBUGFUNC("e1000_erase_flash_bank_ich8lan"); + + hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); + + /* + * Determine HW Sector size: Read BERASE bits of hw flash status + * register + * 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. + * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K; + break; + case 2: + if (hw->mac.type == e1000_ich9lan) { + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K; + } else { + ret_val = -E1000_ERR_NVM; + goto out; + } + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K; + break; + default: + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? 
(sector_size * iteration) : 0;
+
+	for (j = 0; j < iteration; j++) {
+		do {
+			/* Steps */
+			ret_val = e1000_flash_cycle_init_ich8lan(hw);
+			if (ret_val)
+				goto out;
+
+			/*
+			 * Write a value 11 (block Erase) in Flash
+			 * Cycle field in hw flash control
+			 */
+			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
+								ICH_FLASH_HSFCTL);
+			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+			E1000_WRITE_FLASH_REG16(hw,
+						ICH_FLASH_HSFCTL,
+						hsflctl.regval);
+
+			/*
+			 * Write the last 24 bits of an index within the
+			 * block into Flash Linear address field in Flash
+			 * Address.
+			 */
+			flash_linear_addr += (j * sector_size);
+			E1000_WRITE_FLASH_REG(hw,
+					      ICH_FLASH_FADDR,
+					      flash_linear_addr);
+
+			ret_val = e1000_flash_cycle_ich8lan(hw,
+							    ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+			if (ret_val == E1000_SUCCESS) {
+				break;
+			} else {
+				/*
+				 * Check if FCERR is set to 1.  If so,
+				 * clear it and try the whole sequence
+				 * a few more times, else we're done.
+				 */
+				hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+								       ICH_FLASH_HSFSTS);
+				if (hsfsts.hsf_status.flcerr == 1) {
+					/*
+					 * repeat for some time before
+					 * giving up
+					 */
+					continue;
+				} else if (hsfsts.hsf_status.flcdone == 0)
+					goto out;
+			}
+		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_valid_led_default_ich8lan - Set the default LED settings
+ * @hw: pointer to the HW structure
+ * @data: Pointer to the LED settings
+ *
+ * Reads the LED default settings from the NVM to data.  If the NVM LED
+ * settings are all 0's or F's, set the LED default to a valid LED default
+ * setting.
+ **/
+static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_valid_led_default_ich8lan");
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 ||
+	    *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT_ICH8LAN;
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ * @hw: pointer to the HW structure
+ *
+ * ICH8 uses the PCI Express bus, but does not contain a PCI Express
+ * Capability register, so the bus width is hard-coded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_get_bus_info_ich8lan");
+
+	ret_val = e1000_get_bus_info_pcie_generic(hw);
+
+	/*
+	 * ICH devices are "PCI Express"-ish.  They have
+	 * a configuration space, but do not contain
+	 * PCI Express Capability registers, so bus width
+	 * must be hardcoded.
+	 */
+	if (bus->width == e1000_bus_width_unknown)
+		bus->width = e1000_bus_width_pcie_x1;
+
+	return ret_val;
+}
+
+/**
+ * e1000_reset_hw_ich8lan - Reset the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Does a full reset of the hardware which includes a reset of the PHY and
+ * MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl, icr, kab;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_reset_hw_ich8lan");
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000_disable_pcie_master_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("PCI-E Master disable polling has failed.\n");
+	}
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	/*
+	 * Disable the Transmit and Receive units.
+	 * Then delay to allow any pending transactions to complete
+	 * before we hit the MAC with the global reset.
+	 */
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	/* Workaround for ICH8 bit corruption issue in FIFO memory */
+	if (hw->mac.type == e1000_ich8lan) {
+		/* Set Tx and Rx buffer allocation to 8k apiece. */
+		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
+		/* Set Packet Buffer Size to 16k. */
+		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
+	}
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	if (!e1000_check_reset_block(hw) && !hw->phy.reset_disable) {
+		/*
+		 * PHY HW reset requires MAC CORE reset at the same
+		 * time to make sure the interface between MAC and the
+		 * external PHY is reset.
+		 */
+		ctrl |= E1000_CTRL_PHY_RST;
+	}
+	ret_val = e1000_acquire_swflag_ich8lan(hw);
+	DEBUGOUT("Issuing a global reset to ich8lan");
+	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
+	msec_delay(20);
+
+	ret_val = e1000_get_auto_rd_done_generic(hw);
+	if (ret_val) {
+		/*
+		 * When auto config read does not complete, do not
+		 * return with an error.  This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+		 */
+		DEBUGOUT("Auto Read Done did not complete\n");
+	}
+
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	kab = E1000_READ_REG(hw, E1000_KABGTXD);
+	kab |= E1000_KABGTXD_BGSQLBIAS;
+	E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
+
+	return ret_val;
+}
+
+/**
+ * e1000_init_hw_ich8lan - Initialize the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Prepares the hardware for transmit and receive by doing the following:
+ *  - initialize hardware bits
+ *  - initialize LED identification
+ *  - setup receive address registers
+ *  - setup flow control
+ *  - setup transmit descriptors
+ *  - clear statistics
+ **/
+static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl_ext, txdctl, snoop;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_ich8lan");
+
+	e1000_initialize_hw_bits_ich8lan(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/* Set the transmit descriptor write-back policy for both queues */
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
+
+	/*
+	 * ICH8 has opposite polarity of no_snoop bits.
+	 * By default, we should use snoop behavior.
+ */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32)~(PCIE_NO_SNOOP_ALL); + e1000_set_pcie_no_snoop_generic(hw, snoop); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return ret_val; +} +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. + **/ +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_initialize_hw_bits_ich8lan"); + + if (hw->mac.disable_hw_init_bits) + goto out; + + /* Extended Device Control */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); + reg |= (1 << 22); + E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = E1000_READ_REG(hw, E1000_TARC(0)); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = E1000_READ_REG(hw, E1000_TARC(1)); + if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + E1000_WRITE_REG(hw, E1000_TARC(1), reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = E1000_READ_REG(hw, E1000_STATUS); + reg &= ~(1 << 31); + E1000_WRITE_REG(hw, E1000_STATUS, reg); + } + +out: + return; +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_link_ich8lan"); + + if (e1000_check_reset_block(hw)) + goto out; + + /* + * ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->fc.type == e1000_fc_default) + hw->fc.type = e1000_fc_full; + + hw->fc.original_type = hw->fc.type; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type); + + /* Continue to configure the copper link. 
 */
+	ret_val = func->setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+	ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Configures the kumeran interface to the PHY to wait the appropriate time
+ * when polling the PHY, then calls the generic setup_copper_link to finish
+ * configuring the copper link.
+ **/
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each iteration
+	 * and increase the max iterations when polling the phy;
+	 * this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+	if (ret_val)
+		goto out;
+
+	if (hw->phy.type == e1000_phy_igp_3) {
+		ret_val = e1000_copper_link_setup_igp(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (hw->phy.type == e1000_phy_ife) {
+		ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
+					     &reg_data);
+		if (ret_val)
+			goto out;
+
+		reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+		switch (hw->phy.mdix) {
+		case 1:
+			reg_data &= ~IFE_PMC_FORCE_MDIX;
+			break;
+		case 2:
+			reg_data |= IFE_PMC_FORCE_MDIX;
+			break;
+		case 0:
+		default:
+			reg_data |= IFE_PMC_AUTO_MDIX;
+			break;
+		}
+		ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL,
+					      reg_data);
+		if (ret_val)
+			goto out;
+	}
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to store current link speed
+ * @duplex: pointer to store the current link duplex
+ *
+ * Calls the generic get_speed_and_duplex to retrieve the current link
+ * information and then calls the Kumeran lock loss workaround for links at
+ * gigabit speeds.
+ **/
+static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+					  u16 *duplex)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
+
+	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
+	if (ret_val)
+		goto out;
+
+	if ((hw->mac.type == e1000_ich8lan) &&
+	    (hw->phy.type == e1000_phy_igp_3) &&
+	    (*speed == SPEED_1000)) {
+		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ * @hw: pointer to the HW structure
+ *
+ * Work-around for 82566 Kumeran PCS lock loss:
+ * On link status change (i.e. PCI reset, speed change) and link is up and
+ * speed is gigabit:
+ *  0) if the workaround is optionally disabled, do nothing
+ *  1) wait 1ms for Kumeran link to come up
+ *  2) check Kumeran Diagnostic register PCS lock loss bit
+ *  3) if not set the link is locked (all is good), otherwise...
+ *  4) reset the PHY
+ *  5) repeat up to 10 times
+ * Note: this is only called for IGP3 copper when speed is 1gb.
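+ *
+ * Control-flow sketch of the loop implemented below:
+ *
+ *	for (i = 0; i < 10; i++) {
+ *		// read IGP3_KMRN_DIAG twice: the first read clears stale status
+ *		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+ *			return E1000_SUCCESS;  // PCS locked, link is stable
+ *		e1000_phy_hw_reset(hw);        // otherwise reset the PHY and retry
+ *	}
+ *	// still unlocked: disable gigabit and return -E1000_ERR_PHY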
+ **/
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 phy_ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, data;
+	bool link;
+
+	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
+		goto out;
+
+	/*
+	 * Make sure link is up before proceeding.  If not, just return.
+	 * Attempting this while link is negotiating fouls up link
+	 * stability.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (!link) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	for (i = 0; i < 10; i++) {
+		/* read once to clear */
+		ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			goto out;
+		/* and again to get new status */
+		ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			goto out;
+
+		/* check for PCS lock */
+		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
+			ret_val = E1000_SUCCESS;
+			goto out;
+		}
+
+		/* Issue PHY reset */
+		e1000_phy_hw_reset(hw);
+		msec_delay_irq(5);
+	}
+	/* Disable GigE link negotiation */
+	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+	/*
+	 * Call gig speed drop workaround on Giga disable before accessing
+	 * any PHY registers
+	 */
+	e1000_gig_downshift_workaround_ich8lan(hw);
+
+	/* unable to acquire PCS lock */
+	ret_val = -E1000_ERR_PHY;
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to set the current Kumeran workaround state
+ *
+ * If ICH8, set the current Kumeran workaround state (enabled - TRUE /
+ * disabled - FALSE).
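+ *
+ * Usage sketch (illustrative only; when and whether a driver toggles this
+ * is policy outside this file):
+ *
+ *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, FALSE);  // opt out
+ *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);   // opt back in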
+ **/
+void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+						 bool state)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec;
+
+	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
+
+	if (hw->mac.type != e1000_ich8lan) {
+		DEBUGOUT("Workaround applies to ICH8 only.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	dev_spec->kmrn_lock_loss_workaround_enabled = state;
+
+out:
+	return;
+}
+
+/**
+ * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * @hw: pointer to the HW structure
+ *
+ * Workaround for 82566 power-down on D3 entry:
+ *  1) disable gigabit link
+ *  2) write VR power-down enable
+ *  3) read it back
+ * Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+	u16 data;
+	u8 retry = 0;
+
+	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
+
+	if (hw->phy.type != e1000_phy_igp_3)
+		goto out;
+
+	/* Try the workaround twice (if needed) */
+	do {
+		/* Disable link */
+		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
+		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
+
+		/*
+		 * Call gig speed drop workaround on Giga disable before
+		 * accessing any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* Write VR power-down enable */
+		e1000_read_phy_reg(hw, IGP3_VR_CTRL, &data);
+		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		e1000_write_phy_reg(hw,
+				    IGP3_VR_CTRL,
+				    data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+		/* Read it back and test */
+		e1000_read_phy_reg(hw, IGP3_VR_CTRL, &data);
+		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+			break;
+
+		/* Issue PHY reset and repeat at most one more time */
+		reg = E1000_READ_REG(hw, E1000_CTRL);
+		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
+		retry++;
+	} while (retry);
+
+out:
+	return;
+}
+
+/**
+ * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ * @hw: pointer to the HW structure
+ *
+ * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
+ * LPLU, Giga disable, MDIC PHY reset):
+ *  1) Set Kumeran Near-end loopback
+ *  2) Clear Kumeran Near-end loopback
+ * Should only be called for ICH8[m] devices with IGP_3 Phy.
+ **/
+void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 reg_data;
+
+	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
+
+	if ((hw->mac.type != e1000_ich8lan) ||
+	    (hw->phy.type != e1000_phy_igp_3))
+		goto out;
+
+	ret_val = e1000_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				      &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				       reg_data);
+	if (ret_val)
+		goto out;
+	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				       reg_data);
+out:
+	return;
+}
+
+/**
+ * e1000_cleanup_led_ich8lan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_cleanup_led_ich8lan");
+
+	if (hw->phy.type == e1000_phy_ife)
+		ret_val = e1000_write_phy_reg(hw,
+					      IFE_PHY_SPECIAL_CONTROL_LED,
+					      0);
+	else
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+	return ret_val;
+}
+
+/**
+ * e1000_led_on_ich8lan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_led_on_ich8lan");
+
+	if (hw->phy.type == e1000_phy_ife)
+		ret_val = e1000_write_phy_reg(hw,
+					      IFE_PHY_SPECIAL_CONTROL_LED,
+					      (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+	else
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+
+	return ret_val;
+}
+
+/**
+ * e1000_led_off_ich8lan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_led_off_ich8lan");
+
+	if (hw->phy.type == e1000_phy_ife)
+		ret_val = e1000_write_phy_reg(hw,
+					      IFE_PHY_SPECIAL_CONTROL_LED,
+					      (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+	else
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+
+	return ret_val;
+}
+
+/**
+ * e1000_get_cfg_done_ich8lan - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status.  NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so the error is only logged and the function
+ * still returns E1000_SUCCESS.  If we were to return with an error,
+ * EEPROM-less silicon would not be able to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	e1000_get_cfg_done_generic(hw);
+
+	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
+	if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
+	    (hw->phy.type == e1000_phy_igp_3)) {
+		e1000_phy_init_script_igp3(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power-down to save power, to turn off link during a
+ * driver unload, or when wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears hardware counters specific to the silicon family and calls
+ * clear_hw_cntrs_generic to clear all general purpose counters.
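+ *
+ * The family-specific statistics registers are clear-on-read, so a single
+ * read of each register suffices; the function follows this pattern:
+ *
+ *	volatile u32 temp;
+ *	temp = E1000_READ_REG(hw, E1000_ALGNERRC);  // value discarded, counter cleared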
+ **/ +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + volatile u32 temp; + + DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan"); + + e1000_clear_hw_cntrs_base_generic(hw); + + temp = E1000_READ_REG(hw, E1000_ALGNERRC); + temp = E1000_READ_REG(hw, E1000_RXERRC); + temp = E1000_READ_REG(hw, E1000_TNCRS); + temp = E1000_READ_REG(hw, E1000_CEXTERR); + temp = E1000_READ_REG(hw, E1000_TSCTC); + temp = E1000_READ_REG(hw, E1000_TSCTFC); + + temp = E1000_READ_REG(hw, E1000_MGTPRC); + temp = E1000_READ_REG(hw, E1000_MGTPDC); + temp = E1000_READ_REG(hw, E1000_MGTPTC); + + temp = E1000_READ_REG(hw, E1000_IAC); + temp = E1000_READ_REG(hw, E1000_ICRXOC); +} + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h new file mode 100644 index 0000000..65e95c9 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h @@ -0,0 +1,110 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_ICH8LAN_H_ +#define _E1000_ICH8LAN_H_ + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 + +#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define E1000_SHADOW_RAM_WORDS 2048 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 +#define ICH_FLASH_SECTOR_SIZE 4096 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +#define E1000_ICH_FWSM_DISSW 0x10000000 /* FW Disables SW Writes */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_OFF2 << 8) | \ + (ID_LED_DEF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M */ + +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ +#define IGP3_CAPABILITY PHY_REG(776, 19) /* Capability */ +#define IGP3_PM_CTRL PHY_REG(769, 20) /* Power Management Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 +#define IGP3_PM_CTRL_FORCE_PWR_DOWN 0x0020 + +/* + * Additional interrupts need to be handled for ICH family: + * DSW = The FW changed the status of the DISSW bit in FWSM + * PHYINT = The LAN connected device generates an interrupt + * EPRST = Manageability reset event + */ +#define IMS_ICH_ENABLE_MASK (\ + E1000_IMS_DSW | \ + E1000_IMS_PHYINT | \ + E1000_IMS_EPRST) + + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c new file mode 100644 index 0000000..c60b402 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c @@ -0,0 +1,2039 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" +#include "e1000_mac.h" + +/** + * e1000_remove_device_generic - Free device specific structure + * @hw: pointer to the HW structure + * + * If a device specific structure was allocated, this function will + * free it. + **/ +void e1000_remove_device_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_remove_device_generic"); + + /* Freeing the dev_spec member of e1000_hw structure */ + e1000_free_dev_spec_struct(hw); +} + +/** + * e1000_get_bus_info_pci_generic - Get PCI(x) bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCI/PCIx), and PCI(-x) function. + **/ +s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 status = E1000_READ_REG(hw, E1000_STATUS); + s32 ret_val = E1000_SUCCESS; + u16 pci_header_type; + + DEBUGFUNC("e1000_get_bus_info_pci_generic"); + + /* PCI or PCI-X? */ + bus->type = (status & E1000_STATUS_PCIX_MODE) + ? e1000_bus_type_pcix + : e1000_bus_type_pci; + + /* Bus speed */ + if (bus->type == e1000_bus_type_pci) { + bus->speed = (status & E1000_STATUS_PCI66) + ? e1000_bus_speed_66 + : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + bus->speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + bus->speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + bus->speed = e1000_bus_speed_133; + break; + default: + bus->speed = e1000_bus_speed_reserved; + break; + } + } + + /* Bus width */ + bus->width = (status & E1000_STATUS_BUS64) + ? e1000_bus_width_64 + : e1000_bus_width_32; + + /* Which PCI(-X) function? */ + e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) + bus->func = (status & E1000_STATUS_FUNC_MASK) + >> E1000_STATUS_FUNC_SHIFT; + else + bus->func = 0; + + return ret_val; +} + +/** + * e1000_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. 
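+ *
+ * The link width is decoded from the PCIe Link Status capability word,
+ * per the sketch below:
+ *
+ *	width = (pcie_link_status & PCIE_LINK_WIDTH_MASK)
+ *		>> PCIE_LINK_WIDTH_SHIFT;  // negotiated lane count, e.g. 1 for x1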
+ **/ +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u32 status; + u16 pcie_link_status, pci_header_type; + + DEBUGFUNC("e1000_get_bus_info_pcie_generic"); + + bus->type = e1000_bus_type_pci_express; + bus->speed = e1000_bus_speed_2500; + + ret_val = e1000_read_pcie_cap_reg(hw, + PCIE_LINK_STATUS, + &pcie_link_status); + if (ret_val) + bus->width = e1000_bus_width_unknown; + else + bus->width = (e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> + PCIE_LINK_WIDTH_SHIFT); + + e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { + status = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (status & E1000_STATUS_FUNC_MASK) + >> E1000_STATUS_FUNC_SHIFT; + } else { + bus->func = 0; + } + + return E1000_SUCCESS; +}
+ +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + DEBUGFUNC("e1000_clear_vfta_generic"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + E1000_WRITE_FLUSH(hw); + } +}
+ +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + DEBUGFUNC("e1000_write_vfta_generic"); + + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); +}
+ +/** + * e1000_init_rx_addrs_generic - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Sets up the receive address registers by setting the base receive address + * register to the device's MAC address and clearing all the other receive + * address registers to 0. + **/ +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + + DEBUGFUNC("e1000_init_rx_addrs_generic"); + + /* Setup the receive address */ + DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set_generic(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(hw); + } +}
+ +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the NVM for an alternate MAC address. An alternate MAC address + * can be set up by pre-boot software and must be treated like a permanent + * address, overriding the actual permanent MAC address. If an + * alternate MAC address is found, it is saved in the hw struct and + * programmed into RAR0, and the function returns success; otherwise the + * function returns an error.
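+ *
+ * Layout sketch (an illustration derived from the code below, not a
+ * datasheet quote): the address is stored as three consecutive
+ * little-endian 16-bit words at the offset named by word
+ * NVM_ALT_MAC_ADDR_PTR; function 1's entry follows function 0's at
+ * +3 words (ETH_ADDR_LEN / sizeof(u16)), and a pointer word of 0xFFFF
+ * means no alternate address is provisioned.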
+ **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = E1000_SUCCESS; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ADDR_LEN]; + + DEBUGFUNC("e1000_check_alt_mac_addr_generic"); + + ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (nvm_alt_mac_addr_offset == 0xFFFF) { + ret_val = -(E1000_NOT_IMPLEMENTED); + goto out; + } + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += ETH_ADDR_LEN/sizeof(u16); + + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (alt_mac_addr[0] & 0x01) { + ret_val = -(E1000_NOT_IMPLEMENTED); + goto out; + } + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i]; + + e1000_rar_set(hw, hw->mac.perm_addr, 0); + +out: + return ret_val; +} + +/** + * e1000_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_generic"); + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) { + if (!hw->mac.disable_av) + rar_high |= E1000_RAH_AV; + } + + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); +} + +/** + * e1000_mta_set_generic - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + DEBUGFUNC("e1000_mta_set_generic"); + /* + * The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. 
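+ *
+ * Worked example (illustrative, assuming mta_reg_count = 128): for
+ * hash_value = 0x563, hash_reg = (0x563 >> 5) & 0x7F = 0x2B and
+ * hash_bit = 0x563 & 0x1F = 3, so bit 3 of MTA[0x2B] gets set.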
+ */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); + + mta |= (1 << hash_bit); + + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + **/ +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + u32 hash_value; + u32 i; + + DEBUGFUNC("e1000_update_mc_addr_list_generic"); + + /* + * Load the first set of multicast addresses into the exact + * filters (RAR). If there are not enough to fill the RAR + * array, clear the filters. + */ + for (i = rar_used_count; i < rar_count; i++) { + if (mc_addr_count) { + e1000_rar_set(hw, mc_addr_list, i); + mc_addr_count--; + mc_addr_list += ETH_ADDR_LEN; + } else { + E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(hw); + } + } + + /* Clear the old settings from the MTA */ + DEBUGOUT("Clearing MTA\n"); + for (i = 0; i < hw->mac.mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + E1000_WRITE_FLUSH(hw); + } + + /* Load any remaining multicast addresses into the hash table. */ + for (; mc_addr_count > 0; mc_addr_count--) { + hash_value = e1000_hash_mc_addr(hw, mc_addr_list); + DEBUGOUT1("Hash value = 0x%03X\n", hash_value); + e1000_mta_set(hw, hash_value); + mc_addr_list += ETH_ADDR_LEN; + } +} + +/** + * e1000_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * e1000_mta_set_generic() + **/ +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* + * The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. 
The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +}
+ +/** + * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value + * @hw: pointer to the HW structure + * + * In certain situations, a system BIOS may report that the PCIx maximum + * memory read byte count (MMRBC) value is higher than the actual + * value. We check the PCIx command register against the current PCIx status + * register. + **/ +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) +{ + u16 cmd_mmrbc; + u16 pcix_cmd; + u16 pcix_stat_hi_word; + u16 stat_mmrbc; + + DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); + + /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ + if (hw->bus.type != e1000_bus_type_pcix) + return; + + e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); + cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> + PCIX_COMMAND_MMRBC_SHIFT; + stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> + PCIX_STATUS_HI_MMRBC_SHIFT; + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; + if (cmd_mmrbc > stat_mmrbc) { + pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; + pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; + e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + } +}
+ +/** + * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers.
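+ * These statistics registers are clear-on-read, so a single read of each
+ * is enough to zero it; the reads below land in a volatile temporary so
+ * the compiler cannot optimize them away.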
+ **/ +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) +{ + volatile u32 temp; + + DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); + + temp = E1000_READ_REG(hw, E1000_CRCERRS); + temp = E1000_READ_REG(hw, E1000_SYMERRS); + temp = E1000_READ_REG(hw, E1000_MPC); + temp = E1000_READ_REG(hw, E1000_SCC); + temp = E1000_READ_REG(hw, E1000_ECOL); + temp = E1000_READ_REG(hw, E1000_MCC); + temp = E1000_READ_REG(hw, E1000_LATECOL); + temp = E1000_READ_REG(hw, E1000_COLC); + temp = E1000_READ_REG(hw, E1000_DC); + temp = E1000_READ_REG(hw, E1000_SEC); + temp = E1000_READ_REG(hw, E1000_RLEC); + temp = E1000_READ_REG(hw, E1000_XONRXC); + temp = E1000_READ_REG(hw, E1000_XONTXC); + temp = E1000_READ_REG(hw, E1000_XOFFRXC); + temp = E1000_READ_REG(hw, E1000_XOFFTXC); + temp = E1000_READ_REG(hw, E1000_FCRUC); + temp = E1000_READ_REG(hw, E1000_GPRC); + temp = E1000_READ_REG(hw, E1000_BPRC); + temp = E1000_READ_REG(hw, E1000_MPRC); + temp = E1000_READ_REG(hw, E1000_GPTC); + temp = E1000_READ_REG(hw, E1000_GORCL); + temp = E1000_READ_REG(hw, E1000_GORCH); + temp = E1000_READ_REG(hw, E1000_GOTCL); + temp = E1000_READ_REG(hw, E1000_GOTCH); + temp = E1000_READ_REG(hw, E1000_RNBC); + temp = E1000_READ_REG(hw, E1000_RUC); + temp = E1000_READ_REG(hw, E1000_RFC); + temp = E1000_READ_REG(hw, E1000_ROC); + temp = E1000_READ_REG(hw, E1000_RJC); + temp = E1000_READ_REG(hw, E1000_TORL); + temp = E1000_READ_REG(hw, E1000_TORH); + temp = E1000_READ_REG(hw, E1000_TOTL); + temp = E1000_READ_REG(hw, E1000_TOTH); + temp = E1000_READ_REG(hw, E1000_TPR); + temp = E1000_READ_REG(hw, E1000_TPT); + temp = E1000_READ_REG(hw, E1000_MPTC); + temp = E1000_READ_REG(hw, E1000_BPTC); +}
+ +/** + * e1000_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see if the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_copper_link_generic"); + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = FALSE; + + /* + * Check if there was DownShift; it must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000_config_collision_dist_generic(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + } + +out: + return ret_val; +} + +/** + * e1000_check_for_fiber_link_generic - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_check_for_fiber_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + goto out; + } + DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = TRUE; + } + +out: + return ret_val; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_check_for_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
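+ * Unlike the fiber variant above, no SWDPIN1 signal-detect check is
+ * made here: internal serdes media is assumed to always have signal.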
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + goto out; + } + DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = TRUE; + } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { + /* + * If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usec_delay(10); + if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, E1000_RXCW)) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = TRUE; + DEBUGOUT("SERDES: Link is up.\n"); + } + } else { + mac->serdes_has_link = FALSE; + DEBUGOUT("SERDES: Link is down.\n"); + } + } + + if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { + status = E1000_READ_REG(hw, E1000_STATUS); + mac->serdes_has_link = (status & E1000_STATUS_LU) + ? TRUE + : FALSE; + } + +out: + return ret_val; +} + +/** + * e1000_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 e1000_setup_link_generic(struct e1000_hw *hw) +{ + struct e1000_functions *func = &hw->func; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_link_generic"); + + /* + * In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (e1000_check_reset_block(hw)) + goto out; + + /* + * If flow control is set to default, set flow control based on + * the EEPROM flow control settings. + */ + if (hw->fc.type == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + goto out; + } + + /* + * We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.original_type = hw->fc.type; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type); + + /* Call the necessary media_type subroutine to configure the link. 
*/ + ret_val = func->setup_physical_interface(hw); + if (ret_val) + goto out; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + ret_val = e1000_set_fc_watermarks_generic(hw); + +out: + return ret_val; +}
+ +/** + * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + e1000_config_collision_dist_generic(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + goto out; + + /* + * Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* + * For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + DEBUGOUT("No signal detected\n"); + } + +out: + return ret_val; +}
+ +/** + * e1000_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_config_collision_dist_generic(struct e1000_hw *hw) +{ + u32 tctl; + + DEBUGFUNC("e1000_config_collision_dist_generic"); + + tctl = E1000_READ_REG(hw, E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(hw); +}
+ +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register; if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected.
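+ *
+ * As a rough bound (assuming the usual FIBER_LINK_UP_LIMIT of 50 and the
+ * 10 ms delay per iteration used below), the poll waits up to about
+ * 500 ms for the LU bit before declaring auto-negotiation failed.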
+ **/ +s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); + + /* + * If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 + * milliseconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msec_delay(10); + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = 1; + /* + * AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + goto out; + } + mac->autoneg_failed = 0; + } else { + mac->autoneg_failed = 0; + DEBUGOUT("Valid Link Found\n"); + } + +out: + return ret_val; +}
+ +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * based on the flow control settings in e1000_mac_info. + **/ +s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_commit_fc_settings_generic"); + + /* + * Check for a software override of the flow control settings, and + * set up the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.type) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric RX + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride.
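+ * Note this is the same TXCW value as the e1000_fc_rx_pause case
+ * above: symmetric plus asymmetric PAUSE is advertised in both, and
+ * the Rx-only case is narrowed down after link-up once the partner's
+ * abilities are known.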
+ */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->txcw = txcw; + +out: + return ret_val; +}
+ +/** + * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 fcrtl = 0, fcrth = 0; + + DEBUGFUNC("e1000_set_fc_watermarks_generic"); + + /* + * Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.type & e1000_fc_tx_pause) { + /* + * We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); + E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); + + return ret_val; +}
+ +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 nvm_data; + + DEBUGFUNC("e1000_set_default_fc_generic"); + + /* + * Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.type = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.type = e1000_fc_tx_pause; + else + hw->fc.type = e1000_fc_full; + +out: + return ret_val; +}
+ +/** + * e1000_force_mac_fc_generic - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection.
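+ *
+ * For example, e1000_fc_rx_pause resolves to RFCE set with TFCE
+ * cleared, and e1000_fc_full sets both bits, mirroring the switch
+ * statement below.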
+ **/ +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_force_mac_fc_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* + * Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit and + * receive flow control. + * + * The "Case" statement below enables/disables flow control + * according to the "hw->fc.type" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause + * frames but we do not receive pause frames). + * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + DEBUGOUT1("hw->fc.type = %u\n", hw->fc.type); + + switch (hw->fc.type) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + +out: + return ret_val; +}
+ +/** + * e1000_config_fc_after_link_up_generic - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + DEBUGFUNC("e1000_config_fc_after_link_up_generic"); + + /* + * Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000_force_mac_fc_generic(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000_force_mac_fc_generic(hw); + } + + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + goto out; + } + + /* + * Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner have + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* + * Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits.
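+ * (The link-status bit is latched-low: the first read returns
+ * the value latched since the last read, and only the second
+ * read reflects the current state.)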
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + DEBUGOUT("Copper PHY and Auto Neg " + "has not completed.\n"); + goto out; + } + + /* + * The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto-Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* + * Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* + * Now we need to check if the user selected Rx ONLY + * pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.original_type == e1000_fc_full) { + hw->fc.type = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\r\n"); + } else { + hw->fc.type = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = " + "RX PAUSE frames only.\r\n"); + } + } + /* + * For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.type = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n"); + } + /* + * For transmitting PAUSE frames ONLY.
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result +*-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.type = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n"); + } else { + /* + * Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.type = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\r\n"); + } + + /* + * Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.type = e1000_fc_none; + + /* + * Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +}
+ +/** + * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic"); + + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } + + return E1000_SUCCESS; +}
+ +/** + * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex) +{ + DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); + + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return E1000_SUCCESS; +}
+ +/** + * e1000_get_hw_semaphore_generic - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = E1000_SUCCESS; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_generic"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore.
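+ * The handshake below writes SWESMBI and reads it back: if firmware
+ * currently owns the semaphore the bit does not latch, so the loop
+ * retries until the read-back shows the bit set or the timeout expires.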
*/ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_put_hw_semaphore_generic - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("e1000_put_hw_semaphore_generic"); + + swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + E1000_WRITE_REG(hw, E1000_SWSM, swsm); +} + +/** + * e1000_get_auto_rd_done_generic - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_get_auto_rd_done_generic"); + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) + break; + msec_delay(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + DEBUGOUT("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_valid_led_default_generic - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_generic"); + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + +out: + return ret_val; +}
+ +/** + * e1000_id_led_init_generic - Initialize ID LED settings + * @hw: pointer to the HW structure + * + * Reads the default LED configuration from the NVM and derives the LEDCTL + * values (ledctl_mode1/ledctl_mode2) later used to drive the + * identification LEDs. + **/ +s32 e1000_id_led_init_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + DEBUGFUNC("e1000_id_led_init_generic"); + + ret_val = hw->func.valid_led_default(hw, &data); + if (ret_val) + goto out; + + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return ret_val; +}
+ +/** + * e1000_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. + **/ +s32 e1000_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_led_generic"); + + if (hw->func.setup_led != e1000_setup_led_generic) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + } + +out: + return ret_val; +}
+ +/** + * e1000_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM.
+ **/ +s32 e1000_cleanup_led_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_cleanup_led_generic"); + + if (hw->func.cleanup_led != e1000_cleanup_led_generic) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + +out: + return ret_val; +}
+ +/** + * e1000_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. + **/ +s32 e1000_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + DEBUGFUNC("e1000_blink_led_generic"); + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* + * set the blink bit for each LED that's "on" (0x0E) + * in ledctl_mode2 + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << + (i * 8)); + } + + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); + + return E1000_SUCCESS; +}
+ +/** + * e1000_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_on_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return E1000_SUCCESS; +}
+ +/** + * e1000_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_off_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return E1000_SUCCESS; +}
+ +/** + * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + if (no_snoop) { + gcr = E1000_READ_REG(hw, E1000_GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + E1000_WRITE_REG(hw, E1000_GCR, gcr); + } +out: + return; +}
+ +/** + * e1000_disable_pcie_master_generic - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (E1000_SUCCESS) if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not + * caused the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests.
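+ *
+ * As a rough bound (assuming the usual MASTER_DISABLE_TIMEOUT of 800
+ * polls at 100 usec each, as used below), the wait is capped at about
+ * 80 ms.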
+ **/ +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_disable_pcie_master_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + while (timeout) { + if (!(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + usec_delay(100); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +}
+ +/** + * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000_reset_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_reset_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + goto out; + } + + if (!mac->ifs_params_forced) { + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + } + + mac->in_ifs_mode = FALSE; + E1000_WRITE_REG(hw, E1000_AIT, 0); +out: + return; +}
+ +/** + * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. + **/ +void e1000_update_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_update_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + goto out; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = TRUE; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = FALSE; + E1000_WRITE_REG(hw, E1000_AIT, 0); + } + } +out: + return; +}
+ +/** + * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotiation, MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_validate_mdi_setting_generic"); + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + DEBUGOUT("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +}
+ +/** + * e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion.
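+ *
+ * For example (illustrative values, assuming E1000_GEN_CTL_ADDRESS_SHIFT
+ * is 8 as the address << 8 | data format above implies): writing data
+ * 0x5A to offset 0x03 produces the register value (0x03 << 8) | 0x5A =
+ * 0x035A, after which bit 31 (E1000_GEN_CTL_READY) reports completion.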
+ **/ +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + E1000_WRITE_REG(hw, reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + usec_delay(5); + regvalue = E1000_READ_REG(hw, reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + DEBUGOUT1("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h new file mode 100644 index 0000000..2a7d39c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h @@ -0,0 +1,86 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +/* + * Functions that should not be called directly from drivers but can be used + * by other files in this 'shared code' + */ +s32 e1000_blink_led_generic(struct e1000_hw *hw); +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw); +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000_id_led_init_generic(struct e1000_hw *hw); +s32 e1000_led_on_generic(struct e1000_hw *hw); +s32 e1000_led_off_generic(struct e1000_hw *hw); +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count); +s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_set_default_fc_generic(struct e1000_hw *hw); +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_setup_led_generic(struct e1000_hw *hw); +s32 e1000_setup_link_generic(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); + +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000_config_collision_dist_generic(struct e1000_hw *hw); +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); +void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value); +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000_remove_device_generic(struct e1000_hw *hw); +void e1000_reset_adaptive_generic(struct e1000_hw *hw); +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); +void e1000_update_adaptive_generic(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c new file mode 100644 index 0000000..3bd82cf --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c @@ -0,0 +1,5987 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/netdevice.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> + + +// RTNET defines... +#ifdef NETIF_F_TSO +#undef NETIF_F_TSO +#endif + +#ifdef NETIF_F_TSO6 +#undef NETIF_F_TSO6 +#endif + +#ifdef NETIF_F_HW_VLAN_TX +#undef NETIF_F_HW_VLAN_TX +#endif + +#ifdef CONFIG_E1000_NAPI +#undef CONFIG_E1000_NAPI +#endif + +#ifdef MAX_SKB_FRAGS +#undef MAX_SKB_FRAGS +#endif + +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT +#define CONFIG_E1000_DISABLE_PACKET_SPLIT +#endif + +#ifdef CONFIG_E1000_MQ +#undef CONFIG_E1000_MQ +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifdef CONFIG_PM +#undef CONFIG_PM +#endif + +#ifdef HAVE_PCI_ERS +#error "STOP it here" +#undef HAVE_PCI_ERS +#endif + +#ifdef USE_REBOOT_NOTIFIER +#undef USE_REBOOT_NOTIFIER +#endif + +#ifdef HAVE_TX_TIMEOUT +#undef HAVE_TX_TIMEOUT +#endif + + +#ifdef NETIF_F_TSO +#include <net/checksum.h> +#ifdef NETIF_F_TSO6 +#include <net/ip6_checksum.h> +#endif +#endif +#ifdef SIOCGMIIPHY +#include <linux/mii.h> +#endif +#ifdef SIOCETHTOOL +#include <linux/ethtool.h> +#endif +#ifdef NETIF_F_HW_VLAN_TX +#include <linux/if_vlan.h> +#endif +#ifdef CONFIG_E1000_MQ +#include <linux/cpu.h> +#include <linux/smp.h> +#endif + +#include "e1000.h" + +#ifdef HAVE_PCI_ERS +#error "STOP it here" +#endif + + + +char e1000_driver_name[MODULE_NAME_LEN] = "rt_e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; + +#ifdef CONFIG_E1000_NAPI +#define DRV_NAPI "-NAPI" +#else +#define DRV_NAPI +#endif + + +#define DRV_DEBUG + +#define DRV_HW_PERF + +/* + * Port to rtnet based on e1000 driver version 7.6.15.5 (22-Sep-2008 Mathias Koehrer) + * + * */ + +#define DRV_VERSION "7.6.15.5" DRV_NAPI DRV_DEBUG DRV_HW_PERF " ported to RTnet" +const char e1000_driver_version[] = DRV_VERSION; +static const char e1000_copyright[] = "Copyright (c) 1999-2008 Intel Corporation."; + +// RTNET wrappers +#define kmalloc(a,b) rtdm_malloc(a) +#define vmalloc(a) rtdm_malloc(a) +#define kfree(a) rtdm_free(a) +#define vfree(a) rtdm_free(a) +#define skb_reserve(a,b) rtskb_reserve(a,b) +#define net_device rtnet_device +#define sk_buff rtskb +#define netdev_priv(a) a->priv +// ---------------------- + + + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... 
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ + +#define PCI_ID_LIST_PCI \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82542), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82543GC_FIBER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82543GC_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544EI_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544EI_FIBER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544GC_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544GC_LOM), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EM), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545EM_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546EB_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545EM_FIBER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546EB_FIBER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541EI), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541ER_LOM), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EM_LOM), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EP_LOM), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EP), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541EI_MOBILE), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82547EI), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82547EI_MOBILE), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546EB_QUAD_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EP_LP), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545GM_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545GM_FIBER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545GM_SERDES), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82547GI), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541GI), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541GI_MOBILE), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541ER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_FIBER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_SERDES), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541GI_LF), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_PCIE), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_QUAD_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) + +#define PCI_ID_LIST_PCIE \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_M_AMT), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_AMT), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_C), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IFE), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_M), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_FIBER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_SERDES), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI_FIBER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI_SERDES), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82573E), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82573E_IAMT), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_COPPER_DPT), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_SERDES_DPT), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82573L), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_QUAD_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_QUAD_FIBER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_SERDES_DUAL), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_SERDES_QUAD), \ + 
INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_COPPER_SPT), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_SERDES_SPT), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_QUAD_COPPER_LP), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571PT_QUAD_COPPER), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IFE_GT), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IFE_G), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IGP_AMT), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IGP_C), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IFE), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IFE_G), \ + INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IFE_GT) + + + + +static struct pci_device_id e1000_pci_tbl[] = { + PCI_ID_LIST_PCI, + PCI_ID_LIST_PCIE, + /* required last entry */ + {0,} +}; +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +static struct pci_device_id e1000_pcipure_tbl[] = { + PCI_ID_LIST_PCI, + /* required last entry */ + {0,} +}; + +static struct pci_device_id e1000_pcie_tbl[] = { + PCI_ID_LIST_PCIE, + /* required last entry */ + {0,} +}; + + + +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +#ifdef CONFIG_E1000_MQ +static void e1000_setup_queue_mapping(struct e1000_adapter *adapter); +#endif +static int e1000_sw_init(struct e1000_adapter *adapter); +static int e1000_open(struct net_device *netdev); +static int e1000_close(struct net_device *netdev); +static void e1000_configure(struct e1000_adapter *adapter); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_multi(struct net_device *netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog_task(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static int e1000_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev, + struct e1000_tx_ring *tx_ring); +static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +#ifdef CONFIG_E1000_MQ +static int e1000_subqueue_xmit_frame(struct sk_buff *skb, + struct net_device *netdev, int queue); +#endif +static void e1000_phy_read_status(struct e1000_adapter *adapter); +#if 0 +static struct net_device_stats * e1000_get_stats(struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +#endif +static 
int e1000_intr(rtdm_irq_t *irq_handle); +static int e1000_intr_msi(rtdm_irq_t *irq_handle); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +#ifdef CONFIG_E1000_NAPI +static int e1000_poll(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +#else +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + nanosecs_abs_t *time_stamp); +static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + nanosecs_abs_t *time_stamp); +#endif +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +#if 0 +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +#ifdef SIOCGMIIPHY +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev); +#endif +#endif +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +#ifdef NETIF_F_HW_VLAN_TX +static void e1000_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp); +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); +#endif + +// static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); +#ifdef CONFIG_PM +static int e1000_resume(struct pci_dev *pdev); +#endif +#ifndef USE_REBOOT_NOTIFIER +// static void e1000_shutdown(struct pci_dev *pdev); +#else +static int e1000_notify_reboot(struct notifier_block *, unsigned long event, + void *ptr); +static struct notifier_block e1000_notifier_reboot = { + .notifier_call = e1000_notify_reboot, + .next = NULL, + .priority = 0 +}; +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + + +#ifdef HAVE_PCI_ERS +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; +#endif + +static 
struct pci_driver e1000_driver = {
+	.name     = e1000_driver_name,
+	.id_table = e1000_pci_tbl,
+	.probe    = e1000_probe,
+	.remove   = e1000_remove,
+#ifdef HAVE_PCI_ERS
+	.err_handler = &e1000_err_handler
+#endif
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define MAX_UNITS 8
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+
+static int local_debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(local_debug, int, 0);
+MODULE_PARM_DESC(local_debug, "Debug level (0=none,...,16=all)");
+
+/* The 'pciif' parameter restricts this driver to PCI-only or PCIe-only
+ * NICs. It mirrors the situation in newer Linux kernels, which ship two
+ * different (non-real-time) drivers for this hardware:
+ *    e1000  for PCI only
+ *    e1000e for PCIe only
+ *
+ * Using the 'pciif' parameter,
+ *    modprobe rt_e1000 pciif=pci
+ * loads the driver for PCI NICs only, while
+ *    modprobe rt_e1000 -o rt_e1000e pciif=pcie
+ * loads a second instance of this driver, named 'rt_e1000e', for PCIe
+ * NICs only.
+ *
+ * If the 'pciif' parameter is not specified, all (PCI and PCIe) e1000
+ * NICs will be used.
+ */
+static char *pciif = "all";
+module_param(pciif, charp, 0);
+MODULE_PARM_DESC(pciif, "PCI Interface: 'all' (default), 'pci', 'pcie'");
+
+//#define register_netdev(a)   rt_register_rtnetdev(a)
+//#define unregister_netdev(a) rt_unregister_rtnetdev(a)
+//#define free_netdev(a)       rtdev_free(a)
+//#define netif_stop_queue(a)  rtnetif_stop_queue(a)
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init e1000_init_module(void)
+{
+	int ret;
+
+	strcpy(e1000_driver_name, THIS_MODULE->name);
+	printk(KERN_INFO "%s - %s version %s (pciif: %s)\n",
+	       e1000_driver_string, e1000_driver_name, e1000_driver_version,
+	       pciif);
+	printk(KERN_INFO "%s\n", e1000_copyright);
+
+	if (0 == strcmp(pciif, "pcie")) {
+		/* PCIe only */
+		e1000_driver.id_table = e1000_pcie_tbl;
+	} else if (0 == strcmp(pciif, "pci")) {
+		/* PCI only */
+		e1000_driver.id_table = e1000_pcipure_tbl;
+	}
+
+	ret = pci_register_driver(&e1000_driver);
+#ifdef USE_REBOOT_NOTIFIER
+	if (ret >= 0) {
+		register_reboot_notifier(&e1000_notifier_reboot);
+	}
+#endif
+	if (copybreak != COPYBREAK_DEFAULT) {
+		if (copybreak == 0)
+			printk(KERN_INFO "e1000: copybreak disabled\n");
+		else
+			printk(KERN_INFO "e1000: copybreak enabled for "
+			       "packets <= %u bytes\n", copybreak);
+	}
+	return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit e1000_exit_module(void)
+{
+#ifdef USE_REBOOT_NOTIFIER
+	unregister_reboot_notifier(&e1000_notifier_reboot);
+#endif
+	pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	if (adapter->flags & E1000_FLAG_HAS_MSI) {
+		err = pci_enable_msi(adapter->pdev);
+		if (!err)
+			adapter->flags |= E1000_FLAG_MSI_ENABLED;
+	}
+	rt_stack_connect(netdev, &STACK_manager);
+	if (adapter->flags & E1000_FLAG_MSI_ENABLED) {
+		err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq,
+				       e1000_intr_msi, 0, netdev->name, netdev);
+		if (!err) {
+			return err;
+		} else {
+			adapter->flags &= ~E1000_FLAG_MSI_ENABLED;
+			pci_disable_msi(adapter->pdev);
+		}
+	}
+	err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq,
+			       e1000_intr, RTDM_IRQTYPE_SHARED, netdev->name,
+			       netdev);
+	if (err)
+		DPRINTK(PROBE, ERR, "Unable to allocate interrupt, error: %d\n",
+			err);
+
+	return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+	// struct net_device *netdev = adapter->netdev;
+
+	rtdm_irq_free(&adapter->irq_handle);
+
+	if (adapter->flags & E1000_FLAG_MSI_ENABLED) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~E1000_FLAG_MSI_ENABLED;
+	}
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+	atomic_inc(&adapter->irq_sem);
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
+		E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
+		E1000_WRITE_FLUSH(&adapter->hw);
+	}
+}
+
+#ifdef NETIF_F_HW_VLAN_TX
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u16 vid = adapter->hw.mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+
+	if (adapter->vlgrp) {
+		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
+			if (adapter->hw.mng_cookie.status &
+			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+				e1000_vlan_rx_add_vid(netdev, vid);
+				adapter->mng_vlan_id = vid;
+			} else {
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+			}
+
+			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
+			    (vid != old_vid) &&
+			    !vlan_group_get_device(adapter->vlgrp, old_vid))
+				e1000_vlan_rx_kill_vid(netdev, old_vid);
+		} else {
+			adapter->mng_vlan_id = vid;
+		}
+	}
+}
+#endif
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT versions of the f/w
+ * (only with 82573) this means that the network i/f is closed.
+ **/
+static void e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware take over control of h/w */
+	switch (adapter->hw.mac.type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
+		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT versions of the f/w
+ * (only with 82573) this means that the network i/f is open.
+ **/
+static void e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware know the driver has taken over */
+	switch (adapter->hw.mac.type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
+		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+static void e1000_init_manageability(struct e1000_adapter *adapter)
+{
+}
+
+static void e1000_release_manageability(struct e1000_adapter *adapter)
+{
+}
+
+/**
+ * e1000_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	e1000_set_multi(netdev);
+
+#ifdef NETIF_F_HW_VLAN_TX
+	e1000_restore_vlan(adapter);
+#endif
+	e1000_init_manageability(adapter);
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	/* call E1000_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+		adapter->alloc_rx_buf(adapter, ring,
+				      E1000_DESC_UNUSED(ring));
+	}
+
+#ifdef CONFIG_E1000_MQ
+	e1000_setup_queue_mapping(adapter);
+#endif
+
+	// adapter->tx_queue_len = netdev->tx_queue_len;
+}
+
+static void e1000_napi_enable_all(struct e1000_adapter *adapter)
+{
+#ifdef CONFIG_E1000_NAPI
+	int i;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_ring[i].napi);
+#endif
+}
+
+static void e1000_napi_disable_all(struct e1000_adapter *adapter)
+{
+#ifdef CONFIG_E1000_NAPI
+	int i;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_disable(&adapter->rx_ring[i].napi);
+#endif
+}
+
+int e1000_up(struct e1000_adapter *adapter)
+{
+	/* hardware has been reset, we need to reload some things */
+	e1000_configure(adapter);
+
+	clear_bit(__E1000_DOWN, &adapter->state);
+
+	e1000_napi_enable_all(adapter);
+
+	e1000_irq_enable(adapter);
+
+	/* fire a link change interrupt to start the watchdog */
+	// E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+	return 0;
+}
+
+static void e1000_down_and_stop(struct e1000_adapter *adapter)
+{ + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__E1000_DOWN, &adapter->state); + + cancel_work_sync(&adapter->reset_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 tctl, rctl; + + e1000_down_and_stop(adapter); + + /* disable receives in the hardware */ + rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); + E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + +#ifdef NETIF_F_LLTX + rtnetif_stop_queue(netdev); +#else + rtnetif_tx_disable(netdev); +#endif + + /* disable transmits in the hardware */ + tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(&adapter->hw); + msleep(10); + + e1000_napi_disable_all(adapter); + + e1000_irq_disable(adapter); + + // netdev->tx_queue_len = adapter->tx_queue_len; + rtnetif_carrier_off(netdev); + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + e1000_down(adapter); + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->state); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_fc_info *fc = &adapter->hw.fc; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = FALSE; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. + */ + + switch (mac->type) { + case e1000_82542: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = TRUE; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = TRUE; + pba = E1000_PBA_30K; + break; + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + pba = E1000_PBA_38K; + break; + case e1000_82573: + pba = E1000_PBA_20K; + break; + case e1000_ich8lan: + pba = E1000_PBA_8K; + break; + case e1000_ich9lan: +#define E1000_PBA_10K 0x000A + pba = E1000_PBA_10K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust == TRUE) { + if (adapter->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (mac->type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (adapter->max_frame_size > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) { + /* adjust PBA for jumbo frames */ + E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. 
Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. */ + pba = E1000_READ_REG(&adapter->hw, E1000_PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the tx fifo also stores 16 bytes of information about the tx + * but don't include ethernet FCS because hardware appends it */ + min_tx_space = (adapter->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETHERNET_FCS_SIZE) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = adapter->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (mac->type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on rx space, rx wins and must trump tx + * adjustment or use Early Receive if available */ + if (pba < min_rx_space) { + switch (mac->type) { + case e1000_82573: + case e1000_ich9lan: + /* ERT enabled in e1000_configure_rx */ + break; + default: + pba = min_rx_space; + break; + } + } + } + } + + E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); + + /* flow control settings */ + /* The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame */ + hwm = min(((pba << 10) * 9 / 10), + ((mac->type == e1000_82573 || mac->type == e1000_ich9lan) ? 
+ (u16)((pba << 10) - (E1000_ERT_2048 << 3)) : + ((pba << 10) - adapter->max_frame_size))); + + fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + + if (mac->type == e1000_80003es2lan) + fc->pause_time = 0xFFFF; + else + fc->pause_time = E1000_FC_PAUSE_TIME; + fc->send_xon = 1; + fc->type = fc->original_type; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(&adapter->hw); + + /* For 82573 and ICHx if AMT is enabled, let the firmware know + * that the network interface is in control */ + if (((adapter->hw.mac.type == e1000_82573) || + (adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) && + e1000_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + if (mac->type >= e1000_82544) + E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); + + if (e1000_init_hw(&adapter->hw)) + DPRINTK(PROBE, ERR, "Hardware Error\n"); +#ifdef NETIF_F_HW_VLAN_TX + e1000_update_mng_vlan(adapter); +#endif + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (mac->type >= e1000_82544 && + mac->type <= e1000_82547_rev_2 && + mac->autoneg == 1 && + adapter->hw.phy.autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload */ + ctrl &= ~E1000_CTRL_SWDPIN3; + E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); + } + +#if defined(CONFIG_PPC64) || defined(CONFIG_PPC) +#define E1000_GCR_DISABLE_TIMEOUT_MECHANISM 0x80000000 + if (adapter->hw.mac.type == e1000_82571) { + /* work around pSeries hardware by disabling timeouts */ + u32 gcr = E1000_READ_REG(&adapter->hw, E1000_GCR); + gcr |= E1000_GCR_DISABLE_TIMEOUT_MECHANISM; + E1000_WRITE_REG(&adapter->hw, E1000_GCR, gcr); + } +#endif + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(&adapter->hw); + e1000_get_phy_info(&adapter->hw); + + if (!(adapter->flags & E1000_FLAG_SMART_POWER_DOWN) && + (mac->type == e1000_82571 || mac->type == e1000_82572)) { + u16 phy_data = 0; + /* speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed */ + e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, + &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, + phy_data); + } + + e1000_release_manageability(adapter); +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
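+ *
+ * RTnet-specific: the device is registered with rt_register_rtnetdev()
+ * rather than register_netdev(), and the interrupt is requested later,
+ * in e1000_open(), through the RTDM layer (see e1000_request_irq()).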
+ **/ +static int e1000_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + + static int cards_found = 0; + static int global_quad_port_a = 0; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + + if (cards[cards_found++] == 0) + { + return -ENODEV; + } + + if ((err = pci_enable_device(pdev))) + return err; + + if (!(err = dma_set_mask(&pdev->dev, DMA_64BIT_MASK)) && + !(err = dma_set_coherent_mask(&pdev->dev, DMA_64BIT_MASK))) { + pci_using_dac = 1; + } else { + if ((err = dma_set_mask(&pdev->dev, DMA_32BIT_MASK)) && + (err = dma_set_coherent_mask(&pdev->dev, DMA_32BIT_MASK))) { + E1000_ERR("No usable DMA configuration, aborting\n"); + goto err_dma; + } + pci_using_dac = 0; + } + + if ((err = pci_request_regions(pdev, e1000_driver_name))) + goto err_pci_reg; + + pci_set_master(pdev); + + err = -ENOMEM; +#ifdef CONFIG_E1000_MQ + netdev = rt_alloc_etherdev(sizeof(struct e1000_adapter) + + (sizeof(struct net_device_subqueue) * + E1000_MAX_TX_QUEUES), 16); +#else + netdev = rt_alloc_etherdev(sizeof(struct e1000_adapter), + 2 * E1000_DEFAULT_RXD + E1000_DEFAULT_TXD); +#endif + if (!netdev) + goto err_alloc_etherdev; + + memset(netdev->priv, 0, sizeof(struct e1000_adapter)); + rt_rtdev_connect(netdev, &RTDEV_manager); + + // SET_NETDEV_DEV(netdev, &pdev->dev); + netdev->vers = RTDEV_VERS_2_0; + + pci_set_drvdata(pdev, netdev); + adapter = netdev->priv; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + adapter->msg_enable = (1 << local_debug) - 1; + + err = -EIO; + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0), + pci_resource_len(pdev, BAR_0)); + if (!adapter->hw.hw_addr) + goto err_ioremap; + + for (i = BAR_1; i <= BAR_5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + adapter->hw.io_base = pci_resource_start(pdev, i); + break; + } + } + + netdev->open = &e1000_open; + netdev->stop = &e1000_close; + netdev->hard_start_xmit = &e1000_xmit_frame; +#ifdef CONFIG_E1000_MQ + netdev->hard_start_subqueue_xmit = &e1000_subqueue_xmit_frame; +#endif +#ifdef HAVE_TX_TIMEOUT + netdev->tx_timeout = &e1000_tx_timeout; + netdev->watchdog_timeo = 5 * HZ; +#endif +#ifdef NETIF_F_HW_VLAN_TX + netdev->vlan_rx_register = e1000_vlan_rx_register; + netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; + netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = e1000_netpoll; +#endif + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + if ((err = e1000_sw_init(adapter))) + goto err_sw_init; + + err = -EIO; + /* Flash BAR mapping must happen after e1000_sw_init + * because it depends on mac.type */ + if (((adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + adapter->hw.flash_address = ioremap(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1)); + if (!adapter->hw.flash_address) + goto err_flashmap; + } + + if ((err = e1000_init_mac_params(&adapter->hw))) + goto err_hw_init; + + if ((err = e1000_init_nvm_params(&adapter->hw))) + goto err_hw_init; + + if ((err = e1000_init_phy_params(&adapter->hw))) + goto err_hw_init; + + e1000_get_bus_info(&adapter->hw); + + e1000_init_script_state_82541(&adapter->hw, TRUE); 
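+	/* Like the 82541 init-script call above, the 82543 TBI call below
+	 * is silicon-specific; both are expected to return early on any
+	 * other MAC type (an assumption based on the shared-code naming). */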
+ e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE); + + adapter->hw.phy.autoneg_wait_to_complete = FALSE; + adapter->hw.mac.adaptive_ifs = FALSE; + + /* Copper options */ + + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = FALSE; + adapter->hw.phy.ms_type = E1000_MASTER_SLAVE; + } + + if (e1000_check_reset_block(&adapter->hw)) + DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); + +#ifdef MAX_SKB_FRAGS + if (adapter->hw.mac.type >= e1000_82543) { +#ifdef NETIF_F_HW_VLAN_TX + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + if ((adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) + netdev->features &= ~NETIF_F_HW_VLAN_FILTER; +#else + netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM; +#endif + } + +#ifdef NETIF_F_TSO + if ((adapter->hw.mac.type >= e1000_82544) && + (adapter->hw.mac.type != e1000_82547)) { + adapter->flags |= E1000_FLAG_HAS_TSO; + netdev->features |= NETIF_F_TSO; + } + +#ifdef NETIF_F_TSO6 + if (adapter->hw.mac.type > e1000_82547_rev_2) { + adapter->flags |= E1000_FLAG_HAS_TSO6; + netdev->features |= NETIF_F_TSO6; + } +#endif +#endif + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + +#endif +#ifdef NETIF_F_LLTX + netdev->features |= NETIF_F_LLTX; +#endif + + /* Hardware features, flags and workarounds */ + if (adapter->hw.mac.type >= e1000_82571) { + adapter->flags |= E1000_FLAG_INT_ASSERT_AUTO_MASK; + adapter->flags |= E1000_FLAG_HAS_MSI; + adapter->flags |= E1000_FLAG_HAS_MANC2H; + } + + if (adapter->hw.mac.type >= e1000_82540) { + adapter->flags |= E1000_FLAG_HAS_SMBUS; + adapter->flags |= E1000_FLAG_HAS_INTR_MODERATION; + } + + if (adapter->hw.mac.type == e1000_82543) + adapter->flags |= E1000_FLAG_BAD_TX_CARRIER_STATS_FD; + + /* In rare occasions, ESB2 systems would end up started without + * the RX unit being turned on. 
*/ + if (adapter->hw.mac.type == e1000_80003es2lan) + adapter->flags |= E1000_FLAG_RX_NEEDS_RESTART; + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); + + /* before reading the NVM, reset the controller to + * put the device in a known good starting state */ + + e1000_reset_hw(&adapter->hw); + + /* make sure we don't intercept ARP packets until we're up */ + e1000_release_manageability(adapter); + + /* make sure the NVM is good */ + + if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { + DPRINTK(PROBE, ERR, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + + /* copy the MAC address out of the NVM */ + + if (e1000_read_mac_addr(&adapter->hw)) + DPRINTK(PROBE, ERR, "NVM Read Error\n"); + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); +#ifdef ETHTOOL_GPERMADDR + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { +#else + if (!is_valid_ether_addr(netdev->dev_addr)) { +#endif + DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (adapter->hw.mac.type) { + case e1000_82542: + case e1000_82543: + break; + case e1000_82544: + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_ich8lan: + case e1000_ich9lan: + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC); + eeprom_apme_mask = E1000_WUC_APME; + break; + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82571: + case e1000_80003es2lan: + if (adapter->hw.bus.func == 1) { + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + fallthrough; + default: + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82571EB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting */ + if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & + E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->flags |= E1000_FLAG_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom 
settings */
+	adapter->wol = adapter->eeprom_wol;
+
+	/* print bus type/speed/width info */
+	{
+		struct e1000_hw *hw = &adapter->hw;
+		DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+			((hw->bus.type == e1000_bus_type_pcix) ? "-X" :
+			 (hw->bus.type == e1000_bus_type_pci_express ? " Express" : "")),
+			((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+			 (hw->bus.speed == e1000_bus_speed_133) ? "133MHz" :
+			 (hw->bus.speed == e1000_bus_speed_120) ? "120MHz" :
+			 (hw->bus.speed == e1000_bus_speed_100) ? "100MHz" :
+			 (hw->bus.speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+			((hw->bus.width == e1000_bus_width_64) ? "64-bit" :
+			 (hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+			 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+			 "32-bit"));
+	}
+
+	for (i = 0; i < 6; i++)
+		printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+	/* reset the hardware with the new settings */
+	e1000_reset(adapter);
+
+	/* If the controller is 82573 or ICH and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up. For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (((adapter->hw.mac.type != e1000_82573) &&
+	     (adapter->hw.mac.type != e1000_ich8lan) &&
+	     (adapter->hw.mac.type != e1000_ich9lan)) ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	/* tell the stack to leave us alone until e1000_open() is called */
+	rtnetif_carrier_off(netdev);
+	rtnetif_stop_queue(netdev);
+
+	strcpy(netdev->name, "rteth%d");
+	err = rt_register_rtnetdev(netdev);
+	if (err)
+		goto err_register;
+
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+	cards_found++;
+	return 0;
+
+err_register:
+err_hw_init:
+	e1000_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+
+	e1000_remove_device(&adapter->hw);
+err_flashmap:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_sw_init:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void e1000_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	e1000_down_and_stop(adapter);
+
+	e1000_release_manageability(adapter);
+
+	/* Release control of h/w to f/w. If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
*/ + e1000_release_hw_control(adapter); + + rt_unregister_rtnetdev(netdev); + + if (!e1000_check_reset_block(&adapter->hw)) + e1000_phy_hw_reset(&adapter->hw); + + e1000_remove_device(&adapter->hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + pci_release_regions(pdev); + + rtdev_free(netdev); + + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; +#ifdef CONFIG_E1000_NAPI + int i; +#endif + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETHERNET_FCS_SIZE; + adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE; + + /* Initialize the hardware-specific values */ + if (e1000_setup_init_funcs(hw, FALSE)) { + DPRINTK(PROBE, ERR, "Hardware Initialization Failure\n"); + return -EIO; + } + +#ifdef CONFIG_E1000_MQ + /* Number of supported queues. + * TODO: It's assumed num_rx_queues >= num_tx_queues, since multi-rx + * queues are much more interesting. Is it worth coding for the + * possibility (however improbable) of num_tx_queues > num_rx_queues? + */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_80003es2lan: + adapter->num_tx_queues = 2; + adapter->num_rx_queues = 2; + break; + case e1000_ich8lan: + case e1000_ich9lan: + if ((adapter->hw.device_id == E1000_DEV_ID_ICH8_IGP_AMT) || + (adapter->hw.device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) || + (adapter->hw.device_id == E1000_DEV_ID_ICH9_IGP_AMT)) { + adapter->num_tx_queues = 2; + adapter->num_rx_queues = 2; + break; + } + fallthrough; /* remaining ICH SKUs do not support MQ */ + default: + /* All hardware before 82571 only have 1 queue each for Rx/Tx. + * However, the 82571 family does not have MSI-X, so multi- + * queue isn't enabled. + * It'd be wise not to mess with this default case. 
:) */ + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + netdev->egress_subqueue_count = 0; + break; + } + adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus()); + adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus()); + + if ((adapter->num_tx_queues > 1) || (adapter->num_rx_queues > 1)) { + netdev->egress_subqueue = (struct net_device_subqueue *) + ((void *)adapter + + sizeof(struct e1000_adapter)); + netdev->egress_subqueue_count = adapter->num_tx_queues; + DPRINTK(DRV, INFO, "Multiqueue Enabled: RX queues = %u, " + "TX queues = %u\n", adapter->num_rx_queues, + adapter->num_tx_queues); + } +#else + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; +#endif + + if (e1000_alloc_queues(adapter)) { + DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + +#ifdef CONFIG_E1000_NAPI + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *rx_ring = &adapter->rx_ring[i]; + netif_napi_add(adapter->netdev, &rx_ring->napi, e1000_poll, 64); + } + rtdm_lock_init(&adapter->tx_queue_lock); +#ifdef CONFIG_E1000_MQ + for (i = 0; i < adapter->num_tx_queues; i++) + rtdm_lock_init(&adapter->tx_ring[i].tx_queue_lock); +#endif +#endif + + /* Explicitly disable IRQ since the NIC can be in any state. */ + atomic_set(&adapter->irq_sem, 0); + e1000_irq_disable(adapter); + + rtdm_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->state); + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + +#ifdef CONFIG_E1000_MQ + adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *); +#endif + + return E1000_SUCCESS; +} + +#ifdef CONFIG_E1000_MQ +static void e1000_setup_queue_mapping(struct e1000_adapter *adapter) +{ + int i, cpu; + + lock_cpu_hotplug(); + i = 0; + for_each_online_cpu(cpu) { + *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = + &adapter->tx_ring[i % adapter->num_tx_queues]; + i++; + } + unlock_cpu_hotplug(); +} +#endif + +/** + * e1000_intr_msi_test - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi_test(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + u32 icr = E1000_READ_REG(&adapter->hw, E1000_ICR); + DPRINTK(HW,INFO, "icr is %08X\n", icr); + if (icr & E1000_ICR_RXSEQ) { + adapter->flags |= E1000_FLAG_HAS_MSI; + wmb(); + } + + return IRQ_HANDLED; +} + +/** + * e1000_test_msi_interrupt - Returns 0 for successful test + * @adapter: board private struct + * + * code flow taken from tg3.c + **/ +static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + /* poll_enable hasn't been called yet, so don't need disable */ + /* clear any pending events */ + E1000_READ_REG(&adapter->hw, E1000_ICR); + + /* free the real vector and request a test handler */ + e1000_free_irq(adapter); + + err = pci_enable_msi(adapter->pdev); + err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0, + netdev->name, netdev); + 
if (err) { + pci_disable_msi(adapter->pdev); + goto msi_test_failed; + } + + /* our temporary test variable */ + adapter->flags &= ~E1000_FLAG_HAS_MSI; + wmb(); + + e1000_irq_enable(adapter); + + /* fire an unusual interrupt on the test handler */ + E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_RXSEQ); + E1000_WRITE_FLUSH(&adapter->hw); + msleep(50); + + e1000_irq_disable(adapter); + + rmb(); + if (!(adapter->flags & E1000_FLAG_HAS_MSI)) { + adapter->flags |= E1000_FLAG_HAS_MSI; + err = -EIO; + DPRINTK(HW, INFO, "MSI interrupt test failed!\n"); + } + + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + + if (err == -EIO) + goto msi_test_failed; + + /* okay so the test worked, restore settings */ + DPRINTK(HW, INFO, "MSI interrupt test succeeded!\n"); +msi_test_failed: + /* restore the original vector, even if it failed */ + e1000_request_irq(adapter); + return err; +} + +/** + * e1000_test_msi - Returns 0 if MSI test succeeds and INTx mode is restored + * @adapter: board private struct + * + * code flow taken from tg3.c, called with e1000 interrupts disabled. + **/ +static int e1000_test_msi(struct e1000_adapter *adapter) +{ + int err; + u16 pci_cmd; + + if (!(adapter->flags & E1000_FLAG_MSI_ENABLED) || + !(adapter->flags & E1000_FLAG_HAS_MSI)) + return 0; + + /* disable SERR in case the MSI write causes a master abort */ + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + pci_write_config_word(adapter->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = e1000_test_msi_interrupt(adapter); + + /* restore previous setting of command word */ + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + + /* success ! */ + if (!err) + return 0; + + /* EIO means MSI test failed */ + if (err != -EIO) + return err; + + /* back to INTx mode */ + DPRINTK(PROBE, WARNING, "MSI interrupt test failed, using legacy " + "interrupt.\n"); + + e1000_free_irq(adapter); + adapter->flags &= ~E1000_FLAG_HAS_MSI; + + err = e1000_request_irq(adapter); + + return err; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
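+ *
+ * Two port details worth noting: the watchdog runs as delayed work
+ * rather than a timer (see schedule_delayed_work() below), and
+ * e1000_test_msi() is used to fall back to a legacy interrupt on PCIe
+ * chipsets that silently ignore MSI messages.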
+ **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int err; + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->state)) + return -EBUSY; + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + e1000_power_up_phy(&adapter->hw); + e1000_setup_link(&adapter->hw); + } + +#ifdef NETIF_F_HW_VLAN_TX + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) { + e1000_update_mng_vlan(adapter); + } +#endif + + /* For 82573 and ICHx if AMT is enabled, let the firmware know + * that the network interface is now open */ + if (((adapter->hw.mac.type == e1000_82573) || + (adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) && + e1000_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. */ + e1000_configure(adapter); + + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* work around PCIe errata with MSI interrupts causing some chipsets to + * ignore e1000 MSI messages, which means we need to test our MSI + * interrupt now */ + err = e1000_test_msi(adapter); + if (err) { + DPRINTK(PROBE, ERR, "Interrupt allocation failed\n"); + goto err_req_irq; + } + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->state); + + e1000_napi_enable_all(adapter); + + schedule_delayed_work(&adapter->watchdog_task, 1); + e1000_irq_enable(adapter); + + /* fire a link status change interrupt to start the watchdog */ + E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_release_hw_control(adapter); + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is TRUE * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active */ + if (!adapter->wol && adapter->hw.mac.type >= e1000_82540 && + adapter->hw.phy.media_type == e1000_media_type_copper) + e1000_power_down_phy(&adapter->hw); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
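+ *
+ * Teardown mirrors e1000_open (illustrative summary): e1000_down(),
+ * optional PHY power-down, free the IRQ, then free all Tx/Rx resources
+ * and hand control back to the firmware where AMT is active.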
+ **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + e1000_down(adapter); + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is TRUE * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active */ + if (!adapter->wol && adapter->hw.mac.type >= e1000_82540 && + adapter->hw.phy.media_type == e1000_media_type_copper) + e1000_power_down_phy(&adapter->hw); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + +#ifdef NETIF_F_HW_VLAN_TX + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + !(adapter->vlgrp && + vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) { + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + } +#endif + + /* For 82573 and ICHx if AMT is enabled, let the firmware know + * that the network interface is now closed */ + if (((adapter->hw.mac.type == e1000_82573) || + (adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) && + e1000_check_mng_mode(&adapter->hw)) + e1000_release_hw_control(adapter); + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, + void *start, unsigned long len) +{ + unsigned long begin = (unsigned long) start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 */ + if (adapter->hw.mac.type == e1000_82545 || + adapter->hw.mac.type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) != 0 ? 
FALSE : TRUE; + } + + return TRUE; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_buffer) * tx_ring->count; + tx_ring->buffer_info = vmalloc(size); + if (!tx_ring->buffer_info) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; + } + memset(tx_ring->buffer_info, 0, size); + + /* round up to nearest 4K */ + + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_ATOMIC); + if (!tx_ring->desc) { +setup_tx_desc_die: + vfree(tx_ring->buffer_info); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, tx_ring->desc, tx_ring->size)) { + void *olddesc = tx_ring->desc; + dma_addr_t olddma = tx_ring->dma; + DPRINTK(TX_ERR, ERR, "tx_ring align check failed: %u bytes " + "at %p\n", tx_ring->size, tx_ring->desc); + /* Try again, without freeing the previous */ + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_ATOMIC); + /* Failed allocation, critical failure */ + if (!tx_ring->desc) { + dma_free_coherent(&pdev->dev, tx_ring->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, tx_ring->desc, + tx_ring->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + dma_free_coherent(&pdev->dev, tx_ring->size, olddesc, + olddma); + DPRINTK(PROBE, ERR, + "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(tx_ring->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, tx_ring->size, olddesc, + olddma); + } + } + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + rtdm_lock_init(&tx_ring->tx_lock); + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * @adapter: board private structure + * + * this allocates tx resources for all queues, return 0 on success, negative + * on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
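+ *
+ * The inter-packet gap is packed into one register (sketch):
+ *   TIPG = IPGT | (ipgr1 << E1000_TIPG_IPGR1_SHIFT)
+ *               | (ipgr2 << E1000_TIPG_IPGR2_SHIFT)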
+ **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg, tarc; + u32 ipgr1, ipgr2; + int i; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) { + tdba = adapter->tx_ring[i].dma; + tdlen = adapter->tx_ring[i].count * sizeof(struct e1000_tx_desc); + E1000_WRITE_REG(hw, E1000_TDBAL(i), (tdba & 0x00000000ffffffffULL)); + E1000_WRITE_REG(hw, E1000_TDBAH(i), (tdba >> 32)); + E1000_WRITE_REG(hw, E1000_TDLEN(i), tdlen); + E1000_WRITE_REG(hw, E1000_TDH(i), 0); + E1000_WRITE_REG(hw, E1000_TDT(i), 0); + adapter->tx_ring[i].tdh = E1000_REGISTER(hw, E1000_TDH(i)); + adapter->tx_ring[i].tdt = E1000_REGISTER(hw, E1000_TDT(i)); + } + + + /* Set the default values for the Tx Inter Packet Gap timer */ + if (adapter->hw.mac.type <= e1000_82547_rev_2 && + (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac.type) { + case e1000_82542: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + case e1000_80003es2lan: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + E1000_WRITE_REG(hw, E1000_TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + E1000_WRITE_REG(hw, E1000_TIDV, adapter->tx_int_delay); + if (adapter->flags & E1000_FLAG_HAS_INTR_MODERATION) + E1000_WRITE_REG(hw, E1000_TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + if (hw->mac.type == e1000_82571 || hw->mac.type == e1000_82572) { + tarc = E1000_READ_REG(hw, E1000_TARC(0)); + /* set the speed mode bit, we'll clear it if we're not at + * gigabit link later */ +#define SPEED_MODE_BIT (1 << 21) + tarc |= SPEED_MODE_BIT; + E1000_WRITE_REG(hw, E1000_TARC(0), tarc); + } else if (hw->mac.type == e1000_80003es2lan) { + tarc = E1000_READ_REG(hw, E1000_TARC(0)); + tarc |= 1; + E1000_WRITE_REG(hw, E1000_TARC(0), tarc); + tarc = E1000_READ_REG(hw, E1000_TARC(1)); + tarc |= 1; + E1000_WRITE_REG(hw, E1000_TARC(1), tarc); + } + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac.type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
*/ + if (hw->mac.type == e1000_82544 && + hw->bus.type == e1000_bus_type_pcix) + adapter->pcix_82544 = 1; + + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + rx_ring->buffer_info = vmalloc(size); + if (!rx_ring->buffer_info) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + memset(rx_ring->buffer_info, 0, size); + + rx_ring->ps_page = kcalloc(rx_ring->count, sizeof(struct e1000_ps_page), + GFP_KERNEL); + if (!rx_ring->ps_page) { + vfree(rx_ring->buffer_info); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + + rx_ring->ps_page_dma = kcalloc(rx_ring->count, + sizeof(struct e1000_ps_page_dma), + GFP_KERNEL); + if (!rx_ring->ps_page_dma) { + vfree(rx_ring->buffer_info); + kfree(rx_ring->ps_page); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + + if (adapter->hw.mac.type <= e1000_82547_rev_2) + desc_len = sizeof(struct e1000_rx_desc); + else + desc_len = sizeof(union e1000_rx_desc_packet_split); + + /* Round up to nearest 4K */ + + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_ATOMIC); + + if (!rx_ring->desc) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); +setup_rx_desc_die: + vfree(rx_ring->buffer_info); + kfree(rx_ring->ps_page); + kfree(rx_ring->ps_page_dma); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rx_ring->desc, rx_ring->size)) { + void *olddesc = rx_ring->desc; + dma_addr_t olddma = rx_ring->dma; + DPRINTK(RX_ERR, ERR, "rx_ring align check failed: %u bytes " + "at %p\n", rx_ring->size, rx_ring->desc); + /* Try again, without freeing the previous */ + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_ATOMIC); + /* Failed allocation, critical failure */ + if (!rx_ring->desc) { + dma_free_coherent(&pdev->dev, rx_ring->size, olddesc, + olddma); + DPRINTK(PROBE, ERR, + "Unable to allocate memory " + "for the receive descriptor ring\n"); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rx_ring->desc, + rx_ring->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + dma_free_coherent(&pdev->dev, rx_ring->size, olddesc, + olddma); + DPRINTK(PROBE, ERR, + "Unable to allocate aligned memory " + "for the receive descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rx_ring->size, olddesc, + olddma); + } + } + memset(rx_ring->desc, 0, rx_ring->size); + + /* set up ring defaults */ + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->rx_skb_top = NULL; + rx_ring->adapter = adapter; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * @adapter: board private structure + 
* + * this allocates rx resources for all queues, return 0 on success, negative + * on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + u32 rctl, rfctl; + u32 psrctl = 0; +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT + u32 pages = 0; +#endif + + rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* disable the stripping of CRC because it breaks + * BMC firmware connected over SMBUS + if (adapter->hw.mac.type > e1000_82543) + rctl |= E1000_RCTL_SECRC; + */ + + if (e1000_tbi_sbp_enabled_82543(&adapter->hw)) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_256: + rctl |= E1000_RCTL_SZ_256; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_512: + rctl |= E1000_RCTL_SZ_512; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_1024: + rctl |= E1000_RCTL_SZ_1024; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT + /* 82571 and greater support packet-split where the protocol + * header is placed in skb->data and the packet data is + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. + * In the case of a non-split, skb->data is linearly filled, + * followed by the page buffers. Therefore, skb->data is + * sized to hold the largest protocol header. 
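+ *
+ * PAGE_USE_COUNT() above is a plain ceiling division by PAGE_SIZE;
+ * e.g. with 4 KiB pages a 9000-byte MTU needs (9000 >> 12) + 1 = 3
+ * pages, which still satisfies the "pages <= 3" packet-split test below.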
+ */ + /* allocations using alloc_page take too long for regular MTU + * so only enable packet split for jumbo frames */ + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + if ((adapter->hw.mac.type >= e1000_82571) && (pages <= 3) && + PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE)) + adapter->rx_ps_pages = pages; + else + adapter->rx_ps_pages = 0; +#endif + + if (adapter->rx_ps_pages) { + /* Configure extra packet-split registers */ + rfctl = E1000_READ_REG(&adapter->hw, E1000_RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + /* disable packet split support for IPv6 extension headers, + * because some malformed IPv6 headers can hang the RX */ + rfctl |= (E1000_RFCTL_IPV6_EX_DIS | + E1000_RFCTL_NEW_IPV6_EXT_DIS); + + E1000_WRITE_REG(&adapter->hw, E1000_RFCTL, rfctl); + + /* disable the stripping of CRC because it breaks + * BMC firmware connected over SMBUS */ + rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */; + + psrctl |= adapter->rx_ps_bsize0 >> + E1000_PSRCTL_BSIZE0_SHIFT; + + switch (adapter->rx_ps_pages) { + case 3: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE3_SHIFT; + fallthrough; + case 2: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE2_SHIFT; + fallthrough; + case 1: + psrctl |= PAGE_SIZE >> + E1000_PSRCTL_BSIZE1_SHIFT; + break; + } + + E1000_WRITE_REG(&adapter->hw, E1000_PSRCTL, psrctl); + } + + E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); + adapter->flags &= ~E1000_FLAG_RX_RESTART_NOW; +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum, ctrl_ext; + int i; + + if (adapter->rx_ps_pages) { + /* this is a 32 byte descriptor */ + rdlen = adapter->rx_ring[0].count * + sizeof(union e1000_rx_desc_packet_split); + adapter->clean_rx = e1000_clean_rx_irq_ps; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; +#ifdef CONFIG_E1000_NAPI + } else if (adapter->netdev->mtu > MAXIMUM_ETHERNET_VLAN_SIZE) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; +#endif + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + mdelay(10); + + /* set the Receive Delay Timer Register */ + E1000_WRITE_REG(hw, E1000_RDTR, adapter->rx_int_delay); + + if (adapter->flags & E1000_FLAG_HAS_INTR_MODERATION) { + E1000_WRITE_REG(hw, E1000_RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + E1000_WRITE_REG(hw, E1000_ITR, + 1000000000 / (adapter->itr * 256)); + } + + if (hw->mac.type >= e1000_82571) { + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + /* Reset delay timers after every interrupt */ + ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; +#ifdef CONFIG_E1000_NAPI + /* Auto-Mask interrupts upon ICR access */ + ctrl_ext |= E1000_CTRL_EXT_IAME; + E1000_WRITE_REG(hw, E1000_IAM, 0xffffffff); +#endif + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + for (i = 0; i < 
adapter->num_rx_queues; i++) { + rdba = adapter->rx_ring[i].dma; + E1000_WRITE_REG(hw, E1000_RDBAL(i), (rdba & 0x00000000ffffffffULL)); + E1000_WRITE_REG(hw, E1000_RDBAH(i), (rdba >> 32)); + E1000_WRITE_REG(hw, E1000_RDLEN(i), rdlen); + E1000_WRITE_REG(hw, E1000_RDH(i), 0); + E1000_WRITE_REG(hw, E1000_RDT(i), 0); + adapter->rx_ring[i].rdh = E1000_REGISTER(hw, E1000_RDH(i)); + adapter->rx_ring[i].rdt = E1000_REGISTER(hw, E1000_RDT(i)); + } + +#ifdef CONFIG_E1000_MQ + if (adapter->num_rx_queues > 1) { + u32 random[10]; + u32 reta, mrqc; + int i; + + get_random_bytes(&random[0], 40); + + switch (adapter->num_rx_queues) { + default: + reta = 0x00800080; + mrqc = E1000_MRQC_ENABLE_RSS_2Q; + break; + } + + /* Fill out redirection table */ + for (i = 0; i < 32; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_RETA, i, reta); + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK, i, random[i]); + + mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP); + + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + + /* Multiqueue and packet checksumming are mutually exclusive. */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + } else if (hw->mac.type >= e1000_82543) { +#else + if (hw->mac.type >= e1000_82543) { +#endif /* CONFIG_E1000_MQ */ + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + if (adapter->rx_csum == TRUE) { + rxcsum |= E1000_RXCSUM_TUOFL; + + /* Enable 82571 IPv4 payload checksum for UDP fragments + * Must be used in conjunction with packet-split. */ + if ((hw->mac.type >= e1000_82571) && + (adapter->rx_ps_pages)) { + rxcsum |= E1000_RXCSUM_IPPCSE; + } + } else { + rxcsum &= ~E1000_RXCSUM_TUOFL; + /* don't need to clear IPPCSE as it defaults to 0 */ + } + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + } + + /* Enable early receives on supported devices, only takes effect when + * packet size is equal or larger than the specified value (in 8 byte + * units), e.g. 
using jumbo frames when setting to E1000_ERT_2048 */ + if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_ich9lan) && + (adapter->netdev->mtu > ETH_DATA_LEN)) + E1000_WRITE_REG(hw, E1000_ERT, E1000_ERT_2048); + + /* Enable Receives */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + if (buffer_info->dma) { + dma_unmap_page(&adapter->pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + kfree_rtskb(buffer_info->skb); + buffer_info->skb = NULL; + } + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->tdh); + writel(0, adapter->hw.hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + kfree(rx_ring->ps_page); + rx_ring->ps_page = NULL; + kfree(rx_ring->ps_page_dma); + rx_ring->ps_page_dma = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: 
board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_rx_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct e1000_ps_page_dma *ps_page_dma; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i, j; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma && + adapter->clean_rx == e1000_clean_rx_irq) { + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); +#ifdef CONFIG_E1000_NAPI + } else if (buffer_info->dma && + adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, + DMA_FROM_DEVICE); +#endif + } else if (buffer_info->dma && + adapter->clean_rx == e1000_clean_rx_irq_ps) { + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + } + buffer_info->dma = 0; + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + if (buffer_info->skb) { + kfree_rtskb(buffer_info->skb); + buffer_info->skb = NULL; + } + ps_page = &rx_ring->ps_page[i]; + ps_page_dma = &rx_ring->ps_page_dma[i]; + for (j = 0; j < adapter->rx_ps_pages; j++) { + if (!ps_page->ps_page[j]) break; + dma_unmap_page(&pdev->dev, + ps_page_dma->ps_page_dma[j], + PAGE_SIZE, DMA_FROM_DEVICE); + ps_page_dma->ps_page_dma[j] = 0; + put_page(ps_page->ps_page[j]); + ps_page->ps_page[j] = NULL; + } + } + +#ifdef CONFIG_E1000_NAPI + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + kfree_rtskb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } +#endif + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + size = sizeof(struct e1000_ps_page) * rx_ring->count; + memset(rx_ring->ps_page, 0, size); + size = sizeof(struct e1000_ps_page_dma) * rx_ring->count; + memset(rx_ring->ps_page_dma, 0, size); + + /* Zero out the descriptor ring */ + + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->rdh); + writel(0, adapter->hw.hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +#if 0 +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 rctl; + + if (adapter->hw.mac.type != e1000_82542) + return; + if (adapter->hw.revision_id != E1000_REVISION_2) + return; + + e1000_pci_clear_mwi(&adapter->hw); + + rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); + rctl |= E1000_RCTL_RST; + E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); + 
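+	/* The flush read below pushes the posted RST write out to the
+	 * device before the settle delay starts. */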
E1000_WRITE_FLUSH(&adapter->hw); + mdelay(5); + + if (rtnetif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 rctl; + + if (adapter->hw.mac.type != e1000_82542) + return; + if (adapter->hw.revision_id != E1000_REVISION_2) + return; + + rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); + rctl &= ~E1000_RCTL_RST; + E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(&adapter->hw); + mdelay(5); + + if (adapter->hw.bus.pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(&adapter->hw); + + if (rtnetif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (adapter->hw.mac.type == e1000_82542) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + /* With 82571 controllers, LAA may be overwritten (with the default) + * due to controller reset from the other port. */ + if (adapter->hw.mac.type == e1000_82571) { + /* activate the work around */ + e1000_set_laa_state_82571(&adapter->hw, TRUE); + + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. Eventually the LAA will be in RAR[0] and + * RAR[14] */ + e1000_rar_set(&adapter->hw, + adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); + } + + if (adapter->hw.mac.type == e1000_82542) + e1000_leave_82542_rst(adapter); + + return 0; +} +#endif + +/** + * e1000_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
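+ *
+ * RCTL bit handling (sketch): IFF_PROMISC sets UPE | MPE, IFF_ALLMULTI
+ * sets MPE only, otherwise both filter-override bits are cleared.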
+ **/ +static void e1000_set_multi(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = E1000_READ_REG(hw, E1000_RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + } else if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + rctl &= ~E1000_RCTL_UPE; + } else { + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + } + + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + e1000_get_phy_info(&adapter->hw); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) == + E1000_READ_REG(&adapter->hw, E1000_TDH(0))) && + (E1000_READ_REG(&adapter->hw, E1000_TDFT) == + E1000_READ_REG(&adapter->hw, E1000_TDFH)) && + (E1000_READ_REG(&adapter->hw, E1000_TDFTS) == + E1000_READ_REG(&adapter->hw, E1000_TDFHS))) { + tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); + E1000_WRITE_REG(&adapter->hw, E1000_TCTL, + tctl & ~E1000_TCTL_EN); + E1000_WRITE_REG(&adapter->hw, E1000_TDFT, + adapter->tx_head_addr); + E1000_WRITE_REG(&adapter->hw, E1000_TDFH, + adapter->tx_head_addr); + E1000_WRITE_REG(&adapter->hw, E1000_TDFTS, + adapter->tx_head_addr); + E1000_WRITE_REG(&adapter->hw, E1000_TDFHS, + adapter->tx_head_addr); + E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(&adapter->hw); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + rtnetif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->state)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } +} + +static bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = FALSE; + s32 ret_val = 0; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (hw->mac.get_link_status) { + ret_val = e1000_check_for_link(hw); + link_active = !hw->mac.get_link_status; + } else { + link_active = TRUE; + } + break; + case e1000_media_type_fiber: + ret_val = e1000_check_for_link(hw); + link_active = !!(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + ret_val = e1000_check_for_link(hw); + link_active = adapter->hw.mac.serdes_has_link; + break; + default: + case e1000_media_type_unknown: + break; + } + + if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + (E1000_READ_REG(&adapter->hw, E1000_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ + DPRINTK(LINK, INFO, + "Gigabit has been disabled, downgrading speed\n"); + } + + return link_active; +} + +static void e1000_enable_receives(struct e1000_adapter *adapter) +{ + /* make sure the receive unit is started */ + if ((adapter->flags & E1000_FLAG_RX_NEEDS_RESTART) && + (adapter->flags & E1000_FLAG_RX_RESTART_NOW)) { + struct e1000_hw *hw = &adapter->hw; + u32 rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN); + adapter->flags &= ~E1000_FLAG_RX_RESTART_NOW; + } +} + +static void e1000_watchdog_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_tx_ring *tx_ring; + u32 link, tctl; + int i, tx_pending = 0; + + link = e1000_has_link(adapter); + if ((rtnetif_carrier_ok(netdev)) && link) { + e1000_enable_receives(adapter); + goto link_up; + } + + if (mac->type == e1000_82573) { + e1000_enable_tx_pkt_filtering(&adapter->hw); +#ifdef NETIF_F_HW_VLAN_TX + if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) + e1000_update_mng_vlan(adapter); +#endif + } + + if (link) { + if (!rtnetif_carrier_ok(netdev)) { + u32 ctrl; + bool txb2b = 1; +#ifdef SIOCGMIIPHY + /* update snapshot of PHY registers on LSC */ + e1000_phy_read_status(adapter); +#endif + e1000_get_speed_and_duplex(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); + DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None" ))); + + /* tweak tx_queue_len according to speed/duplex + * and adjust the timeout factor */ + //netdev->tx_queue_len = adapter->tx_queue_len; + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = 0; + //netdev->tx_queue_len = 10; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = 0; + //netdev->tx_queue_len = 100; + /* maybe add some timeout factor ? 
*/ + break; + } + + if ((mac->type == e1000_82571 || + mac->type == e1000_82572) && + txb2b == 0) { + u32 tarc0; + tarc0 = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); + tarc0 &= ~SPEED_MODE_BIT; + E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc0); + } + +#ifdef NETIF_F_TSO + /* disable TSO for pcie and 10/100 speeds, to avoid + * some hardware issues */ + if (!(adapter->flags & E1000_FLAG_TSO_FORCE) && + adapter->hw.bus.type == e1000_bus_type_pci_express){ + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + DPRINTK(PROBE,INFO, + "10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + netdev->features &= ~NETIF_F_TSO6; +#endif + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; +#ifdef NETIF_F_TSO6 + netdev->features |= NETIF_F_TSO6; +#endif + break; + default: + /* oops */ + break; + } + } +#endif + + /* enable transmits in the hardware, need to do this + * after setting TARC0 */ + tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); + tctl |= E1000_TCTL_EN; + E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); + + rtnetif_carrier_on(netdev); + rtnetif_wake_queue(netdev); +#ifdef CONFIG_E1000_MQ + if (netif_is_multiqueue(netdev)) + for (i = 0; i < adapter->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); +#endif + + if (!test_bit(__E1000_DOWN, &adapter->state)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + adapter->smartspeed = 0; + } + } else { + if (rtnetif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + DPRINTK(LINK, INFO, "NIC Link is Down\n"); + rtnetif_carrier_off(netdev); + rtnetif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->state)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + + /* 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives in the ISR and + * reset device here in the watchdog + */ + if (adapter->flags & E1000_FLAG_RX_NEEDS_RESTART) + /* reset device */ + schedule_work(&adapter->reset_task); + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + mac->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorc = adapter->stats.gorc - adapter->gorc_old; + adapter->gorc_old = adapter->stats.gorc; + adapter->gotc = adapter->stats.gotc - adapter->gotc_old; + adapter->gotc_old = adapter->stats.gotc; + + e1000_update_adaptive(&adapter->hw); + + if (!rtnetif_carrier_ok(netdev)) { + for (i = 0 ; i < adapter->num_tx_queues ; i++) { + tx_ring = &adapter->tx_ring[i]; + tx_pending |= (E1000_DESC_UNUSED(tx_ring) + 1 < + tx_ring->count); + } + if (tx_pending) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + } + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = TRUE; + + /* With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
Set the appropriate LAA in RAR[0] */ + if (e1000_get_laa_state_82571(&adapter->hw) == TRUE) + e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + /* Reschedule the task */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +#if 0 +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, + int bytes) +{ + unsigned int retval = itr_setting; + + if (unlikely(!(adapter->flags & E1000_FLAG_HAS_INTR_MODERATION))) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) { + retval = low_latency; + } + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) { + retval = bulk_latency; + } else if ((packets < 10) || ((bytes/packets) > 1200)) { + retval = bulk_latency; + } else if ((packets > 35)) { + retval = lowest_latency; + } + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) { + retval = low_latency; + } + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} +#endif + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb) +{ +#ifdef NETIF_F_TSO + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + int err; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + 
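+			/* Seed the TCP pseudo-header checksum with a zero
+			 * length; hardware inserts the per-segment length
+			 * during TSO. */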
cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; +#ifdef NETIF_F_TSO6 + } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; +#endif + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) i = 0; + tx_ring->next_to_use = i; + + return TRUE; + } +#endif + + return FALSE; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, + struct sk_buff *skb) +{ + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + // u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + return FALSE; + + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + break; + default: + if (unlikely(net_ratelimit())) { + DPRINTK(PROBE, WARNING, "checksum_partial proto=%x!\n", + skb->protocol); + } + break; + } + + // css = skb_transport_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (unlikely(++i == tx_ring->count)) i = 0; + tx_ring->next_to_use = i; + + return TRUE; +} + +#define E1000_MAX_TXD_PWR 12 +#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) + +static int e1000_tx_map(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, + struct sk_buff *skb, unsigned int first, + unsigned int max_per_txd, unsigned int nr_frags, + unsigned int mss) +{ + struct e1000_buffer *buffer_info; + unsigned int len = skb->len; + unsigned int offset = 0, size, count = 0, i; +#ifdef MAX_SKB_FRAGS + unsigned int f; + len -= skb->data_len; +#endif + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); +#ifdef NETIF_F_TSO + /* Workaround for Controller erratum -- + * descriptor for non-tso packet in a linear SKB that follows a + * tso gets written back prematurely before the data is fully + * DMA'd to the controller */ + if (tx_ring->last_tx_tso && !skb_is_gso(skb)) { + tx_ring->last_tx_tso = 0; + if (!skb->data_len) + size -= 4; + } + + /* Workaround for premature desc write-backs + * in TSO mode. 
Append 4-byte sentinel desc */ + if (unlikely(mss && !nr_frags && size == len && size > 8)) + size -= 4; +#endif + /* work-around for errata 10 and it applies + * to all controllers in PCI-X mode + * The fix is to make sure that the first descriptor of a + * packet is smaller than 2048 - 16 - 16 (or 2016) bytes + */ + if (unlikely((adapter->hw.bus.type == e1000_bus_type_pcix) && + (size > 2015) && count == 0)) + size = 2015; + + /* Workaround for potential 82544 hang in PCI-X. Avoid + * terminating buffers within evenly-aligned dwords. */ + if (unlikely(adapter->pcix_82544 && + !((unsigned long)(skb->data + offset + size - 1) & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->dma = + dma_map_single(&adapter->pdev->dev, + skb->data + offset, + size, + DMA_TO_DEVICE); + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + if (unlikely(++i == tx_ring->count)) i = 0; + } + +#ifdef MAX_SKB_FRAGS + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + offset = frag->page_offset; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); +#ifdef NETIF_F_TSO + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc */ + if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) + size -= 4; +#endif + /* Workaround for potential 82544 hang in PCI-X. + * Avoid terminating buffers within evenly-aligned + * dwords. */ + if (unlikely(adapter->pcix_82544 && + !((unsigned long)(frag->page+offset+size-1) & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->dma = + dma_map_page(&adapter->pdev->dev, + frag->page, + offset, + size, + DMA_TO_DEVICE); + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + if (unlikely(++i == tx_ring->count)) i = 0; + } + } +#endif + + i = (i == 0) ? 
tx_ring->count - 1 : i - 1; + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, + int tx_flags, int count, nanosecs_abs_t *xmit_stamp) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + rtdm_lockctx_t context; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + rtdm_lock_irqsave(context); + + if (xmit_stamp) + *xmit_stamp = cpu_to_be64(rtdm_clock_read() + *xmit_stamp); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tdt); + + rtdm_lock_irqrestore(context); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems */ + mmiowb(); +} + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +/** + * 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
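+ *
+ * Worked example (illustrative): a 1514-byte frame plus the 16-byte
+ * FIFO header occupies ALIGN(1514 + 0x10, 0x10) = 1536 bytes of the
+ * on-chip FIFO when tested against the remaining space.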
+ **/ +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 length, offset; +#ifdef NETIF_F_HW_VLAN_TX + if (vlan_tx_tag_present(skb)) { + if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) + && (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; + } +#endif + if (skb->len > MINIMUM_DHCP_PACKET_SIZE) { + struct ethhdr *eth = (struct ethhdr *) skb->data; + if ((htons(ETH_P_IP) == eth->h_proto)) { + const struct iphdr *ip = + (struct iphdr *)((u8 *)skb->data+14); + if (IPPROTO_UDP == ip->protocol) { + struct udphdr *udp = + (struct udphdr *)((u8 *)ip + + (ip->ihl << 2)); + if (ntohs(udp->dest) == 67) { + offset = (u8 *)udp + 8 - skb->data; + length = skb->len - offset; + + return e1000_mng_write_dhcp_info(hw, + (u8 *)udp + 8, + length); + } + } + } + } + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + rtnetif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
*/ + rtnetif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, tx_ring, size); +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static int e1000_xmit_frame_ring(struct sk_buff *skb, + struct net_device *netdev, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb->len; + unsigned long irq_flags; + unsigned int nr_frags = 0; + unsigned int mss = 0; + int count = 0; + int tso; +#ifdef MAX_SKB_FRAGS + unsigned int f; + len -= skb->data_len; +#endif + + if (test_bit(__E1000_DOWN, &adapter->state)) { + kfree_rtskb(skb); + return NETDEV_TX_OK; + } + + if (unlikely(skb->len <= 0)) { + kfree_rtskb(skb); + return NETDEV_TX_OK; + } + + + /* 82571 and newer doesn't need the workaround that limited descriptor + * length to 4kB */ + if (adapter->hw.mac.type >= e1000_82571) + max_per_txd = 8192; + +#ifdef NETIF_F_TSO + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + /* TSO Workaround for 82571/2/3 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { + switch (adapter->hw.mac.type) { + unsigned int pull_size; + case e1000_82544: + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) + break; + fallthrough; + case e1000_82571: + case e1000_82572: + case e1000_82573: + case e1000_ich8lan: + case e1000_ich9lan: + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + DPRINTK(DRV, ERR, + "__pskb_pull_tail failed.\n"); + kfree_rtskb(skb); + return NETDEV_TX_OK; + } + len = skb->len - skb->data_len; + break; + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; +#else + if (skb->ip_summed == CHECKSUM_PARTIAL) + count++; +#endif + +#ifdef NETIF_F_TSO + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; +#endif + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((adapter->hw.bus.type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + +#ifdef MAX_SKB_FRAGS + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, + max_txd_pwr); 
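+	/* TXD_USE_COUNT() deliberately over-reserves: (len >> pwr) + 1,
+	 * e.g. 4096 bytes at a 4 KiB per-descriptor limit reserves
+	 * (4096 >> 12) + 1 = 2 descriptors. */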
+ if (adapter->pcix_82544) + count += nr_frags; + +#endif + + if (adapter->hw.mac.tx_pkt_filtering && + (adapter->hw.mac.type == e1000_82573)) + e1000_transfer_dhcp_info(adapter, skb); + + rtdm_lock_get_irqsave(&tx_ring->tx_lock, irq_flags); + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) { + rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags); + rtdm_printk("FATAL: rt_e1000 ran into tail close to head situation!\n"); + return NETDEV_TX_BUSY; + } + + if (unlikely(adapter->hw.mac.type == e1000_82547)) { + if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { + rtnetif_stop_queue(netdev); + rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags); + if (!test_bit(__E1000_DOWN, &adapter->state)) + schedule_delayed_work(&adapter->fifo_stall_task, + 1); + rtdm_printk("FATAL: rt_e1000 ran into tail 82547 controller bug!\n"); + return NETDEV_TX_BUSY; + } + } + +#ifndef NETIF_F_LLTX + rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags); + +#endif +#ifdef NETIF_F_HW_VLAN_TX + if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + } +#endif + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb); + if (tso < 0) { + kfree_rtskb(skb); +#ifdef NETIF_F_LLTX + rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags); +#endif + return NETDEV_TX_OK; + } + + if (likely(tso)) { + tx_ring->last_tx_tso = 1; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + /* Old method was to assume IPv4 packet by default if TSO was enabled. + * 82571 hardware supports TSO capabilities for IPv6 as well... + * no longer assume, we must. */ + if (likely(skb->protocol == htons(ETH_P_IP))) + tx_flags |= E1000_TX_FLAGS_IPV4; + + e1000_tx_queue(adapter, tx_ring, tx_flags, + e1000_tx_map(adapter, tx_ring, skb, first, + max_per_txd, nr_frags, mss), + skb->xmit_stamp); + + // netdev->trans_start = jiffies; + + /* Make sure there is space in the ring for the next send. */ + // e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); + +#ifdef NETIF_F_LLTX + rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags); +#endif + return NETDEV_TX_OK; +} + +static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + /* This goes back to the question of how to logically map a tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. 
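+ * (Editor's sketch, not part of the original comment: a per-flow
+ * mapping would look roughly like
+ *     queue = flow_hash % adapter->num_tx_queues;
+ *     tx_ring = &adapter->tx_ring[queue];
+ * for some hypothetical flow_hash; the CONFIG_E1000_MQ helper below
+ * takes the queue index explicitly instead.)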
*/ + return (e1000_xmit_frame_ring(skb, netdev, tx_ring)); +} + +#ifdef CONFIG_E1000_MQ +static int e1000_subqueue_xmit_frame(struct sk_buff *skb, + struct net_device *netdev, int queue) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = &adapter->tx_ring[queue]; + + return (e1000_xmit_frame_ring(skb, netdev, tx_ring)); +} +#endif + + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +#if 0 +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} +#endif + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + + e1000_reinit_locked(adapter); +} + +#if 0 +/** + * e1000_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats * e1000_get_stats(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETHERNET_FCS_SIZE; + u16 eeprom_data = 0; + + if ((max_frame < ETH_ZLEN + ETHERNET_FCS_SIZE) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); + return -EINVAL; + } + + /* Adapter-specific max frame size limits. */ + switch (adapter->hw.mac.type) { + case e1000_undefined: + case e1000_82542: + case e1000_ich8lan: + if (max_frame > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) { + DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + case e1000_82573: + /* Jumbo Frames not supported if: + * - this is not an 82573L device + * - ASPM is enabled in any way (0x1A bits 3:2) */ + e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, &eeprom_data); + if ((adapter->hw.device_id != E1000_DEV_ID_82573L) || + (eeprom_data & NVM_WORD1A_ASPM_MASK)) { + if (max_frame > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) { + DPRINTK(PROBE, ERR, + "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + } + /* ERT will be enabled later to enable wire speed receives */ + + /* fall through to get support */ + case e1000_ich9lan: + if ((adapter->hw.phy.type == e1000_phy_ife) && + (max_frame > ETH_FRAME_LEN + ETHERNET_FCS_SIZE)) { + DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + /* fall through to get support */ + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: +#define MAX_STD_JUMBO_FRAME_SIZE 9234 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. 
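+ * (Illustrative arithmetic, added by the editor: for new_mtu = 9000,
+ * max_frame = 9000 + ETH_HLEN + ETHERNET_FCS_SIZE = 9000 + 14 + 4 =
+ * 9018 bytes, which this default case accepts.)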
*/ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + if (rtnetif_running(netdev)) + e1000_down(adapter); + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs */ + + if (max_frame <= E1000_RXBUFFER_256) + adapter->rx_buffer_len = E1000_RXBUFFER_256; + else if (max_frame <= E1000_RXBUFFER_512) + adapter->rx_buffer_len = E1000_RXBUFFER_512; + else if (max_frame <= E1000_RXBUFFER_1024) + adapter->rx_buffer_len = E1000_RXBUFFER_1024; + else if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; +#ifdef CONFIG_E1000_NAPI + else + adapter->rx_buffer_len = E1000_RXBUFFER_4096; +#else + else if (max_frame <= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = E1000_RXBUFFER_4096; + else if (max_frame <= E1000_RXBUFFER_8192) + adapter->rx_buffer_len = E1000_RXBUFFER_8192; + else if (max_frame <= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!e1000_tbi_sbp_enabled_82543(&adapter->hw) && + ((max_frame == ETH_FRAME_LEN + ETHERNET_FCS_SIZE) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (rtnetif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->state); + + return 0; +} +#endif + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ +} +#ifdef SIOCGMIIPHY + +/** + * e1000_phy_read_status - Update the PHY register status snapshot + * @adapter: board private structure + **/ +static void e1000_phy_read_status(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_regs *phy = &adapter->phy_regs; + int ret_val = E1000_SUCCESS; + unsigned long irq_flags; + + + rtdm_lock_get_irqsave(&adapter->stats_lock, irq_flags); + + if (E1000_READ_REG(hw, E1000_STATUS)& E1000_STATUS_LU) { + ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy->bmcr); + ret_val |= e1000_read_phy_reg(hw, PHY_STATUS, &phy->bmsr); + ret_val |= e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &phy->advertise); + ret_val |= e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy->lpa); + ret_val |= e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, + &phy->expansion); + ret_val |= e1000_read_phy_reg(hw, PHY_1000T_CTRL, + &phy->ctrl1000); + ret_val |= e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy->stat1000); + ret_val |= e1000_read_phy_reg(hw, PHY_EXT_STATUS, + &phy->estatus); + if (ret_val) + DPRINTK(DRV, WARNING, "Error reading PHY register\n"); + } else { + /* Do not read PHY registers if link is not up + * Set values to typical power-on defaults */ + phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); + phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | + BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | + BMSR_ERCAP); + phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | + ADVERTISE_ALL | ADVERTISE_CSMA); + phy->lpa = 0; + phy->expansion = 
EXPANSION_ENABLENPAGE; + phy->ctrl1000 = ADVERTISE_1000FULL; + phy->stat1000 = 0; + phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); + } + + rtdm_lock_put_irqrestore(&adapter->stats_lock, irq_flags); +} +#endif + + +/** + * e1000_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static int e1000_intr_msi(rtdm_irq_t *irq_handle) +{ + struct rtnet_device *netdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +#ifndef CONFIG_E1000_NAPI + int i, j; + int rx_cleaned, tx_cleaned; +#endif + u32 icr = E1000_READ_REG(hw, E1000_ICR); + nanosecs_abs_t time_stamp = rtdm_clock_read(); + + + +#ifdef CONFIG_E1000_NAPI + /* read ICR disables interrupts using IAM, so keep up with our + * enable/disable accounting */ + atomic_inc(&adapter->irq_sem); +#endif + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers */ + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && + (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* 80003ES2LAN workaround-- For packet buffer work-around on + * link down event; disable receives here in the ISR and reset + * adapter in watchdog */ + if (rtnetif_carrier_ok(netdev) && + (adapter->flags & E1000_FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + u32 rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= E1000_FLAG_RX_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + //if (!test_bit(__E1000_DOWN, &adapter->state)) + // mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + +#ifdef CONFIG_E1000_NAPI + /* XXX only using ring 0 for napi */ + if (likely(netif_rx_schedule_prep(netdev, &adapter->rx_ring[0].napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __netif_rx_schedule(netdev, &adapter->rx_ring[0].napi); + } else { + atomic_dec(&adapter->irq_sem); + } +#else + adapter->total_tx_bytes = 0; + adapter->total_rx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_packets = 0; + adapter->data_received = 0; + + for (i = 0; i < E1000_MAX_INTR; i++) { + rx_cleaned = 0; + for (j = 0; j < adapter->num_rx_queues; j++) + rx_cleaned |= adapter->clean_rx(adapter, + &adapter->rx_ring[j], &time_stamp); + + tx_cleaned = 0; + for (j = 0 ; j < adapter->num_tx_queues ; j++) + tx_cleaned |= e1000_clean_tx_irq(adapter, + &adapter->tx_ring[j]); + + if (!rx_cleaned && !tx_cleaned) + break; + } + + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); +#endif + + if (adapter->data_received) + rt_mark_stack_mgr(netdev); + + return RTDM_IRQ_HANDLED; +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static int e1000_intr(rtdm_irq_t *irq_handle) +{ + struct rtnet_device *netdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl, icr = E1000_READ_REG(hw, E1000_ICR); +#ifndef CONFIG_E1000_NAPI + int i, j; + int rx_cleaned, tx_cleaned; +#endif + nanosecs_abs_t time_stamp = rtdm_clock_read(); + if (unlikely(!icr)) + return 
RTDM_IRQ_NONE; /* Not our interrupt */ + +#ifdef CONFIG_E1000_NAPI + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt */ + if ((adapter->flags & E1000_FLAG_INT_ASSERT_AUTO_MASK) && + !(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + /* Interrupt Auto-Mask...upon reading ICR, + * interrupts are masked. No need for the + * IMC write, but it does mean we should + * account for it ASAP. */ + if (likely(hw->mac.type >= e1000_82571)) + atomic_inc(&adapter->irq_sem); +#endif + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->mac.get_link_status = 1; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers */ + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && + (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))) + e1000_gig_downshift_workaround_ich8lan(hw); + + /* 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (rtnetif_carrier_ok(netdev) && + (adapter->flags & E1000_FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= E1000_FLAG_RX_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + //if (!test_bit(__E1000_DOWN, &adapter->state)) + // mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + +#ifdef CONFIG_E1000_NAPI + if (hw->mac.type < e1000_82571) { + /* disable interrupts, without the synchronize_irq bit */ + atomic_inc(&adapter->irq_sem); + E1000_WRITE_REG(hw, E1000_IMC, ~0); + E1000_WRITE_FLUSH(hw); + } + /* XXX only using ring 0 for napi */ + if (likely(netif_rx_schedule_prep(netdev, &adapter->rx_ring[0].napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __netif_rx_schedule(netdev, &adapter->rx_ring[0].napi); + } else { + atomic_dec(&adapter->irq_sem); + } +#else + /* Writing IMC and IMS is needed for 82547. + * Due to Hub Link bus being occupied, an interrupt + * de-assertion message is not able to be sent. + * When an interrupt assertion message is generated later, + * two messages are re-ordered and sent out. + * That causes APIC to think 82547 is in de-assertion + * state, while 82547 is in assertion state, resulting + * in dead lock. Writing IMC forces 82547 into + * de-assertion state. 
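+ * In short, the IMC write below acts as a synthetic de-assertion so
+ * that the APIC's view of the 82547 interrupt line stays consistent.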
+ */ + if (hw->mac.type == e1000_82547 || hw->mac.type == e1000_82547_rev_2) { + atomic_inc(&adapter->irq_sem); + E1000_WRITE_REG(hw, E1000_IMC, ~0); + } + + adapter->data_received = 0; + adapter->total_tx_bytes = 0; + adapter->total_rx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_packets = 0; + + for (i = 0; i < E1000_MAX_INTR; i++) { + rx_cleaned = 0; + for (j = 0; j < adapter->num_rx_queues; j++) + rx_cleaned |= adapter->clean_rx(adapter, + &adapter->rx_ring[j], &time_stamp); + + tx_cleaned = 0; + for (j = 0 ; j < adapter->num_tx_queues ; j++) + tx_cleaned |= e1000_clean_tx_irq(adapter, + &adapter->tx_ring[j]); + + if (!rx_cleaned && !tx_cleaned) + break; + } + + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + + if (hw->mac.type == e1000_82547 || hw->mac.type == e1000_82547_rev_2) + e1000_irq_enable(adapter); + +#endif + + if (adapter->data_received) + rt_mark_stack_mgr(netdev); + return RTDM_IRQ_HANDLED; +} + +#ifdef CONFIG_E1000_NAPI +/** + * e1000_poll - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @budget: amount of packets driver is allowed to process this poll + **/ +static int e1000_poll(struct napi_struct *napi, int budget) +{ + struct e1000_rx_ring *rx_ring = container_of(napi, struct e1000_rx_ring, + napi); + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + int tx_clean_complete = 1, work_done = 0; + int i; + + /* FIXME: i think this code is un-necessary when using base netdev */ + /* Keep link state information with original netdev */ + if (!rtnetif_carrier_ok(netdev)) + goto quit_polling; + + /* e1000_poll is called per-cpu. This lock protects + * tx_ring[i] from being cleaned by multiple cpus + * simultaneously. A failure obtaining the lock means + * tx_ring[i] is currently being cleaned anyway. 
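+ * (This is also why tx_clean_complete is only AND-ed under a
+ * successful trylock below: a ring we could not lock is being
+ * cleaned elsewhere and does not count against this pass.)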
*/ + for (i = 0; i < adapter->num_tx_queues; i++) { +#ifdef CONFIG_E1000_MQ + if (spin_trylock(&adapter->tx_ring[i].tx_queue_lock)) { + tx_clean_complete &= e1000_clean_tx_irq(adapter, + &adapter->tx_ring[i]); + spin_unlock(&adapter->tx_ring[i].tx_queue_lock); + } +#else + if (spin_trylock(&adapter->tx_queue_lock)) { + tx_clean_complete &= e1000_clean_tx_irq(adapter, + &adapter->tx_ring[i]); + spin_unlock(&adapter->tx_queue_lock); + } +#endif + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->clean_rx(adapter, &adapter->rx_ring[i], + &work_done, budget); + } + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if ((tx_clean_complete && (work_done == 0)) || + !rtnetif_running(netdev)) { +quit_polling: + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + netif_rx_complete(netdev, napi); + if (test_bit(__E1000_DOWN, &adapter->state)) + atomic_dec(&adapter->irq_sem); + else + e1000_irq_enable(adapter); + return 0; + } + + /* need to make sure the stack is aware of a tx-only poll loop */ + if (!tx_clean_complete) + work_done = budget; + + return work_done; +} + +#endif +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; +#ifdef CONFIG_E1000_NAPI + unsigned int count = 0; +#endif + bool cleaned = FALSE; + bool retval = TRUE; + unsigned int total_tx_bytes=0, total_tx_packets=0; + + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { + for (cleaned = FALSE; !cleaned; ) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + +#ifdef CONFIG_E1000_MQ + tx_ring->tx_stats.bytes += buffer_info->length; +#endif + if (cleaned) { + struct sk_buff *skb = buffer_info->skb; +#ifdef NETIF_F_TSO + unsigned int segs, bytecount; + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + + skb->len; + total_tx_packets += segs; + total_tx_bytes += bytecount; +#else + total_tx_packets++; + total_tx_bytes += skb->len; +#endif + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) i = 0; + } + +#ifdef CONFIG_E1000_MQ + tx_ring->tx_stats.packets++; +#endif + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); +#ifdef CONFIG_E1000_NAPI +#define E1000_TX_WEIGHT 64 + /* weight of a sort for tx, to avoid endless transmit cleanup */ + if (count++ == E1000_TX_WEIGHT) { + retval = FALSE; + break; + } +#endif + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(cleaned && rtnetif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
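+ * (The smp_mb() below pairs with the transmit path's queue-stop
+ * check: a sender that stops the queue must observe the updated
+ * next_to_clean, or the wake-up issued here could be lost.)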
+ */ + smp_mb(); + + if (rtnetif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->state))) { + rtnetif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = FALSE; + if (tx_ring->buffer_info[eop].dma && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) + && !(E1000_READ_REG(&adapter->hw, E1000_STATUS) & + E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)((tx_ring - adapter->tx_ring) / + sizeof(struct e1000_tx_ring)), + readl(adapter->hw.hw_addr + tx_ring->tdh), + readl(adapter->hw.hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + rtnetif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + adapter->net_stats.tx_bytes += total_tx_bytes; + adapter->net_stats.tx_packets += total_tx_packets; + return retval; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + skb->ip_summed = CHECKSUM_NONE; + + /* 82543 or newer only */ + if (unlikely(adapter->hw.mac.type < e1000_82543)) return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (adapter->hw.mac.type <= e1000_82547_rev_2) { + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + } else { + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (adapter->hw.mac.type > e1000_82547_rev_2) { + /* IP fragment with UDP payload */ + /* Hardware complements the payload checksum, so we undo it + * and then put the value in host order for further stack use. 
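+ * (Editor's illustration: the hardware reports the one's-complement
+ * of the payload sum, so csum ^ 0xFFFF recovers the sum itself and
+ * ntohl() below brings it into host order for CHECKSUM_COMPLETE.)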
+ */ + csum = ntohl(csum ^ 0xFFFF); + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } + adapter->hw_csum_good++; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + u16 vlan, struct sk_buff *skb) +{ +#ifdef CONFIG_E1000_NAPI +#ifdef NETIF_F_HW_VLAN_TX + if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) { + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, + le16_to_cpu(vlan) & + E1000_RXD_SPC_VLAN_MASK); + } else { + netif_receive_skb(skb); + } +#else + netif_receive_skb(skb); +#endif +#else /* CONFIG_E1000_NAPI */ +#ifdef NETIF_F_HW_VLAN_TX + if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) { + vlan_hwaccel_rx(skb, adapter->vlgrp, + le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK); + } else { + netif_rx(skb); + } +#else + rtnetif_rx(skb); +#endif +#endif /* CONFIG_E1000_NAPI */ +} + +#ifdef CONFIG_E1000_NAPI +/* NOTE: these new jumbo frame routines rely on NAPI because of the + * pskb_may_pull call, which eventually must call kmap_atomic which you cannot + * call from hard irq context */ + +/** + * e1000_consume_page - helper function + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + unsigned long irq_flags; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = FALSE; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = TRUE; + cleaned_count++; + dma_unmap_page(&pdev->dev, + buffer_info->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 last_byte = *(skb->data + length - 1); + if (TBI_ACCEPT(&adapter->hw, status, + rx_desc->errors, length, last_byte, + adapter->min_frame_size, + adapter->max_frame_size)) { + rtdm_lock_get_irqsave(&adapter->stats_lock, + irq_flags); + e1000_tbi_adjust_stats_82543(&adapter->hw, + &adapter->stats, + length, skb->data, + adapter->max_frame_size); + 
rtdm_lock_put_irqrestore(&adapter->stats_lock, + irq_flags); + length--; + } else { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window + * too */ + if (rx_ring->rx_skb_top) + kfree_rtskb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the current skb, we only consumed the + * page */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page, + KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, length); + kunmap_atomic(vaddr, + KM_SKB_DATA_SOFTIRQ); + /* re-use the page, so don't erase + * buffer_info->page */ + rtskb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? 
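+ * (The question above is the original author's: the hardware csum may
+ * still cover the 4 CRC bytes that pskb_trim() strips below; left
+ * unresolved, as in the source.)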
 */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		pskb_trim(skb, skb->len - 4);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		/* eth type trans needs skb->data to point to something */
+		if (!pskb_may_pull(skb, ETH_HLEN)) {
+			DPRINTK(DRV, ERR, "pskb_may_pull failed.\n");
+			kfree_rtskb(skb);
+			goto next_desc;
+		}
+
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+
+		e1000_receive_skb(adapter, status, rx_desc->special, skb);
+		adapter->data_received = 1; /* set flag for the main interrupt routine */
+
+		netdev->last_rx = jiffies;
+#ifdef CONFIG_E1000_MQ
+		rx_ring->rx_stats.packets++;
+		rx_ring->rx_stats.bytes += length;
+#endif
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+#endif /* NAPI */
+
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+#ifdef CONFIG_E1000_NAPI
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do)
+#else
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       nanosecs_abs_t *time_stamp)
+#endif
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_rx_buffer *buffer_info, *next_buffer;
+	u32 length;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = FALSE;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	/* rtdm_printk("<2> e1000_clean_rx_irq %i\n", __LINE__); */
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+#ifdef CONFIG_E1000_NAPI
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+#endif
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		if (++i == rx_ring->count)
+			i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = TRUE;
+		cleaned_count++;
+		dma_unmap_single(&pdev->dev,
+				 buffer_info->dma,
+				 adapter->rx_buffer_len,
+				 DMA_FROM_DEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+
+		/* !EOP means multiple descriptors were used to store a single
+		 * packet, also make sure the frame isn't just CRC only */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+			/* All receives must fit into a single buffer */
+			E1000_DBG("%s: Receive packet consumed multiple"
+				  " buffers\n", netdev->name);
+			/* recycle */
+
buffer_info->skb = skb; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + u8 last_byte = *(skb->data + length - 1); + if (TBI_ACCEPT(&adapter->hw, status, + rx_desc->errors, length, last_byte, + adapter->min_frame_size, + adapter->max_frame_size)) { + length--; + } else { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + } + + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above */ + length -= 4; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += length; + total_rx_packets++; + + rtskb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + skb->protocol = rt_eth_type_trans(skb, netdev); + skb->time_stamp = *time_stamp; + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + adapter->data_received = 1; // Set flag for the main interrupt routine + + // netdev->last_rx = jiffies; +#ifdef CONFIG_E1000_MQ + rx_ring->rx_stats.packets++; + rx_ring->rx_stats.bytes += length; +#endif + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + adapter->net_stats.rx_bytes += total_rx_bytes; + adapter->net_stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +#ifdef CONFIG_E1000_NAPI +static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +#else +static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + nanosecs_abs_t *time_stamp) +#endif +{ +#ifdef CONFIG_E1000_DISABLE_PACKET_SPLIT + return true; + +#else + + union e1000_rx_desc_packet_split *rx_desc, *next_rxd; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_buffer *buffer_info, *next_buffer; + struct e1000_ps_page *ps_page; + struct e1000_ps_page_dma *ps_page_dma; + struct sk_buff *skb; + unsigned int i, j; + u32 length, staterr; + int cleaned_count = 0; + bool cleaned = FALSE; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + ps_page = &rx_ring->ps_page[i]; + ps_page_dma = &rx_ring->ps_page_dma[i]; +#ifdef CONFIG_E1000_NAPI + if (unlikely(*work_done >= work_to_do)) + break; + (*work_done)++; +#endif + skb = buffer_info->skb; + + /* in the packet split case this is header only */ + prefetch(skb->data - NET_IP_ALIGN); + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC_PS(*rx_ring, i); + prefetch(next_rxd); + + 
next_buffer = &rx_ring->buffer_info[i]; + + cleaned = TRUE; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) { + E1000_DBG("%s: Packet Split buffers didn't pick up" + " the full packet\n", netdev->name); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + length = le16_to_cpu(rx_desc->wb.middle.length0); + + if (unlikely(!length)) { + E1000_DBG("%s: Last part of the packet spanning" + " multiple descriptors\n", netdev->name); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + /* Good Receive */ + rtskb_put(skb, length); +#ifdef CONFIG_E1000_MQ + rx_ring->rx_stats.packets++; + rx_ring->rx_stats.bytes += skb->len; +#endif + +#ifdef CONFIG_E1000_NAPI + { + /* this looks ugly, but it seems compiler issues make it + more efficient than reusing j */ + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); + + /* page alloc/put takes too long and effects small packet + * throughput, so unsplit small packets and save the alloc/put + * only valid in softirq (napi) context to call kmap_* */ + if (l1 && (l1 <= copybreak) && + ((length + l1) <= adapter->rx_ps_bsize0)) { + u8 *vaddr; + /* there is no documentation about how to call + * kmap_atomic, so we can't hold the mapping + * very long */ + pci_dma_sync_single_for_cpu(pdev, + ps_page_dma->ps_page_dma[0], + PAGE_SIZE, + PCI_DMA_FROMDEVICE); + vaddr = kmap_atomic(ps_page->ps_page[0], + KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, l1); + kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); + pci_dma_sync_single_for_device(pdev, + ps_page_dma->ps_page_dma[0], + PAGE_SIZE, PCI_DMA_FROMDEVICE); + /* remove the CRC */ + l1 -= 4; + rtskb_put(skb, l1); + goto copydone; + } /* if */ + } +#endif + + for (j = 0; j < adapter->rx_ps_pages; j++) { + if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j]))) + break; + dma_unmap_page(&pdev->dev, ps_page_dma->ps_page_dma[j], + PAGE_SIZE, DMA_FROM_DEVICE); + ps_page_dma->ps_page_dma[j] = 0; + skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0, + length); + ps_page->ps_page[j] = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } + + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive */ + pskb_trim(skb, skb->len - 4); + +#ifdef CONFIG_E1000_NAPI +copydone: +#endif + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_rx_checksum(adapter, staterr, + le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); + skb->protocol = rt_eth_type_trans(skb, netdev); + + if (likely(rx_desc->wb.upper.header_status & + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))) + adapter->rx_hdr_split++; + + e1000_receive_skb(adapter, staterr, rx_desc->wb.middle.vlan, + skb); + netdev->last_rx = jiffies; + +next_desc: + rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); + buffer_info->skb = NULL; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, 
cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + adapter->net_stats.rx_bytes += total_rx_bytes; + adapter->net_stats.rx_packets += total_rx_packets; + return cleaned; +#endif +} + +#ifdef CONFIG_E1000_NAPI +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - + 16 /*for skb_reserve */ - + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = rtnetdev_alloc_rtskb(netdev, bufsz); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + struct sk_buff *oldskb = skb; + DPRINTK(PROBE, ERR, "skb align check failed: %u bytes " + "at %p\n", bufsz, skb->data); + /* Try again, without freeing the previous */ + skb = rtnetdev_alloc_rtskb(netdev, bufsz); + /* Failed allocation, critical failure */ + if (!skb) { + kfree_rtskb(oldskb); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + /* give up */ + kfree_rtskb(skb); + kfree_rtskb(oldskb); + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + + /* Use new allocation */ + kfree_rtskb(oldskb); + } + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
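+ * The barrier must precede the writel() to the RDT tail register
+ * below; otherwise the device could fetch a descriptor whose
+ * buffer_addr store is still pending.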
*/ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} +#endif /* NAPI */ + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + rtskb_trim(skb, 0); + goto map_skb; + } + + skb = rtnetdev_alloc_rtskb(netdev, bufsz); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + struct sk_buff *oldskb = skb; + DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " + "at %p\n", bufsz, skb->data); + /* Try again, without freeing the previous */ + skb = rtnetdev_alloc_rtskb(netdev, bufsz); + /* Failed allocation, critical failure */ + if (!skb) { + kfree_rtskb(oldskb); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + /* give up */ + kfree_rtskb(skb); + kfree_rtskb(oldskb); + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + + /* Use new allocation */ + kfree_rtskb(oldskb); + } + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + DPRINTK(RX_ERR, ERR, + "dma align check failed: %u bytes at %p\n", + adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + kfree_rtskb(skb); + buffer_info->skb = NULL; + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
+ * @adapter: + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_phy_info *phy = &adapter->hw.phy; + u16 phy_status; + u16 phy_ctrl; + + if ((phy->type != e1000_phy_igp) || !mac->autoneg || + !(phy->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back */ + e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(&adapter->hw) && + !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(&adapter->hw) && + !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +#if 0 +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { +#ifdef SIOCGMIIPHY + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); +#endif +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + default: + return -EOPNOTSUPP; + } +} + +#ifdef SIOCGMIIPHY +/** + * e1000_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + switch (data->reg_num & 0x1F) { + case MII_BMCR: + data->val_out = adapter->phy_regs.bmcr; + break; + case MII_BMSR: + data->val_out = adapter->phy_regs.bmsr; + break; + case MII_PHYSID1: + data->val_out = (adapter->hw.phy.id >> 16); + break; + case MII_PHYSID2: + data->val_out = (adapter->hw.phy.id & 0xFFFF); + break; + case MII_ADVERTISE: + data->val_out = adapter->phy_regs.advertise; + break; + case MII_LPA: + data->val_out = adapter->phy_regs.lpa; + break; + case MII_EXPANSION: + data->val_out = adapter->phy_regs.expansion; + break; + case MII_CTRL1000: + data->val_out = adapter->phy_regs.ctrl1000; + break; + case MII_STAT1000: + data->val_out = adapter->phy_regs.stat1000; + break; + case MII_ESTATUS: + data->val_out = 
adapter->phy_regs.estatus; + break; + default: + return -EIO; + } + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} +#endif +#endif + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + DPRINTK(PROBE, ERR, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct e1000_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct e1000_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct e1000_adapter *adapter = hw->back; + u16 cap_offset; + + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); + if (!cap_offset) + return -E1000_ERR_CONFIG; + + pci_read_config_word(adapter->pdev, cap_offset + reg, value); + + return E1000_SUCCESS; +} + +#ifdef NETIF_F_HW_VLAN_TX +static void e1000_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u32 ctrl, rctl; + + e1000_irq_disable(adapter); + adapter->vlgrp = grp; + + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); + ctrl |= E1000_CTRL_VME; + E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); + + if ((adapter->hw.mac.type != e1000_ich8lan) && + (adapter->hw.mac.type != e1000_ich9lan)) { + /* enable VLAN receive filtering */ + rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); + rctl |= E1000_RCTL_VFE; + rctl &= ~E1000_RCTL_CFIEN; + E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); + e1000_update_mng_vlan(adapter); + } + } else { + /* disable VLAN tag insert/strip */ + ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; + E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); + + if ((adapter->hw.mac.type != e1000_ich8lan) && + (adapter->hw.mac.type != e1000_ich9lan)) { + /* disable VLAN filtering */ + rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); + rctl &= ~E1000_RCTL_VFE; + E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); + if (adapter->mng_vlan_id != + (u16)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, + adapter->mng_vlan_id); + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + } + } + + e1000_irq_enable(adapter); +} + +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u32 vfta, index; + struct net_device *v_netdev; + + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) + return; + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(&adapter->hw, index, vfta); + /* Copy feature flags from netdev to the vlan netdev for this vid. + * This allows things like TSO to bubble down to our vlan device. 
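+ * (Without the copy, a VLAN device stacked on this adapter would fall
+ * back to software checksumming and segmentation.)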
+ */
+	v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+	v_netdev->features |= adapter->netdev->features;
+	vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u32 vfta, index;
+
+	e1000_irq_disable(adapter);
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+	e1000_irq_enable(adapter);
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id)) {
+		/* release control to f/w */
+		e1000_release_hw_control(adapter);
+		return;
+	}
+
+	/* remove VID from filter table */
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
+	vfta &= ~(1 << (vid & 0x1F));
+	e1000_write_vfta(&adapter->hw, index, vfta);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+			e1000_vlan_rx_add_vid(adapter->netdev, vid);
+		}
+	}
+}
+#endif
+
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+
+	mac->autoneg = 0;
+
+	/* Fiber NICs only allow 1000 Mbps full duplex */
+	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
+	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
+	switch (spddplx) {
+	case SPEED_10 + DUPLEX_HALF:
+		mac->forced_speed_duplex = ADVERTISE_10_HALF;
+		break;
+	case SPEED_10 + DUPLEX_FULL:
+		mac->forced_speed_duplex = ADVERTISE_10_FULL;
+		break;
+	case SPEED_100 + DUPLEX_HALF:
+		mac->forced_speed_duplex = ADVERTISE_100_HALF;
+		break;
+	case SPEED_100 + DUPLEX_FULL:
+		mac->forced_speed_duplex = ADVERTISE_100_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_FULL:
+		mac->autoneg = 1;
+		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_HALF: /* not supported */
+	default:
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#ifdef USE_REBOOT_NOTIFIER
+/* only want to do this for 2.4 kernels?
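+ * (Editor's note, an assumption rather than documented fact: 2.4-era
+ * kernels had no driver-model shutdown callback, so suspending on
+ * reboot had to be hooked through a notifier like this one.)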
+ */
+static int e1000_notify_reboot(struct notifier_block *nb,
+			       unsigned long event, void *p)
+{
+	struct pci_dev *pdev = NULL;
+
+	switch (event) {
+	case SYS_DOWN:
+	case SYS_HALT:
+	case SYS_POWER_OFF:
+		while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+			if (pci_dev_driver(pdev) == &e1000_driver)
+				e1000_suspend(pdev, PMSG_SUSPEND);
+		}
+	}
+	return NOTIFY_DONE;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if ((err = pci_enable_device(pdev))) {
+		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (rtnetif_running(netdev) && (err = e1000_request_irq(adapter)))
+		return err;
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		e1000_power_up_phy(&adapter->hw);
+		e1000_setup_link(&adapter->hw);
+	}
+	e1000_reset(adapter);
+	E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0);
+
+	e1000_init_manageability(adapter);
+
+	if (rtnetif_running(netdev))
+		e1000_up(adapter);
+
+	netif_device_attach(netdev);
+
+	/* If the controller is 82573 or ICHx and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (((adapter->hw.mac.type != e1000_82573) &&
+	     (adapter->hw.mac.type != e1000_ich8lan) &&
+	     (adapter->hw.mac.type != e1000_ich9lan)) ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts.  It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	/* Editor's note (assumption, not from the original source): this
+	 * netpoll stub still uses the pre-RTDM e1000_intr() signature and
+	 * is presumed to stay disabled in RTnet builds. */
+	disable_irq(adapter->pdev->irq);
+	e1000_intr(adapter->pdev->irq, netdev);
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+#ifndef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->clean_rx(adapter, &adapter->rx_ring[i], NULL);
+#endif
+	enable_irq(adapter->pdev->irq);
+}
+#endif
+
+#ifdef HAVE_PCI_ERS
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	netif_device_detach(netdev);
+
+	if (rtnetif_running(netdev))
+		e1000_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.  Implementation
+ * resembles the first-half of the e1000_resume routine.
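+ * That is: re-enable the PCI device, restore bus mastering and wake
+ * state, then reset the hardware; bringing the interface back up is
+ * left to e1000_io_resume().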
+ */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev->priv; + + if (pci_enable_device(pdev)) { + printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev->priv; + + e1000_init_manageability(adapter); + + if (rtnetif_running(netdev)) { + if (e1000_up(adapter)) { + printk("e1000: can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* If the controller is 82573 or ICHx and f/w is AMT, do not set + * DRV_LOAD until the interface is up. For all other cases, + * let the f/w know that the h/w is now under the control + * of the driver. */ + if (((adapter->hw.mac.type != e1000_82573) && + (adapter->hw.mac.type != e1000_ich8lan) && + (adapter->hw.mac.type != e1000_ich9lan)) || + !e1000_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + +} +#endif /* HAVE_PCI_ERS */ + +s32 e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size) +{ + hw->dev_spec = kmalloc(size, GFP_KERNEL); + + if (!hw->dev_spec) + return -ENOMEM; + + memset(hw->dev_spec, 0, size); + + return E1000_SUCCESS; +} + +void e1000_free_dev_spec_struct(struct e1000_hw *hw) +{ + if (!hw->dev_spec) + return; + + kfree(hw->dev_spec); +} + +/* vim: set ts=4: */ +/* e1000_main.c */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c new file mode 100644 index 0000000..4145dbd --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c @@ -0,0 +1,384 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_manage.h"
+
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+
+/**
+ * e1000_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ *
+ * Calculates the checksum of the given buffer over the specified length.
+ * The calculated checksum is returned.
+ **/
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+	u32 i;
+	u8 sum = 0;
+
+	DEBUGFUNC("e1000_calculate_checksum");
+
+	if (!buffer)
+		return 0;
+
+	for (i = 0; i < length; i++)
+		sum += buffer[i];
+
+	return (u8) (0 - sum);
+}
+
+/**
+ * e1000_mng_enable_host_if_generic - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command is completed. It busy-waits
+ * if the previous command is not yet completed.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw * hw)
+{
+	u32 hicr;
+	s32 ret_val = E1000_SUCCESS;
+	u8 i;
+
+	DEBUGFUNC("e1000_mng_enable_host_if_generic");
+
+	/* Check that the host interface is enabled. */
+	hicr = E1000_READ_REG(hw, E1000_HICR);
+	if ((hicr & E1000_HICR_EN) == 0) {
+		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+		ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+		goto out;
+	}
+	/* check the previous command is completed */
+	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+		hicr = E1000_READ_REG(hw, E1000_HICR);
+		if (!(hicr & E1000_HICR_C))
+			break;
+		msec_delay_irq(1);
+	}
+
+	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+		DEBUGOUT("Previous command timed out.\n");
+		ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_check_mng_mode_generic - Generic check for management mode
+ * @hw: pointer to the HW structure
+ *
+ * Reads the firmware semaphore register and returns true (>0) if
+ * manageability is enabled, else false (0).
+ **/
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	DEBUGFUNC("e1000_check_mng_mode_generic");
+
+	fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+	return ((fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on TX
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ **/
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
+{
+	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+	u32 *buffer = (u32 *)&hw->mng_cookie;
+	u32 offset;
+	s32 ret_val, hdr_csum, csum;
+	u8 i, len;
+	bool tx_filter = TRUE;
+
+	DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+
+	/* No manageability, no filtering */
+	if (!e1000_check_mng_mode(hw)) {
+		tx_filter = FALSE;
+		goto out;
+	}
+
+	/*
+	 * If we can't read from the host interface for whatever
+	 * reason, disable filtering.
+	 */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val != E1000_SUCCESS) {
+		tx_filter = FALSE;
+		goto out;
+	}
+
+	/* Read in the header.  Length and offset are in dwords.
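+	 * With the defaults from e1000_manage.h (E1000_MNG_DHCP_COOKIE_LENGTH
+	 * = 0x10, E1000_MNG_DHCP_COOKIE_OFFSET = 0x6F0) this reads 4 dwords
+	 * starting at host-interface index 0x1BC.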
+	 */
+	len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+	for (i = 0; i < len; i++) {
+		*(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
+							   E1000_HOST_IF,
+							   offset + i);
+	}
+	hdr_csum = hdr->checksum;
+	hdr->checksum = 0;
+	csum = e1000_calculate_checksum((u8 *)hdr,
+					E1000_MNG_DHCP_COOKIE_LENGTH);
+	/*
+	 * If either the checksums or signature don't match, then
+	 * the cookie area isn't considered valid, in which case we
+	 * take the safe route of assuming Tx filtering is enabled.
+	 */
+	if (hdr_csum != csum)
+		goto out;
+	if (hdr->signature != E1000_IAMT_SIGNATURE)
+		goto out;
+
+	/* Cookie area is valid, make the final check for filtering. */
+	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
+		tx_filter = FALSE;
+
+out:
+	hw->mac.tx_pkt_filtering = tx_filter;
+	return tx_filter;
+}
+
+/**
+ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw * hw, u8 *buffer,
+                                      u16 length)
+{
+	struct e1000_host_mng_command_header hdr;
+	s32 ret_val;
+	u32 hicr;
+
+	DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
+
+	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+	hdr.command_length = length;
+	hdr.reserved1 = 0;
+	hdr.reserved2 = 0;
+	hdr.checksum = 0;
+
+	/* Enable the host interface */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val)
+		goto out;
+
+	/* Populate the host interface with the contents of "buffer". */
+	ret_val = e1000_mng_host_if_write(hw, buffer, length,
+	                                  sizeof(hdr), &(hdr.checksum));
+	if (ret_val)
+		goto out;
+
+	/* Write the manageability command header */
+	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+	if (ret_val)
+		goto out;
+
+	/* Tell the ARC a new command is pending. */
+	hicr = E1000_READ_REG(hw, E1000_HICR);
+	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_mng_write_cmd_header_generic - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after computing its checksum.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw * hw,
+                                    struct e1000_host_mng_command_header * hdr)
+{
+	u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+	DEBUGFUNC("e1000_mng_write_cmd_header_generic");
+
+	/* Write the whole command header structure with new checksum. */
+
+	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+	length >>= 2;
+	/* Write the relevant command block into the ram area. */
+	for (i = 0; i < length; i++) {
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+		                            *((u32 *) hdr + i));
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_mng_host_if_write_generic - Write to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It also handles alignment so the writes are done in the most
+ * efficient way, and accumulates the byte sum of the buffer into the *sum
+ * parameter.
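+ *
+ * For example, a 7-byte write at offset 2 first merges two bytes into
+ * the tail of the existing first dword, then writes one full dword, and
+ * finally zero-pads the trailing byte into a last dword.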
+ **/ +s32 e1000_mng_host_if_write_generic(struct e1000_hw * hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + s32 ret_val = E1000_SUCCESS; + u16 remaining, i, j, prev_bytes; + + DEBUGFUNC("e1000_mng_host_if_write_generic"); + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) { + ret_val = -E1000_ERR_PARAM; + goto out; + } + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block into the + * ram area. + */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data); + } + +out: + return ret_val; +} + +/** + * e1000_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to allow ARPs to be processed by the host. + **/ +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = FALSE; + + DEBUGFUNC("e1000_enable_mng_pass_thru"); + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = E1000_READ_REG(hw, E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + factps = E1000_READ_REG(hw, E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = TRUE; + goto out; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = TRUE; + goto out; + } + } + +out: + return ret_val; +} + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h new file mode 100644 index 0000000..b11b865 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h @@ -0,0 +1,81 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MANAGE_H_ +#define _E1000_MANAGE_H_ + +bool e1000_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, + u8 *buffer, u16 length); + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +} e1000_mng_mode; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c new file mode 100644 index 0000000..f89d490 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c @@ -0,0 +1,893 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_nvm.h"
+
+/**
+ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u32 mask;
+
+	DEBUGFUNC("e1000_shift_out_eec_bits");
+
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_microwire)
+		eecd &= ~E1000_EECD_DO;
+	else if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+
+		usec_delay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * @hw: pointer to the HW structure
+ * @count: number of bits to shift in
+ *
+ * In order to read a register from the EEPROM, we need to shift 'count' bits
+ * in from the EEPROM. Bits are "shifted in" by raising the clock input to
+ * the EEPROM (setting the SK bit), and then reading the value of the data out
+ * "DO" bit. During this "shifting in" process the data in "DI" bit should
+ * always be clear.
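+ *
+ * Data arrives MSB first: each clock cycle shifts the accumulated word
+ * left by one and ORs the sampled DO bit into bit 0, so 'count' cycles
+ * yield a 'count'-bit result.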
+ **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + DEBUGFUNC("e1000_shift_in_eec_bits"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + DEBUGFUNC("e1000_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = E1000_READ_REG(hw, E1000_EERD); + else + reg = E1000_READ_REG(hw, E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = E1000_SUCCESS; + break; + } + + usec_delay(5); + } + + return ret_val; +} + +/** + * e1000_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_acquire_nvm_generic"); + + E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); + eecd = E1000_READ_REG(hw, E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + usec_delay(5); + eecd = E1000_READ_REG(hw, E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + DEBUGOUT("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return ret_val; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_standby_nvm"); + + if (nvm->type == e1000_nvm_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_lower_eec_clk(hw, &eecd); + } else if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. 
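+ *
+ * SPI chip selects are active-low, so CS is driven high to deselect the
+ * part; Microwire chip selects are active-high, so CS is cleared instead.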
+ **/
+void e1000_stop_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	DEBUGFUNC("e1000_stop_nvm");
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+		/* Pull CS high */
+		eecd |= E1000_EECD_CS;
+		e1000_lower_eec_clk(hw, &eecd);
+	} else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
+		/* CS on Microwire is active-high */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+}
+
+/**
+ * e1000_release_nvm_generic - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000_release_nvm_generic(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	DEBUGFUNC("e1000_release_nvm_generic");
+
+	e1000_stop_nvm(hw);
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+	eecd &= ~E1000_EECD_REQ;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	s32 ret_val = E1000_SUCCESS;
+	u16 timeout = 0;
+	u8 spi_stat_reg;
+
+	DEBUGFUNC("e1000_ready_nvm_eeprom");
+
+	if (nvm->type == e1000_nvm_eeprom_microwire) {
+		/* Clear SK and DI */
+		eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		/* Set CS */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Clear SK and CS */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		usec_delay(1);
+		timeout = NVM_MAX_RETRY_SPI;
+
+		/*
+		 * Read "Status Register" repeatedly until the LSB is cleared.
+		 * The EEPROM will signal that the command has been completed
+		 * by clearing bit 0 of the internal status register.  If it's
+		 * not cleared within 'timeout', then error out.
+		 */
+		while (timeout) {
+			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+			                         hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			usec_delay(5);
+			e1000_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			DEBUGOUT("SPI NVM Status error\n");
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_read_nvm_spi - Read EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i = 0;
+	s32 ret_val;
+	u16 word_in;
+	u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+	DEBUGFUNC("e1000_read_nvm_spi");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
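+	 * For instance, on a 64-word part (nvm->word_size == 64) a request
+	 * for 4 words at offset 62 is rejected, since 62 + 4 > 64.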
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = e1000_acquire_nvm(hw); + if (ret_val) + goto out; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* + * Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + e1000_release_nvm(hw); + +out: + return ret_val; +} + +/** + * e1000_read_nvm_microwire - Reads EEPROM's using microwire + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u8 read_opcode = NVM_READ_OPCODE_MICROWIRE; + + DEBUGFUNC("e1000_read_nvm_microwire"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = e1000_acquire_nvm(hw); + if (ret_val) + goto out; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset + i), + nvm->address_bits); + + /* + * Read the data. For microwire, each word requires the + * overhead of setup and tear-down. + */ + data[i] = e1000_shift_in_eec_bits(hw, 16); + e1000_standby_nvm(hw); + } + +release: + e1000_release_nvm(hw); + +out: + return ret_val; +} + +/** + * e1000_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_nvm_eerd"); + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. 
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+		       E1000_NVM_RW_REG_START;
+
+		E1000_WRITE_REG(hw, E1000_EERD, eerd);
+		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+		           E1000_NVM_RW_REG_DATA);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 widx = 0;
+
+	DEBUGFUNC("e1000_write_nvm_spi");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	msec_delay(10);
+
+	while (widx < words) {
+		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+		ret_val = e1000_ready_nvm_eeprom(hw);
+		if (ret_val)
+			goto release;
+
+		e1000_standby_nvm(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+		                         nvm->opcode_bits);
+
+		e1000_standby_nvm(hw);
+
+		/*
+		 * Some SPI EEPROMs use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((nvm->address_bits == 8) && (offset >= 128))
+			write_opcode |= NVM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+		                         nvm->address_bits);
+
+		/* Loop to allow for up to a whole page write of the EEPROM */
+		while (widx < words) {
+			u16 word_out = data[widx];
+			word_out = (word_out >> 8) | (word_out << 8);
+			e1000_shift_out_eec_bits(hw, word_out, 16);
+			widx++;
+
+			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+				e1000_standby_nvm(hw);
+				break;
+			}
+		}
+	}
+
+	msec_delay(10);
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_write_nvm_microwire - Writes EEPROM using microwire
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using microwire interface.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+                              u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u32 eecd;
+	u16 words_written = 0;
+	u16 widx = 0;
+
+	DEBUGFUNC("e1000_write_nvm_microwire");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
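+	 *
+	 * The sequence below is bracketed by EWEN (erase/write enable) and
+	 * EWDS (erase/write disable) opcodes, since Microwire parts power
+	 * up write-protected.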
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = e1000_acquire_nvm(hw); + if (ret_val) + goto out; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + + e1000_standby_nvm(hw); + + while (words_written < words) { + e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE, + nvm->opcode_bits); + + e1000_shift_out_eec_bits(hw, (u16)(offset + words_written), + nvm->address_bits); + + e1000_shift_out_eec_bits(hw, data[words_written], 16); + + e1000_standby_nvm(hw); + + for (widx = 0; widx < 200; widx++) { + eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_DO) + break; + usec_delay(50); + } + + if (widx == 200) { + DEBUGOUT("NVM Write did not complete\n"); + ret_val = -E1000_ERR_NVM; + goto release; + } + + e1000_standby_nvm(hw); + + words_written++; + } + + e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + +release: + e1000_release_nvm(hw); + +out: + return ret_val; +} + +/** + * e1000_read_pba_num_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 nvm_data; + + DEBUGFUNC("e1000_read_pba_num_generic"); + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + *pba_num = (u32)(nvm_data << 16); + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + *pba_num |= nvm_data; + +out: + return ret_val; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 offset, nvm_data, i; + + DEBUGFUNC("e1000_read_mac_addr"); + + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = i >> 1; + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); + hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); + } + + /* Flip last bit of mac address if we're on second port */ + if (hw->bus.func == E1000_FUNC_1) + hw->mac.perm_addr[5] ^= 1; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
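+ * In other words, the word at NVM_CHECKSUM_REG is programmed so that the
+ * 16-bit sum of words 0 through NVM_CHECKSUM_REG equals NVM_SUM (0xBABA);
+ * any corruption shows up as a mismatch here.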
+ **/
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	DEBUGFUNC("e1000_validate_nvm_checksum_generic");
+
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		DEBUGOUT("NVM Checksum Invalid\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum.  Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	DEBUGFUNC("e1000_update_nvm_checksum");
+
+	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error while updating checksum.\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+	if (ret_val) {
+		DEBUGOUT("NVM Write Error while updating checksum.\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_reload_nvm_generic - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+void e1000_reload_nvm_generic(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+
+	DEBUGFUNC("e1000_reload_nvm_generic");
+
+	usec_delay(10);
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/* Function pointers local to this file and not intended for public use */
+
+/**
+ * e1000_acquire_nvm - Acquire exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * For those silicon families which have implemented an NVM acquire function,
+ * run the defined function, else return success.
+ **/
+s32 e1000_acquire_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.acquire_nvm)
+		return hw->func.acquire_nvm(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_nvm - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * For those silicon families which have implemented an NVM release function,
+ * run the defined function, else return success.
+ **/
+void e1000_release_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.release_nvm)
+		hw->func.release_nvm(hw);
+}
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h
new file mode 100644
index 0000000..1803600
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h
@@ -0,0 +1,61 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_NVM_H_ +#define _E1000_NVM_H_ + +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); + +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num); +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000_write_nvm_eewr(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000_stop_nvm(struct e1000_hw *hw); +void e1000_release_nvm_generic(struct e1000_hw *hw); +void e1000_reload_nvm_generic(struct e1000_hw *hw); + +/* Function pointers */ +s32 e1000_acquire_nvm(struct e1000_hw *hw); +void e1000_release_nvm(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h new file mode 100644 index 0000000..3a09cc2 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h @@ -0,0 +1,124 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* glue for the OS-dependent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/if_ether.h> + +#include "kcompat.h" + +#define usec_delay(x) udelay(x) +#ifndef msec_delay +#define msec_delay(x) do { if(in_interrupt()) { \ + /* Don't sleep in interrupt context! */ \ + BUG(); \ + } else { \ + msleep(x); \ + } } while (0) + +/* Some workarounds require millisecond delays and are run during interrupt + * context. Most notably, when establishing link, the phy may need tweaking + * but cannot process phy register reads/writes faster than millisecond + * intervals...and we establish link due to a "link status change" interrupt. + */ +#define msec_delay_irq(x) mdelay(x) +#endif + +#define PCI_COMMAND_REGISTER PCI_COMMAND +#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE +#define ETH_ADDR_LEN ETH_ALEN + +#ifdef __BIG_ENDIAN +#define E1000_BIG_ENDIAN __BIG_ENDIAN +#endif + + +#define DEBUGOUT(S) +#define DEBUGOUT1(S, A...) + +#define DEBUGFUNC(F) DEBUGOUT(F "\n") +#define DEBUGOUT2 DEBUGOUT1 +#define DEBUGOUT3 DEBUGOUT2 +#define DEBUGOUT7 DEBUGOUT3 + +#define E1000_REGISTER(a, reg) (((a)->mac.type >= e1000_82543) \ + ? reg \ + : e1000_translate_register_82542(reg)) + +#define E1000_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg)))) + +#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))) + +#define E1000_WRITE_REG_IO(a, reg, offset) do { \ + outl(reg, ((a)->io_base)); \ + outl(offset, ((a)->io_base + 4)); } while(0) + +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) + +#define E1000_WRITE_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg)) + +#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c new file mode 100644 index 0000000..c5db2cb --- /dev/null +++ 
b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c @@ -0,0 +1,894 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include <linux/netdevice.h> + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when e1000_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labeled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
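+ *
+ * The range initializer [0 ... E1000_MAX_NIC] = OPTION_UNSET fills all
+ * E1000_MAX_NIC + 1 slots (one per board, plus a catch-all entry used
+ * when the board index exceeds the maximum) with -1 in one expression.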
+ */ + +#define E1000_PARAM(X, desc) \ + static const int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(E1000_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X = 0; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. + * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 0 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 0 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. 
+ * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 0 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 0 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +/* Enable Kumeran Lock Loss workaround + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); + + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + DPRINTK(PROBE, INFO, + "%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + DPRINTK(PROBE, INFO, "%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
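+ *
+ * For example, loading the module with TxDescriptors=80,256 applies 80
+ * to board 0 and 256 to board 1; boards given no explicit value fall
+ * back to the defaults noted above.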
+ **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int bd = adapter->bd_number; + if (bd >= E1000_MAX_NIC) { + DPRINTK(PROBE, NOTICE, + "Warning: no configuration for board #%i\n", bd); + DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); +#ifndef module_param_array + bd = E1000_MAX_NIC; +#endif + } + + { /* Transmit Descriptor Count */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { .min = E1000_MIN_TXD }} + }; + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + opt.arg.r.max = hw->mac.type < e1000_82544 ? + E1000_MAX_TXD : E1000_MAX_82544_TXD; + +#ifdef module_param_array + if (num_TxDescriptors > bd) { +#endif + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); +#ifdef module_param_array + } else { + tx_ring->count = opt.def; + } +#endif + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { .min = E1000_MIN_RXD }} + }; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + opt.arg.r.max = hw->mac.type < e1000_82544 ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + +#ifdef module_param_array + if (num_RxDescriptors > bd) { +#endif + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); +#ifdef module_param_array + } else { + rx_ring->count = opt.def; + } +#endif + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + struct e1000_option opt = { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_XsumRX > bd) { +#endif + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; +#ifdef module_param_array + } else { + adapter->rx_csum = opt.def; + } +#endif + } + { /* Flow Control */ + + struct e1000_opt_list fc_list[] = + {{ e1000_fc_none, "Flow Control Disabled" }, + { e1000_fc_rx_pause,"Flow Control Receive Only" }, + { e1000_fc_tx_pause,"Flow Control Transmit Only" }, + { e1000_fc_full, "Flow Control Enabled" }, + { e1000_fc_default, "Flow Control Hardware Default" }}; + + struct e1000_option opt = { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = e1000_fc_default, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + +#ifdef module_param_array + if (num_FlowControl > bd) { +#endif + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + hw->fc.original_type = fc; + hw->fc.type = fc; +#ifdef module_param_array + } else { + hw->fc.original_type = opt.def; + hw->fc.type = opt.def; + } +#endif + } + { /* Transmit Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + 
.max = MAX_TXDELAY }} + }; + +#ifdef module_param_array + if (num_TxIntDelay > bd) { +#endif + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->tx_int_delay = opt.def; + } +#endif + } + { /* Transmit Absolute Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + +#ifdef module_param_array + if (num_TxAbsIntDelay > bd) { +#endif + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->tx_abs_int_delay = opt.def; + } +#endif + } + { /* Receive Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + /* modify min and default if 82573 for slow ping w/a, + * a value greater than 8 needs to be set for RDTR */ + +#ifdef module_param_array + if (num_RxIntDelay > bd) { +#endif + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->rx_int_delay = opt.def; + } +#endif + } + { /* Receive Absolute Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + +#ifdef module_param_array + if (num_RxAbsIntDelay > bd) { +#endif + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); +#ifdef module_param_array + } else { + adapter->rx_abs_int_delay = opt.def; + } +#endif + } + { /* Interrupt Throttling Rate */ + struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + +#ifdef module_param_array + if (num_InterruptThrottleRate > bd) { +#endif + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + break; + case 1: + DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + DPRINTK(PROBE, INFO, + "%s set to dynamic conservative mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits change itr */ + /* clear the lower two bits because they are + * used as control */ + adapter->itr_setting = adapter->itr & ~3; + break; + } +#ifdef module_param_array + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } +#endif + } + { /* Smart Power Down */ + struct e1000_option opt = { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + +#ifdef module_param_array + if (num_SmartPowerDownEnable > bd) { +#endif + unsigned int 
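/*
 * Illustrative sketch (not part of the patch): InterruptThrottleRate
 * overloads its smallest values as modes -- 0 disables throttling, 1 and 3
 * pick the dynamic modes and start the rate at 20000 ints/sec -- while any
 * validated in-range value is stored with the low two bits cleared, since
 * those bits are reused as mode control in adapter->itr_setting:
 */
static const char *itr_mode(unsigned int itr)
{
	switch (itr) {
	case 0:  return "off";
	case 1:  return "dynamic";
	case 3:  return "dynamic conservative";
	default: return "fixed rate";	/* itr & ~3 becomes itr_setting */
	}
}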
spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->flags |= spd ? E1000_FLAG_SMART_POWER_DOWN : 0; +#ifdef module_param_array + } else { + adapter->flags &= ~E1000_FLAG_SMART_POWER_DOWN; + } +#endif + } + { /* Kumeran Lock Loss Workaround */ + struct e1000_option opt = { + .type = enable_option, + .name = "Kumeran Lock Loss Workaround", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + +#ifdef module_param_array + if (num_KumeranLockLoss > bd) { +#endif + unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; + e1000_validate_option(&kmrn_lock_loss, &opt, adapter); + if (hw->mac.type == e1000_ich8lan) + e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, + kmrn_lock_loss); +#ifdef module_param_array + } else { + if (hw->mac.type == e1000_ich8lan) + e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, + opt.def); + } +#endif + } + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } + +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; +#ifndef module_param_array + bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; + if ((Speed[bd] != OPTION_UNSET)) { +#else + if (num_Speed > bd) { +#endif + DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " + "parameter ignored\n"); + } + +#ifndef module_param_array + if ((Duplex[bd] != OPTION_UNSET)) { +#else + if (num_Duplex > bd) { +#endif + DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " + "parameter ignored\n"); + } + +#ifndef module_param_array + if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) { +#else + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { +#endif + DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " + "not valid for fiber adapters, " + "parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; +#ifndef module_param_array + bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; +#endif + + { /* Speed */ + struct e1000_opt_list speed_list[] = {{ 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + struct e1000_option opt = { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + +#ifdef module_param_array + if (num_Speed > bd) { +#endif + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); +#ifdef module_param_array + } else { + speed = opt.def; + } +#endif + } + { /* Duplex */ + struct e1000_opt_list dplx_list[] = {{ 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + struct e1000_option opt = { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (e1000_check_reset_block(hw)) { + DPRINTK(PROBE, INFO, + "Link active due to SoL/IDER Session. 
" + "Speed/Duplex/AutoNeg parameter ignored.\n"); + return; + } +#ifdef module_param_array + if (num_Duplex > bd) { +#endif + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); +#ifdef module_param_array + } else { + dplx = opt.def; + } +#endif + } + +#ifdef module_param_array + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { +#else + if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) { +#endif + DPRINTK(PROBE, INFO, + "AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + struct e1000_option opt = { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + +#ifdef module_param_array + if (num_AutoNeg > bd) { +#endif + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); +#ifdef module_param_array + } else { + an = opt.def; + } +#endif + hw->phy.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + hw->mac.autoneg = adapter->fc_autoneg = TRUE; +#ifdef module_param_array + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) +#else + if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET) +#endif + DPRINTK(PROBE, INFO, + "Speed and duplex autonegotiation enabled\n"); + break; + case HALF_DUPLEX: + DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at " + "Half Duplex only\n"); + hw->mac.autoneg = adapter->fc_autoneg = TRUE; + hw->phy.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at " + "Full Duplex only\n"); + hw->mac.autoneg = adapter->fc_autoneg = TRUE; + hw->phy.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + DPRINTK(PROBE, INFO, "10 Mbps Speed specified " + "without Duplex\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n"); + hw->mac.autoneg = adapter->fc_autoneg = TRUE; + hw->phy.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + DPRINTK(PROBE, INFO, "Forcing to 
10 Mbps Half Duplex\n"); + hw->mac.autoneg = adapter->fc_autoneg = FALSE; + hw->mac.forced_speed_duplex = ADVERTISE_10_HALF; + hw->phy.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n"); + hw->mac.autoneg = adapter->fc_autoneg = FALSE; + hw->mac.forced_speed_duplex = ADVERTISE_10_FULL; + hw->phy.autoneg_advertised = 0; + break; + case SPEED_100: + DPRINTK(PROBE, INFO, "100 Mbps Speed specified " + "without Duplex\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at " + "100 Mbps only\n"); + hw->mac.autoneg = adapter->fc_autoneg = TRUE; + hw->phy.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n"); + hw->mac.autoneg = adapter->fc_autoneg = FALSE; + hw->mac.forced_speed_duplex = ADVERTISE_100_HALF; + hw->phy.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n"); + hw->mac.autoneg = adapter->fc_autoneg = FALSE; + hw->mac.forced_speed_duplex = ADVERTISE_100_FULL; + hw->phy.autoneg_advertised = 0; + break; + case SPEED_1000: + DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without " + "Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + DPRINTK(PROBE, INFO, + "Half Duplex is not supported at 1000 Mbps\n"); + fallthrough; + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + DPRINTK(PROBE, INFO, + "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); + hw->mac.autoneg = adapter->fc_autoneg = TRUE; + hw->phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + DPRINTK(PROBE, INFO, + "Speed, AutoNeg and MDI-X specifications are " + "incompatible. Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c new file mode 100644 index 0000000..cec2ba3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c @@ -0,0 +1,2106 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" +#include "e1000_phy.h" + +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static void e1000_release_phy(struct e1000_hw *hw); +static s32 e1000_acquire_phy(struct e1000_hw *hw); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = + { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +static const u16 e1000_igp_2_cable_length_table[] = + { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, + 104, 109, 114, 118, 121, 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * e1000_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + DEBUGFUNC("e1000_check_reset_block"); + + manc = E1000_READ_REG(hw, E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +/** + * e1000_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + + DEBUGFUNC("e1000_get_phy_id"); + + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * e1000_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_phy_reset_dsp_generic"); + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + +out: + return ret_val; +} + +/** + * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data.
+ **/ +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. 
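/*
 * Illustrative sketch (not part of the patch): e1000_read_phy_reg_mdic()
 * packs the opcode, PHY address and register offset into a single MDIC
 * word, writes it, then polls for READY. The shift and flag values below
 * are assumptions for illustration, not taken from this patch:
 */
#include <stdint.h>

#define MDIC_REG_SHIFT	16u		/* E1000_MDIC_REG_SHIFT analogue */
#define MDIC_PHY_SHIFT	21u		/* E1000_MDIC_PHY_SHIFT analogue */
#define MDIC_OP_READ	(1u << 27)	/* E1000_MDIC_OP_READ analogue */

static uint32_t mdic_read_cmd(uint32_t phy_addr, uint32_t reg)
{
	/* the MAC runs the MDIO cycle; the result lands in MDIC[15:0] */
	return (reg << MDIC_REG_SHIFT) |
	       (phy_addr << MDIC_PHY_SHIFT) |
	       MDIC_OP_READ;
}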
+ **/ +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_m88"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + e1000_release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_m88"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + e1000_release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_igp"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + e1000_release_phy(hw); + goto out; + } + } + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + e1000_release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_igp"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + e1000_release_phy(hw); + goto out; + } + } + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + e1000_release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000_read_kmrn_reg_generic - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. 
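/*
 * Illustrative sketch (not part of the patch): the igp accessors above
 * split a large register "offset" in two, because MDIO can only address
 * 32 registers directly -- the full offset is first written to the PHY's
 * page-select register, then the low five bits select the register:
 */
struct igp_access { unsigned int page_sel, mdi_reg; };

static struct igp_access igp_split(unsigned int offset)
{
	struct igp_access a;

	a.page_sel = offset;		/* written to IGP01E1000_PHY_PAGE_SELECT
					 * when offset > MAX_PHY_MULTI_PAGE_REG */
	a.mdi_reg = offset & 0x1f;	/* 5-bit MDIO register address */
	return a;
}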
+ **/ +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + DEBUGFUNC("e1000_read_kmrn_reg_generic"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + e1000_release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000_write_kmrn_reg_generic - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. + **/ +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + DEBUGFUNC("e1000_write_kmrn_reg_generic"); + + ret_val = e1000_acquire_phy(hw); + if (ret_val) + goto out; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + + usec_delay(2); + e1000_release_phy(hw); + +out: + return ret_val; +} + +/** + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88"); + + if (phy->reset_disable) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if (phy->revision < E1000_REVISION_4) { + /* + * Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1000_read_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. 
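/*
 * Illustrative sketch (not part of the patch): phy->mdix, programmed into
 * M88E1000_PHY_SPEC_CTRL in e1000_copper_link_setup_m88() above, selects
 * the crossover policy. Restated standalone:
 */
enum mdix_mode { MDIX_AUTO, MDIX_FORCE_MDI, MDIX_FORCE_MDIX, MDIX_AUTO_1000T };

static const char *mdix_policy(enum mdix_mode m)
{
	switch (m) {
	case MDIX_FORCE_MDI:  return "force MDI";
	case MDIX_FORCE_MDIX: return "force MDI-X";
	case MDIX_AUTO_1000T: return "auto crossover for 1000Base-T only";
	default:              return "auto crossover for all speeds";
	}
}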
*/ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. */ + ret_val = e1000_phy_commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_igp"); + + if (phy->reset_disable) { + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + + /* Wait 15ms for MAC to configure PHY from NVM settings. */ + msec_delay(15); + + /* + * The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. + */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, FALSE); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D3\n"); + goto out; + } + } + + /* disable lplu d0 during driver init */ + ret_val = e1000_set_d0_lplu_state(hw, FALSE); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + goto out; + } + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* + * when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? 
+ e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* + * Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* + * If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + goto out; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* + * Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* + * Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for " + "autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = TRUE; + +out: + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + DEBUGFUNC("e1000_phy_setup_autoneg"); + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). 
*/ + ret_val = e1000_read_phy_reg(hw, + PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* + * Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) { + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* + * Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.type) { + case e1000_fc_none: + /* + * Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. 
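/*
 * Illustrative sketch (not part of the patch): the flow-control switch
 * here boils down to two advertisement bits, PAUSE (symmetric) and
 * ASM_DIR (asymmetric direction). The bit positions below are the
 * standard 802.3 ones, used as assumed analogues of NWAY_AR_PAUSE and
 * NWAY_AR_ASM_DIR:
 */
#define ADV_PAUSE	(1u << 10)
#define ADV_ASM_DIR	(1u << 11)

static unsigned int fc_to_adv_bits(int fc)	/* 0 none, 1 rx, 2 tx, 3 full */
{
	switch (fc) {
	case 1:  return ADV_PAUSE | ADV_ASM_DIR;	/* rx-only: advertise
							 * both, drop tx later */
	case 2:  return ADV_ASM_DIR;
	case 3:  return ADV_PAUSE | ADV_ASM_DIR;
	default: return 0;
	}
}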
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = e1000_write_phy_reg(hw, + PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link; once link is established, the + * collision distance and flow control settings are configured. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_setup_copper_link_generic"); + + if (hw->mac.autoneg) { + /* + * Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* + * PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000_phy_has_link_generic(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + goto out; + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + e1000_config_collision_dist_generic(hw); + ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * success if the link comes up, else -E1000_ERR_PHY (-2). + **/ +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); + + ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* + * Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced.
+ */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + DEBUGOUT1("IGP PSCR: %X\n", phy_data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) { + DEBUGOUT("Link taking longer than expected.\n"); + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); + + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); + + ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= MII_CR_RESET; + + ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) { + /* + * We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000_write_phy_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + goto out; + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + goto out; + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + + ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* + * Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* + * In addition, we must re-enable CRS on Tx for both half and full + * duplex. 
+ */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +out: + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take effect. + **/ +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.type = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + DEBUGOUT("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + DEBUGOUT("Forcing 10mb\n"); + } + + e1000_config_collision_dist_generic(hw); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); +} + +/** + * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_generic"); + + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1000_write_phy_reg(hw, + IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained.
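/*
 * Illustrative sketch (not part of the patch): forcing link, as in
 * e1000_phy_force_speed_duplex_setup() above, is bit surgery on the
 * device CTRL word -- force speed/duplex, clear speed-select and auto
 * speed detection, then set one speed bit. Mask values are assumed for
 * illustration:
 */
#include <stdint.h>

#define CTRL_FD		(1u << 0)
#define CTRL_ASDE	(1u << 5)
#define CTRL_SPD_100	(1u << 8)
#define CTRL_SPD_SEL	(3u << 8)
#define CTRL_FRCSPD	(1u << 11)
#define CTRL_FRCDPX	(1u << 12)

static uint32_t force_100_full(uint32_t ctrl)
{
	ctrl |= CTRL_FRCSPD | CTRL_FRCDPX;	/* MAC stops following the PHY */
	ctrl &= ~(CTRL_SPD_SEL | CTRL_ASDE);	/* no autodetected speed */
	return ctrl | CTRL_SPD_100 | CTRL_FD;	/* 100 Mb/s full duplex */
}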
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1000_write_phy_reg(hw, + IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * e1000_check_downshift_generic - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 e1000_check_downshift_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_downshift_generic"); + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = FALSE; + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = e1000_read_phy_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? TRUE : FALSE; + +out: + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_check_polarity_m88"); + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_igp"); + + /* + * Polarity is determined based on the speed of + * our connection.
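/*
 * Illustrative sketch (not part of the patch): downshift detection above
 * is the same read-and-mask for every PHY family; only the status
 * register and bit differ. Register/bit values below are assumed for
 * illustration:
 */
struct downshift_src { unsigned int reg, mask; };

static struct downshift_src downshift_src_for(int is_m88_family)
{
	struct downshift_src s;

	if (is_m88_family) {	/* m88/gg82563: PHY specific status */
		s.reg = 0x11;
		s.mask = 1u << 5;
	} else {		/* igp: link-health speed downgrade */
		s.reg = 0x13;
		s.mask = 1u << 8;
	}
	return s;
}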
+ */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* + * This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = e1000_read_phy_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + +out: + return ret_val; +} + +/** + * e1000_wait_autoneg_generic - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, whichever happens first. + **/ +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("e1000_wait_autoneg_generic"); + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msec_delay(100); + } + + /* + * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * e1000_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("e1000_phy_has_link_generic"); + + for (i = 0; i < iterations; i++) { + /* + * Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + msec_delay_irq(usec_interval/1000); + else + usec_delay(usec_interval); + } + + *success = (i < iterations) ? TRUE : FALSE; + + return ret_val; +} + +/** + * e1000_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length.
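/*
 * Illustrative sketch (not part of the patch): the MII link-status bit is
 * latched-low, which is why e1000_phy_has_link_generic() above reads
 * PHY_STATUS twice per iteration -- the first read clears a stale "link
 * lost" latch, the second reflects current state. The loop shape,
 * standalone, with a caller-supplied register read:
 */
#include <stdbool.h>
#include <stdint.h>

#define MII_SR_LINK	(1u << 2)	/* MII_SR_LINK_STATUS analogue */

static bool poll_link(uint16_t (*read_status)(void), unsigned int tries)
{
	while (tries--) {
		(void)read_status();	/* discard the latched value */
		if (read_status() & MII_SR_LINK)
			return true;
		/* inter-attempt delay elided */
	}
	return false;
}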
The m88 PHY has five + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + DEBUGFUNC("e1000_get_cable_length_m88"); + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index+1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain values, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = + {IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D}; + + DEBUGFUNC("e1000_get_cable_length_igp_2"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + goto out; + + /* + * Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up.
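/*
 * Illustrative sketch (not part of the patch): the igp2 estimate above is
 * a trimmed mean -- sum the four per-channel table lookups, drop the
 * single smallest and largest, average the remaining two, then widen the
 * result by the +/- AGC error range. The core step, standalone:
 */
static unsigned int trimmed_mean4(const unsigned int v[4])
{
	unsigned int i, sum = 0, min = v[0], max = v[0];

	for (i = 0; i < 4; i++) {
		sum += v[i];
		if (v[i] < min)
			min = v[i];
		if (v[i] > max)
			max = v[i];
	}
	return (sum - min - max) / 2;	/* 4 channels minus 2 outliers */
}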
+ * Read the PHY special control register to
+ * determine the polarity and 10base-T extended distance.  Read the PHY
+ * special status register to determine MDI/MDIx and current speed.  If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_m88");
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		DEBUGOUT("Phy info is only valid for copper media\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+				   ? TRUE
+				   : FALSE;
+
+	ret_val = e1000_check_polarity_m88(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up.  If link is up, then
+ * set/determine 10base-T extended distance and polarity correction.  Read
+ * PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ * determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_igp");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = TRUE;
+
+	ret_val = e1000_check_polarity_igp(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_phy_sw_reset_generic - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register,
+ * setting the reset bit and writing the register back to the PHY.
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	usec_delay(1);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting.  Acquire
+ * the semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY.  Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+	ret_val = e1000_check_reset_block(hw);
+	if (ret_val) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(phy->reset_delay_us);
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(150);
+
+	e1000_release_phy(hw);
+
+	ret_val = e1000_get_phy_cfg_done(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_get_cfg_done_generic - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milliseconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_get_cfg_done_generic");
+
+	msec_delay_irq(10);
+
+	return E1000_SUCCESS;
+}
+
+/* Internal function pointers */
+
+/**
+ * e1000_get_phy_cfg_done - Generic PHY configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Return success if the silicon family does not implement a family-specific
+ * get_cfg_done function.
+ **/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+	if (hw->func.get_cfg_done)
+		return hw->func.get_cfg_done(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_release_phy - Generic release PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return if the silicon family does not require a semaphore when accessing
+ * the PHY.
+ **/
+static void e1000_release_phy(struct e1000_hw *hw)
+{
+	if (hw->func.release_phy)
+		hw->func.release_phy(hw);
+}
+
+/**
+ * e1000_acquire_phy - Generic acquire PHY
+ * @hw: pointer to the HW structure
+ *
+ * Return success if the silicon family does not require a semaphore when
+ * accessing the PHY.
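+ *
+ * A family-specific acquire hook would normally take a hardware
+ * semaphore before touching the MDIC interface.  A minimal hypothetical
+ * sketch (E1000_SWSM_SWESMBI is an assumed bit name, not defined by
+ * this driver):
+ *
+ *	static s32 my_acquire_phy(struct e1000_hw *hw)
+ *	{
+ *		u32 swsm = E1000_READ_REG(hw, E1000_SWSM);
+ *
+ *		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+ *		if (!(E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI))
+ *			return -E1000_ERR_PHY;
+ *
+ *		return E1000_SUCCESS;
+ *	}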
+ **/ +static s32 e1000_acquire_phy(struct e1000_hw *hw) +{ + if (hw->func.acquire_phy) + return hw->func.acquire_phy(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex + * @hw: pointer to the HW structure + * + * When the silicon family has not implemented a forced speed/duplex + * function for the PHY, simply return E1000_SUCCESS. + **/ +s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + if (hw->func.force_speed_duplex) + return hw->func.force_speed_duplex(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. + **/ +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) +{ + DEBUGOUT("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + e1000_write_phy_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + e1000_write_phy_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + e1000_write_phy_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + e1000_write_phy_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Giga mode */ + e1000_write_phy_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + e1000_write_phy_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + e1000_write_phy_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + e1000_write_phy_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + e1000_write_phy_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + e1000_write_phy_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + e1000_write_phy_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + e1000_write_phy_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + e1000_write_phy_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + e1000_write_phy_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + e1000_write_phy_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + e1000_write_phy_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + e1000_write_phy_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + e1000_write_phy_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + e1000_write_phy_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + e1000_write_phy_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + e1000_write_phy_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + e1000_write_phy_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + e1000_write_phy_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + e1000_write_phy_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + e1000_write_phy_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + e1000_write_phy_reg(hw, 0x1798, 0xD008); + /* + * Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + e1000_write_phy_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + e1000_write_phy_reg(hw, 0x187A, 0x0800); + /* + * Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + e1000_write_phy_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + e1000_write_phy_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes 
*/
+	e1000_write_phy_reg(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	e1000_write_phy_reg(hw, 0x0000, 0x1340);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_type_from_id - Get PHY type from id
+ * @phy_id: phy_id read from the phy
+ *
+ * Returns the phy type from the id.
+ **/
+e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+	e1000_phy_type phy_type = e1000_phy_unknown;
+
+	switch (phy_id) {
+	case M88E1000_I_PHY_ID:
+	case M88E1000_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+	case M88E1011_I_PHY_ID:
+		phy_type = e1000_phy_m88;
+		break;
+	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+		phy_type = e1000_phy_igp_2;
+		break;
+	case GG82563_E_PHY_ID:
+		phy_type = e1000_phy_gg82563;
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy_type = e1000_phy_igp_3;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy_type = e1000_phy_ife;
+		break;
+	default:
+		phy_type = e1000_phy_unknown;
+		break;
+	}
+	return phy_type;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or if wake on lan is not enabled, restore the link to its
+ * previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1000_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	e1000_write_phy_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or if wake on lan is not enabled, power down the PHY.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1000_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	e1000_write_phy_reg(hw, PHY_CONTROL, mii_reg);
+	msec_delay(1);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h
new file mode 100644
index 0000000..111e61e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+ + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +s32 e1000_check_downshift_generic(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000_copper_link_autoneg(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000_get_phy_id(struct e1000_hw *hw); +s32 e1000_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw); +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); +e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); + +#define E1000_MAX_PHY_ADDR 4 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + + 
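+/*
+ * The page-select constants above exist because the MDI interface can only
+ * address PHY_REG_MASK + 1 registers directly; higher offsets are reached by
+ * writing the page first.  A hedged sketch of the usual pattern, built on the
+ * e1000_*_phy_reg_mdic() helpers declared above (the wrapper name is
+ * hypothetical):
+ *
+ *	static s32 my_read_igp_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+ *	{
+ *		s32 ret_val = E1000_SUCCESS;
+ *
+ *		if (offset > PHY_REG_MASK)
+ *			ret_val = e1000_write_phy_reg_mdic(hw,
+ *					IGP01E1000_PHY_PAGE_SELECT,
+ *					(u16)offset);
+ *		if (!ret_val)
+ *			ret_val = e1000_read_phy_reg_mdic(hw,
+ *					offset & PHY_REG_MASK, data);
+ *		return ret_val;
+ *	}
+ */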
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +/* Enable flexible speed on link-up */ +#define IGP01E1000_GMII_FLEX_SPD 0x0010 +#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */ + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0008 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define IGP03E1000_PHY_MISC_CTRL 0x1B +#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Manually Set Duplex */ + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h new file mode 100644 index 0000000..72b9f9c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h @@ -0,0 +1,307 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n))) +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +/* + * Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? 
(0x03804 + ((_n) * 0x100)) : (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : (0x0E028 + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + (_n << 8)) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : (0x0E03C + ((_n) * 0x40))) +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (0x05400 + ((_i) * 8)) +#define E1000_RAH(_i) (0x05404 + ((_i) * 8)) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDPUMB 0x0357C /* DMA Tx Descriptor uC Mail Box - RW */ +#define E1000_TDPUAD 0x03580 /* DMA Tx Descriptor uC Addr Command - RW */ +#define E1000_TDPUWD 0x03584 /* DMA Tx Descriptor uC Data Write - RW */ +#define E1000_TDPURD 0x03588 /* DMA Tx Descriptor uC Data Read - RW */ +#define E1000_TDPUCTL 0x0358C /* DMA Tx Descriptor uC Control - RW */ +#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 
0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ + +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS 
Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +#define E1000_1GSTAT_RCV 0x04228 /* 1GSTAT Code Violation Packet Count - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 
0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Inteface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register (_i) - RW */ +#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr low reg 0 - RW */ +#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr upper reg 0 - RW */ +#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry message reg 0 - RW */ +#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry vector ctrl reg 0 - RW */ +#define E1000_MSIXPBA 0x0E000 /* MSI-X Pending bit array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW Array */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h new file mode 100644 index 0000000..48906b7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h @@ -0,0 +1,603 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#include <linux/version.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/udp.h> +#include <linux/mii.h> +#include <asm/io.h> + +#include <rtnet_port.h> +/* NAPI enable/disable flags here */ + + +#ifdef _E1000_H_ +#ifdef CONFIG_E1000_NAPI +#define NAPI +#endif +#ifdef E1000_NAPI +#undef NAPI +#define NAPI +#endif +#ifdef E1000_NO_NAPI +#undef NAPI +#endif +#endif + +#ifdef _IGB_H_ +#define NAPI +#endif + +#ifdef _IXGB_H_ +#ifdef CONFIG_IXGB_NAPI +#define NAPI +#endif +#ifdef IXGB_NAPI +#undef NAPI +#define NAPI +#endif +#ifdef IXGB_NO_NAPI +#undef NAPI +#endif +#endif + + +#ifdef DRIVER_E1000 +#define adapter_struct e1000_adapter +#endif + + +// RTNET settings +#ifdef NAPI +#undef NAPI +#endif + +#undef NETIF_F_TSO +#undef NETIF_F_HW_VLAN_TX +#undef CONFIG_NET_POLL_CONTROLLER +#ifdef ETHTOOL_GPERMADDR +#undef ETHTOOL_GPERMADDR +#endif + + +/* and finally set defines so that the code sees the changes */ +#ifdef NAPI +#ifndef CONFIG_E1000_NAPI +#define CONFIG_E1000_NAPI +#endif +#ifndef CONFIG_IXGB_NAPI +#define CONFIG_IXGB_NAPI +#endif +#else +#undef CONFIG_E1000_NAPI +#undef CONFIG_IXGB_NAPI +#endif + +/* packet split disable/enable */ +#ifdef DISABLE_PACKET_SPLIT +#undef CONFIG_E1000_DISABLE_PACKET_SPLIT +#define CONFIG_E1000_DISABLE_PACKET_SPLIT +#endif + +/* MSI compatibility code for all kernels and drivers */ +#ifdef DISABLE_PCI_MSI +#undef CONFIG_PCI_MSI +#endif + +#ifdef DISABLE_PM +#undef CONFIG_PM +#endif + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef PMSG_SUSPEND +#define PMSG_SUSPEND 3 +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#endif +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif + + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#ifndef HAVE_FREE_NETDEV +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif + +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif + +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define 
CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef __read_mostly +#define __read_mostly +#endif + +#ifndef MII_RESV1 +#define MII_RESV1 0x17 /* Reserved... */ +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef num_online_cpus +#define num_online_cpus() smp_num_cpus +#endif + +#ifndef _LINUX_RANDOM_H +#include <linux/random.h> +#endif + +#ifndef DECLARE_BITMAP +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef VLAN_ETH_HLEN +#define VLAN_ETH_HLEN 18 +#endif + +#ifndef VLAN_ETH_FRAME_LEN +#define VLAN_ETH_FRAME_LEN 1518 +#endif + + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. 
*/ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. 
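+	 * For example, a driver that re-evaluates the measured packet
+	 * rate every two seconds would report rate_sample_interval = 2.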
+ */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. 
*/
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS	0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID		0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO		0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO		0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN	32
+#endif
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+			       PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+			       ~PCI_COMMAND_INVALIDATE);
+#endif
+
+
+#undef HAVE_PCI_ERS
+
+#endif /* _KCOMPAT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig
new file mode 100644
index 0000000..0544128
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig
@@ -0,0 +1,4 @@
+config XENO_DRIVERS_NET_DRV_RT2500
+	depends on XENO_DRIVERS_NET && PCI
+	tristate "Ralink 2500 WLAN"
+	select XENO_DRIVERS_NET_RTWLAN
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile
new file mode 100644
index 0000000..d5e2643
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_RT2500) += rt_rt2x00core.o rt_rt2500pci.o
+
+rt_rt2x00core-y := rt2x00core.o
+rt_rt2500pci-y := rt2500pci.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/README b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/README
new file mode 100644
index 0000000..f47fccd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/README
@@ -0,0 +1,58 @@
+RTnet rt2500 WLAN README
+========================
+
+See http://www.fsf.org/resources/hw/net/wireless/cards.html
+for a list of cards which use the rt2500 chipset.
+
+After the modules rt_rt2x00core and rt_rt2500pci have been loaded, the driver
+can be configured with rtiwconfig. The following features are currently
+implemented (see also rtiwconfig --help):
+
+* bitrate: The committed value is multiplied by 0.5 Mbit/s.
+  Valid values are 2, 4, 11, 22 for 802.11b and
+  12, 18, 24, 36, 48, 72, 96, 108 for 802.11g.
+
+* channel: Sets the frequency/channel. Channel IDs range from 1 to 13.
+
+* txpower: Sets the transmission power. Zero means minimum TX power.
+
+* retry: The hardware can be configured to retry transmissions.
+  This sets the maximum number of retries.
+
+* tx mode: The RTnet driver can be used in three different modes:
+
+  "raw": No acknowledgement of transmitted frames is expected.
+         -> No retries possible.
+         The target address of the WLAN frame is set as passed from higher
+         layers.
+
+  "ack": An acknowledgement of every transmitted frame is expected.
+         -> Retries are possible.
+         The target address of the WLAN frame is set as passed from higher
+         layers.
+         WARNING: This is currently only useful for unicast transmission.
+
+  "mcast": No acknowledgement of the frame is expected.
+  The receiver address of the WLAN frame is the device's own MAC address
+  with the group bit set.
+
+* drop broadcast: Configures whether the hardware drops received
+  broadcast frames.
+
+* drop multicast: Configures whether the hardware drops received
+  multicast frames.
+
+* bbp sensibility: Sets the receive sensitivity of the baseband processor.
+  Values around 70 seem to be useful.
+
+* autoresponder: Determines whether the hardware automatically answers
+  received unicast frames with an ACK frame.
+
+* regread/regwrite: Give direct access to the chipset registers.
+  Only useful if you know what you are doing :)
+
+The driver has been tested with an ASUS WL-107g PCMCIA and an MSI PC54G2 PCI
+card.
+
+KNOWN BUGS:
+After the bitrate has been configured via rtiwconfig, the driver activates
+the hardware autoresponder. If this is not desired, the autoresponder has to
+be disabled again manually.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c
new file mode 100644
index 0000000..9bbdce7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c
@@ -0,0 +1,1274 @@
+/* rt2500pci.c
+ *
+ * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project
+ * <http://rt2x00.serialmonkey.com>
+ * 2006 RTnet adaptation by Daniel Gregorek
+ * <dxg@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Module: rt_rt2500pci
+ * Abstract: rt2500pci device specific routines.
+ * Supported chipsets: RT2560.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "rt2x00.h"
+#include "rt2500pci.h"
+
+#include <rtnet_port.h>
+
+#ifdef DRV_NAME
+#undef DRV_NAME
+#define DRV_NAME "rt_rt2500pci"
+#endif /* DRV_NAME */
+
+/* handler for direct register access from the core module */
+static int rt2x00_dev_register_access(struct _rt2x00_core *core, int request,
+                                      u32 address, u32 *value)
+{
+    struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+    u8 u8_value;
+
+    switch (request) {
+    case IOC_RTWLAN_REGREAD:
+        rt2x00_register_read(rt2x00pci, address, value);
+        break;
+    case IOC_RTWLAN_REGWRITE:
+        rt2x00_register_write(rt2x00pci, address, *value);
+        break;
+    case IOC_RTWLAN_BBPREAD:
+        rt2x00_bbp_regread(rt2x00pci, address, &u8_value);
+        *value = u8_value;
+        break;
+    case IOC_RTWLAN_BBPWRITE:
+        rt2x00_bbp_regwrite(rt2x00pci, address, *value);
+        break;
+    default:
+        return -1;
+    }
+
+    return 0;
+}
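+
+/*
+ * Example (illustration only): the hook above is what the core module
+ * calls for rtiwconfig's regread/regwrite commands (see the README),
+ * e.g. to read the ASIC revision from CSR0:
+ */
+#if 0
+    u32 rev;
+    rt2x00_dev_register_access(core, IOC_RTWLAN_REGREAD, CSR0, &rev);
+#endif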
+ */ +static void rt2x00_interrupt_txdone(struct _data_ring *ring) +{ + struct rtwlan_device *rtwlan_dev = rtnetdev_priv(ring->core->rtnet_dev); + struct _txd *txd = NULL; + u8 tx_result = 0x00; + /* u8 retry_count = 0x00; */ + + do { + txd = DESC_ADDR_DONE(ring); + + if (rt2x00_get_field32(txd->word0, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(txd->word0, TXD_W0_VALID)) + break; + + if (ring->ring_type == RING_TX) { + tx_result = + rt2x00_get_field32(txd->word0, TXD_W0_RESULT); + /* retry_count = rt2x00_get_field32(txd->word0, TXD_W0_RETRY_COUNT); */ + + switch (tx_result) { + case TX_SUCCESS: + rtwlan_dev->stats.tx_packets++; + break; + case TX_SUCCESS_RETRY: + rtwlan_dev->stats.tx_retry++; + break; + case TX_FAIL_RETRY: + DEBUG("TX_FAIL_RETRY.\n"); + break; + case TX_FAIL_INVALID: + DEBUG("TX_FAIL_INVALID.\n"); + break; + case TX_FAIL_OTHER: + DEBUG("TX_FAIL_OTHER.\n"); + break; + default: + DEBUG("Unknown tx result.\n"); + } + } + + rt2x00_set_field32(&txd->word0, TXD_W0_VALID, 0); + + rt2x00_ring_index_done_inc(ring); + } while (!rt2x00_ring_empty(ring)); +} + +static void rt2x00_interrupt_rxdone(struct _data_ring *ring, + nanosecs_abs_t *time_stamp) +{ + struct _rt2x00_pci *rt2x00pci = rt2x00_priv(ring->core); + struct rtnet_device *rtnet_dev = ring->core->rtnet_dev; + struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev); + struct _rxd *rxd = NULL; + struct rtskb *rtskb; + void *data = NULL; + u16 size = 0x0000; + /* u16 rssi = 0x0000; */ + + while (1) { + rxd = DESC_ADDR(ring); + data = DATA_ADDR(ring); + + if (rt2x00_get_field32(rxd->word0, RXD_W0_OWNER_NIC)) + break; + + size = rt2x00_get_field32(rxd->word0, RXD_W0_DATABYTE_COUNT); + /* rssi = rt2x00_get_field32(rxd->word2, RXD_W2_RSSI); */ + + /* prepare rtskb */ + rtskb = rtnetdev_alloc_rtskb(rtnet_dev, size + NET_IP_ALIGN); + if (!rtskb) { + ERROR("Couldn't allocate rtskb, packet dropped.\n"); + break; + } + rtskb->time_stamp = *time_stamp; + rtskb_reserve(rtskb, NET_IP_ALIGN); + + memcpy(rtskb->data, data, size); + rtskb_put(rtskb, size); + + /* give incoming frame to rtwlan stack */ + rtwlan_rx(rtskb, rtnet_dev); + + rtwlan_dev->stats.rx_packets++; + + rt2x00_set_field32(&rxd->word0, RXD_W0_OWNER_NIC, 1); + rt2x00_ring_index_inc(&rt2x00pci->rx); + } +} + +int rt2x00_interrupt(rtdm_irq_t *irq_handle) +{ + nanosecs_abs_t time_stamp = rtdm_clock_read(); + + struct rtnet_device *rtnet_dev = + rtdm_irq_get_arg(irq_handle, struct rtnet_device); + struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev); + struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev); + struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core); + unsigned int old_packet_cnt = rtwlan_dev->stats.rx_packets; + u32 reg = 0x00000000; + + rtdm_lock_get(&rt2x00pci->lock); + + rt2x00_register_read(rt2x00pci, CSR7, ®); + rt2x00_register_write(rt2x00pci, CSR7, reg); + + if (!reg) { + rtdm_lock_put(&rt2x00pci->lock); + return RTDM_IRQ_NONE; + } + + if (rt2x00_get_field32( + reg, + CSR7_TBCN_EXPIRE)) /* Beacon timer expired interrupt. */ + DEBUG("Beacon timer expired.\n"); + if (rt2x00_get_field32(reg, CSR7_RXDONE)) /* Rx ring done interrupt. */ + rt2x00_interrupt_rxdone(&rt2x00pci->rx, &time_stamp); + if (rt2x00_get_field32( + reg, + CSR7_TXDONE_ATIMRING)) /* Atim ring transmit done interrupt. */ + DEBUG("AtimTxDone.\n"); + if (rt2x00_get_field32( + reg, + CSR7_TXDONE_PRIORING)) /* Priority ring transmit done interrupt. */ + DEBUG("PrioTxDone.\n"); + if (rt2x00_get_field32( + reg, + CSR7_TXDONE_TXRING)) /* Tx ring transmit done interrupt. 
+int rt2x00_interrupt(rtdm_irq_t *irq_handle)
+{
+    nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+    struct rtnet_device *rtnet_dev =
+        rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+    struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+    struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+    struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+    unsigned int old_packet_cnt = rtwlan_dev->stats.rx_packets;
+    u32 reg = 0x00000000;
+
+    rtdm_lock_get(&rt2x00pci->lock);
+
+    rt2x00_register_read(rt2x00pci, CSR7, &reg);
+    rt2x00_register_write(rt2x00pci, CSR7, reg);
+
+    if (!reg) {
+        rtdm_lock_put(&rt2x00pci->lock);
+        return RTDM_IRQ_NONE;
+    }
+
+    if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+        /* Beacon timer expired interrupt. */
+        DEBUG("Beacon timer expired.\n");
+    if (rt2x00_get_field32(reg, CSR7_RXDONE))
+        /* Rx ring done interrupt. */
+        rt2x00_interrupt_rxdone(&rt2x00pci->rx, &time_stamp);
+    if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
+        /* Atim ring transmit done interrupt. */
+        DEBUG("AtimTxDone.\n");
+    if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
+        /* Priority ring transmit done interrupt. */
+        DEBUG("PrioTxDone.\n");
+    if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
+        /* Tx ring transmit done interrupt. */
+        rt2x00_interrupt_txdone(&rt2x00pci->tx);
+
+    rtdm_lock_put(&rt2x00pci->lock);
+
+    if (old_packet_cnt != rtwlan_dev->stats.rx_packets)
+        rt_mark_stack_mgr(rtnet_dev);
+
+    return RTDM_IRQ_HANDLED;
+}
+
+void rt2x00_init_eeprom(struct _rt2x00_pci *rt2x00pci,
+                        struct _rt2x00_config *config)
+{
+    u32 reg = 0x00000000;
+    u16 eeprom = 0x0000;
+
+    /*
+     * 1 - Detect EEPROM width.
+     */
+    rt2x00_register_read(rt2x00pci, CSR21, &reg);
+    rt2x00pci->eeprom_width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ?
+                                  EEPROM_WIDTH_93c46 :
+                                  EEPROM_WIDTH_93c66;
+
+    /*
+     * 2 - Identify rf chipset.
+     */
+    eeprom = rt2x00_eeprom_read_word(rt2x00pci, EEPROM_ANTENNA);
+    set_chip(&rt2x00pci->chip, RT2560,
+             rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE));
+
+    /*
+     * 3 - Identify default antenna configuration.
+     */
+    config->antenna_tx =
+        rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT);
+    config->antenna_rx =
+        rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT);
+
+    DEBUG("antenna_tx=%d antenna_rx=%d\n", config->antenna_tx,
+          config->antenna_rx);
+
+    /*
+     * 4 - Read BBP data from EEPROM and store in private structure.
+     */
+    memset(&rt2x00pci->eeprom, 0x00, sizeof(rt2x00pci->eeprom));
+    for (eeprom = 0; eeprom < EEPROM_BBP_SIZE; eeprom++)
+        rt2x00pci->eeprom[eeprom] = rt2x00_eeprom_read_word(
+            rt2x00pci, EEPROM_BBP_START + eeprom);
+}
+
+void rt2x00_dev_read_mac(struct _rt2x00_pci *rt2x00pci,
+                         struct rtnet_device *rtnet_dev)
+{
+    u32 reg[2];
+
+    memset(&reg, 0x00, sizeof(reg));
+
+    rt2x00_register_multiread(rt2x00pci, CSR3, &reg[0], sizeof(reg));
+
+    rtnet_dev->dev_addr[0] = rt2x00_get_field32(reg[0], CSR3_BYTE0);
+    rtnet_dev->dev_addr[1] = rt2x00_get_field32(reg[0], CSR3_BYTE1);
+    rtnet_dev->dev_addr[2] = rt2x00_get_field32(reg[0], CSR3_BYTE2);
+    rtnet_dev->dev_addr[3] = rt2x00_get_field32(reg[0], CSR3_BYTE3);
+    rtnet_dev->dev_addr[4] = rt2x00_get_field32(reg[1], CSR4_BYTE4);
+    rtnet_dev->dev_addr[5] = rt2x00_get_field32(reg[1], CSR4_BYTE5);
+
+    rtnet_dev->addr_len = 6;
+}
+
+int rt2x00_dev_probe(struct _rt2x00_core *core, void *priv)
+{
+    struct pci_dev *pci_dev = (struct pci_dev *)priv;
+    struct _rt2x00_pci *rt2x00pci = core->priv;
+
+    memset(rt2x00pci, 0x00, sizeof(*rt2x00pci));
+
+    if (unlikely(!pci_dev)) {
+        ERROR("invalid priv pointer.\n");
+        return -ENODEV;
+    }
+    rt2x00pci->pci_dev = pci_dev;
+
+    rt2x00pci->rx.data_addr = NULL;
+    rt2x00pci->tx.data_addr = NULL;
+
+    rt2x00pci->csr_addr = ioremap(pci_resource_start(pci_dev, 0),
+                                  pci_resource_len(pci_dev, 0));
+    if (!rt2x00pci->csr_addr) {
+        ERROR("ioremap failed.\n");
+        return -ENOMEM;
+    }
+
+    rt2x00_init_eeprom(rt2x00pci, &core->config);
+    rt2x00_dev_read_mac(rt2x00pci, core->rtnet_dev);
+
+    return 0;
+}
+
+int rt2x00_dev_remove(struct _rt2x00_core *core)
+{
+    struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+    if (rt2x00pci->csr_addr) {
+        iounmap(rt2x00pci->csr_addr);
+        rt2x00pci->csr_addr = NULL;
+    }
+
+    return 0;
+}
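+
+/*
+ * Illustration (not part of the driver): the descriptor ownership
+ * handshake used by the ring routines below. The CPU may only touch a
+ * TX entry while OWNER_NIC is 0; it hands the entry to the hardware by
+ * setting OWNER_NIC, and the device clears the bit once the frame has
+ * been processed.
+ */
+#if 0
+    struct _txd *txd = DESC_ADDR(ring);
+
+    if (!rt2x00_get_field32(txd->word0, TXD_W0_OWNER_NIC)) {
+        /* ... fill the descriptor and its data buffer here ... */
+        rt2x00_set_field32(&txd->word0, TXD_W0_OWNER_NIC, 1);
+    }
+#endif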
+ */ +static void rt2x00_clear_ring(struct _rt2x00_pci *rt2x00pci, + struct _data_ring *ring) +{ + struct _rxd *rxd = NULL; + struct _txd *txd = NULL; + dma_addr_t data_dma = + ring->data_dma + (ring->max_entries * ring->desc_size); + u8 counter = 0x00; + + memset(ring->data_addr, 0x00, ring->mem_size); + + for (; counter < ring->max_entries; counter++) { + if (ring->ring_type == RING_RX) { + rxd = (struct _rxd *)__DESC_ADDR(ring, counter); + + rt2x00_set_field32(&rxd->word1, RXD_W1_BUFFER_ADDRESS, + data_dma); + rt2x00_set_field32(&rxd->word0, RXD_W0_OWNER_NIC, 1); + } else { + txd = (struct _txd *)__DESC_ADDR(ring, counter); + + rt2x00_set_field32(&txd->word1, TXD_W1_BUFFER_ADDRESS, + data_dma); + rt2x00_set_field32(&txd->word0, TXD_W0_VALID, 0); + rt2x00_set_field32(&txd->word0, TXD_W0_OWNER_NIC, 0); + } + + data_dma += ring->entry_size; + } + + rt2x00_ring_clear_index(ring); +} + +/* + * rt2x00_init_ring_register + * The registers should be updated with the descriptor size and the + * number of entries of each ring. + * The address of the first entry of the descriptor ring is written to the register + * corresponding to the ring. + */ +static void rt2x00_init_ring_register(struct _rt2x00_pci *rt2x00pci) +{ + u32 reg = 0x00000000; + + /* Initialize ring register for RX/TX */ + + rt2x00_set_field32(®, TXCSR2_TXD_SIZE, rt2x00pci->tx.desc_size); + rt2x00_set_field32(®, TXCSR2_NUM_TXD, rt2x00pci->tx.max_entries); + rt2x00_register_write(rt2x00pci, TXCSR2, reg); + + reg = 0x00000000; + rt2x00_set_field32(®, TXCSR3_TX_RING_REGISTER, + rt2x00pci->tx.data_dma); + rt2x00_register_write(rt2x00pci, TXCSR3, reg); + + reg = 0x00000000; + rt2x00_set_field32(®, RXCSR1_RXD_SIZE, rt2x00pci->rx.desc_size); + rt2x00_set_field32(®, RXCSR1_NUM_RXD, rt2x00pci->rx.max_entries); + rt2x00_register_write(rt2x00pci, RXCSR1, reg); + + reg = 0x00000000; + rt2x00_set_field32(®, RXCSR2_RX_RING_REGISTER, + rt2x00pci->rx.data_dma); + rt2x00_register_write(rt2x00pci, RXCSR2, reg); +} + +static int rt2x00_init_registers(struct _rt2x00_pci *rt2x00pci) +{ + u32 reg = 0x00000000; + + DEBUG("Start.\n"); + + rt2x00_register_write(rt2x00pci, PWRCSR0, cpu_to_le32(0x3f3b3100)); + + rt2x00_register_write(rt2x00pci, PSCSR0, cpu_to_le32(0x00020002)); + rt2x00_register_write(rt2x00pci, PSCSR1, cpu_to_le32(0x00000002)); + rt2x00_register_write(rt2x00pci, PSCSR2, cpu_to_le32(0x00020002)); + rt2x00_register_write(rt2x00pci, PSCSR3, cpu_to_le32(0x00000002)); + + rt2x00_register_read(rt2x00pci, TIMECSR, ®); + rt2x00_set_field32(®, TIMECSR_US_COUNT, 33); + rt2x00_set_field32(®, TIMECSR_US_64_COUNT, 63); + rt2x00_set_field32(®, TIMECSR_BEACON_EXPECT, 0); + rt2x00_register_write(rt2x00pci, TIMECSR, reg); + + rt2x00_register_read(rt2x00pci, CSR9, ®); + rt2x00_set_field32(®, CSR9_MAX_FRAME_UNIT, + (rt2x00pci->rx.entry_size / 128)); + rt2x00_register_write(rt2x00pci, CSR9, reg); + + rt2x00_register_write(rt2x00pci, CNT3, cpu_to_le32(0x3f080000)); + + rt2x00_register_read(rt2x00pci, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DISABLE_RX, 0); + rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, 0); + rt2x00_register_write(rt2x00pci, RXCSR0, reg); + + rt2x00_register_write(rt2x00pci, MACCSR0, cpu_to_le32(0x00213223)); + + rt2x00_register_read(rt2x00pci, MACCSR1, ®); + rt2x00_set_field32(®, MACCSR1_AUTO_TXBBP, 1); + rt2x00_set_field32(®, MACCSR1_AUTO_RXBBP, 1); + rt2x00_register_write(rt2x00pci, MACCSR1, reg); + + rt2x00_register_read(rt2x00pci, MACCSR2, ®); + rt2x00_set_field32(®, MACCSR2_DELAY, 64); + rt2x00_register_write(rt2x00pci, MACCSR2, reg); + + 
+    rt2x00_register_read(rt2x00pci, RXCSR3, &reg);
+    rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 47); /* Signal. */
+    rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1);
+    rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 51); /* Rssi. */
+    rt2x00_set_field32(&reg, RXCSR3_BBP_ID1_VALID, 1);
+    rt2x00_set_field32(&reg, RXCSR3_BBP_ID2, 42); /* OFDM Rate. */
+    rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1);
+    rt2x00_set_field32(&reg, RXCSR3_BBP_ID3, 51); /* OFDM. */
+    rt2x00_set_field32(&reg, RXCSR3_BBP_ID3_VALID, 1);
+    rt2x00_register_write(rt2x00pci, RXCSR3, reg);
+
+    rt2x00_register_read(rt2x00pci, RALINKCSR, &reg);
+    rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17);
+    rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 26);
+    rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID0, 1);
+    rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0);
+    rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 26);
+    rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID1, 1);
+    rt2x00_register_write(rt2x00pci, RALINKCSR, reg);
+
+    rt2x00_register_write(rt2x00pci, BBPCSR1, cpu_to_le32(0x82188200));
+
+    rt2x00_register_write(rt2x00pci, TXACKCSR0, cpu_to_le32(0x00000020));
+
+    rt2x00_register_write(rt2x00pci, ARTCSR0, cpu_to_le32(0x7038140a));
+    rt2x00_register_write(rt2x00pci, ARTCSR1, cpu_to_le32(0x1d21252d));
+    rt2x00_register_write(rt2x00pci, ARTCSR2, cpu_to_le32(0x1919191d));
+
+    /* disable the beacon timer */
+    rt2x00_register_write(rt2x00pci, CSR14, 0x0);
+
+    reg = 0x00000000;
+    rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, 30);
+    rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, 70);
+    rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
+    rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0);
+    rt2x00_register_write(rt2x00pci, LEDCSR, reg);
+
+    reg = 0x00000000;
+    rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1);
+    rt2x00_register_write(rt2x00pci, CSR1, reg);
+
+    reg = 0x00000000;
+    rt2x00_set_field32(&reg, CSR1_HOST_READY, 1);
+    rt2x00_register_write(rt2x00pci, CSR1, reg);
+
+    /*
+     * We must clear the FCS and FIFO error counters.
+     * These registers are cleared on read, so we may pass a throwaway
+     * variable to store the value.
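+     * (CNT0 counts FCS errors and CNT4 counts RX FIFO overflows; see
+     * the statistics register map in rt2500pci.h.)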
+ */ + rt2x00_register_read(rt2x00pci, CNT0, ®); + rt2x00_register_read(rt2x00pci, CNT4, ®); + + return 0; +} + +static void rt2x00_init_write_mac(struct _rt2x00_pci *rt2x00pci, + struct rtnet_device *rtnet_dev) +{ + u32 reg[2]; + + memset(®, 0x00, sizeof(reg)); + + rt2x00_set_field32(®[0], CSR3_BYTE0, rtnet_dev->dev_addr[0]); + rt2x00_set_field32(®[0], CSR3_BYTE1, rtnet_dev->dev_addr[1]); + rt2x00_set_field32(®[0], CSR3_BYTE2, rtnet_dev->dev_addr[2]); + rt2x00_set_field32(®[0], CSR3_BYTE3, rtnet_dev->dev_addr[3]); + rt2x00_set_field32(®[1], CSR4_BYTE4, rtnet_dev->dev_addr[4]); + rt2x00_set_field32(®[1], CSR4_BYTE5, rtnet_dev->dev_addr[5]); + + rt2x00_register_multiwrite(rt2x00pci, CSR3, ®[0], sizeof(reg)); +} + +static int rt2x00_init_bbp(struct _rt2x00_pci *rt2x00pci) +{ + u8 reg_id = 0x00; + u8 value = 0x00; + u8 counter = 0x00; + + for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) { + rt2x00_bbp_regread(rt2x00pci, 0x00, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE("Waiting for BBP register.\n"); + } + + ERROR("hardware problem, BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt2x00_bbp_regwrite(rt2x00pci, 3, 0x02); + rt2x00_bbp_regwrite(rt2x00pci, 4, 0x19); + rt2x00_bbp_regwrite(rt2x00pci, 14, 0x1c); + rt2x00_bbp_regwrite(rt2x00pci, 15, 0x30); + rt2x00_bbp_regwrite(rt2x00pci, 16, 0xac); + rt2x00_bbp_regwrite(rt2x00pci, 17, 0x48); + rt2x00_bbp_regwrite(rt2x00pci, 18, 0x18); + rt2x00_bbp_regwrite(rt2x00pci, 19, 0xff); + rt2x00_bbp_regwrite(rt2x00pci, 20, 0x1e); + rt2x00_bbp_regwrite(rt2x00pci, 21, 0x08); + rt2x00_bbp_regwrite(rt2x00pci, 22, 0x08); + rt2x00_bbp_regwrite(rt2x00pci, 23, 0x08); + rt2x00_bbp_regwrite(rt2x00pci, 24, 0x70); + rt2x00_bbp_regwrite(rt2x00pci, 25, 0x40); + rt2x00_bbp_regwrite(rt2x00pci, 26, 0x08); + rt2x00_bbp_regwrite(rt2x00pci, 27, 0x23); + rt2x00_bbp_regwrite(rt2x00pci, 30, 0x10); + rt2x00_bbp_regwrite(rt2x00pci, 31, 0x2b); + rt2x00_bbp_regwrite(rt2x00pci, 32, 0xb9); + rt2x00_bbp_regwrite(rt2x00pci, 34, 0x12); + rt2x00_bbp_regwrite(rt2x00pci, 35, 0x50); + rt2x00_bbp_regwrite(rt2x00pci, 39, 0xc4); + rt2x00_bbp_regwrite(rt2x00pci, 40, 0x02); + rt2x00_bbp_regwrite(rt2x00pci, 41, 0x60); + rt2x00_bbp_regwrite(rt2x00pci, 53, 0x10); + rt2x00_bbp_regwrite(rt2x00pci, 54, 0x18); + rt2x00_bbp_regwrite(rt2x00pci, 56, 0x08); + rt2x00_bbp_regwrite(rt2x00pci, 57, 0x10); + rt2x00_bbp_regwrite(rt2x00pci, 58, 0x08); + rt2x00_bbp_regwrite(rt2x00pci, 61, 0x6d); + rt2x00_bbp_regwrite(rt2x00pci, 62, 0x10); + + DEBUG("Start reading EEPROM contents...\n"); + for (counter = 0; counter < EEPROM_BBP_SIZE; counter++) { + if (rt2x00pci->eeprom[counter] != 0xffff && + rt2x00pci->eeprom[counter] != 0x0000) { + reg_id = rt2x00_get_field16(rt2x00pci->eeprom[counter], + EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(rt2x00pci->eeprom[counter], + EEPROM_BBP_VALUE); + DEBUG("BBP reg_id: 0x%02x, value: 0x%02x.\n", reg_id, + value); + rt2x00_bbp_regwrite(rt2x00pci, reg_id, value); + } + } + DEBUG("...End of EEPROM contents.\n"); + + return 0; +} + +/* + * Device radio routines. + * When the radio is switched on or off, the TX and RX + * should always be reset using the TXCSR0 and RXCSR0 registers. + * The radio itself is switched on and off using the PWRCSR0 register. 
+ */ + +static int rt2x00_dev_radio_on(struct _rt2x00_core *core) +{ + struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core); + u32 reg = 0x00000000; + int retval; + + if (rt2x00_pci_alloc_rings(core)) + goto exit_fail; + + rt2x00_clear_ring(rt2x00pci, &rt2x00pci->rx); + rt2x00_clear_ring(rt2x00pci, &rt2x00pci->tx); + + rt2x00_init_ring_register(rt2x00pci); + + if (rt2x00_init_registers(rt2x00pci)) + goto exit_fail; + + rt2x00_init_write_mac(rt2x00pci, core->rtnet_dev); + + if (rt2x00_init_bbp(rt2x00pci)) + goto exit_fail; + + /* + * Clear interrupts. + */ + rt2x00_register_read(rt2x00pci, CSR7, ®); + rt2x00_register_write(rt2x00pci, CSR7, reg); + + /* Register rtdm-irq */ + retval = rtdm_irq_request(&rt2x00pci->irq_handle, core->rtnet_dev->irq, + rt2x00_interrupt, 0, core->rtnet_dev->name, + core->rtnet_dev); + + /* + * Enable interrupts. + */ + rt2x00_register_read(rt2x00pci, CSR8, ®); + rt2x00_set_field32(®, CSR8_TBCN_EXPIRE, 0); + rt2x00_set_field32(®, CSR8_TXDONE_TXRING, 0); + rt2x00_set_field32(®, CSR8_TXDONE_ATIMRING, 0); + rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, 0); + rt2x00_set_field32(®, CSR8_RXDONE, 0); + rt2x00_register_write(rt2x00pci, CSR8, reg); + + return 0; + +exit_fail: + rt2x00_pci_free_rings(core); + + return -ENOMEM; +} + +static int rt2x00_dev_radio_off(struct _rt2x00_core *core) +{ + struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core); + u32 reg = 0x00000000; + int retval = 0; + + rt2x00_register_write(rt2x00pci, PWRCSR0, cpu_to_le32(0x00000000)); + + rt2x00_register_read(rt2x00pci, TXCSR0, ®); + rt2x00_set_field32(®, TXCSR0_ABORT, 1); + rt2x00_register_write(rt2x00pci, TXCSR0, reg); + + rt2x00_register_read(rt2x00pci, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DISABLE_RX, 1); + rt2x00_register_write(rt2x00pci, RXCSR0, reg); + + rt2x00_register_read(rt2x00pci, LEDCSR, ®); + rt2x00_set_field32(®, LEDCSR_LINK, 0); + rt2x00_register_write(rt2x00pci, LEDCSR, reg); + + rt2x00_register_read(rt2x00pci, CSR8, ®); + rt2x00_set_field32(®, CSR8_TBCN_EXPIRE, 1); + rt2x00_set_field32(®, CSR8_TXDONE_TXRING, 1); + rt2x00_set_field32(®, CSR8_TXDONE_ATIMRING, 1); + rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, 1); + rt2x00_set_field32(®, CSR8_RXDONE, 1); + rt2x00_register_write(rt2x00pci, CSR8, reg); + + rt2x00_pci_free_rings(core); + + if ((retval = rtdm_irq_free(&rt2x00pci->irq_handle)) != 0) + ERROR("rtdm_irq_free=%d\n", retval); + + rt_stack_disconnect(core->rtnet_dev); + + return retval; +} + +/* + * Configuration handlers. 
+ */ + +static void rt2x00_dev_update_autoresp(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u32 reg = 0; + + DEBUG("Start.\n"); + + rt2x00_register_read(rt2x00pci, TXCSR1, ®); + + if (config->config_flags & CONFIG_AUTORESP) + rt2x00_set_field32(®, TXCSR1_AUTORESPONDER, 1); + else + rt2x00_set_field32(®, TXCSR1_AUTORESPONDER, 0); + + rt2x00_register_write(rt2x00pci, TXCSR1, reg); +} + +static void rt2x00_dev_update_bbpsens(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + rt2x00_bbp_regwrite(rt2x00pci, 0x11, config->bbpsens); +} + +static void rt2x00_dev_update_bssid(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u32 reg[2]; + + memset(®, 0x00, sizeof(reg)); + + rt2x00_set_field32(®[0], CSR5_BYTE0, config->bssid[0]); + rt2x00_set_field32(®[0], CSR5_BYTE1, config->bssid[1]); + rt2x00_set_field32(®[0], CSR5_BYTE2, config->bssid[2]); + rt2x00_set_field32(®[0], CSR5_BYTE3, config->bssid[3]); + rt2x00_set_field32(®[1], CSR6_BYTE4, config->bssid[4]); + rt2x00_set_field32(®[1], CSR6_BYTE5, config->bssid[5]); + + rt2x00_register_multiwrite(rt2x00pci, CSR5, ®[0], sizeof(reg)); +} + +static void rt2x00_dev_update_packet_filter(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u32 reg = 0x00000000; + + DEBUG("Start.\n"); + + rt2x00_register_read(rt2x00pci, RXCSR0, ®); + + rt2x00_set_field32(®, RXCSR0_DROP_TODS, 0); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, RXCSR0_DROP_CRC, 1); + rt2x00_set_field32(®, RXCSR0_DROP_PHYSICAL, 1); + rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, 1); + rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, 1); + + /* + * This looks like a bug, but for an unknown reason the register seems to swap the bits !!! + */ + if (config->config_flags & CONFIG_DROP_BCAST) + rt2x00_set_field32(®, RXCSR0_DROP_MCAST, 1); + else + rt2x00_set_field32(®, RXCSR0_DROP_MCAST, 0); + + if (config->config_flags & CONFIG_DROP_MCAST) + rt2x00_set_field32(®, RXCSR0_DROP_BCAST, 1); + else + rt2x00_set_field32(®, RXCSR0_DROP_BCAST, 0); + + rt2x00_register_write(rt2x00pci, RXCSR0, reg); +} + +static void rt2x00_dev_update_channel(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u8 txpower = rt2x00_get_txpower(&rt2x00pci->chip, config->txpower); + u32 reg = 0x00000000; + + if (rt2x00_get_rf_value(&rt2x00pci->chip, config->channel, + &rt2x00pci->channel)) { + ERROR("RF values for chip %04x and channel %d not found.\n", + rt2x00_get_rf(&rt2x00pci->chip), config->channel); + return; + } + + /* + * Set TXpower. + */ + rt2x00_set_field32(&rt2x00pci->channel.rf3, RF3_TXPOWER, txpower); + + /* + * For RT2525 we should first set the channel to half band higher. + */ + if (rt2x00_rf(&rt2x00pci->chip, RF2525)) { + rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf1); + rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf2 + + cpu_to_le32(0x00000020)); + rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf3); + if (rt2x00pci->channel.rf4) + rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf4); + } + + rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf1); + rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf2); + rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf3); + if (rt2x00pci->channel.rf4) + rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf4); + + /* + * Channel 14 requires the Japan filter bit to be set. + */ + rt2x00_bbp_regwrite(rt2x00pci, 70, + (config->channel == 14) ? 
0x4e : 0x46); + + msleep(1); + + /* + * Clear false CRC during channel switch. + */ + rt2x00_register_read(rt2x00pci, CNT0, ®); + + DEBUG("Switching to channel %d. RF1: 0x%08x, RF2: 0x%08x, RF3: 0x%08x, RF4: 0x%08x.\n", + config->channel, rt2x00pci->channel.rf1, rt2x00pci->channel.rf2, + rt2x00pci->channel.rf3, rt2x00pci->channel.rf4); +} + +static void rt2x00_dev_update_rate(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u32 value = 0x00000000; + u32 reg = 0x00000000; + u8 counter = 0x00; + + DEBUG("Start.\n"); + + rt2x00_register_read(rt2x00pci, TXCSR1, ®); + + value = config->sifs + (2 * config->slot_time) + config->plcp + + get_preamble(config) + + get_duration(ACK_SIZE, capabilities.bitrate[0]); + rt2x00_set_field32(®, TXCSR1_ACK_TIMEOUT, value); + + value = config->sifs + config->plcp + get_preamble(config) + + get_duration(ACK_SIZE, capabilities.bitrate[0]); + rt2x00_set_field32(®, TXCSR1_ACK_CONSUME_TIME, value); + + rt2x00_set_field32(®, TXCSR1_TSF_OFFSET, 0x18); + rt2x00_set_field32(®, TXCSR1_AUTORESPONDER, 1); + + rt2x00_register_write(rt2x00pci, TXCSR1, reg); + + reg = 0x00000000; + for (counter = 0; counter < 12; counter++) { + reg |= cpu_to_le32(0x00000001 << counter); + if (capabilities.bitrate[counter] == config->bitrate) + break; + } + + rt2x00_register_write(rt2x00pci, ARCSR1, reg); +} + +static void rt2x00_dev_update_txpower(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u8 txpower = rt2x00_get_txpower(&rt2x00pci->chip, config->txpower); + + DEBUG("Start.\n"); + + rt2x00_set_field32(&rt2x00pci->channel.rf3, RF3_TXPOWER, txpower); + rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf3); +} + +static void rt2x00_dev_update_antenna(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u32 reg; + u8 reg_rx; + u8 reg_tx; + + rt2x00_register_read(rt2x00pci, BBPCSR1, ®); + rt2x00_bbp_regread(rt2x00pci, 14, ®_rx); + rt2x00_bbp_regread(rt2x00pci, 2, ®_tx); + + /* TX antenna select */ + if (config->antenna_tx == 1) { + /* Antenna A */ + reg_tx = (reg_tx & 0xfc) | 0x00; + reg = (reg & 0xfffcfffc) | 0x00; + } else if (config->antenna_tx == 2) { + /* Antenna B */ + reg_tx = (reg_tx & 0xfc) | 0x02; + reg = (reg & 0xfffcfffc) | 0x00020002; + } else { + /* Diversity */ + reg_tx = (reg_tx & 0xfc) | 0x02; + reg = (reg & 0xfffcfffc) | 0x00020002; + } + + /* RX antenna select */ + if (config->antenna_rx == 1) + reg_rx = (reg_rx & 0xfc) | 0x00; + else if (config->antenna_rx == 2) + reg_rx = (reg_rx & 0xfc) | 0x02; + else + reg_rx = (reg_rx & 0xfc) | 0x02; + + /* + * RT2525E and RT5222 need to flip I/Q + */ + if (rt2x00_rf(&rt2x00pci->chip, RF5222)) { + reg |= 0x00040004; + reg_tx |= 0x04; + } else if (rt2x00_rf(&rt2x00pci->chip, RF2525E)) { + reg |= 0x00040004; + reg_tx |= 0x04; + reg_rx |= 0xfb; + } + + rt2x00_register_write(rt2x00pci, BBPCSR1, reg); + rt2x00_bbp_regwrite(rt2x00pci, 14, reg_rx); + rt2x00_bbp_regwrite(rt2x00pci, 2, reg_tx); +} + +static void rt2x00_dev_update_duration(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u32 reg = 0x00000000; + + DEBUG("Start.\n"); + + rt2x00_register_read(rt2x00pci, CSR11, ®); + rt2x00_set_field32(®, CSR11_CWMIN, 5); /* 2^5 = 32. */ + rt2x00_set_field32(®, CSR11_CWMAX, 10); /* 2^10 = 1024. 
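+                                                     */
+
+    /*
+     * Illustration (not in the original code): CSR11 stores the
+     * contention windows as exponents, so the effective window in
+     * slots is 2^n - 1, e.g.:
+     */
+#if 0
+    unsigned int cwmin_slots = (1U << 5) - 1;   /* 31 */
+    unsigned int cwmax_slots = (1U << 10) - 1;  /* 1023 */
+#endif
+    /* CSR11 programming continues here.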
*/ + rt2x00_set_field32(®, CSR11_SLOT_TIME, config->slot_time); + rt2x00_set_field32(®, CSR11_CW_SELECT, 1); + rt2x00_register_write(rt2x00pci, CSR11, reg); + + rt2x00_register_read(rt2x00pci, CSR18, ®); + rt2x00_set_field32(®, CSR18_SIFS, config->sifs); + rt2x00_set_field32(®, CSR18_PIFS, config->sifs + config->slot_time); + rt2x00_register_write(rt2x00pci, CSR18, reg); + + rt2x00_register_read(rt2x00pci, CSR19, ®); + rt2x00_set_field32(®, CSR19_DIFS, + config->sifs + (2 * config->slot_time)); + rt2x00_set_field32(®, CSR19_EIFS, + config->sifs + + get_duration((IEEE80211_HEADER + ACK_SIZE), + capabilities.bitrate[0])); + rt2x00_register_write(rt2x00pci, CSR19, reg); +} + +static void rt2x00_dev_update_retry(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u32 reg = 0x00000000; + + rt2x00_register_read(rt2x00pci, CSR11, ®); + rt2x00_set_field32(®, CSR11_LONG_RETRY, config->long_retry); + rt2x00_set_field32(®, CSR11_SHORT_RETRY, config->short_retry); + rt2x00_register_write(rt2x00pci, CSR11, reg); +} + +static void rt2x00_dev_update_preamble(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u32 reg[4]; + u32 preamble = 0x00000000; + + memset(®, 0x00, sizeof(reg)); + + reg[0] = cpu_to_le32(0x00700400 | preamble); /* ARCSR2 */ + reg[1] = cpu_to_le32(0x00380401 | preamble); /* ARCSR3 */ + reg[2] = cpu_to_le32(0x00150402 | preamble); /* ARCSR4 */ + reg[3] = cpu_to_le32(0x000b8403 | preamble); /* ARCSR5 */ + + rt2x00_register_multiwrite(rt2x00pci, ARCSR2, ®[0], sizeof(reg)); +} + +static void rt2x00_dev_update_led(struct _rt2x00_pci *rt2x00pci, + struct _rt2x00_config *config) +{ + u32 reg = 0x00000000; + + rt2x00_register_read(rt2x00pci, LEDCSR, ®); + rt2x00_set_field32(®, LEDCSR_LINK, config->led_status ? 1 : 0); + rt2x00_register_write(rt2x00pci, LEDCSR, reg); +} + +static int rt2x00_dev_update_config(struct _rt2x00_core *core, u16 update_flags) +{ + struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core); + + DEBUG("Start.\n"); + + if (update_flags & UPDATE_BSSID) + rt2x00_dev_update_bssid(rt2x00pci, &core->config); + + if (update_flags & UPDATE_PACKET_FILTER) + rt2x00_dev_update_packet_filter(rt2x00pci, &core->config); + + if (update_flags & UPDATE_CHANNEL) + rt2x00_dev_update_channel(rt2x00pci, &core->config); + + if (update_flags & UPDATE_BITRATE) + rt2x00_dev_update_rate(rt2x00pci, &core->config); + + if (update_flags & UPDATE_TXPOWER) + rt2x00_dev_update_txpower(rt2x00pci, &core->config); + + if (update_flags & UPDATE_ANTENNA) + rt2x00_dev_update_antenna(rt2x00pci, &core->config); + + if (update_flags & UPDATE_DURATION) + rt2x00_dev_update_duration(rt2x00pci, &core->config); + + if (update_flags & UPDATE_RETRY) + rt2x00_dev_update_retry(rt2x00pci, &core->config); + + if (update_flags & UPDATE_PREAMBLE) + rt2x00_dev_update_preamble(rt2x00pci, &core->config); + + if (update_flags & UPDATE_LED_STATUS) + rt2x00_dev_update_led(rt2x00pci, &core->config); + + if (update_flags & UPDATE_AUTORESP) + rt2x00_dev_update_autoresp(rt2x00pci, &core->config); + + if (update_flags & UPDATE_BBPSENS) + rt2x00_dev_update_bbpsens(rt2x00pci, &core->config); + + DEBUG("Exit.\n"); + + return 0; +} + +/* + * Transmission routines. + * rt2x00_write_tx_desc will write the txd descriptor. + * rt2x00_dev_xmit_packet will copy the packets to the appropriate DMA ring. + */ + +/* + * PLCP_SIGNAL, PLCP_SERVICE, PLCP_LENGTH_LOW and PLCP_LENGTH_HIGH are BBP registers. 
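+ * (A note on the arithmetic used below: get_duration() yields the PLCP
+ * LENGTH value in whole microseconds and get_duration_res() the remainder;
+ * a nonzero remainder rounds the duration up and decides the 802.11b
+ * length extension bit placed in the SERVICE field.)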
+ * For RT2460 devices we need, besides the value we want to write, + * also set the busy bit (0x8000) and the register number (0x0f00). + * The value we want to write is stored in 0x00ff. + * For PLCP_SIGNAL we can optionally enable SHORT_PREAMBLE. + * For PLCP_SERVICE we can set the length extension bit according to + * 802.11b standard 18.2.3.5. + */ +static void rt2x00_write_tx_desc(struct _rt2x00_pci *rt2x00pci, + struct _txd *txd, u32 packet_size, u16 rate, + u16 xmit_flags) +{ + u32 residual = 0x00000000; + u32 duration = 0x00000000; + u16 signal = 0x0000; + u16 service = 0x0000; + u16 length_low = 0x0000; + u16 length_high = 0x0000; + + rt2x00_set_field32(&txd->word0, TXD_W0_VALID, 1); + rt2x00_set_field32(&txd->word0, TXD_W0_DATABYTE_COUNT, packet_size); + rt2x00_set_field32(&txd->word0, TXD_W0_ACK, + (xmit_flags & XMIT_ACK) ? 1 : 0); + rt2x00_set_field32(&txd->word0, TXD_W0_RETRY_MODE, + (xmit_flags & XMIT_LONG_RETRY) ? 1 : 0); + rt2x00_set_field32(&txd->word0, TXD_W0_TIMESTAMP, + (xmit_flags & XMIT_TIMESTAMP) ? 1 : 0); + rt2x00_set_field32(&txd->word0, TXD_W0_MORE_FRAG, + (xmit_flags & XMIT_MORE_FRAGS) ? 1 : 0); + rt2x00_set_field32(&txd->word0, TXD_W0_MORE_FRAG, + (xmit_flags & XMIT_RTS) ? 1 : 0); + rt2x00_set_field32(&txd->word10, TXD_W10_RTS, + (xmit_flags & XMIT_RTS) ? 1 : 0); + rt2x00_set_field32(&txd->word0, TXD_W0_OFDM, + (xmit_flags & XMIT_OFDM) ? 1 : 0); + + packet_size += 4; + + if (xmit_flags & XMIT_OFDM) { + /* + * convert length to microseconds. + */ + length_high = (packet_size >> 6) & 0x3f; + length_low = (packet_size & 0x3f); + } else { + residual = get_duration_res(packet_size, rate); + duration = get_duration(packet_size, rate); + + if (residual != 0) + duration++; + + length_high = duration >> 8; + length_low = duration & 0xff; + } + + signal |= 0x8500 | rt2x00_get_plcp(rate); + if (xmit_flags & XMIT_SHORT_PREAMBLE) + signal |= 0x0008; + + service |= 0x0600 | 0x0004; + if (residual <= (8 % 11)) + service |= 0x0080; + + rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_SIGNAL, signal); + rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_SERVICE, service); + rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_LENGTH_LOW, length_low); + rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_LENGTH_HIGH, length_high); + + /* set XMIT_IFS to XMIT_IFS_NONE */ + rt2x00_set_field32(&txd->word0, TXD_W0_IFS, XMIT_IFS_NONE); + + /* highest priority */ + rt2x00_set_field32(&txd->word2, TXD_W2_CWMIN, 1); + rt2x00_set_field32(&txd->word2, TXD_W2_CWMAX, 2); + rt2x00_set_field32(&txd->word2, TXD_W2_AIFS, 1); + + /* + * set this last, after this the device can start transmitting the packet. 
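+ * Writing OWNER_NIC last publishes the fully initialized descriptor to
+ * the hardware; on weakly ordered CPUs a write barrier would normally
+ * be required before this point.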
+ */ + rt2x00_set_field32(&txd->word0, TXD_W0_OWNER_NIC, 1); +} + +static int rt2x00_dev_xmit_packet(struct _rt2x00_core *core, + struct rtskb *rtskb, u16 rate, u16 xmit_flags) +{ + struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core); + struct _data_ring *ring = NULL; + struct _txd *txd = NULL; + void *data = NULL; + u32 reg = 0x00000000; + rtdm_lockctx_t context; + + rtdm_lock_get_irqsave(&rt2x00pci->lock, context); + + /* load tx-control register */ + rt2x00_register_read(rt2x00pci, TXCSR0, ®); + + /* select tx-descriptor ring and prepare xmit */ + ring = &rt2x00pci->tx; + rt2x00_set_field32(®, TXCSR0_KICK_TX, 1); + + txd = DESC_ADDR(ring); + data = DATA_ADDR(ring); + + if (rt2x00_get_field32(txd->word0, TXD_W0_OWNER_NIC) || + rt2x00_get_field32(txd->word0, TXD_W0_VALID)) { + rtdm_lock_put_irqrestore(&rt2x00pci->lock, context); + return -ENOMEM; + } + + /* get and patch time stamp just before the transmission */ + if (rtskb->xmit_stamp) + *rtskb->xmit_stamp = + cpu_to_be64(rtdm_clock_read() + *rtskb->xmit_stamp); + + /* copy rtskb to dma */ + memcpy(data, rtskb->data, rtskb->len); + + rt2x00_write_tx_desc(rt2x00pci, txd, rtskb->len, rate, xmit_flags); + rt2x00_ring_index_inc(ring); + + /* let the device do the rest ... */ + rt2x00_register_write(rt2x00pci, TXCSR0, reg); + + rtdm_lock_put_irqrestore(&rt2x00pci->lock, context); + + return 0; +} + +/* + * PCI device handlers for usage by core module. + */ +static struct _rt2x00_dev_handler rt2x00_pci_handler = { + + .dev_module = THIS_MODULE, + .dev_probe = rt2x00_dev_probe, + .dev_remove = rt2x00_dev_remove, + .dev_radio_on = rt2x00_dev_radio_on, + .dev_radio_off = rt2x00_dev_radio_off, + .dev_update_config = rt2x00_dev_update_config, + .dev_register_access = rt2x00_dev_register_access, + .dev_xmit_packet = rt2x00_dev_xmit_packet, +}; + +int rt2x00_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) +{ + struct rtnet_device *rtnet_dev = NULL; + int status = 0x00000000; + + DEBUG("start.\n"); + + if (id->driver_data != RT2560) { + ERROR("detected device not supported.\n"); + status = -ENODEV; + goto exit; + } + + if (pci_enable_device(pci_dev)) { + ERROR("enable device failed.\n"); + status = -EIO; + goto exit; + } + + pci_set_master(pci_dev); + + if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64)) && + dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { + ERROR("PCI DMA not supported\n"); + status = -EIO; + goto exit_disable_device; + } + + if (pci_request_regions(pci_dev, pci_name(pci_dev))) { + ERROR("PCI request regions failed.\n"); + status = -EBUSY; + goto exit_disable_device; + } + INFO("pci_dev->irq=%d\n", pci_dev->irq); + + rtnet_dev = rt2x00_core_probe(&rt2x00_pci_handler, pci_dev, + sizeof(struct _rt2x00_pci)); + + if (!rtnet_dev) { + ERROR("rtnet_device allocation failed.\n"); + status = -ENOMEM; + goto exit_release_regions; + } + + rtnet_dev->irq = pci_dev->irq; + + pci_set_drvdata(pci_dev, rtnet_dev); + + return 0; + +exit_release_regions: + pci_release_regions(pci_dev); + +exit_disable_device: + if (status != -EBUSY) + pci_disable_device(pci_dev); + +exit: + return status; +} + +static void rt2x00_pci_remove(struct pci_dev *pci_dev) +{ + struct rtnet_device *rtnet_dev = pci_get_drvdata(pci_dev); + + rt2x00_core_remove(rtnet_dev); + pci_set_drvdata(pci_dev, NULL); + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); +} + +/* + * RT2500 PCI module information. 
+ */ +char version[] = DRV_NAME " - " DRV_VERSION; + +struct pci_device_id rt2x00_device_pci_tbl[] = { + { PCI_DEVICE(0x1814, 0x0201), + .driver_data = RT2560 }, /* Ralink 802.11g */ + { + 0, + } +}; + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION("RTnet rt2500 PCI WLAN driver (PCI Module)"); +MODULE_LICENSE("GPL"); + +struct pci_driver rt2x00_pci_driver = { + .name = DRV_NAME, + .id_table = rt2x00_device_pci_tbl, + .probe = rt2x00_pci_probe, + .remove = rt2x00_pci_remove, +}; + +static int __init rt2x00_pci_init(void) +{ + rtdm_printk(KERN_INFO "Loading module: %s\n", version); + return pci_register_driver(&rt2x00_pci_driver); +} + +static void __exit rt2x00_pci_exit(void) +{ + rtdm_printk(KERN_INFO "Unloading module: %s\n", version); + pci_unregister_driver(&rt2x00_pci_driver); +} + +module_init(rt2x00_pci_init); +module_exit(rt2x00_pci_exit); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h new file mode 100644 index 0000000..60e744b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h @@ -0,0 +1,1498 @@ +/* rt2500pci.h + * + * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project + * <http://rt2x00.serialmonkey.com> + * 2006 rtnet adaption by Daniel Gregorek + * <dxg@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + * Module: rt2500pci + * Abstract: Data structures and registers for the rt2500pci module. + * Supported chipsets: RT2560. + */ + +#ifndef RT2500PCI_H +#define RT2500PCI_H + +/* + * RT chip defines + */ +#define RT2560 0x0201 + +/* + * RF chip defines + */ +#define RF2522 0x0200 +#define RF2523 0x0201 +#define RF2524 0x0202 +#define RF2525 0x0203 +#define RF2525E 0x0204 +#define RF5222 0x0210 + +/* + * Control/Status Registers(CSR). + */ +#define CSR0 0x0000 /* ASIC revision number. */ +#define CSR1 0x0004 /* System control register. */ +#define CSR2 0x0008 /* System admin status register (invalid). */ +#define CSR3 0x000c /* STA MAC address register 0. */ +#define CSR4 0x0010 /* STA MAC address register 1. */ +#define CSR5 0x0014 /* BSSID register 0. */ +#define CSR6 0x0018 /* BSSID register 1. */ +#define CSR7 0x001c /* Interrupt source register. */ +#define CSR8 0x0020 /* Interrupt mask register. */ +#define CSR9 0x0024 /* Maximum frame length register. */ +#define SECCSR0 0x0028 /* WEP control register. */ +#define CSR11 0x002c /* Back-off control register. */ +#define CSR12 0x0030 /* Synchronization configuration register 0. */ +#define CSR13 0x0034 /* Synchronization configuration register 1. */ +#define CSR14 0x0038 /* Synchronization control register. */ +#define CSR15 0x003c /* Synchronization status register. */ +#define CSR16 0x0040 /* TSF timer register 0. 
*/ +#define CSR17 0x0044 /* TSF timer register 1. */ +#define CSR18 0x0048 /* IFS timer register 0. */ +#define CSR19 0x004c /* IFS timer register 1. */ +#define CSR20 0x0050 /* WakeUp register. */ +#define CSR21 0x0054 /* EEPROM control register. */ +#define CSR22 0x0058 /* CFP Control Register. */ + +/* + * Transmit related CSRs. + */ +#define TXCSR0 0x0060 /* TX control register. */ +#define TXCSR1 0x0064 /* TX configuration register. */ +#define TXCSR2 0x0068 /* TX descriptor configuratioon register. */ +#define TXCSR3 0x006c /* TX Ring Base address register. */ +#define TXCSR4 0x0070 /* TX Atim Ring Base address register. */ +#define TXCSR5 0x0074 /* TX Prio Ring Base address register. */ +#define TXCSR6 0x0078 /* Beacon base address. */ +#define TXCSR7 0x007c /* AutoResponder Control Register. */ +#define TXCSR8 0x0098 /* CCK TX BBP registers. */ +#define TXCSR9 0x0094 /* OFDM TX BBP registers. */ + +/* + * Receive related CSRs. + */ +#define RXCSR0 0x0080 /* RX control register. */ +#define RXCSR1 0x0084 /* RX descriptor configuration register. */ +#define RXCSR2 0x0088 /* RX Ring base address register. */ +#define RXCSR3 0x0090 /* BBP ID register 0 */ +#define ARCSR1 0x009c /* Auto Responder PLCP config register 1. */ + +/* + * PCI control CSRs. + */ +#define PCICSR 0x008c /* PCI control register. */ + +/* + * Statistic Register. + */ +#define CNT0 0x00a0 /* FCS error count. */ +#define TIMECSR2 0x00a8 +#define CNT1 0x00ac /* PLCP error count. */ +#define CNT2 0x00b0 /* long error count. */ +#define TIMECSR3 0x00b4 +#define CNT3 0x00b8 /* CCA false alarm count. */ +#define CNT4 0x00bc /* Rx FIFO overflow count. */ +#define CNT5 0x00c0 /* Tx FIFO underrun count. */ + +/* + * Baseband Control Register. + */ +#define PWRCSR0 0x00c4 /* Power mode configuration. */ +#define PSCSR0 0x00c8 /* Power state transition time. */ +#define PSCSR1 0x00cc /* Power state transition time. */ +#define PSCSR2 0x00d0 /* Power state transition time. */ +#define PSCSR3 0x00d4 /* Power state transition time. */ +#define PWRCSR1 0x00d8 /* Manual power control / status. */ +#define TIMECSR 0x00dc /* Timer control. */ +#define MACCSR0 0x00e0 /* MAC configuration. */ +#define MACCSR1 0x00e4 /* MAC configuration. */ +#define RALINKCSR 0x00e8 /* Ralink Auto-reset register. */ +#define BCNCSR 0x00ec /* Beacon interval control register. */ + +/* + * BBP / RF / IF Control Register. + */ +#define BBPCSR 0x00f0 /* BBP serial control. */ +#define RFCSR 0x00f4 /* RF serial control. */ +#define LEDCSR 0x00f8 /* LED control register */ + +#define SECCSR3 0x00fc /* AES control register. */ + +/* + * ASIC pointer information. + */ +#define RXPTR 0x0100 /* Current RX ring address. */ +#define TXPTR 0x0104 /* Current Tx ring address. */ +#define PRIPTR 0x0108 /* Current Priority ring address. */ +#define ATIMPTR 0x010c /* Current ATIM ring address. */ + +#define TXACKCSR0 0x0110 /* TX ACK timeout. */ +#define ACKCNT0 0x0114 /* TX ACK timeout count. */ +#define ACKCNT1 0x0118 /* RX ACK timeout count. */ + +/* + * GPIO and others. + */ +#define GPIOCSR 0x0120 /* GPIO. */ +#define FIFOCSR0 0x0128 /* TX FIFO pointer. */ +#define FIFOCSR1 0x012c /* RX FIFO pointer. */ +#define BCNCSR1 0x0130 /* Tx BEACON offset time, unit: 1 usec. */ +#define MACCSR2 0x0134 /* TX_PE to RX_PE delay time, unit: 1 PCI clock cycle. */ +#define TESTCSR 0x0138 /* TEST mode selection register. */ +#define ARCSR2 0x013c /* 1 Mbps ACK/CTS PLCP. */ +#define ARCSR3 0x0140 /* 2 Mbps ACK/CTS PLCP. */ +#define ARCSR4 0x0144 /* 5.5 Mbps ACK/CTS PLCP. 
*/ +#define ARCSR5 0x0148 /* 11 Mbps ACK/CTS PLCP. */ +#define ARTCSR0 0x014c /* ACK/CTS payload consumed time for 1/2/5.5/11 mbps. */ +#define ARTCSR1 \ + 0x0150 /* OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. */ +#define ARTCSR2 \ + 0x0154 /* OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. */ +#define SECCSR1 0x0158 /* WEP control register. */ +#define BBPCSR1 0x015c /* BBP TX configuration. */ +#define DBANDCSR0 0x0160 /* Dual band configuration register 0. */ +#define DBANDCSR1 0x0164 /* Dual band configuration register 1. */ +#define BBPPCSR 0x0168 /* BBP Pin control register. */ +#define DBGSEL0 0x016c /* MAC special debug mode selection register 0. */ +#define DBGSEL1 0x0170 /* MAC special debug mode selection register 1. */ +#define BISTCSR 0x0174 /* BBP BIST register. */ +#define MCAST0 0x0178 /* multicast filter register 0. */ +#define MCAST1 0x017c /* multicast filter register 1. */ +#define UARTCSR0 0x0180 /* UART1 TX register. */ +#define UARTCSR1 0x0184 /* UART1 RX register. */ +#define UARTCSR3 0x0188 /* UART1 frame control register. */ +#define UARTCSR4 0x018c /* UART1 buffer control register. */ +#define UART2CSR0 0x0190 /* UART2 TX register. */ +#define UART2CSR1 0x0194 /* UART2 RX register. */ +#define UART2CSR3 0x0198 /* UART2 frame control register. */ +#define UART2CSR4 0x019c /* UART2 buffer control register. */ + +/* + * EEPROM addresses + */ +#define EEPROM_ANTENNA 0x10 +#define EEPROM_GEOGRAPHY 0x12 +#define EEPROM_BBP_START 0x13 +#define EEPROM_BBP_END 0x22 + +#define EEPROM_BBP_SIZE 16 + +/* + * CSR Registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * CSR1: System control register. + */ +#define CSR1_SOFT_RESET \ + FIELD32(0, 0x00000001) /* Software reset, 1: reset, 0: normal. */ +#define CSR1_BBP_RESET \ + FIELD32(1, 0x00000002) /* Hardware reset, 1: reset, 0, release. */ +#define CSR1_HOST_READY \ + FIELD32(2, 0x00000004) /* Host ready after initialization. */ + +/* + * CSR3: STA MAC address register 0. + */ +#define CSR3_BYTE0 FIELD32(0, 0x000000ff) /* MAC address byte 0. */ +#define CSR3_BYTE1 FIELD32(8, 0x0000ff00) /* MAC address byte 1. */ +#define CSR3_BYTE2 FIELD32(16, 0x00ff0000) /* MAC address byte 2. */ +#define CSR3_BYTE3 FIELD32(24, 0xff000000) /* MAC address byte 3. */ + +/* + * CSR4: STA MAC address register 1. + */ +#define CSR4_BYTE4 FIELD32(0, 0x000000ff) /* MAC address byte 4. */ +#define CSR4_BYTE5 FIELD32(8, 0x0000ff00) /* MAC address byte 5. */ + +/* + * CSR5: BSSID register 0. + */ +#define CSR5_BYTE0 FIELD32(0, 0x000000ff) /* BSSID address byte 0. */ +#define CSR5_BYTE1 FIELD32(8, 0x0000ff00) /* BSSID address byte 1. */ +#define CSR5_BYTE2 FIELD32(16, 0x00ff0000) /* BSSID address byte 2. */ +#define CSR5_BYTE3 FIELD32(24, 0xff000000) /* BSSID address byte 3. */ + +/* + * CSR6: BSSID register 1. + */ +#define CSR6_BYTE4 FIELD32(0, 0x000000ff) /* BSSID address byte 4. */ +#define CSR6_BYTE5 FIELD32(8, 0x0000ff00) /* BSSID address byte 5. */ + +/* + * CSR7: Interrupt source register. + * Write 1 to clear. + */ +#define CSR7_TBCN_EXPIRE \ + FIELD32(0, 0x00000001) /* beacon timer expired interrupt. */ +#define CSR7_TWAKE_EXPIRE \ + FIELD32(1, 0x00000002) /* wakeup timer expired interrupt. */ +#define CSR7_TATIMW_EXPIRE \ + FIELD32(2, 0x00000004) /* timer of atim window expired interrupt. */ +#define CSR7_TXDONE_TXRING \ + FIELD32(3, 0x00000008) /* tx ring transmit done interrupt. */ +#define CSR7_TXDONE_ATIMRING \ + FIELD32(4, 0x00000010) /* atim ring transmit done interrupt. 
*/ +#define CSR7_TXDONE_PRIORING \ + FIELD32(5, 0x00000020) /* priority ring transmit done interrupt. */ +#define CSR7_RXDONE FIELD32(6, 0x00000040) /* receive done interrupt. */ +#define CSR7_DECRYPTION_DONE \ + FIELD32(7, 0x00000080) /* Decryption done interrupt. */ +#define CSR7_ENCRYPTION_DONE \ + FIELD32(8, 0x00000100) /* Encryption done interrupt. */ +#define CSR7_UART1_TX_TRESHOLD \ + FIELD32(9, 0x00000200) /* UART1 TX reaches threshold. */ +#define CSR7_UART1_RX_TRESHOLD \ + FIELD32(10, 0x00000400) /* UART1 RX reaches threshold. */ +#define CSR7_UART1_IDLE_TRESHOLD \ + FIELD32(11, 0x00000800) /* UART1 IDLE over threshold. */ +#define CSR7_UART1_TX_BUFF_ERROR \ + FIELD32(12, 0x00001000) /* UART1 TX buffer error. */ +#define CSR7_UART1_RX_BUFF_ERROR \ + FIELD32(13, 0x00002000) /* UART1 RX buffer error. */ +#define CSR7_UART2_TX_TRESHOLD \ + FIELD32(14, 0x00004000) /* UART2 TX reaches threshold. */ +#define CSR7_UART2_RX_TRESHOLD \ + FIELD32(15, 0x00008000) /* UART2 RX reaches threshold. */ +#define CSR7_UART2_IDLE_TRESHOLD \ + FIELD32(16, 0x00010000) /* UART2 IDLE over threshold. */ +#define CSR7_UART2_TX_BUFF_ERROR \ + FIELD32(17, 0x00020000) /* UART2 TX buffer error. */ +#define CSR7_UART2_RX_BUFF_ERROR \ + FIELD32(18, 0x00040000) /* UART2 RX buffer error. */ +#define CSR7_TIMER_CSR3_EXPIRE \ + FIELD32(19, \ + 0x00080000) /* TIMECSR3 timer expired (802.1H quiet period). */ + +/* + * CSR8: Interrupt mask register. + * Write 1 to mask interrupt. + */ +#define CSR8_TBCN_EXPIRE \ + FIELD32(0, 0x00000001) /* beacon timer expired interrupt. */ +#define CSR8_TWAKE_EXPIRE \ + FIELD32(1, 0x00000002) /* wakeup timer expired interrupt. */ +#define CSR8_TATIMW_EXPIRE \ + FIELD32(2, 0x00000004) /* timer of atim window expired interrupt. */ +#define CSR8_TXDONE_TXRING \ + FIELD32(3, 0x00000008) /* tx ring transmit done interrupt. */ +#define CSR8_TXDONE_ATIMRING \ + FIELD32(4, 0x00000010) /* atim ring transmit done interrupt. */ +#define CSR8_TXDONE_PRIORING \ + FIELD32(5, 0x00000020) /* priority ring transmit done interrupt. */ +#define CSR8_RXDONE FIELD32(6, 0x00000040) /* receive done interrupt. */ +#define CSR8_DECRYPTION_DONE \ + FIELD32(7, 0x00000080) /* Decryption done interrupt. */ +#define CSR8_ENCRYPTION_DONE \ + FIELD32(8, 0x00000100) /* Encryption done interrupt. */ +#define CSR8_UART1_TX_TRESHOLD \ + FIELD32(9, 0x00000200) /* UART1 TX reaches threshold. */ +#define CSR8_UART1_RX_TRESHOLD \ + FIELD32(10, 0x00000400) /* UART1 RX reaches threshold. */ +#define CSR8_UART1_IDLE_TRESHOLD \ + FIELD32(11, 0x00000800) /* UART1 IDLE over threshold. */ +#define CSR8_UART1_TX_BUFF_ERROR \ + FIELD32(12, 0x00001000) /* UART1 TX buffer error. */ +#define CSR8_UART1_RX_BUFF_ERROR \ + FIELD32(13, 0x00002000) /* UART1 RX buffer error. */ +#define CSR8_UART2_TX_TRESHOLD \ + FIELD32(14, 0x00004000) /* UART2 TX reaches threshold. */ +#define CSR8_UART2_RX_TRESHOLD \ + FIELD32(15, 0x00008000) /* UART2 RX reaches threshold. */ +#define CSR8_UART2_IDLE_TRESHOLD \ + FIELD32(16, 0x00010000) /* UART2 IDLE over threshold. */ +#define CSR8_UART2_TX_BUFF_ERROR \ + FIELD32(17, 0x00020000) /* UART2 TX buffer error. */ +#define CSR8_UART2_RX_BUFF_ERROR \ + FIELD32(18, 0x00040000) /* UART2 RX buffer error. */ +#define CSR8_TIMER_CSR3_EXPIRE \ + FIELD32(19, \ + 0x00080000) /* TIMECSR3 timer expired (802.1H quiet period). */ + +/* + * CSR9: Maximum frame length register. + */ +#define CSR9_MAX_FRAME_UNIT \ + FIELD32(7, \ + 0x00000f80) /* maximum frame length in 128b unit, default: 12. 
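+                     */
+
+/*
+ * Illustration (not part of the original header): the FIELD32(shift, mask)
+ * pairs in this file are consumed by rt2x00_get_field32() and
+ * rt2x00_set_field32(). A plausible implementation, assuming FIELD32
+ * simply packs a bit offset and a contiguous mask, so that e.g.
+ * setting CSR9_MAX_FRAME_UNIT to 12 places 12 into bits 7..11 of CSR9:
+ */
+#if 0
+static inline u32 example_get_field32(u32 reg, u32 shift, u32 mask)
+{
+    return (reg & mask) >> shift;
+}
+
+static inline void example_set_field32(u32 *reg, u32 shift, u32 mask,
+                                       u32 value)
+{
+    *reg = (*reg & ~mask) | ((value << shift) & mask);
+}
+#endif
+/* The register map continues here.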
*/ + +/* + * SECCSR0: WEP control register. + */ +#define SECCSR0_KICK_DECRYPT \ + FIELD32(0, 0x00000001) /* Kick decryption engine, self-clear. */ +#define SECCSR0_ONE_SHOT \ + FIELD32(1, 0x00000002) /* 0: ring mode, 1: One shot only mode. */ +#define SECCSR0_DESC_ADDRESS \ + FIELD32(2, 0xfffffffc) /* Descriptor physical address of frame. */ + +/* + * CSR11: Back-off control register. + */ +#define CSR11_CWMIN \ + FIELD32(0, 0x0000000f) /* CWmin. Default cwmin is 31 (2^5 - 1). */ +#define CSR11_CWMAX \ + FIELD32(4, 0x000000f0) /* CWmax. Default cwmax is 1023 (2^10 - 1). */ +#define CSR11_SLOT_TIME \ + FIELD32(8, 0x00001f00) /* slot time, default is 20us for 802.11b */ +#define CSR11_CW_SELECT \ + FIELD32(13, \ + 0x00002000) /* CWmin/CWmax selection, 1: Register, 0: TXD. */ +#define CSR11_LONG_RETRY FIELD32(16, 0x00ff0000) /* long retry count. */ +#define CSR11_SHORT_RETRY FIELD32(24, 0xff000000) /* short retry count. */ + +/* + * CSR12: Synchronization configuration register 0. + * All units in 1/16 TU. + */ +#define CSR12_BEACON_INTERVAL \ + FIELD32(0, 0x0000ffff) /* beacon interval, default is 100 TU. */ +#define CSR12_CFPMAX_DURATION \ + FIELD32(16, 0xffff0000) /* cfp maximum duration, default is 100 TU. */ + +/* + * CSR13: Synchronization configuration register 1. + * All units in 1/16 TU. + */ +#define CSR13_ATIMW_DURATION FIELD32(0, 0x0000ffff) /* atim window duration. */ +#define CSR13_CFP_PERIOD \ + FIELD32(16, 0x00ff0000) /* cfp period, default is 0 TU. */ + +/* + * CSR14: Synchronization control register. + */ +#define CSR14_TSF_COUNT FIELD32(0, 0x00000001) /* enable tsf auto counting. */ +#define CSR14_TSF_SYNC \ + FIELD32(1, \ + 0x00000006) /* tsf sync, 0: disable, 1: infra, 2: ad-hoc mode. */ +#define CSR14_TBCN FIELD32(3, 0x00000008) /* enable tbcn with reload value. */ +#define CSR14_TCFP \ + FIELD32(4, 0x00000010) /* enable tcfp & cfp / cp switching. */ +#define CSR14_TATIMW \ + FIELD32(5, 0x00000020) /* enable tatimw & atim window switching. */ +#define CSR14_BEACON_GEN FIELD32(6, 0x00000040) /* enable beacon generator. */ +#define CSR14_CFP_COUNT_PRELOAD \ + FIELD32(8, 0x0000ff00) /* cfp count preload value. */ +#define CSR14_TBCM_PRELOAD \ + FIELD32(16, 0xffff0000) /* tbcn preload value in units of 64us. */ + +/* + * CSR15: Synchronization status register. + */ +#define CSR15_CFP \ + FIELD32(0, 0x00000001) /* ASIC is in contention-free period. */ +#define CSR15_ATIMW FIELD32(1, 0x00000002) /* ASIC is in ATIM window. */ +#define CSR15_BEACON_SENT FIELD32(2, 0x00000004) /* Beacon is send. */ + +/* + * CSR16: TSF timer register 0. + */ +#define CSR16_LOW_TSFTIMER FIELD32(0, 0xffffffff) + +/* + * CSR17: TSF timer register 1. + */ +#define CSR17_HIGH_TSFTIMER FIELD32(0, 0xffffffff) + +/* + * CSR18: IFS timer register 0. + */ +#define CSR18_SIFS FIELD32(0, 0x000001ff) /* sifs, default is 10 us. */ +#define CSR18_PIFS FIELD32(16, 0x01f00000) /* pifs, default is 30 us. */ + +/* + * CSR19: IFS timer register 1. + */ +#define CSR19_DIFS FIELD32(0, 0x0000ffff) /* difs, default is 50 us. */ +#define CSR19_EIFS FIELD32(16, 0xffff0000) /* eifs, default is 364 us. */ + +/* + * CSR20: Wakeup timer register. + */ +#define CSR20_DELAY_AFTER_TBCN \ + FIELD32(0, \ + 0x0000ffff) /* delay after tbcn expired in units of 1/16 TU. */ +#define CSR20_TBCN_BEFORE_WAKEUP \ + FIELD32(16, 0x00ff0000) /* number of beacon before wakeup. */ +#define CSR20_AUTOWAKE \ + FIELD32(24, 0x01000000) /* enable auto wakeup / sleep mechanism. */ + +/* + * CSR21: EEPROM control register. 
+ */ +#define CSR21_RELOAD \ + FIELD32(0, 0x00000001) /* Write 1 to reload eeprom content. */ +#define CSR21_EEPROM_DATA_CLOCK FIELD32(1, 0x00000002) +#define CSR21_EEPROM_CHIP_SELECT FIELD32(2, 0x00000004) +#define CSR21_EEPROM_DATA_IN FIELD32(3, 0x00000008) +#define CSR21_EEPROM_DATA_OUT FIELD32(4, 0x00000010) +#define CSR21_TYPE_93C46 FIELD32(5, 0x00000020) /* 1: 93c46, 0:93c66. */ + +/* + * CSR22: CFP control register. + */ +#define CSR22_CFP_DURATION_REMAIN \ + FIELD32(0, 0x0000ffff) /* cfp duration remain, in units of TU. */ +#define CSR22_RELOAD_CFP_DURATION \ + FIELD32(16, 0x00010000) /* Write 1 to reload cfp duration remain. */ + +/* + * TX / RX Registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXCSR0: TX Control Register. + */ +#define TXCSR0_KICK_TX FIELD32(0, 0x00000001) /* kick tx ring. */ +#define TXCSR0_KICK_ATIM FIELD32(1, 0x00000002) /* kick atim ring. */ +#define TXCSR0_KICK_PRIO FIELD32(2, 0x00000004) /* kick priority ring. */ +#define TXCSR0_ABORT \ + FIELD32(3, 0x00000008) /* abort all transmit related ring operation. */ + +/* + * TXCSR1: TX Configuration Register. + */ +#define TXCSR1_ACK_TIMEOUT \ + FIELD32(0, \ + 0x000001ff) /* ack timeout, default = sifs + 2*slottime + acktime @ 1mbps. */ +#define TXCSR1_ACK_CONSUME_TIME \ + FIELD32(9, \ + 0x0003fe00) /* ack consume time, default = sifs + acktime @ 1mbps. */ +#define TXCSR1_TSF_OFFSET FIELD32(18, 0x00fc0000) /* insert tsf offset. */ +#define TXCSR1_AUTORESPONDER \ + FIELD32(24, \ + 0x01000000) /* enable auto responder which include ack & cts. */ + +/* + * TXCSR2: Tx descriptor configuration register. + */ +#define TXCSR2_TXD_SIZE \ + FIELD32(0, 0x000000ff) /* tx descriptor size, default is 48. */ +#define TXCSR2_NUM_TXD FIELD32(8, 0x0000ff00) /* number of txd in ring. */ +#define TXCSR2_NUM_ATIM FIELD32(16, 0x00ff0000) /* number of atim in ring. */ +#define TXCSR2_NUM_PRIO \ + FIELD32(24, 0xff000000) /* number of priority in ring. */ + +/* + * TXCSR3: TX Ring Base address register. + */ +#define TXCSR3_TX_RING_REGISTER FIELD32(0, 0xffffffff) + +/* + * TXCSR4: TX Atim Ring Base address register. + */ +#define TXCSR4_ATIM_RING_REGISTER FIELD32(0, 0xffffffff) + +/* + * TXCSR5: TX Prio Ring Base address register. + */ +#define TXCSR5_PRIO_RING_REGISTER FIELD32(0, 0xffffffff) + +/* + * TXCSR6: Beacon Base address register. + */ +#define TXCSR6_BEACON_REGISTER FIELD32(0, 0xffffffff) + +/* + * TXCSR7: Auto responder control register. + */ +#define TXCSR7_AR_POWERMANAGEMENT \ + FIELD32(0, 0x00000001) /* auto responder power management bit. */ + +/* + * TXCSR8: CCK Tx BBP register. + */ +#define TXCSR8_CCK_SIGNAL \ + FIELD32(0, 0x000000ff) /* BBP rate field address for CCK. */ +#define TXCSR8_CCK_SERVICE \ + FIELD32(8, 0x0000ff00) /* BBP service field address for CCK. */ +#define TXCSR8_CCK_LENGTH_LOW \ + FIELD32(16, 0x00ff0000) /* BBP length low byte address for CCK. */ +#define TXCSR8_CCK_LENGTH_HIGH \ + FIELD32(24, 0xff000000) /* BBP length high byte address for CCK. */ + +/* + * TXCSR9: OFDM TX BBP registers + */ +#define TXCSR9_OFDM_RATE \ + FIELD32(0, 0x000000ff) /* BBP rate field address for OFDM. */ +#define TXCSR9_OFDM_SERVICE \ + FIELD32(8, 0x0000ff00) /* BBP service field address for OFDM. */ +#define TXCSR9_OFDM_LENGTH_LOW \ + FIELD32(16, 0x00ff0000) /* BBP length low byte address for OFDM. */ +#define TXCSR9_OFDM_LENGTH_HIGH \ + FIELD32(24, 0xff000000) /* BBP length high byte address for OFDM. */ + +/* + * RXCSR0: RX Control Register. 
+ */
+#define RXCSR0_DISABLE_RX FIELD32(0, 0x00000001) /* disable rx engine. */
+#define RXCSR0_DROP_CRC FIELD32(1, 0x00000002) /* drop crc error. */
+#define RXCSR0_DROP_PHYSICAL FIELD32(2, 0x00000004) /* drop physical error. */
+#define RXCSR0_DROP_CONTROL FIELD32(3, 0x00000008) /* drop control frame. */
+#define RXCSR0_DROP_NOT_TO_ME \
+	FIELD32(4, 0x00000010) /* drop not to me unicast frame. */
+#define RXCSR0_DROP_TODS \
+	FIELD32(5, 0x00000020) /* drop frame whose tods bit is true. */
+#define RXCSR0_DROP_VERSION_ERROR \
+	FIELD32(6, 0x00000040) /* drop version error frame. */
+#define RXCSR0_PASS_CRC \
+	FIELD32(7, 0x00000080) /* pass all packets with crc attached. */
+#define RXCSR0_PASS_PLCP \
+	FIELD32(8, \
+		0x00000100) /* Pass all packets with 4 bytes PLCP attached. */
+#define RXCSR0_DROP_MCAST FIELD32(9, 0x00000200) /* Drop multicast frames. */
+#define RXCSR0_DROP_BCAST FIELD32(10, 0x00000400) /* Drop broadcast frames. */
+#define RXCSR0_ENABLE_QOS \
+	FIELD32(11, 0x00000800) /* Accept QOS data frame and parse QOS field. */
+
+/*
+ * RXCSR1: RX descriptor configuration register.
+ */
+#define RXCSR1_RXD_SIZE \
+	FIELD32(0, 0x000000ff) /* rx descriptor size, default is 32 bytes. */
+#define RXCSR1_NUM_RXD FIELD32(8, 0x0000ff00) /* number of rxd in ring. */
+
+/*
+ * RXCSR2: RX Ring base address register.
+ */
+#define RXCSR2_RX_RING_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * RXCSR3: BBP ID register for Rx operation.
+ */
+#define RXCSR3_BBP_ID0 FIELD32(0, 0x0000007f) /* bbp register 0 id. */
+#define RXCSR3_BBP_ID0_VALID \
+	FIELD32(7, 0x00000080) /* bbp register 0 id is valid or not. */
+#define RXCSR3_BBP_ID1 FIELD32(8, 0x00007f00) /* bbp register 1 id. */
+#define RXCSR3_BBP_ID1_VALID \
+	FIELD32(15, 0x00008000) /* bbp register 1 id is valid or not. */
+#define RXCSR3_BBP_ID2 FIELD32(16, 0x007f0000) /* bbp register 2 id. */
+#define RXCSR3_BBP_ID2_VALID \
+	FIELD32(23, 0x00800000) /* bbp register 2 id is valid or not. */
+#define RXCSR3_BBP_ID3 FIELD32(24, 0x7f000000) /* bbp register 3 id. */
+#define RXCSR3_BBP_ID3_VALID \
+	FIELD32(31, 0x80000000) /* bbp register 3 id is valid or not. */
+
+/*
+ * ARCSR1: Auto Responder PLCP config register 1.
+ */
+#define ARCSR1_AR_BBP_DATA2 \
+	FIELD32(0, 0x000000ff) /* Auto responder BBP register 2 data. */
+#define ARCSR1_AR_BBP_ID2 \
+	FIELD32(8, 0x0000ff00) /* Auto responder BBP register 2 Id. */
+#define ARCSR1_AR_BBP_DATA3 \
+	FIELD32(16, 0x00ff0000) /* Auto responder BBP register 3 data. */
+#define ARCSR1_AR_BBP_ID3 \
+	FIELD32(24, 0xff000000) /* Auto responder BBP register 3 Id. */
+
+/*
+ * Miscellaneous Registers.
+ * Some values are set in TU, whereas 1 TU == 1024 us.
+ */
+
+/*
+ * PCICSR: PCI control register.
+ */
+#define PCICSR_BIG_ENDIAN \
+	FIELD32(0, 0x00000001) /* 1: big endian, 0: little endian. */
+#define PCICSR_RX_TRESHOLD \
+	FIELD32(1, 0x00000006) /* rx threshold in dw to start pci access */
+/* 0: 16dw (default), 1: 8dw, 2: 4dw, 3: 32dw. */
+#define PCICSR_TX_TRESHOLD \
+	FIELD32(3, 0x00000018) /* tx threshold in dw to start pci access */
+/* 0: 0dw (default), 1: 1dw, 2: 4dw, 3: forward. */
+#define PCICSR_BURST_LENTH FIELD32(5, 0x00000060) /* pci burst length */
+/* 0: 4dw (default), 1: 8dw, 2: 16dw, 3: 32dw. */
+#define PCICSR_ENABLE_CLK FIELD32(7, 0x00000080) /* enable clk_run, */
+/* the pci clock cannot drop to the non-operational state. */
+#define PCICSR_READ_MULTIPLE \
+	FIELD32(8, 0x00000100) /* Enable memory read multiple. */
+#define PCICSR_WRITE_INVALID \
+	FIELD32(9, 0x00000200) /* Enable memory write and invalidate. 
*/ + +/* + * PWRCSR1: Manual power control / status register. + * state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake. + */ +#define PWRCSR1_SET_STATE \ + FIELD32(0, \ + 0x00000001) /* set state. Write 1 to trigger, self cleared. */ +#define PWRCSR1_BBP_DESIRE_STATE FIELD32(1, 0x00000006) /* BBP desired state. */ +#define PWRCSR1_RF_DESIRE_STATE FIELD32(3, 0x00000018) /* RF desired state. */ +#define PWRCSR1_BBP_CURR_STATE FIELD32(5, 0x00000060) /* BBP current state. */ +#define PWRCSR1_RF_CURR_STATE FIELD32(7, 0x00000180) /* RF current state. */ +#define PWRCSR1_PUT_TO_SLEEP \ + FIELD32(9, \ + 0x00000200) /* put to sleep. Write 1 to trigger, self cleared. */ + +/* + * TIMECSR: Timer control register. + */ +#define TIMECSR_US_COUNT \ + FIELD32(0, 0x000000ff) /* 1 us timer count in units of clock cycles. */ +#define TIMECSR_US_64_COUNT \ + FIELD32(8, 0x0000ff00) /* 64 us timer count in units of 1 us timer. */ +#define TIMECSR_BEACON_EXPECT \ + FIELD32(16, 0x00070000) /* Beacon expect window. */ + +/* + * MACCSR1: MAC configuration register 1. + */ +#define MACCSR1_KICK_RX \ + FIELD32(0, 0x00000001) /* kick one-shot rx in one-shot rx mode. */ +#define MACCSR1_ONESHOT_RXMODE \ + FIELD32(1, 0x00000002) /* enable one-shot rx mode for debugging. */ +#define MACCSR1_BBPRX_RESET_MODE \ + FIELD32(2, 0x00000004) /* ralink bbp rx reset mode. */ +#define MACCSR1_AUTO_TXBBP \ + FIELD32(3, 0x00000008) /* auto tx logic access bbp control register. */ +#define MACCSR1_AUTO_RXBBP \ + FIELD32(4, 0x00000010) /* auto rx logic access bbp control register. */ +#define MACCSR1_LOOPBACK FIELD32(5, 0x00000060) /* loopback mode. */ +/* 0: normal, 1: internal, 2: external, 3:rsvd. */ +#define MACCSR1_INTERSIL_IF \ + FIELD32(7, 0x00000080) /* intersil if calibration pin. */ + +/* + * RALINKCSR: Ralink Rx auto-reset BBCR. + */ +#define RALINKCSR_AR_BBP_DATA0 \ + FIELD32(0, 0x000000ff) /* auto reset bbp register 0 data. */ +#define RALINKCSR_AR_BBP_ID0 \ + FIELD32(8, 0x00007f00) /* auto reset bbp register 0 id. */ +#define RALINKCSR_AR_BBP_VALID0 \ + FIELD32(15, 0x00008000) /* auto reset bbp register 0 valid. */ +#define RALINKCSR_AR_BBP_DATA1 \ + FIELD32(16, 0x00ff0000) /* auto reset bbp register 1 data. */ +#define RALINKCSR_AR_BBP_ID1 \ + FIELD32(24, 0x7f000000) /* auto reset bbp register 1 id. */ +#define RALINKCSR_AR_BBP_VALID1 \ + FIELD32(31, 0x80000000) /* auto reset bbp register 1 valid. */ + +/* + * BCNCSR: Beacon interval control register. + */ +#define BCNCSR_CHANGE \ + FIELD32(0, 0x00000001) /* write one to change beacon interval. */ +#define BCNCSR_DELTATIME FIELD32(1, 0x0000001e) /* the delta time value. */ +#define BCNCSR_NUM_BEACON \ + FIELD32(5, 0x00001fe0) /* number of beacon according to mode. */ +#define BCNCSR_MODE FIELD32(13, 0x00006000) /* please refer to asic specs. */ +#define BCNCSR_PLUS \ + FIELD32(15, 0x00008000) /* plus or minus delta time value. */ + +/* + * BBPCSR: BBP serial control register. + */ +#define BBPCSR_VALUE \ + FIELD32(0, 0x000000ff) /* register value to program into bbp. */ +#define BBPCSR_REGNUM FIELD32(8, 0x00007f00) /* selected bbp register. */ +#define BBPCSR_BUSY \ + FIELD32(15, 0x00008000) /* 1: asic is busy execute bbp programming. */ +#define BBPCSR_WRITE_CONTROL \ + FIELD32(16, 0x00010000) /* 1: write bbp, 0: read bbp. */ + +/* + * RFCSR: RF serial control register. + */ +#define RFCSR_VALUE \ + FIELD32(0, 0x00ffffff) /* register value + id to program into rf/if. 
*/ +#define RFCSR_NUMBER_OF_BITS \ + FIELD32(24, \ + 0x1f000000) /* number of bits used in value (i:20, rfmd:22). */ +#define RFCSR_IF_SELECT \ + FIELD32(29, 0x20000000) /* chip to program: 0: rf, 1: if. */ +#define RFCSR_PLL_LD FIELD32(30, 0x40000000) /* rf pll_ld status. */ +#define RFCSR_BUSY \ + FIELD32(31, 0x80000000) /* 1: asic is busy execute rf programming. */ + +/* + * LEDCSR: LED control register. + */ +#define LEDCSR_ON_PERIOD FIELD32(0, 0x000000ff) /* on period, default 70ms. */ +#define LEDCSR_OFF_PERIOD FIELD32(8, 0x0000ff00) /* off period, default 30ms. */ +#define LEDCSR_LINK FIELD32(16, 0x00010000) /* 0: linkoff, 1: linkup. */ +#define LEDCSR_ACTIVITY FIELD32(17, 0x00020000) /* 0: idle, 1: active. */ +#define LEDCSR_LINK_POLARITY \ + FIELD32(18, 0x00040000) /* 0: active low, 1: active high. */ +#define LEDCSR_ACTIVITY_POLARITY \ + FIELD32(19, 0x00080000) /* 0: active low, 1: active high. */ +#define LEDCSR_LED_DEFAULT \ + FIELD32(20, 0x00100000) /* LED state for "enable" 0: ON, 1: OFF. */ + +/* + * GPIOCSR: GPIO control register. + */ +#define GPIOCSR_BIT0 FIELD32(0, 0x00000001) +#define GPIOCSR_BIT1 FIELD32(1, 0x00000002) +#define GPIOCSR_BIT2 FIELD32(2, 0x00000004) +#define GPIOCSR_BIT3 FIELD32(3, 0x00000008) +#define GPIOCSR_BIT4 FIELD32(4, 0x00000010) +#define GPIOCSR_BIT5 FIELD32(5, 0x00000020) +#define GPIOCSR_BIT6 FIELD32(6, 0x00000040) +#define GPIOCSR_BIT7 FIELD32(7, 0x00000080) +#define GPIOCSR_DIR0 FIELD32(8, 0x00000100) +#define GPIOCSR_DIR1 FIELD32(9, 0x00000200) +#define GPIOCSR_DIR2 FIELD32(10, 0x00000400) +#define GPIOCSR_DIR3 FIELD32(11, 0x00000800) +#define GPIOCSR_DIR4 FIELD32(12, 0x00001000) +#define GPIOCSR_DIR5 FIELD32(13, 0x00002000) +#define GPIOCSR_DIR6 FIELD32(14, 0x00004000) +#define GPIOCSR_DIR7 FIELD32(15, 0x00008000) + +/* + * BCNCSR1: Tx BEACON offset time control register. + */ +#define BCNCSR1_PRELOAD \ + FIELD32(0, 0x0000ffff) /* beacon timer offset in units of usec. */ +#define BCNCSR1_BEACON_CWMIN FIELD32(16, 0x000f0000) /* 2^CwMin. */ + +/* + * MACCSR2: TX_PE to RX_PE turn-around time control register + */ +#define MACCSR2_DELAY \ + FIELD32(0, \ + 0x000000ff) /* RX_PE low width, in units of pci clock cycle. */ + +/* + * SECCSR1_RT2509: WEP control register + */ +#define SECCSR1_KICK_ENCRYPT \ + FIELD32(0, 0x00000001) /* Kick encryption engine, self-clear. */ +#define SECCSR1_ONE_SHOT \ + FIELD32(1, 0x00000002) /* 0: ring mode, 1: One shot only mode. */ +#define SECCSR1_DESC_ADDRESS \ + FIELD32(2, 0xfffffffc) /* Descriptor physical address of frame. */ + +/* + * RF registers + */ +#define RF1_TUNER FIELD32(17, 0x00020000) +#define RF3_TUNER FIELD32(8, 0x00000100) +#define RF3_TXPOWER FIELD32(9, 0x00003e00) + +/* + * EEPROM content format. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * EEPROM operation defines. + */ +#define EEPROM_WIDTH_93c46 6 +#define EEPROM_WIDTH_93c66 8 +#define EEPROM_WRITE_OPCODE 0x05 +#define EEPROM_READ_OPCODE 0x06 + +/* + * EEPROM antenna. + */ +#define EEPROM_ANTENNA_NUM FIELD16(0, 0x0003) /* Number of antenna's. */ +#define EEPROM_ANTENNA_TX_DEFAULT \ + FIELD16(2, 0x000c) /* Default antenna 0: diversity, 1: A, 2: B. */ +#define EEPROM_ANTENNA_RX_DEFAULT \ + FIELD16(4, 0x0030) /* Default antenna 0: diversity, 1: A, 2: B. */ +#define EEPROM_ANTENNA_LED_MODE \ + FIELD16(6, 0x01c0) /* 0: default, 1: TX/RX activity, */ +/* 2: Single LED (ignore link), 3: reserved. */ +#define EEPROM_ANTENNA_DYN_TXAGC \ + FIELD16(9, 0x0200) /* Dynamic TX AGC control. 
*/ +#define EEPROM_ANTENNA_HARDWARE_RADIO \ + FIELD16(10, 0x0400) /* 1: Hardware controlled radio. Read GPIO0. */ +#define EEPROM_ANTENNA_RF_TYPE \ + FIELD16(11, 0xf800) /* rf_type of this adapter. */ + +/* + * EEPROM geography. + */ +#define EEPROM_GEOGRAPHY_GEO \ + FIELD16(8, 0x0f00) /* Default geography setting for device. */ + +/* + * EEPROM NIC config. + */ +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0, 0x0001) /* 0: enable, 1: disable. */ +#define EEPROM_NIC_DYN_BBP_TUNE FIELD16(1, 0x0002) /* 0: enable, 1: disable. */ +#define EEPROM_NIC_CCK_TX_POWER \ + FIELD16(2, 0x000c) /* CCK TX power compensation. */ + +/* + * EEPROM TX power. + */ +#define EEPROM_TX_POWER1 FIELD16(0, 0x00ff) +#define EEPROM_TX_POWER2 FIELD16(8, 0xff00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_VALUE FIELD16(0, 0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(8, 0xff00) + +/* + * EEPROM VERSION. + */ +#define EEPROM_VERSION_FAE FIELD16(0, 0x00ff) /* FAE release number. */ +#define EEPROM_VERSION FIELD16(8, 0xff00) + +/* + * DMA ring defines and data structures. + */ + +/* + * Size of a single descriptor. + */ +#define SIZE_DESCRIPTOR 48 + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. + */ +struct _txd { + u32 word0; +#define TXD_W0_OWNER_NIC FIELD32(0, 0x00000001) +#define TXD_W0_VALID FIELD32(1, 0x00000002) +#define TXD_W0_RESULT FIELD32(2, 0x0000001c) /* Set by device. */ +#define TXD_W0_RETRY_COUNT FIELD32(5, 0x000000e0) /* Set by device. */ +#define TXD_W0_MORE_FRAG FIELD32(8, 0x00000100) /* Set by device. */ +#define TXD_W0_ACK FIELD32(9, 0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(10, 0x00000400) +#define TXD_W0_OFDM FIELD32(11, 0x00000800) +#define TXD_W0_CIPHER_OWNER FIELD32(12, 0x00001000) +#define TXD_W0_IFS FIELD32(13, 0x00006000) +#define TXD_W0_RETRY_MODE FIELD32(15, 0x00008000) +#define TXD_W0_DATABYTE_COUNT FIELD32(16, 0x0fff0000) +#define TXD_W0_CIPHER_ALG FIELD32(29, 0xe0000000) + + u32 word1; +#define TXD_W1_BUFFER_ADDRESS FIELD32(0, 0xffffffff) + + u32 word2; +#define TXD_W2_IV_OFFSET FIELD32(0, 0x0000003f) +#define TXD_W2_AIFS FIELD32(6, 0x000000c0) +#define TXD_W2_CWMIN FIELD32(8, 0x00000f00) +#define TXD_W2_CWMAX FIELD32(12, 0x0000f000) + + u32 word3; +#define TXD_W3_PLCP_SIGNAL FIELD32(0, 0x000000ff) +#define TXD_W3_PLCP_SERVICE FIELD32(8, 0x0000ff00) +#define TXD_W3_PLCP_LENGTH_LOW FIELD32(16, 0x00ff0000) +#define TXD_W3_PLCP_LENGTH_HIGH FIELD32(24, 0xff000000) + + u32 word4; +#define TXD_W4_IV FIELD32(0, 0xffffffff) + + u32 word5; +#define TXD_W5_EIV FIELD32(0, 0xffffffff) + + u32 word6; +#define TXD_W6_KEY FIELD32(0, 0xffffffff) + + u32 word7; +#define TXD_W7_KEY FIELD32(0, 0xffffffff) + + u32 word8; +#define TXD_W8_KEY FIELD32(0, 0xffffffff) + + u32 word9; +#define TXD_W9_KEY FIELD32(0, 0xffffffff) + + u32 word10; +#define TXD_W10_RTS FIELD32(0, 0x00000001) +#define TXD_W10_TX_RATE FIELD32(0, 0x000000fe) /* For module only. */ +} __attribute__((packed)); + +/* + * RX descriptor format for RX Ring. 
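+ *
+ * As with the TX descriptor, word0 carries the ownership and status
+ * bits. Following the usual convention here: the driver hands an entry
+ * to the hardware by setting RXD_W0_OWNER_NIC, and the device clears it
+ * again once the entry holds a received frame.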
+ */ +struct _rxd { + u32 word0; +#define RXD_W0_OWNER_NIC FIELD32(0, 0x00000001) +#define RXD_W0_UNICAST_TO_ME FIELD32(1, 0x00000002) +#define RXD_W0_MULTICAST FIELD32(2, 0x00000004) +#define RXD_W0_BROADCAST FIELD32(3, 0x00000008) +#define RXD_W0_MY_BSS FIELD32(4, 0x00000010) +#define RXD_W0_CRC FIELD32(5, 0x00000020) +#define RXD_W0_OFDM FIELD32(6, 0x00000040) +#define RXD_W0_PHYSICAL_ERROR FIELD32(7, 0x00000080) +#define RXD_W0_CIPHER_OWNER FIELD32(8, 0x00000100) +#define RXD_W0_ICV_ERROR FIELD32(9, 0x00000200) +#define RXD_W0_IV_OFFSET FIELD32(10, 0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(16, 0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(29, 0xe0000000) + + u32 word1; +#define RXD_W1_BUFFER_ADDRESS FIELD32(0, 0xffffffff) + + u32 word2; +#define RXD_W2_BBR0 FIELD32(0, 0x000000ff) +#define RXD_W2_RSSI FIELD32(8, 0x0000ff00) +#define RXD_W2_TA FIELD32(16, 0xffff0000) + + u32 word3; +#define RXD_W3_TA FIELD32(0, 0xffffffff) + + u32 word4; +#define RXD_W4_IV FIELD32(0, 0xffffffff) + + u32 word5; +#define RXD_W5_EIV FIELD32(0, 0xffffffff) + + u32 word6; +#define RXD_W6_KEY FIELD32(0, 0xffffffff) + + u32 word7; +#define RXD_W7_KEY FIELD32(0, 0xffffffff) + + u32 word8; +#define RXD_W8_KEY FIELD32(0, 0xffffffff) + + u32 word9; +#define RXD_W9_KEY FIELD32(0, 0xffffffff) + + u32 word10; +#define RXD_W10_DROP FIELD32(0, 0x00000001) +} __attribute__((packed)); + +/* + * _rt2x00_pci + * This is the main structure which contains all variables required to communicate with the PCI device. + */ +struct _rt2x00_pci { + /* + * PCI device structure. + */ + struct pci_dev *pci_dev; + + /* + * Chipset identification. + */ + struct _rt2x00_chip chip; + + /* + * csr_addr + * Base address of device registers, all exact register addresses are calculated from this address. + */ + void __iomem *csr_addr; + + /* + * RF register values for current channel. + */ + struct _rf_channel channel; + + /* + * EEPROM bus width. + */ + u8 eeprom_width; + + u8 __pad; /* For alignment only. */ + + /* + * EEPROM BBP data. + */ + u16 eeprom[EEPROM_BBP_SIZE]; + + /* + * DMA packet ring. + */ + struct _data_ring rx; + struct _data_ring tx; + + rtdm_irq_t irq_handle; + rtdm_lock_t lock; + +} __attribute__((packed)); + +static int rt2x00_get_rf_value(const struct _rt2x00_chip *chip, + const u8 channel, struct _rf_channel *rf_reg) +{ + int index = 0x00; + + index = rt2x00_get_channel_index(channel); + if (index < 0) + return -EINVAL; + + memset(rf_reg, 0x00, sizeof(*rf_reg)); + + if (rt2x00_rf(chip, RF2522)) { + rf_reg->rf1 = 0x00002050; + rf_reg->rf3 = 0x00000101; + goto update_rf2_1; + } + if (rt2x00_rf(chip, RF2523)) { + rf_reg->rf1 = 0x00022010; + rf_reg->rf3 = 0x000e0111; + rf_reg->rf4 = 0x00000a1b; + goto update_rf2_2; + } + if (rt2x00_rf(chip, RF2524)) { + rf_reg->rf1 = 0x00032020; + rf_reg->rf3 = 0x00000101; + rf_reg->rf4 = 0x00000a1b; + goto update_rf2_2; + } + if (rt2x00_rf(chip, RF2525)) { + rf_reg->rf1 = 0x00022020; + rf_reg->rf2 = 0x00080000; + rf_reg->rf3 = 0x00060111; + rf_reg->rf4 = 0x00000a1b; + goto update_rf2_2; + } + if (rt2x00_rf(chip, RF2525E)) { + rf_reg->rf2 = 0x00080000; + rf_reg->rf3 = 0x00060111; + goto update_rf2_3; + } + if (rt2x00_rf(chip, RF5222)) { + rf_reg->rf3 = 0x00000101; + goto update_rf2_3; + } + + return -EINVAL; + +update_rf2_1: /* RF2522. */ + rf_reg->rf2 = 0x000c1fda + (index * 0x14); + if (channel == 14) + rf_reg->rf2 += 0x0000001c; + goto exit; + +update_rf2_2: /* RF2523, RF2524, RF2525. 
*/
+	rf_reg->rf2 |= 0x00000c9e + (index * 0x04);
+	if (rf_reg->rf2 & 0x00000040)
+		rf_reg->rf2 += 0x00000040;
+	if (channel == 14) {
+		rf_reg->rf2 += 0x08;
+		rf_reg->rf4 &= ~0x00000018;
+	}
+	goto exit;
+
+update_rf2_3: /* RF2525E, RF5222. */
+	if (OFDM_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022020;
+		rf_reg->rf2 |= 0x00001136 + (index * 0x04);
+		if (rf_reg->rf2 & 0x00000040)
+			rf_reg->rf2 += 0x00000040;
+		if (channel == 14) {
+			rf_reg->rf2 += 0x04;
+			rf_reg->rf4 = 0x00000a1b;
+		} else {
+			rf_reg->rf4 = 0x00000a0b;
+		}
+	} else if (UNII_LOW_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022010;
+		rf_reg->rf2 = 0x00018896 + (index * 0x04);
+		rf_reg->rf4 = 0x00000a1f;
+	} else if (HIPERLAN2_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022010;
+		rf_reg->rf2 = 0x00008802 + (index * 0x04);
+		rf_reg->rf4 = 0x00000a0f;
+	} else if (UNII_HIGH_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022020;
+		rf_reg->rf2 = 0x000090a6 + (index * 0x08);
+		rf_reg->rf4 = 0x00000a07;
+	}
+
+exit:
+	rf_reg->rf1 = cpu_to_le32(rf_reg->rf1);
+	rf_reg->rf2 = cpu_to_le32(rf_reg->rf2);
+	rf_reg->rf3 = cpu_to_le32(rf_reg->rf3);
+	rf_reg->rf4 = cpu_to_le32(rf_reg->rf4);
+
+	return 0;
+}
+
+/*
+ * Get the txpower register value (0 - 31) matching the requested percentage.
+ */
+static inline u8 rt2x00_get_txpower(const struct _rt2x00_chip *chip,
+				    const u8 tx_power)
+{
+	/* Scale the 0 - 100 percentage onto the 0 - 31 register range. */
+	return (tx_power * 31) / 100;
+
+	/*
+	if(tx_power <= 3)
+		return 19;
+	else if(tx_power <= 12)
+		return 22;
+	else if(tx_power <= 25)
+		return 25;
+	else if(tx_power <= 50)
+		return 28;
+	else if(tx_power <= 75)
+		return 30;
+	else if(tx_power <= 100)
+		return 31;
+
+	ERROR("Invalid tx_power.\n");
+	return 31;
+	*/
+}
+
+/*
+ * Ring handlers.
+ */
+static inline int
+rt2x00_pci_alloc_ring(struct _rt2x00_core *core, struct _data_ring *ring,
+		      const u8 ring_type, const u16 max_entries,
+		      const u16 entry_size, const u16 desc_size)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	rt2x00_init_ring(core, ring, ring_type, max_entries, entry_size,
+			 desc_size);
+
+	ring->data_addr =
+		dma_alloc_coherent(&rt2x00pci->pci_dev->dev, ring->mem_size,
+				   &ring->data_dma, GFP_KERNEL);
+	if (!ring->data_addr)
+		return -ENOMEM;
+
+	memset(ring->data_addr, 0x00, ring->mem_size);
+
+	return 0;
+}
+
+static int rt2x00_pci_alloc_rings(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	if (rt2x00_pci_alloc_ring(core, &rt2x00pci->rx, RING_RX, RX_ENTRIES,
+				  DATA_FRAME_SIZE, SIZE_DESCRIPTOR) ||
+	    rt2x00_pci_alloc_ring(core, &rt2x00pci->tx, RING_TX, TX_ENTRIES,
+				  DATA_FRAME_SIZE, SIZE_DESCRIPTOR)) {
+		ERROR("DMA allocation failed.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static inline void rt2x00_pci_free_ring(struct _data_ring *ring)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(ring->core);
+
+	if (ring->data_addr)
+		dma_free_coherent(&rt2x00pci->pci_dev->dev, ring->mem_size,
+				  ring->data_addr, ring->data_dma);
+	ring->data_addr = NULL;
+
+	rt2x00_deinit_ring(ring);
+}
+
+static void rt2x00_pci_free_rings(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	rt2x00_pci_free_ring(&rt2x00pci->rx);
+	rt2x00_pci_free_ring(&rt2x00pci->tx);
+}
+
+/*
+ * Macros for calculating the exact position in the data ring.
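+ *
+ * The ring occupies one coherent DMA block: all descriptors first,
+ * then all data buffers, so for a ring with N entries the layout is:
+ *
+ *	data_addr                               descriptor 0
+ *	data_addr + desc_size                   descriptor 1
+ *	...
+ *	data_addr + N * desc_size               data buffer 0
+ *	data_addr + N * desc_size + entry_size  data buffer 1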
+ */
+#define DESC_BASE(__ring) ((void *)((__ring)->data_addr))
+#define DATA_BASE(__ring) \
+	((void *)(DESC_BASE(__ring) + \
+		  ((__ring)->max_entries * (__ring)->desc_size)))
+
+#define __DESC_ADDR(__ring, __index) \
+	((void *)(DESC_BASE(__ring) + ((__index) * (__ring)->desc_size)))
+#define __DATA_ADDR(__ring, __index) \
+	((void *)(DATA_BASE(__ring) + ((__index) * (__ring)->entry_size)))
+
+#define DESC_ADDR(__ring) (__DESC_ADDR(__ring, (__ring)->index))
+#define DESC_ADDR_DONE(__ring) (__DESC_ADDR(__ring, (__ring)->index_done))
+
+#define DATA_ADDR(__ring) (__DATA_ADDR(__ring, (__ring)->index))
+#define DATA_ADDR_DONE(__ring) (__DATA_ADDR(__ring, (__ring)->index_done))
+
+/*
+ * Register access.
+ * All access to the registers goes through rt2x00_register_read and rt2x00_register_write.
+ * BBP and RF registers require indirect access through the BBPCSR and RFCSR registers.
+ * The indirect register access works with busy bits, so a read or write call can fail.
+ * Specific fields within a register can be accessed with the set and get field routines;
+ * these functions handle the required little-endian and big-endian conversions.
+ */
+#define REGISTER_BUSY_COUNT \
+	10 /* Number of retries before failing a BBP or RF indirect access. */
+#define REGISTER_BUSY_DELAY \
+	100 /* Delay between each register access retry (us). */
+
+static void rt2x00_register_read(const struct _rt2x00_pci *rt2x00pci,
+				 const unsigned long offset, u32 *value)
+{
+	*value = readl((void *)(rt2x00pci->csr_addr + offset));
+}
+
+static void rt2x00_register_multiread(const struct _rt2x00_pci *rt2x00pci,
+				      const unsigned long offset, u32 *value,
+				      const u16 length)
+{
+	memcpy_fromio((void *)value, (void *)(rt2x00pci->csr_addr + offset),
+		      length);
+}
+
+static void rt2x00_register_write(const struct _rt2x00_pci *rt2x00pci,
+				  const unsigned long offset, const u32 value)
+{
+	writel(value, (void *)(rt2x00pci->csr_addr + offset));
+}
+
+static void rt2x00_register_multiwrite(const struct _rt2x00_pci *rt2x00pci,
+				       const unsigned long offset, u32 *value,
+				       const u16 length)
+{
+	memcpy_toio((void *)(rt2x00pci->csr_addr + offset), (void *)value,
+		    length);
+}
+
+static void rt2x00_bbp_regwrite(const struct _rt2x00_pci *rt2x00pci,
+				const u8 reg_id, const u8 value)
+{
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_register_read(rt2x00pci, BBPCSR, &reg);
+		if (!rt2x00_get_field32(reg, BBPCSR_BUSY))
+			goto bbp_write;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR("BBPCSR register busy. Write failed\n");
+	return;
+
+bbp_write:
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, BBPCSR_VALUE, value);
+	rt2x00_set_field32(&reg, BBPCSR_REGNUM, reg_id);
+	rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
+	rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1);
+
+	rt2x00_register_write(rt2x00pci, BBPCSR, reg);
+}
+
+static void rt2x00_bbp_regread(const struct _rt2x00_pci *rt2x00pci,
+			       const u8 reg_id, u8 *value)
+{
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	/*
+	 * We first have to acquire the requested BBP register,
+	 * so we write the register id into the BBP register first.
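+	 * With BBPCSR_WRITE_CONTROL cleared the ASIC then fetches the
+	 * value; once the busy bit clears, BBPCSR_VALUE holds the data.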
+	 */
+	rt2x00_set_field32(&reg, BBPCSR_REGNUM, reg_id);
+	rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
+	rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0);
+
+	rt2x00_register_write(rt2x00pci, BBPCSR, reg);
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_register_read(rt2x00pci, BBPCSR, &reg);
+		if (!rt2x00_get_field32(reg, BBPCSR_BUSY)) {
+			*value = rt2x00_get_field32(reg, BBPCSR_VALUE);
+			return;
+		}
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR("BBPCSR register busy. Read failed\n");
+	*value = 0xff;
+}
+
+static void rt2x00_rf_regwrite(const struct _rt2x00_pci *rt2x00pci,
+			       const u32 value)
+{
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_register_read(rt2x00pci, RFCSR, &reg);
+		if (!rt2x00_get_field32(reg, RFCSR_BUSY))
+			goto rf_write;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR("RFCSR register busy. Write failed\n");
+	return;
+
+rf_write:
+	reg = value;
+	rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20);
+	rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0);
+	rt2x00_set_field32(&reg, RFCSR_BUSY, 1);
+
+	rt2x00_register_write(rt2x00pci, RFCSR, reg);
+}
+
+/*
+ * EEPROM access.
+ * The EEPROM is accessed by word index.
+ * rt2x00_eeprom_read_word is the main access function that can be called by
+ * the rest of the module. It takes the index of the EEPROM word
+ * and the bus width.
+ */
+static inline void rt2x00_eeprom_pulse_high(const struct _rt2x00_pci *rt2x00pci,
+					    u32 *flags)
+{
+	rt2x00_set_field32(flags, CSR21_EEPROM_DATA_CLOCK, 1);
+	rt2x00_register_write(rt2x00pci, CSR21, *flags);
+	udelay(1);
+}
+
+static inline void rt2x00_eeprom_pulse_low(const struct _rt2x00_pci *rt2x00pci,
+					   u32 *flags)
+{
+	rt2x00_set_field32(flags, CSR21_EEPROM_DATA_CLOCK, 0);
+	rt2x00_register_write(rt2x00pci, CSR21, *flags);
+	udelay(1);
+}
+
+static void rt2x00_eeprom_shift_out_bits(const struct _rt2x00_pci *rt2x00pci,
+					 const u16 data, const u16 count)
+{
+	u32 flags = 0x00000000;
+	u32 mask = 0x0001 << (count - 1);
+
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+
+	/*
+	 * Clear data flags.
+	 */
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_OUT, 0);
+
+	/*
+	 * Start writing all bits.
+	 */
+	do {
+		/*
+		 * Only set the data_in flag when we are at the correct bit.
+		 */
+		rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN,
+				   (data & mask) ? 1 : 0);
+
+		rt2x00_register_write(rt2x00pci, CSR21, flags);
+
+		rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+		rt2x00_eeprom_pulse_low(rt2x00pci, &flags);
+
+		/*
+		 * Shift to next bit.
+		 */
+		mask >>= 1;
+	} while (mask);
+
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_register_write(rt2x00pci, CSR21, flags);
+}
+
+static void rt2x00_eeprom_shift_in_bits(const struct _rt2x00_pci *rt2x00pci,
+					u16 *data)
+{
+	u32 flags = 0x00000000;
+	u8 counter = 0x00;
+
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+
+	/*
+	 * Clear data flags.
+	 */
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_OUT, 0);
+
+	/*
+	 * Start reading all 16 bits.
+	 */
+	for (counter = 0; counter < 16; counter++) {
+		/*
+		 * Shift to the next bit.
+		 */
+		*data <<= 1;
+
+		rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+
+		rt2x00_register_read(rt2x00pci, CSR21, &flags);
+
+		/*
+		 * Clear data_in flag and set the data bit to 1 when the data_out flag is set. 
+ */ + rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0); + if (rt2x00_get_field32(flags, CSR21_EEPROM_DATA_OUT)) + *data |= 1; + + rt2x00_eeprom_pulse_low(rt2x00pci, &flags); + } +} + +static u16 rt2x00_eeprom_read_word(const struct _rt2x00_pci *rt2x00pci, + const u8 word) +{ + u32 flags = 0x00000000; + u16 data = 0x0000; + + /* + * Clear all flags, and enable chip select. + */ + rt2x00_register_read(rt2x00pci, CSR21, &flags); + rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0); + rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_OUT, 0); + rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_CLOCK, 0); + rt2x00_set_field32(&flags, CSR21_EEPROM_CHIP_SELECT, 1); + rt2x00_register_write(rt2x00pci, CSR21, flags); + + /* + * kick a pulse. + */ + rt2x00_eeprom_pulse_high(rt2x00pci, &flags); + rt2x00_eeprom_pulse_low(rt2x00pci, &flags); + + /* + * Select the read opcode and bus_width. + */ + rt2x00_eeprom_shift_out_bits(rt2x00pci, EEPROM_READ_OPCODE, 3); + rt2x00_eeprom_shift_out_bits(rt2x00pci, word, rt2x00pci->eeprom_width); + + rt2x00_eeprom_shift_in_bits(rt2x00pci, &data); + + /* + * Clear chip_select and data_in flags. + */ + rt2x00_register_read(rt2x00pci, CSR21, &flags); + rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0); + rt2x00_set_field32(&flags, CSR21_EEPROM_CHIP_SELECT, 0); + rt2x00_register_write(rt2x00pci, CSR21, flags); + + /* + * kick a pulse. + */ + rt2x00_eeprom_pulse_high(rt2x00pci, &flags); + rt2x00_eeprom_pulse_low(rt2x00pci, &flags); + + return data; +} + +#endif /* RT2500PCI_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h new file mode 100644 index 0000000..ec8ca90 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h @@ -0,0 +1,649 @@ +/* rt2x00.h + * + * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project + * <http://rt2x00.serialmonkey.com> + * 2006 rtnet adaption by Daniel Gregorek + * <dxg@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 global information. + Supported chipsets: RT2560 +*/ + +#ifndef RT2X00_H +#define RT2X00_H + +#include <linux/netdevice.h> +#include <linux/wireless.h> + +#include <rtnet_port.h> +#include <rtwlan.h> + +#define MAX_UNITS 2 + +/* + * Module information. + */ +#define DRV_NAME "rt2x00" +#define DRV_VERSION "0.1" +#define DRV_AUTHOR "Daniel Gregorek <dxg@gmx.de>" +//#define CONFIG_RT2X00_DEBUG + +/* + * Debug defines. + * The debug variable will be exported by the device specific module. + * For this reason this variable must be set to extern to make it accessible + * to the core module as well. + */ +#ifdef CONFIG_RT2X00_DEBUG +extern int rt2x00_debug_level; +#define DEBUG_PRINTK(__message...) 
\ + do { \ + rtdm_printk(__message); \ + } while (0) +#else /* CONFIG_RT2X00_DEBUG */ +#define DEBUG_PRINTK(__message...) \ + do { \ + } while (0) +#endif /* CONFIG_RT2X00_DEBUG */ + +/* + * Various debug levels. + * PANIC and ERROR indicates serious problems within the module, + * these should never be ignored and thus we will always print the message. + */ +#define PANIC(__message, __args...) \ + rtdm_printk(KERN_PANIC DRV_NAME "->%s: Panic - " __message, \ + __FUNCTION__, ##__args); +#define ERROR(__message, __args...) \ + rtdm_printk(KERN_ERR DRV_NAME "->%s: Error - " __message, \ + __FUNCTION__, ##__args); +#define WARNING(__message, __args...) \ + rtdm_printk(KERN_WARNING DRV_NAME "->%s: Warning - " __message, \ + __FUNCTION__, ##__args); +#define NOTICE(__message, __args...) \ + rtdm_printk(KERN_NOTICE DRV_NAME "->%s: Notice - " __message, \ + __FUNCTION__, ##__args); +#define INFO(__message, __args...) \ + rtdm_printk(KERN_INFO DRV_NAME "->%s: Info - " __message, \ + __FUNCTION__, ##__args); +#define DEBUG(__message, __args...) \ + DEBUG_PRINTK(KERN_DEBUG DRV_NAME "->%s: Debug - " __message, \ + __FUNCTION__, ##__args); + +/* + * RT2x00 ring types. + */ + +/* + * Ring names. + */ +#define RING_RX 0x01 /* Ring used for receiving packets. */ +#define RING_TX 0x02 /* Ring used for transmitting normal packets. */ + +/* + * Ring sizes. + */ +#define DATA_FRAME_SIZE 2432 +#define MGMT_FRAME_SIZE 256 + +/* + * RT2x00 xmit flags. + */ +#define XMIT_IFS_SIFS 0x0001 +#define XMIT_IFS_BACKOFF 0x0002 +#define XMIT_IFS_NEW_BACKOFF 0x0004 +#define XMIT_IFS_NONE 0x0008 +#define XMIT_NEW_SEQUENCE 0x0010 +#define XMIT_ACK 0x0020 +#define XMIT_TIMESTAMP 0x0040 +#define XMIT_RTS 0x0080 +#define XMIT_OFDM 0x0100 +#define XMIT_LONG_RETRY 0x0200 +#define XMIT_MORE_FRAGS 0x0400 +#define XMIT_SHORT_PREAMBLE 0x0800 +#define XMIT_START 0x1000 + +/* + * RT2x00 Statistics flags. + */ +#define STATS_TX_RESULT 0x01 +#define STATS_TX_RETRY_COUNT 0x02 +#define STATS_RX_CRC 0x10 +#define STATS_RX_PHYSICAL 0x20 +#define STATS_RX_QUALITY 0x40 +#define STATS_RX_DROP 0x80 + +/* + * TX result flags. + */ +#define TX_SUCCESS 0 +#define TX_SUCCESS_RETRY 1 +#define TX_FAIL_RETRY 2 +#define TX_FAIL_INVALID 3 +#define TX_FAIL_OTHER 4 + +/* + * Channel type defines. + */ +#define CHANNEL_OFDM 0x01 +#define CHANNEL_UNII_LOW 0x02 +#define CHANNEL_HIPERLAN2 0x04 +#define CHANNEL_UNII_HIGH 0x08 + +#define CHANNEL_OFDM_MIN 1 +#define CHANNEL_OFDM_MAX 14 +#define CHANNEL_UNII_LOW_MIN 36 +#define CHANNEL_UNII_LOW_MAX 64 +#define CHANNEL_HIPERLAN2_MIN 100 +#define CHANNEL_HIPERLAN2_MAX 140 +#define CHANNEL_UNII_HIGH_MIN 149 +#define CHANNEL_UNII_HIGH_MAX 161 + +/* + * Device 802.11abg capabilities. + */ +static struct _rt2x00_capabilities { + u8 txpower[6]; + u8 bitrate[12]; +} __attribute__ ((packed)) capabilities = { + /* + * tx-power. + */ + .txpower = { + 3, 12, 25, 50, 75, 100, + }, + + /* + * Bitrates + */ + .bitrate = { + 2, 4, 11, 22, /* CCK. */ + 12, 18, 24, 36, 48, 72, 96, 108, /* OFDM. */ + }, +}; + +struct _rt2x00_config { + u8 config_flags; +#define CONFIG_DROP_BCAST 0x0001 +#define CONFIG_DROP_MCAST 0x0002 +#define CONFIG_AUTORESP 0x0004 + + u8 antenna_tx; + u8 antenna_rx; + + u8 bssid[ETH_ALEN]; + u8 short_retry; + u8 long_retry; + + u8 channel; + u8 bitrate; /* 0.5Mbit/sec */ + u8 txpower; /* % */ + + u8 bbpsens; + + /* + * LED status + */ + u8 led_status; + + u16 __pad2; /* For alignment only. */ + + /* + * Duration values in us. 
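+	 * rt2x00_init_config() seeds plcp, sifs and slot_time with
+	 * 48, 10 and 20 respectively.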
+	 */
+	u8 plcp;
+	u8 sifs;
+	u8 slot_time;
+
+	/*
+	 * Configuration values that have to be updated to device.
+	 */
+	u16 update_flags;
+#define UPDATE_ALL_CONFIG 0xffff
+#define UPDATE_BSSID 0x0001
+#define UPDATE_PACKET_FILTER 0x0002
+#define UPDATE_CHANNEL 0x0004
+#define UPDATE_BITRATE 0x0008
+#define UPDATE_RETRY 0x0010
+#define UPDATE_TXPOWER 0x0020
+#define UPDATE_ANTENNA 0x0040
+#define UPDATE_DURATION 0x0080
+#define UPDATE_PREAMBLE 0x0100
+#define UPDATE_AUTORESP 0x0200
+#define UPDATE_LED_STATUS 0x0400
+#define UPDATE_BBPSENS 0x0800
+
+} __attribute__((packed));
+
+struct _rt2x00_core {
+	/*
+	 * RT2x00 device status flags (atomic read/write access).
+	 */
+	unsigned long flags;
+
+#define DEVICE_ENABLED 0 /* Device has been opened. */
+#define DEVICE_AWAKE 1 /* Device is not suspended. */
+#define DEVICE_RADIO_ON 2 /* Device antenna is enabled. */
+#define DEVICE_CONFIG_UPDATE 3 /* Device is updating configuration. */
+
+	/*
+	 * Device handler.
+	 */
+	struct _rt2x00_dev_handler *handler;
+
+	/*
+	 * RTnet device we belong to.
+	 */
+	struct rtnet_device *rtnet_dev;
+
+	/*
+	 * RTwlan stack structure.
+	 */
+	struct rtwlan_device *rtwlan_dev;
+
+	/*
+	 * Device configuration.
+	 */
+	struct _rt2x00_config config;
+
+	void *priv;
+
+} __attribute__((packed));
+
+/*
+ * Device specific handlers.
+ */
+struct _rt2x00_dev_handler {
+	/*
+	 * Device specific module.
+	 */
+	struct module *dev_module;
+
+	/*
+	 * Initialization handlers.
+	 */
+	int (*dev_probe)(struct _rt2x00_core *core, void *priv);
+	int (*dev_remove)(struct _rt2x00_core *core);
+
+	/*
+	 * Radio control.
+	 */
+	int (*dev_radio_on)(struct _rt2x00_core *core);
+	int (*dev_radio_off)(struct _rt2x00_core *core);
+
+	/*
+	 * Configuration handlers.
+	 */
+	int (*dev_update_config)(struct _rt2x00_core *core, u16 update_flags);
+
+	/*
+	 * xmit handler.
+	 */
+	int (*dev_xmit_packet)(struct _rt2x00_core *core, struct rtskb *rtskb,
+			       u16 rate, u16 xmit_flags);
+
+	/*
+	 * Handler for direct access to register from core.
+	 */
+	int (*dev_register_access)(struct _rt2x00_core *core, int request,
+				   u32 address, u32 *value);
+
+} __attribute__((packed));
+
+static inline void *rt2x00_priv(const struct _rt2x00_core *core)
+{
+	return core->priv;
+}
+
+/*
+ * Duration calculations
+ * The rate variable passed is: 2 * real_rate (in Mb/s).
+ * Therefore the length has to be multiplied by 8 to convert bytes to bits,
+ * and multiplied by 2 to compensate for the difference between real_rate
+ * and the rate variable.
+ */
+#define ACK_SIZE 14
+#define IEEE80211_HEADER 24
+
+static inline u16 get_duration(const unsigned int size, const u8 rate)
+{
+	return ((size * 8 * 2) / rate);
+}
+
+static inline u16 get_duration_res(const unsigned int size, const u8 rate)
+{
+	return ((size * 8 * 2) % rate);
+}
+
+static inline u16 get_preamble(const struct _rt2x00_config *config)
+{
+	return 144;
+}
+
+/*
+ * Register handlers.
+ * We store the position of a register field inside a field structure.
+ * This simplifies the process of setting and reading a certain field
+ * inside the register.
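+ *
+ * A short usage sketch, following the same pattern the BBP accessors in
+ * rt2500pci.h use:
+ *
+ *	u32 reg = 0;
+ *	rt2x00_set_field32(&reg, BBPCSR_REGNUM, reg_id);
+ *	rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
+ *	...
+ *	if (!rt2x00_get_field32(reg, BBPCSR_BUSY))
+ *		value = rt2x00_get_field32(reg, BBPCSR_VALUE);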
+ */
+struct _rt2x00_field16 {
+	u16 bit_offset;
+	u16 bit_mask;
+} __attribute__((packed));
+
+struct _rt2x00_field32 {
+	u32 bit_offset;
+	u32 bit_mask;
+} __attribute__((packed));
+
+#define FIELD16(__offset, __mask) \
+	((struct _rt2x00_field16){ (__offset), (__mask) })
+#define FIELD32(__offset, __mask) \
+	((struct _rt2x00_field32){ (__offset), (__mask) })
+
+static inline void rt2x00_set_field32(u32 *reg,
+				      const struct _rt2x00_field32 field,
+				      const u32 value)
+{
+	*reg &= cpu_to_le32(~(field.bit_mask));
+	*reg |= cpu_to_le32((value << field.bit_offset) & field.bit_mask);
+}
+
+static inline void rt2x00_set_field32_nb(u32 *reg,
+					 const struct _rt2x00_field32 field,
+					 const u32 value)
+{
+	*reg &= ~(field.bit_mask);
+	*reg |= (value << field.bit_offset) & field.bit_mask;
+}
+
+static inline u32 rt2x00_get_field32(const u32 reg,
+				     const struct _rt2x00_field32 field)
+{
+	return (le32_to_cpu(reg) & field.bit_mask) >> field.bit_offset;
+}
+
+static inline u32 rt2x00_get_field32_nb(const u32 reg,
+					const struct _rt2x00_field32 field)
+{
+	return (reg & field.bit_mask) >> field.bit_offset;
+}
+
+static inline void rt2x00_set_field16(u16 *reg,
+				      const struct _rt2x00_field16 field,
+				      const u16 value)
+{
+	*reg &= cpu_to_le16(~(field.bit_mask));
+	*reg |= cpu_to_le16((value << field.bit_offset) & field.bit_mask);
+}
+
+static inline void rt2x00_set_field16_nb(u16 *reg,
+					 const struct _rt2x00_field16 field,
+					 const u16 value)
+{
+	*reg &= ~(field.bit_mask);
+	*reg |= (value << field.bit_offset) & field.bit_mask;
+}
+
+static inline u16 rt2x00_get_field16(const u16 reg,
+				     const struct _rt2x00_field16 field)
+{
+	return (le16_to_cpu(reg) & field.bit_mask) >> field.bit_offset;
+}
+
+static inline u16 rt2x00_get_field16_nb(const u16 reg,
+					const struct _rt2x00_field16 field)
+{
+	return (reg & field.bit_mask) >> field.bit_offset;
+}
+
+/*
+ * RF register structure for channel selection.
+ */
+struct _rf_channel {
+	u32 rf1;
+	u32 rf2;
+	u32 rf3;
+	u32 rf4;
+} __attribute__((packed));
+
+/*
+ * Chipset identification
+ * The chipset on the device is composed of an RT and an RF chip.
+ * The chipset combination is important for determining device capabilities.
+ */
+struct _rt2x00_chip {
+	u16 rt;
+	u16 rf;
+} __attribute__((packed));
+
+/*
+ * Set chipset data.
+ * Some rf values for RT2400 devices are equal to rf values for RT2500 devices.
+ * To prevent problems, all rf values will be masked to clearly separate each chipset.
+ */
+static inline void set_chip(struct _rt2x00_chip *chipset, const u16 rt,
+			    const u16 rf)
+{
+	INFO("Chipset detected - rt: %04x, rf: %04x.\n", rt, rf);
+
+	chipset->rt = rt;
+	chipset->rf = rf | (chipset->rt & 0xff00);
+}
+
+static inline char rt2x00_rt(const struct _rt2x00_chip *chipset, const u16 chip)
+{
+	return (chipset->rt == chip);
+}
+
+static inline char rt2x00_rf(const struct _rt2x00_chip *chipset, const u16 chip)
+{
+	return (chipset->rf == chip);
+}
+
+static inline u16 rt2x00_get_rf(const struct _rt2x00_chip *chipset)
+{
+	return chipset->rf;
+}
+
+/*
+ * _data_ring
+ * Data rings are used by the device to send and receive packets.
+ * The data_addr is the base address of the data memory.
+ * Device specific information is pointed to by the priv pointer.
+ * The index values may only be changed with the functions
+ * rt2x00_ring_index_inc() and rt2x00_ring_index_done_inc().
+ */
+struct _data_ring {
+	/*
+	 * Base address of packet ring.
+	 */
+	dma_addr_t data_dma;
+	void *data_addr;
+
+	/*
+	 * Private device specific data.
+ */ + void *priv; + struct _rt2x00_core *core; + + /* + * Current index values. + */ + u8 index; + u8 index_done; + + /* + * Ring type set with RING_* define. + */ + u8 ring_type; + + /* + * Number of entries in this ring. + */ + u8 max_entries; + + /* + * Size of packet and descriptor in bytes. + */ + u16 entry_size; + u16 desc_size; + + /* + * Total allocated memory size. + */ + u32 mem_size; +} __attribute__((packed)); + +/* + * Number of entries in a packet ring. + */ +#define RX_ENTRIES 8 +#define TX_ENTRIES 8 +#define ATIM_ENTRIES 1 +#define PRIO_ENTRIES 2 +#define BEACON_ENTRIES 1 + +/* + * Initialization and cleanup routines. + */ +static inline void rt2x00_init_ring(struct _rt2x00_core *core, + struct _data_ring *ring, const u8 ring_type, + const u16 max_entries, const u16 entry_size, + const u16 desc_size) +{ + ring->core = core; + ring->index = 0; + ring->index_done = 0; + ring->ring_type = ring_type; + ring->max_entries = max_entries; + ring->entry_size = entry_size; + ring->desc_size = desc_size; + ring->mem_size = + ring->max_entries * (ring->desc_size + ring->entry_size); +} + +static inline void rt2x00_deinit_ring(struct _data_ring *ring) +{ + ring->core = NULL; + ring->index = 0; + ring->index_done = 0; + ring->ring_type = 0; + ring->max_entries = 0; + ring->entry_size = 0; + ring->desc_size = 0; + ring->mem_size = 0; +} + +/* + * Ring index manipulation functions. + */ +static inline void rt2x00_ring_index_inc(struct _data_ring *ring) +{ + ring->index = (++ring->index < ring->max_entries) ? ring->index : 0; +} + +static inline void rt2x00_ring_index_done_inc(struct _data_ring *ring) +{ + ring->index_done = + (++ring->index_done < ring->max_entries) ? ring->index_done : 0; +} + +static inline void rt2x00_ring_clear_index(struct _data_ring *ring) +{ + ring->index = 0; + ring->index_done = 0; +} + +static inline u8 rt2x00_ring_empty(struct _data_ring *ring) +{ + return ring->index_done == ring->index; +} + +static inline u8 rt2x00_ring_free_entries(struct _data_ring *ring) +{ + if (ring->index >= ring->index_done) + return ring->max_entries - (ring->index - ring->index_done); + else + return ring->index_done - ring->index; +} + +/* + * Return PLCP value matching the rate. + * PLCP values according to ieee802.11a-1999 p.14. + */ +static inline u8 rt2x00_get_plcp(const u8 rate) +{ + u8 counter = 0x00; + u8 plcp[12] = { + 0x00, 0x01, 0x02, 0x03, /* CCK. */ + 0x0b, 0x0f, 0x0a, 0x0e, 0x09, 0x0d, 0x08, 0x0c, /* OFDM. */ + }; + + for (; counter < 12; counter++) { + if (capabilities.bitrate[counter] == rate) + return plcp[counter]; + } + + return 0xff; +} + +#define OFDM_CHANNEL(__channel) \ + ((__channel) >= CHANNEL_OFDM_MIN && (__channel) <= CHANNEL_OFDM_MAX) +#define UNII_LOW_CHANNEL(__channel) \ + ((__channel) >= CHANNEL_UNII_LOW_MIN && \ + (__channel) <= CHANNEL_UNII_LOW_MAX) +#define HIPERLAN2_CHANNEL(__channel) \ + ((__channel) >= CHANNEL_HIPERLAN2_MIN && \ + (__channel) <= CHANNEL_HIPERLAN2_MAX) +#define UNII_HIGH_CHANNEL(__channel) \ + ((__channel) >= CHANNEL_UNII_HIGH_MIN && \ + (__channel) <= CHANNEL_UNII_HIGH_MAX) + +/* + * Return the index value of the channel starting from the first channel of the range. + * Where range can be OFDM, UNII (low), HiperLAN2 or UNII (high). 
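+ *
+ * OFDM channels are numbered consecutively, the 5 GHz ranges in steps
+ * of four: e.g. channel 3 maps to index 2, and channel 44 maps to
+ * (44 - 36) / 4 = 2 within the UNII (low) range.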
+ */ +static inline int rt2x00_get_channel_index(const u8 channel) +{ + if (OFDM_CHANNEL(channel)) + return (channel - 1); + + if (channel % 4) + return -EINVAL; + + if (UNII_LOW_CHANNEL(channel)) + return ((channel - CHANNEL_UNII_LOW_MIN) / 4); + else if (HIPERLAN2_CHANNEL(channel)) + return ((channel - CHANNEL_HIPERLAN2_MIN) / 4); + else if (UNII_HIGH_CHANNEL(channel)) + return ((channel - CHANNEL_UNII_HIGH_MIN) / 4); + return -EINVAL; +} + +/* + * RT2x00 core module functions that can be used in the device specific modules. + */ +extern struct rtnet_device * +rt2x00_core_probe(struct _rt2x00_dev_handler *handler, void *priv, + u32 sizeof_dev); +extern void rt2x00_core_remove(struct rtnet_device *rtnet_dev); + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c new file mode 100644 index 0000000..fac5c3e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c @@ -0,0 +1,444 @@ +/* rt2x00core.c + * + * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project + * <http://rt2x00.serialmonkey.com> + * 2006 rtnet adaption by Daniel Gregorek + * <dxg@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + * Module: rt2x00core + * Abstract: rt2x00 core routines. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/version.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <asm/io.h> + +#include <rtnet_port.h> + +#include "rt2x00.h" + +#ifdef DRV_NAME +#undef DRV_NAME +#define DRV_NAME "rt_rt2x00core" +#endif /* DRV_NAME */ + +static int rt2x00_radio_on(struct _rt2x00_core *core); +static int rt2x00_radio_off(struct _rt2x00_core *core); + +static int cards[MAX_UNITS] = { [0 ...(MAX_UNITS - 1)] = 1 }; +module_param_array(cards, int, NULL, 0444); +MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)"); + +/* + * Writes the pending configuration to the device + */ +static void rt2x00_update_config(struct _rt2x00_core *core) +{ + u16 update_flags = 0x0000; + + if (!test_bit(DEVICE_ENABLED, &core->flags) && + !test_bit(DEVICE_RADIO_ON, &core->flags)) + return; + + if (test_and_set_bit(DEVICE_CONFIG_UPDATE, &core->flags)) + return; + + update_flags = core->config.update_flags; + core->config.update_flags = 0; + + if (likely(update_flags)) + core->handler->dev_update_config(core, update_flags); + + clear_bit(DEVICE_CONFIG_UPDATE, &core->flags); +} + +/* + * Radio control. 
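+ *
+ * The DEVICE_RADIO_ON flag guards against double activation: switching
+ * the radio on (or off) twice in a row fails with -ENOTCONN instead of
+ * calling into the device handler again.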
+ */ +static int rt2x00_radio_on(struct _rt2x00_core *core) +{ + int status = 0x00000000; + + if (test_bit(DEVICE_RADIO_ON, &core->flags)) { + WARNING("Radio already on.\n"); + return -ENOTCONN; + } + + status = core->handler->dev_radio_on(core); + if (status) + return status; + + set_bit(DEVICE_RADIO_ON, &core->flags); + + return 0; +} + +static int rt2x00_radio_off(struct _rt2x00_core *core) +{ + if (!test_and_clear_bit(DEVICE_RADIO_ON, &core->flags)) { + WARNING("Radio already off.\n"); + return -ENOTCONN; + } + + core->handler->dev_radio_off(core); + + return 0; +} + +/* + * user space io handler + */ +static int rt2x00_ioctl(struct rtnet_device *rtnet_dev, struct ifreq *ifr, + int request) +{ + struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev); + struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev); + struct rtwlan_cmd *cmd; + u8 rate, dsss_rate, ofdm_rate; + u32 address, value; + + cmd = (struct rtwlan_cmd *)ifr->ifr_data; + + switch (request) { + case IOC_RTWLAN_IFINFO: + cmd->args.info.bitrate = core->config.bitrate; + cmd->args.info.channel = core->config.channel; + cmd->args.info.retry = core->config.short_retry; + cmd->args.info.txpower = core->config.txpower; + cmd->args.info.bbpsens = core->config.bbpsens; + cmd->args.info.mode = core->rtwlan_dev->mode; + cmd->args.info.rx_packets = core->rtwlan_dev->stats.rx_packets; + cmd->args.info.tx_packets = core->rtwlan_dev->stats.tx_packets; + cmd->args.info.tx_retry = core->rtwlan_dev->stats.tx_retry; + cmd->args.info.autoresponder = + core->config.config_flags & CONFIG_AUTORESP ? 1 : 0; + cmd->args.info.dropbcast = + core->config.config_flags & CONFIG_DROP_BCAST ? 1 : 0; + cmd->args.info.dropmcast = + core->config.config_flags & CONFIG_DROP_MCAST ? 1 : 0; + DEBUG("rtwlan_dev->mode=%d\n", rtwlan_dev->mode); + break; + case IOC_RTWLAN_BITRATE: + rate = cmd->args.set.bitrate; + ofdm_rate = ieee80211_is_ofdm_rate(rate); + dsss_rate = ieee80211_is_dsss_rate(rate); + DEBUG("bitrate=%d\n", rate); + if (!(dsss_rate ^ ofdm_rate)) + NOTICE("Rate %d is not DSSS and not OFDM.\n", rate); + core->config.bitrate = rate; + core->config.update_flags |= UPDATE_BITRATE; + break; + case IOC_RTWLAN_CHANNEL: + DEBUG("channel=%d\n", cmd->args.set.channel); + core->config.channel = cmd->args.set.channel; + core->config.update_flags |= UPDATE_CHANNEL; + break; + case IOC_RTWLAN_RETRY: + core->config.short_retry = cmd->args.set.retry; + core->config.update_flags |= UPDATE_RETRY; + break; + case IOC_RTWLAN_TXPOWER: + core->config.txpower = cmd->args.set.txpower; + core->config.update_flags |= UPDATE_TXPOWER; + break; + case IOC_RTWLAN_AUTORESP: + if (cmd->args.set.autoresponder) + core->config.config_flags |= CONFIG_AUTORESP; + else + core->config.config_flags &= ~CONFIG_AUTORESP; + core->config.update_flags |= UPDATE_AUTORESP; + break; + case IOC_RTWLAN_DROPBCAST: + if (cmd->args.set.dropbcast) + core->config.config_flags |= CONFIG_DROP_BCAST; + else + core->config.config_flags &= ~CONFIG_DROP_BCAST; + core->config.update_flags |= UPDATE_PACKET_FILTER; + break; + case IOC_RTWLAN_DROPMCAST: + if (cmd->args.set.dropmcast) + core->config.config_flags |= CONFIG_DROP_MCAST; + else + core->config.config_flags &= ~CONFIG_DROP_MCAST; + core->config.update_flags |= UPDATE_PACKET_FILTER; + break; + case IOC_RTWLAN_TXMODE: + core->rtwlan_dev->mode = cmd->args.set.mode; + break; + case IOC_RTWLAN_BBPSENS: + value = cmd->args.set.bbpsens; + if (value < 0) + value = 0; + if (value > 127) + value = 127; + core->config.bbpsens = value; + core->config.update_flags |= 
UPDATE_BBPSENS; + break; + case IOC_RTWLAN_REGREAD: + case IOC_RTWLAN_BBPREAD: + address = cmd->args.reg.address; + core->handler->dev_register_access(core, request, address, + &value); + cmd->args.reg.value = value; + break; + case IOC_RTWLAN_REGWRITE: + case IOC_RTWLAN_BBPWRITE: + address = cmd->args.reg.address; + value = cmd->args.reg.value; + core->handler->dev_register_access(core, request, address, + &value); + break; + default: + ERROR("Unknown request!\n"); + return -1; + } + + if (request != IOC_RTWLAN_IFINFO) + rt2x00_update_config(core); + + return 0; +} + +/* + * TX/RX related routines. + */ +static int rt2x00_start_xmit(struct rtskb *rtskb, + struct rtnet_device *rtnet_dev) +{ + struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev); + struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev); + u16 xmit_flags = 0x0000; + u8 rate = 0x00; + + if (unlikely(rtskb)) { + rate = core->config.bitrate; + if (ieee80211_is_ofdm_rate(rate)) + xmit_flags |= XMIT_OFDM; + + /* Check if the packet should be acknowledged */ + if (core->rtwlan_dev->mode == RTWLAN_TXMODE_ACK) + xmit_flags |= XMIT_ACK; + + if (core->handler->dev_xmit_packet(core, rtskb, rate, + xmit_flags)) + ERROR("Packet dropped !"); + + dev_kfree_rtskb(rtskb); + } + + return 0; +} + +/*** + * rt2x00_open + * @rtdev + */ +static int rt2x00_open(struct rtnet_device *rtnet_dev) +{ + struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev); + struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev); + int status = 0x00000000; + + DEBUG("Start.\n"); + + if (test_and_set_bit(DEVICE_ENABLED, &core->flags)) { + ERROR("device already enabled.\n"); + return -EBUSY; + } + + /* + * Start rtnet interface. + */ + rt_stack_connect(rtnet_dev, &STACK_manager); + + status = rt2x00_radio_on(core); + if (status) { + clear_bit(DEVICE_ENABLED, &core->flags); + ERROR("Couldn't activate radio.\n"); + return status; + } + + core->config.led_status = 1; + core->config.update_flags |= UPDATE_LED_STATUS; + rt2x00_update_config(core); + + rtnetif_start_queue(rtnet_dev); + + DEBUG("Exit success.\n"); + + return 0; +} + +/*** + * rt2x00_close + * @rtdev + */ +static int rt2x00_close(struct rtnet_device *rtnet_dev) +{ + struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev); + struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev); + + DEBUG("Start.\n"); + + if (!test_and_clear_bit(DEVICE_ENABLED, &core->flags)) { + ERROR("device already disabled.\n"); + return -EBUSY; + } + + rt2x00_radio_off(core); + + rtnetif_stop_queue(rtnet_dev); + rt_stack_disconnect(rtnet_dev); + + return 0; +} + +/* + * Initialization handlers. 
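+ *
+ * rt2x00_init_config() below seeds the defaults before the device
+ * specific probe runs; update_flags is set to UPDATE_ALL_CONFIG so the
+ * first rt2x00_update_config() pushes the complete configuration to
+ * the hardware.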
+ */ +static void rt2x00_init_config(struct _rt2x00_core *core) +{ + DEBUG("Start.\n"); + + memset(&core->config.bssid, '\0', sizeof(core->config.bssid)); + + core->config.channel = 1; + core->config.bitrate = capabilities.bitrate[0]; + core->config.bbpsens = 50; + core->config.config_flags = 0; + core->config.config_flags |= + CONFIG_DROP_BCAST | CONFIG_DROP_MCAST | CONFIG_AUTORESP; + core->config.short_retry = 4; + core->config.long_retry = 7; + core->config.txpower = 100; + core->config.plcp = 48; + core->config.sifs = 10; + core->config.slot_time = 20; + core->rtwlan_dev->mode = RTWLAN_TXMODE_RAW; + core->config.update_flags = UPDATE_ALL_CONFIG; +} + +struct rtnet_device *rt2x00_core_probe(struct _rt2x00_dev_handler *handler, + void *priv, u32 sizeof_dev) +{ + struct rtnet_device *rtnet_dev = NULL; + struct _rt2x00_core *core = NULL; + struct rtwlan_device *rtwlan_dev = NULL; + static int cards_found = -1; + int err; + + DEBUG("Start.\n"); + + cards_found++; + if (cards[cards_found] == 0) + goto exit; + + rtnet_dev = + rtwlan_alloc_dev(sizeof_dev + sizeof(*core), RX_ENTRIES * 2); + if (!rtnet_dev) + goto exit; + + rt_rtdev_connect(rtnet_dev, &RTDEV_manager); + rtnet_dev->vers = RTDEV_VERS_2_0; + + rtwlan_dev = rtnetdev_priv(rtnet_dev); + memset(rtwlan_dev, 0x00, sizeof(*rtwlan_dev)); + + core = rtwlan_priv(rtwlan_dev); + memset(core, 0x00, sizeof(*core)); + + core->rtwlan_dev = rtwlan_dev; + core->handler = handler; + core->priv = (void *)core + sizeof(*core); + core->rtnet_dev = rtnet_dev; + + /* Set configuration default values. */ + rt2x00_init_config(core); + + if (core->handler->dev_probe && core->handler->dev_probe(core, priv)) { + ERROR("device probe failed.\n"); + goto exit; + } + INFO("Device " MAC_FMT " detected.\n", MAC_ARG(rtnet_dev->dev_addr)); + + rtwlan_dev->hard_start_xmit = rt2x00_start_xmit; + + rtnet_dev->open = &rt2x00_open; + rtnet_dev->stop = &rt2x00_close; + rtnet_dev->do_ioctl = &rt2x00_ioctl; + rtnet_dev->hard_header = &rt_eth_header; + + if ((err = rt_register_rtnetdev(rtnet_dev)) != 0) { + rtdev_free(rtnet_dev); + ERROR("rtnet_device registration failed.\n"); + printk("err=%d\n", err); + goto exit_dev_remove; + } + + set_bit(DEVICE_AWAKE, &core->flags); + + return rtnet_dev; + +exit_dev_remove: + if (core->handler->dev_remove) + core->handler->dev_remove(core); + +exit: + return NULL; +} +EXPORT_SYMBOL_GPL(rt2x00_core_probe); + +void rt2x00_core_remove(struct rtnet_device *rtnet_dev) +{ + rt_unregister_rtnetdev(rtnet_dev); + rt_rtdev_disconnect(rtnet_dev); + + rtdev_free(rtnet_dev); +} +EXPORT_SYMBOL_GPL(rt2x00_core_remove); + +/* + * RT2x00 core module information. 
+ */ +static char version[] = DRV_NAME " - " DRV_VERSION; + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION("RTnet rt2500 PCI WLAN driver (Core Module)"); +MODULE_LICENSE("GPL"); + +static int __init rt2x00_core_init(void) +{ + printk(KERN_INFO "Loading module: %s\n", version); + return 0; +} + +static void __exit rt2x00_core_exit(void) +{ + printk(KERN_INFO "Unloading module: %s\n", version); +} + +module_init(rt2x00_core_init); +module_exit(rt2x00_core_exit); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile new file mode 100644 index 0000000..316d8c3 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile @@ -0,0 +1,5 @@ +ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include + +obj-$(CONFIG_XENO_DRIVERS_NET_FEC) += rtnet_fec.o + +rtnet_fec-y := fec_main.o fec_ptp.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h new file mode 100644 index 0000000..0e25662 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h @@ -0,0 +1,625 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/****************************************************************************/ + +/* + * fec.h -- Fast Ethernet Controller for Motorola ColdFire SoC + * processors. + * + * (C) Copyright 2000-2005, Greg Ungerer (gerg@snapgear.com) + * (C) Copyright 2000-2001, Lineo (www.lineo.com) + */ + +/****************************************************************************/ +#ifndef FEC_H +#define FEC_H +/****************************************************************************/ + +#include <linux/clocksource.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/timecounter.h> +#include <rtnet_port.h> + +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ + defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) +/* + * Just figures, Motorola would have to change the offsets for + * registers in the same peripheral device on different models + * of the ColdFire! 
+ */ +#define FEC_IEVENT 0x004 /* Interrupt event reg */ +#define FEC_IMASK 0x008 /* Interrupt mask reg */ +#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */ +#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */ +#define FEC_ECNTRL 0x024 /* Ethernet control reg */ +#define FEC_MII_DATA 0x040 /* MII manage frame reg */ +#define FEC_MII_SPEED 0x044 /* MII speed control reg */ +#define FEC_MIB_CTRLSTAT 0x064 /* MIB control/status reg */ +#define FEC_R_CNTRL 0x084 /* Receive control reg */ +#define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */ +#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */ +#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */ +#define FEC_OPD 0x0ec /* Opcode + Pause duration */ +#define FEC_TXIC0 0x0f0 /* Tx Interrupt Coalescing for ring 0 */ +#define FEC_TXIC1 0x0f4 /* Tx Interrupt Coalescing for ring 1 */ +#define FEC_TXIC2 0x0f8 /* Tx Interrupt Coalescing for ring 2 */ +#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */ +#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */ +#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */ +#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */ +#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */ +#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */ +#define FEC_GRP_HASH_TABLE_LOW 0x124 /* Low 32bits hash table */ +#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */ +#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */ +#define FEC_R_FSTART 0x150 /* FIFO receive start reg */ +#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */ +#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */ +#define FEC_R_BUFF_SIZE_1 0x168 /* Maximum receive buff ring1 size */ +#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */ +#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */ +#define FEC_R_BUFF_SIZE_2 0x174 /* Maximum receive buff ring2 size */ +#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */ +#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */ +#define FEC_R_BUFF_SIZE_0 0x188 /* Maximum receive buff size */ +#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */ +#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ +#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ +#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ +#define FEC_FTRL 0x1b0 /* Frame truncation receive length*/ +#define FEC_RACC 0x1c4 /* Receive Accelerator function */ +#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */ +#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */ +#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */ +#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */ +#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */ +#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */ +#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */ +#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */ +#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */ +#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ +#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ + +#define BM_MIIGSK_CFGR_MII 0x00 +#define BM_MIIGSK_CFGR_RMII 0x01 +#define BM_MIIGSK_CFGR_FRCONT_10M 0x40 + +#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */ +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */ 
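+
+/*
+ * [Editor's note -- illustrative sketch, not part of the vendored
+ * header] Every offset in this block is relative to the ioremap()'d
+ * register window kept in fec_enet_private::hwp (defined further down
+ * in this header), so a driver-side read of one of these MIB counters
+ * is a plain MMIO access, e.g.:
+ *
+ *	u32 tx_pkts = readl(fep->hwp + RMON_T_PACKETS);
+ *
+ * fec_restart() in fec_main.c uses the same pattern when it writes
+ * FEC_MIB_CTRLSTAT to re-enable the statistics counters.
+ */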
+#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */ +#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */ +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */ +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */ +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */ +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */ +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */ +#define RMON_T_COL 0x224 /* RMON TX collision count */ +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */ +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */ +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */ +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */ +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */ +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */ +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */ +#define RMON_T_OCTETS 0x244 /* RMON TX octets */ +#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */ +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */ +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */ +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */ +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */ +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */ +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */ +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */ +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */ +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */ +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */ +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */ +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */ +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */ +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ +#define RMON_R_RESVD_O 0x2a4 /* Reserved */ +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */ +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */ +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */ +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */ +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */ +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */ +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */ +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */ +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */ +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */ +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */ +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */ +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */ +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */ +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */ + +#else + +#define FEC_ECNTRL 0x000 /* Ethernet control reg */ +#define FEC_IEVENT 0x004 /* Interrupt even reg */ +#define FEC_IMASK 0x008 /* Interrupt mask reg */ +#define FEC_IVEC 0x00c /* Interrupt vec status reg 
*/ +#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */ +#define FEC_R_DES_ACTIVE_1 FEC_R_DES_ACTIVE_0 +#define FEC_R_DES_ACTIVE_2 FEC_R_DES_ACTIVE_0 +#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */ +#define FEC_X_DES_ACTIVE_1 FEC_X_DES_ACTIVE_0 +#define FEC_X_DES_ACTIVE_2 FEC_X_DES_ACTIVE_0 +#define FEC_MII_DATA 0x040 /* MII manage frame reg */ +#define FEC_MII_SPEED 0x044 /* MII speed control reg */ +#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */ +#define FEC_R_FSTART 0x090 /* FIFO receive start reg */ +#define FEC_X_WMRK 0x0a4 /* FIFO transmit water mark */ +#define FEC_X_FSTART 0x0ac /* FIFO transmit start reg */ +#define FEC_R_CNTRL 0x104 /* Receive control reg */ +#define FEC_MAX_FRM_LEN 0x108 /* Maximum frame length reg */ +#define FEC_X_CNTRL 0x144 /* Transmit Control reg */ +#define FEC_ADDR_LOW 0x3c0 /* Low 32bits MAC address */ +#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */ +#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */ +#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */ +#define FEC_R_DES_START_0 0x3d0 /* Receive descriptor ring */ +#define FEC_R_DES_START_1 FEC_R_DES_START_0 +#define FEC_R_DES_START_2 FEC_R_DES_START_0 +#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */ +#define FEC_X_DES_START_1 FEC_X_DES_START_0 +#define FEC_X_DES_START_2 FEC_X_DES_START_0 +#define FEC_R_BUFF_SIZE_0 0x3d8 /* Maximum receive buff size */ +#define FEC_R_BUFF_SIZE_1 FEC_R_BUFF_SIZE_0 +#define FEC_R_BUFF_SIZE_2 FEC_R_BUFF_SIZE_0 +#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */ +/* Not existed in real chip + * Just for pass build. + */ +#define FEC_RCMR_1 0xfff +#define FEC_RCMR_2 0xfff +#define FEC_DMA_CFG_1 0xfff +#define FEC_DMA_CFG_2 0xfff +#define FEC_TXIC0 0xfff +#define FEC_TXIC1 0xfff +#define FEC_TXIC2 0xfff +#define FEC_RXIC0 0xfff +#define FEC_RXIC1 0xfff +#define FEC_RXIC2 0xfff +#endif /* CONFIG_M5272 */ + + +/* + * Define the buffer descriptor structure. + * + * Evidently, ARM SoCs have the FEC block generated in a + * little endian mode so adjust endianness accordingly. + */ +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) +#define fec32_to_cpu le32_to_cpu +#define fec16_to_cpu le16_to_cpu +#define cpu_to_fec32 cpu_to_le32 +#define cpu_to_fec16 cpu_to_le16 +#define __fec32 __le32 +#define __fec16 __le16 + +struct bufdesc { + __fec16 cbd_datlen; /* Data length */ + __fec16 cbd_sc; /* Control and status info */ + __fec32 cbd_bufaddr; /* Buffer address */ +}; +#else +#define fec32_to_cpu be32_to_cpu +#define fec16_to_cpu be16_to_cpu +#define cpu_to_fec32 cpu_to_be32 +#define cpu_to_fec16 cpu_to_be16 +#define __fec32 __be32 +#define __fec16 __be16 + +struct bufdesc { + __fec16 cbd_sc; /* Control and status info */ + __fec16 cbd_datlen; /* Data length */ + __fec32 cbd_bufaddr; /* Buffer address */ +}; +#endif + +struct bufdesc_ex { + struct bufdesc desc; + __fec32 cbd_esc; + __fec32 cbd_prot; + __fec32 cbd_bdu; + __fec32 ts; + __fec16 res0[4]; +}; + +/* + * The following definitions courtesy of commproc.h, which where + * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). 
+ */ +#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */ +#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */ +#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */ +#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */ +#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */ +#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */ +#define BD_SC_P ((ushort)0x0100) /* xmt preamble */ +#define BD_SC_BR ((ushort)0x0020) /* Break received */ +#define BD_SC_FR ((ushort)0x0010) /* Framing error */ +#define BD_SC_PR ((ushort)0x0008) /* Parity error */ +#define BD_SC_OV ((ushort)0x0002) /* Overrun */ +#define BD_SC_CD ((ushort)0x0001) /* ?? */ + +/* Buffer descriptor control/status used by Ethernet receive. + */ +#define BD_ENET_RX_EMPTY ((ushort)0x8000) +#define BD_ENET_RX_WRAP ((ushort)0x2000) +#define BD_ENET_RX_INTR ((ushort)0x1000) +#define BD_ENET_RX_LAST ((ushort)0x0800) +#define BD_ENET_RX_FIRST ((ushort)0x0400) +#define BD_ENET_RX_MISS ((ushort)0x0100) +#define BD_ENET_RX_LG ((ushort)0x0020) +#define BD_ENET_RX_NO ((ushort)0x0010) +#define BD_ENET_RX_SH ((ushort)0x0008) +#define BD_ENET_RX_CR ((ushort)0x0004) +#define BD_ENET_RX_OV ((ushort)0x0002) +#define BD_ENET_RX_CL ((ushort)0x0001) +#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ + +/* Enhanced buffer descriptor control/status used by Ethernet receive */ +#define BD_ENET_RX_VLAN 0x00000004 + +/* Buffer descriptor control/status used by Ethernet transmit. + */ +#define BD_ENET_TX_READY ((ushort)0x8000) +#define BD_ENET_TX_PAD ((ushort)0x4000) +#define BD_ENET_TX_WRAP ((ushort)0x2000) +#define BD_ENET_TX_INTR ((ushort)0x1000) +#define BD_ENET_TX_LAST ((ushort)0x0800) +#define BD_ENET_TX_TC ((ushort)0x0400) +#define BD_ENET_TX_DEF ((ushort)0x0200) +#define BD_ENET_TX_HB ((ushort)0x0100) +#define BD_ENET_TX_LC ((ushort)0x0080) +#define BD_ENET_TX_RL ((ushort)0x0040) +#define BD_ENET_TX_RCMASK ((ushort)0x003c) +#define BD_ENET_TX_UN ((ushort)0x0002) +#define BD_ENET_TX_CSL ((ushort)0x0001) +#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */ + +/* enhanced buffer descriptor control/status used by Ethernet transmit */ +#define BD_ENET_TX_INT 0x40000000 +#define BD_ENET_TX_TS 0x20000000 +#define BD_ENET_TX_PINS 0x10000000 +#define BD_ENET_TX_IINS 0x08000000 + + +/* This device has up to three irqs on some platforms */ +#define FEC_IRQ_NUM 3 + +/* Maximum number of queues supported + * ENET with AVB IP can support up to 3 independent tx queues and rx queues. + * User can point the queue number that is less than or equal to 3. + */ +#define FEC_ENET_MAX_TX_QS 3 +#define FEC_ENET_MAX_RX_QS 3 + +#define FEC_R_DES_START(X) (((X) == 1) ? FEC_R_DES_START_1 : \ + (((X) == 2) ? \ + FEC_R_DES_START_2 : FEC_R_DES_START_0)) +#define FEC_X_DES_START(X) (((X) == 1) ? FEC_X_DES_START_1 : \ + (((X) == 2) ? \ + FEC_X_DES_START_2 : FEC_X_DES_START_0)) +#define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \ + (((X) == 2) ? \ + FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0)) + +#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1) + +#define DMA_CLASS_EN (1 << 16) +#define FEC_RCMR(X) (((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1) +#define IDLE_SLOPE_MASK 0xffff +#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */ +#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */ +#define IDLE_SLOPE(X) (((X) == 1) ? 
\ + (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \ + (IDLE_SLOPE_2 & IDLE_SLOPE_MASK)) +#define RCMR_MATCHEN (0x1 << 16) +#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2)) +#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \ + RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3)) +#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \ + RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3)) +#define RCMR_CMP(X) (((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2) +#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20) + +/* The number of Tx and Rx buffers. These are allocated from the page + * pool. The code may assume these are power of two, so it it best + * to keep them that size. + * We don't need to allocate pages for the transmitter. We just use + * the skbuffer directly. + */ + +#define FEC_ENET_RX_PAGES 256 +#define FEC_ENET_RX_FRSIZE 2048 +#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) +#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) +#define FEC_ENET_TX_FRSIZE 2048 +#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) +#define TX_RING_SIZE 512 /* Must be power of two */ +#define TX_RING_MOD_MASK 511 /* for this to work */ + +#define BD_ENET_RX_INT 0x00800000 +#define BD_ENET_RX_PTP ((ushort)0x0400) +#define BD_ENET_RX_ICE 0x00000020 +#define BD_ENET_RX_PCR 0x00000010 +#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) +#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) + +/* Interrupt events/masks. */ +#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ +#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ +#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ +#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ +#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */ +#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */ +#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */ +#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ +#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */ +#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */ +#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */ +#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ +#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ +#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ +#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */ +#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) +#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) +#define FEC_ENET_TS_AVAIL ((uint)0x00010000) +#define FEC_ENET_TS_TIMER ((uint)0x00008000) + +#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF) +#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) + +/* ENET interrupt coalescing macro define */ +#define FEC_ITR_CLK_SEL (0x1 << 30) +#define FEC_ITR_EN (0x1 << 31) +#define FEC_ITR_ICFT(X) (((X) & 0xff) << 20) +#define FEC_ITR_ICTT(X) ((X) & 0xffff) +#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */ +#define FEC_ITR_ICTT_DEFAULT 10 /* Set 10 us timer threshold */ + +#define FEC_VLAN_TAG_LEN 0x04 +#define FEC_ETHTYPE_LEN 0x02 + +/* Controller is ENET-MAC */ +#define FEC_QUIRK_ENET_MAC (1 << 0) +/* Controller needs driver to swap frame */ +#define FEC_QUIRK_SWAP_FRAME (1 << 1) +/* Controller uses gasket */ +#define FEC_QUIRK_USE_GASKET (1 << 2) +/* Controller has GBIT support */ +#define FEC_QUIRK_HAS_GBIT (1 << 3) +/* Controller 
has extend desc buffer */ +#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) +/* Controller has hardware checksum support */ +#define FEC_QUIRK_HAS_CSUM (1 << 5) +/* Controller has hardware vlan support */ +#define FEC_QUIRK_HAS_VLAN (1 << 6) +/* ENET IP errata ERR006358 + * + * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously + * detected as not set during a prior frame transmission, then the + * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs + * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in + * frames not being transmitted until there is a 0-to-1 transition on + * ENET_TDAR[TDAR]. + */ +#define FEC_QUIRK_ERR006358 (1 << 7) +/* ENET IP hw AVB + * + * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support. + * - Two class indicators on receive with configurable priority + * - Two class indicators and line speed timer on transmit allowing + * implementation class credit based shapers externally + * - Additional DMA registers provisioned to allow managing up to 3 + * independent rings + */ +#define FEC_QUIRK_HAS_AVB (1 << 8) +/* There is a TDAR race condition for mutliQ when the software sets TDAR + * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles). + * This will cause the udma_tx and udma_tx_arbiter state machines to hang. + * The issue exist at i.MX6SX enet IP. + */ +#define FEC_QUIRK_ERR007885 (1 << 9) +/* ENET Block Guide/ Chapter for the iMX6SX (PELE) address one issue: + * After set ENET_ATCR[Capture], there need some time cycles before the counter + * value is capture in the register clock domain. + * The wait-time-cycles is at least 6 clock cycles of the slower clock between + * the register clock and the 1588 clock. The 1588 ts_clk is fixed to 25Mhz, + * register clock is 66Mhz, so the wait-time-cycles must be greater than 240ns + * (40ns * 6). + */ +#define FEC_QUIRK_BUG_CAPTURE (1 << 10) +/* Controller has only one MDIO bus */ +#define FEC_QUIRK_SINGLE_MDIO (1 << 11) +/* Controller supports RACC register */ +#define FEC_QUIRK_HAS_RACC (1 << 12) +/* Controller supports interrupt coalesc */ +#define FEC_QUIRK_HAS_COALESCE (1 << 13) +/* Interrupt doesn't wake CPU from deep idle */ +#define FEC_QUIRK_ERR006687 (1 << 14) +/* The MIB counters should be cleared and enabled during + * initialisation. + */ +#define FEC_QUIRK_MIB_CLEAR (1 << 15) +/* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers, + * those FIFO receive registers are resolved in other platforms. + */ +#define FEC_QUIRK_HAS_FRREG (1 << 16) + +/* Some FEC hardware blocks need the MMFR cleared at setup time to avoid + * the generation of an MII event. This must be avoided in the older + * FEC blocks where it will stop MII events being generated. + */ +#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17) +/* i.MX8QM ENET IP version add new feture to generate delayed TXC/RXC + * as an alternative option to make sure it works well with various PHYs. + * For the implementation of delayed clock, ENET takes synchronized 250MHz + * clocks to generate 2ns delay. 
+ */ +#define FEC_QUIRK_DELAYED_CLKS_SUPPORT (1 << 18) + +struct bufdesc_prop { + int qid; + /* Address of Rx and Tx buffers */ + struct bufdesc *base; + struct bufdesc *last; + struct bufdesc *cur; + void __iomem *reg_desc_active; + dma_addr_t dma; + unsigned short ring_size; + unsigned char dsize; + unsigned char dsize_log2; +}; + +struct fec_enet_priv_tx_q { + struct bufdesc_prop bd; + unsigned char *tx_bounce[TX_RING_SIZE]; + union { /* CAUTION: must be same cell count. */ + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + struct rtskb *tx_rtbuff[TX_RING_SIZE]; + }; + + unsigned short tx_stop_threshold; + unsigned short tx_wake_threshold; + + struct bufdesc *dirty_tx; + char *tso_hdrs; + dma_addr_t tso_hdrs_dma; +}; + +struct fec_enet_priv_rx_q { + struct bufdesc_prop bd; + union { /* CAUTION: must be same cell count. */ + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + struct rtskb *rx_rtbuff[RX_RING_SIZE]; + }; +}; + +struct fec_stop_mode_gpr { + struct regmap *gpr; + u8 reg; + u8 bit; +}; + +/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and + * tx_bd_base always point to the base of the buffer descriptors. The + * cur_rx and cur_tx point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct fec_enet_private { + /* Hardware registers of the FEC device */ + void __iomem *hwp; + + struct net_device *netdev; + + struct fec_rt_data { + rtdm_irq_t irq_handle[3]; + rtdm_lock_t lock; + rtdm_nrtsig_t mdio_sig; + struct rtnet_device dev; + } rtnet; + + struct clk *clk_ipg; + struct clk *clk_ahb; + struct clk *clk_ref; + struct clk *clk_enet_out; + struct clk *clk_ptp; + + bool ptp_clk_on; + struct mutex ptp_clk_mutex; + unsigned int num_tx_queues; + unsigned int num_rx_queues; + + /* The saved address of a sent-in-place packet/buffer, for skfree(). 
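+	 */
+
+	/*
+	 * [Editor's note -- worked example of the ring bookkeeping
+	 * described above] With cur_tx chasing dirty_tx around a ring of
+	 * ring_size descriptors, the number of free TX slots is
+	 *
+	 *	free = dirty_tx - cur_tx - 1	(mod ring_size)
+	 *
+	 * which is what fec_enet_get_free_txdesc_num() in fec_main.c
+	 * computes from the byte distance between the two pointers,
+	 * shifted down by bufdesc_prop::dsize_log2.
+	 */
+
+	/*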
*/ + struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS]; + struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS]; + + unsigned int total_tx_ring_size; + unsigned int total_rx_ring_size; + + struct platform_device *pdev; + + int dev_id; + + /* Phylib and MDIO interface */ + struct mii_bus *mii_bus; + uint phy_speed; + phy_interface_t phy_interface; + struct device_node *phy_node; + int link; + int full_duplex; + int speed; + struct completion mdio_done; + int irq[FEC_IRQ_NUM]; + int irqnr; + bool bufdesc_ex; + int pause_flag; + int wol_flag; + u32 quirks; + + int csum_flags; + + struct work_struct tx_timeout_work; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + unsigned long last_overflow_check; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + int rx_hwtstamp_filter; + u32 base_incval; + u32 cycle_speed; + int hwts_rx_en; + int hwts_tx_en; + struct delayed_work time_keep; + struct regulator *reg_phy; + struct fec_stop_mode_gpr stop_gpr; + + unsigned int tx_align; + unsigned int rx_align; + + /* hw interrupt coalesce */ + unsigned int rx_pkts_itr; + unsigned int rx_time_itr; + unsigned int tx_pkts_itr; + unsigned int tx_time_itr; + unsigned int itr_clk_rate; + + u32 rx_copybreak; + + /* ptp clock period in ns*/ + unsigned int ptp_inc; + + /* pps */ + int pps_channel; + unsigned int reload_period; + int pps_enable; + unsigned int next_counter; + + u64 ethtool_stats[]; +}; + +void fec_ptp_init(struct platform_device *pdev, int irq_idx); +void fec_ptp_stop(struct platform_device *pdev); +void fec_ptp_start_cyclecounter(struct net_device *ndev); +void fec_ptp_disable_hwts(struct net_device *ndev); +int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); +int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); + +/****************************************************************************/ +#endif /* FEC_H */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c new file mode 100644 index 0000000..99c5ec6 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c @@ -0,0 +1,3708 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. + * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) + * + * Right now, I am very wasteful with the buffers. I allocate memory + * pages and then divide them into 2K frame buffers. This way I know I + * have buffers large enough to hold one frame within one buffer descriptor. + * Once I get this working, I will use 64 or 128 byte CPM buffers, which + * will be much more memory efficient and will easily handle lots of + * small packets. + * + * Much better multiple PHY support by Magnus Damm. + * Copyright (c) 2000 Ericsson Radio Systems AB. + * + * Support for FEC controller of ColdFire processors. + * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) + * + * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) + * Copyright (c) 2004-2006 Macq Electronique SA. + * + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/pm_runtime.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <net/ip.h> +#include <net/tso.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/icmp.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/crc32.h> +#include <linux/platform_device.h> +#include <linux/mdio.h> +#include <linux/phy.h> +#include <linux/fec.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/regulator/consumer.h> +#include <linux/if_vlan.h> +#include <linux/pinctrl/consumer.h> +#include <linux/prefetch.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/iopoll.h> +#include <soc/imx/cpuidle.h> +#include <asm/cacheflush.h> + +#include "fec.h" + +static void set_multicast_list(struct net_device *ndev); +static void fec_enet_itr_coal_init(struct net_device *ndev); + +#define DRIVER_NAME "rt_fec" + +static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2}; + +/* Pause frame feild and FIFO threshold */ +#define FEC_ENET_FCE (1 << 5) +#define FEC_ENET_RSEM_V 0x84 +#define FEC_ENET_RSFL_V 16 +#define FEC_ENET_RAEM_V 0x8 +#define FEC_ENET_RAFL_V 0x8 +#define FEC_ENET_OPD_V 0xFFF0 +#define FEC_MDIO_PM_TIMEOUT 100 /* ms */ + +struct fec_devinfo { + u32 quirks; +}; + +static const struct fec_devinfo fec_imx25_info = { + .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | + FEC_QUIRK_HAS_FRREG, +}; + +static const struct fec_devinfo fec_imx27_info = { + .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG, +}; + +static const struct fec_devinfo fec_imx28_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | + FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII, +}; + +static const struct fec_devinfo fec_imx6q_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII, +}; + +static const struct fec_devinfo fec_mvf600_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, +}; + +static const struct fec_devinfo fec_imx6x_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII, +}; + +static const struct fec_devinfo fec_imx6ul_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | + FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | + FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII, +}; + +static const struct fec_devinfo fec_imx8mq_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | 
FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII, +}; + +static const struct fec_devinfo fec_imx8qm_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_DELAYED_CLKS_SUPPORT, +}; + +static struct platform_device_id fec_devtype[] = { + { + /* keep it for coldfire */ + .name = DRIVER_NAME, + .driver_data = 0, + }, { + .name = "imx25-fec", + .driver_data = (kernel_ulong_t)&fec_imx25_info, + }, { + .name = "imx27-fec", + .driver_data = (kernel_ulong_t)&fec_imx27_info, + }, { + .name = "imx28-fec", + .driver_data = (kernel_ulong_t)&fec_imx28_info, + }, { + .name = "imx6q-fec", + .driver_data = (kernel_ulong_t)&fec_imx6q_info, + }, { + .name = "mvf600-fec", + .driver_data = (kernel_ulong_t)&fec_mvf600_info, + }, { + .name = "imx6sx-fec", + .driver_data = (kernel_ulong_t)&fec_imx6x_info, + }, { + .name = "imx6ul-fec", + .driver_data = (kernel_ulong_t)&fec_imx6ul_info, + }, { + .name = "imx8mq-fec", + .driver_data = (kernel_ulong_t)&fec_imx8mq_info, + }, { + .name = "imx8qm-fec", + .driver_data = (kernel_ulong_t)&fec_imx8qm_info, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, fec_devtype); + +enum imx_fec_type { + IMX25_FEC = 1, /* runs on i.mx25/50/53 */ + IMX27_FEC, /* runs on i.mx27/35/51 */ + IMX28_FEC, + IMX6Q_FEC, + MVF600_FEC, + IMX6SX_FEC, + IMX6UL_FEC, + IMX8MQ_FEC, + IMX8QM_FEC, +}; + +static const struct of_device_id fec_dt_ids[] = { + { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, + { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, + { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, + { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, + { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, + { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], }, + { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, fec_dt_ids); + +static unsigned char macaddr[ETH_ALEN]; +module_param_array(macaddr, byte, NULL, 0); +MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); + +#if defined(CONFIG_M5272) +/* + * Some hardware gets it MAC address out of local flash memory. + * if this is non-zero then assume it is the address to get MAC from. + */ +#if defined(CONFIG_NETtel) +#define FEC_FLASHMAC 0xf0006006 +#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) +#define FEC_FLASHMAC 0xf0006000 +#elif defined(CONFIG_CANCam) +#define FEC_FLASHMAC 0xf0020000 +#elif defined (CONFIG_M5272C3) +#define FEC_FLASHMAC (0xffe04000 + 4) +#elif defined(CONFIG_MOD5272) +#define FEC_FLASHMAC 0xffc0406b +#else +#define FEC_FLASHMAC 0 +#endif +#endif /* CONFIG_M5272 */ + +/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. + * + * 2048 byte skbufs are allocated. However, alignment requirements + * varies between FEC variants. Worst case is 64, so round down by 64. 
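+ */
+
+/*
+ * [Editor's note -- the arithmetic spelled out] 2048 - 64 = 1984, and
+ * 1984 is already a multiple of 64 (31 * 64), so the round_down()
+ * below leaves it unchanged: PKT_MAXBUF_SIZE evaluates to 1984 bytes
+ * for every FEC variant.
+ */
+
+/*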
+ */ +#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) +#define PKT_MINBUF_SIZE 64 + +/* FEC receive acceleration */ +#define FEC_RACC_IPDIS (1 << 1) +#define FEC_RACC_PRODIS (1 << 2) +#define FEC_RACC_SHIFT16 BIT(7) +#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) + +/* MIB Control Register */ +#define FEC_MIB_CTRLSTAT_DISABLE BIT(31) + +/* + * The 5270/5271/5280/5282/532x RX control register also contains maximum frame + * size bits. Other FEC hardware does not, so we need to take that into + * account when setting it. + */ +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ + defined(CONFIG_ARM64) +#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) +#else +#define OPT_FRAME_SIZE 0 +#endif + +/* FEC MII MMFR bits definition */ +#define FEC_MMFR_ST (1 << 30) +#define FEC_MMFR_ST_C45 (0) +#define FEC_MMFR_OP_READ (2 << 28) +#define FEC_MMFR_OP_READ_C45 (3 << 28) +#define FEC_MMFR_OP_WRITE (1 << 28) +#define FEC_MMFR_OP_ADDR_WRITE (0) +#define FEC_MMFR_PA(v) ((v & 0x1f) << 23) +#define FEC_MMFR_RA(v) ((v & 0x1f) << 18) +#define FEC_MMFR_TA (2 << 16) +#define FEC_MMFR_DATA(v) (v & 0xffff) +/* FEC ECR bits definition */ +#define FEC_ECR_MAGICEN (1 << 2) +#define FEC_ECR_SLEEP (1 << 3) + +#define FEC_MII_TIMEOUT 30000 /* us */ + +/* Transmitter timeout */ +#define TX_TIMEOUT (2 * HZ) + +#define FEC_PAUSE_FLAG_AUTONEG 0x1 +#define FEC_PAUSE_FLAG_ENABLE 0x2 +#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) +#define FEC_WOL_FLAG_ENABLE (0x1 << 1) +#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) + +#define COPYBREAK_DEFAULT 256 + +/* Max number of allowed TCP segments for software TSO */ +#define FEC_MAX_TSO_SEGS 100 +#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +#define IS_TSO_HEADER(txq, addr) \ + ((addr >= txq->tso_hdrs_dma) && \ + (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) + +static int mii_cnt; + +static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, + struct bufdesc_prop *bd) +{ + return (bdp >= bd->last) ? bd->base + : (struct bufdesc *)(((void *)bdp) + bd->dsize); +} + +static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, + struct bufdesc_prop *bd) +{ + return (bdp <= bd->base) ? bd->last + : (struct bufdesc *)(((void *)bdp) - bd->dsize); +} + +static int fec_enet_get_bd_index(struct bufdesc *bdp, + struct bufdesc_prop *bd) +{ + return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; +} + +static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) +{ + int entries; + + entries = (((const char *)txq->dirty_tx - + (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; + + return entries >= 0 ? entries : entries + txq->bd.ring_size; +} + +static void swap_buffer(void *bufaddr, int len) +{ + int i; + unsigned int *buf = bufaddr; + + for (i = 0; i < len; i += 4, buf++) + swab32s(buf); +} + +static void fec_dump(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + int index = 0; + + netdev_info(ndev, "TX ring dump\n"); + pr_info("Nr SC addr len SKB\n"); + + txq = fep->tx_queue[0]; + bdp = txq->bd.base; + + do { + pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", + index, + bdp == txq->bd.cur ? 'S' : ' ', + bdp == txq->dirty_tx ? 
'H' : ' ', + fec16_to_cpu(bdp->cbd_sc), + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + txq->tx_skbuff[index]); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + index++; + } while (bdp != txq->bd.base); +} + +static inline bool is_ipv4_pkt(struct sk_buff *skb) +{ + return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; +} + +static int fec_rt_txq_submit_skb(struct fec_enet_priv_tx_q *txq, + struct rtskb *skb, struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + struct bufdesc *bdp, *last_bdp; + void *bufaddr; + dma_addr_t addr; + unsigned short status; + unsigned short buflen; + unsigned int estatus = 0; + unsigned int index; + int entries_free; + rtdm_lockctx_t c; + + entries_free = fec_enet_get_free_txdesc_num(txq); + if (entries_free < MAX_SKB_FRAGS + 1) { + rtdm_printk_ratelimited("%s: NOT enough BD for SG!\n", + dev_name(&fep->pdev->dev)); + return NETDEV_TX_BUSY; + } + + rtdm_lock_get_irqsave(&frt->lock, c); + + if (skb->xmit_stamp) + *skb->xmit_stamp = + cpu_to_be64(rtdm_clock_read_monotonic() + + *skb->xmit_stamp); + + /* Fill in a Tx ring entry */ + bdp = txq->bd.cur; + last_bdp = bdp; + status = fec16_to_cpu(bdp->cbd_sc); + status &= ~BD_ENET_TX_STATS; + + /* Set buffer length and buffer pointer */ + bufaddr = skb->data; + buflen = rtskb_headlen(skb); + + index = fec_enet_get_bd_index(bdp, &txq->bd); + if (((unsigned long) bufaddr) & fep->tx_align || + fep->quirks & FEC_QUIRK_SWAP_FRAME) { + memcpy(txq->tx_bounce[index], skb->data, buflen); + bufaddr = txq->tx_bounce[index]; + + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + swap_buffer(bufaddr, buflen); + } + + addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, addr)) { + rtdm_lock_put_irqrestore(&frt->lock, c); + dev_kfree_rtskb(skb); + rtdm_printk_ratelimited("%s: Tx DMA memory map failed\n", + dev_name(&fep->pdev->dev)); + return NETDEV_TX_BUSY; + } + status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); + + bdp->cbd_bufaddr = cpu_to_fec32(addr); + bdp->cbd_datlen = cpu_to_fec16(buflen); + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + estatus = BD_ENET_TX_INT; + if (fep->quirks & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = cpu_to_fec32(estatus); + } + + index = fec_enet_get_bd_index(last_bdp, &txq->bd); + txq->tx_rtbuff[index] = skb; + + /* Make sure the updates to rest of the descriptor are performed before + * transferring ownership. + */ + wmb(); + + /* Send it on its way. Tell FEC it's ready, interrupt when done, + * it's the last BD of the frame, and to put the CRC on the end. + */ + status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); + bdp->cbd_sc = cpu_to_fec16(status); + + /* If this was the last BD in the ring, start at the beginning again. */ + bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); + + /* Make sure the update to bdp and tx_rtbuff are performed + * before txq->bd.cur. 
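+	 */
+
+	/*
+	 * [Editor's note] This wmb() pairs with the rmb() in
+	 * fec_rt_tx_queue(): the completion side may only observe the
+	 * advanced bd.cur after the descriptor and the tx_rtbuff[] slot
+	 * are globally visible, otherwise it could reclaim a
+	 * half-published buffer. In barrier-pairing shorthand:
+	 *
+	 *	producer (here)			consumer (fec_rt_tx_queue)
+	 *	fill bdp, tx_rtbuff[index]	read txq->bd.cur
+	 *	wmb()				rmb()
+	 *	txq->bd.cur = bdp		read cbd_sc, tx_rtbuff[]
+	 */
+
+	/*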
+ */ + wmb(); + txq->bd.cur = bdp; + + /* Trigger transmission start */ + writel(0, txq->bd.reg_desc_active); + + rtdm_lock_put_irqrestore(&frt->lock, c); + + return 0; +} + +static netdev_tx_t +fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + return -EBUSY; +} + +static netdev_tx_t +fec_rt_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev) +{ + struct fec_enet_priv_tx_q *txq; + struct fec_enet_private *fep; + + fep = container_of(rtdev, struct fec_enet_private, rtnet.dev); + txq = fep->tx_queue[0]; + + return fec_rt_txq_submit_skb(txq, skb, fep->netdev); +} + +static struct net_device_stats *fec_rt_stats(struct rtnet_device *rtdev) +{ + struct fec_enet_private *fep; + + fep = container_of(rtdev, struct fec_enet_private, rtnet.dev); + + return &fep->netdev->stats; +} + +/* Init RX & TX buffer descriptors + */ +static void fec_enet_bd_init(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + struct bufdesc *bdp; + unsigned int i; + unsigned int q; + + for (q = 0; q < fep->num_rx_queues; q++) { + /* Initialize the receive buffer descriptors. */ + rxq = fep->rx_queue[q]; + bdp = rxq->bd.base; + + for (i = 0; i < rxq->bd.ring_size; i++) { + + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); + else + bdp->cbd_sc = cpu_to_fec16(0); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + + rxq->bd.cur = rxq->bd.base; + } + + for (q = 0; q < fep->num_tx_queues; q++) { + /* ...and the same for transmit */ + txq = fep->tx_queue[q]; + bdp = txq->bd.base; + txq->bd.cur = bdp; + + for (i = 0; i < txq->bd.ring_size; i++) { + /* Initialize the BD for every fragment in the page. 
*/ + bdp->cbd_sc = cpu_to_fec16(0); + if (bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + if (txq->tx_skbuff[i]) { + dev_kfree_rtskb(txq->tx_rtbuff[i]); + txq->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = cpu_to_fec32(0); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, &txq->bd); + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + txq->dirty_tx = bdp; + } +} + +static void fec_enet_active_rxring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + + for (i = 0; i < fep->num_rx_queues; i++) + writel(0, fep->rx_queue[i]->bd.reg_desc_active); +} + +static void fec_enet_enable_ring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + int i; + + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); + writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); + + /* enable DMA1/2 */ + if (i) + writel(RCMR_MATCHEN | RCMR_CMP(i), + fep->hwp + FEC_RCMR(i)); + } + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(DMA_CLASS_EN | IDLE_SLOPE(i), + fep->hwp + FEC_DMA_CFG(i)); + } +} + +static void fec_enet_reset_skb(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + int i, j; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + + for (j = 0; j < txq->bd.ring_size; j++) { + if (txq->tx_skbuff[j]) { + dev_kfree_rtskb(txq->tx_rtbuff[j]); + txq->tx_skbuff[j] = NULL; + } + } + } +} + +/* + * This function is called to start or restart the FEC during a link + * change, transmit timeout, or to reconfigure the FEC. The network + * packet processing for this device must be stopped before this call. + */ +static void +fec_restart(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u32 val; + u32 temp_mac[2]; + u32 rcntl = OPT_FRAME_SIZE | 0x04; + u32 ecntl = 0x2; /* ETHEREN */ + + /* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. + */ + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } + + /* + * enet-mac reset will reset mac address registers too, + * so need to reconfigure it. + */ + memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); + writel((__force u32)cpu_to_be32(temp_mac[0]), + fep->hwp + FEC_ADDR_LOW); + writel((__force u32)cpu_to_be32(temp_mac[1]), + fep->hwp + FEC_ADDR_HIGH); + + /* Clear any outstanding interrupt, except MDIO. */ + writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT); + + fec_enet_bd_init(ndev); + + fec_enet_enable_ring(ndev); + + /* Reset tx SKB buffers. 
*/ + fec_enet_reset_skb(ndev); + + /* Enable MII mode */ + if (fep->full_duplex == DUPLEX_FULL) { + /* FD enable */ + writel(0x04, fep->hwp + FEC_X_CNTRL); + } else { + /* No Rcv on Xmit */ + rcntl |= 0x02; + writel(0x0, fep->hwp + FEC_X_CNTRL); + } + + /* Set MII speed */ + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + +#if !defined(CONFIG_M5272) + if (fep->quirks & FEC_QUIRK_HAS_RACC) { + val = readl(fep->hwp + FEC_RACC); + /* align IP header */ + val |= FEC_RACC_SHIFT16; + if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) + /* set RX checksum */ + val |= FEC_RACC_OPTIONS; + else + val &= ~FEC_RACC_OPTIONS; + writel(val, fep->hwp + FEC_RACC); + writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); + } +#endif + + /* + * The phy interface and speed need to get configured + * differently on enet-mac. + */ + if (fep->quirks & FEC_QUIRK_ENET_MAC) { + /* Enable flow control and length check */ + rcntl |= 0x40000000 | 0x00000020; + + /* RGMII, RMII or MII */ + if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) + rcntl |= (1 << 6); + else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) + rcntl |= (1 << 8); + else + rcntl &= ~(1 << 8); + + /* 1G, 100M or 10M */ + if (ndev->phydev) { + if (ndev->phydev->speed == SPEED_1000) + ecntl |= (1 << 5); + else if (ndev->phydev->speed == SPEED_100) + rcntl &= ~(1 << 9); + else + rcntl |= (1 << 9); + } + } else { +#ifdef FEC_MIIGSK_ENR + if (fep->quirks & FEC_QUIRK_USE_GASKET) { + u32 cfgr; + /* disable the gasket and wait */ + writel(0, fep->hwp + FEC_MIIGSK_ENR); + while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) + udelay(1); + + /* + * configure the gasket: + * RMII, 50 MHz, no loopback, no echo + * MII, 25 MHz, no loopback, no echo + */ + cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) + ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; + if (ndev->phydev && ndev->phydev->speed == SPEED_10) + cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; + writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); + + /* re-enable the gasket */ + writel(2, fep->hwp + FEC_MIIGSK_ENR); + } +#endif + } + +#if !defined(CONFIG_M5272) + /* enable pause frame*/ + if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || + ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && + ndev->phydev && ndev->phydev->pause)) { + rcntl |= FEC_ENET_FCE; + + /* set FIFO threshold parameter to reduce overrun */ + writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); + writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); + writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); + writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); + + /* OPD */ + writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); + } else { + rcntl &= ~FEC_ENET_FCE; + } +#endif /* !defined(CONFIG_M5272) */ + + writel(rcntl, fep->hwp + FEC_R_CNTRL); + + /* Setup multicast filter. 
*/ + set_multicast_list(ndev); +#ifndef CONFIG_M5272 + writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_HASH_TABLE_LOW); +#endif + + if (fep->quirks & FEC_QUIRK_ENET_MAC) { + /* enable ENET endian swap */ + ecntl |= (1 << 8); + /* enable ENET store and forward mode */ + writel(1 << 8, fep->hwp + FEC_X_WMRK); + } + + if (fep->bufdesc_ex) + ecntl |= (1 << 4); + +#ifndef CONFIG_M5272 + /* Enable the MIB statistic event counters */ + writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); +#endif + + /* And last, enable the transmit and receive processing */ + writel(ecntl, fep->hwp + FEC_ECNTRL); + fec_enet_active_rxring(ndev); + + if (fep->bufdesc_ex) + fec_ptp_start_cyclecounter(ndev); + + /* Enable interrupts we wish to service */ + if (fep->link) + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + else + writel(0, fep->hwp + FEC_IMASK); + + /* Init the interrupt coalescing */ + fec_enet_itr_coal_init(ndev); + +} + +static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) +{ + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; + struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr; + + if (stop_gpr->gpr) { + if (enabled) + regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, + BIT(stop_gpr->bit), + BIT(stop_gpr->bit)); + else + regmap_update_bits(stop_gpr->gpr, stop_gpr->reg, + BIT(stop_gpr->bit), 0); + } else if (pdata && pdata->sleep_mode_enable) { + pdata->sleep_mode_enable(enabled); + } +} + +static void +fec_stop(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); + u32 val; + + /* We cannot expect a graceful transmit stop without link !!! */ + if (fep->link) { + writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ + udelay(10); + if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) + netdev_err(ndev, "Graceful transmit stop did not complete!\n"); + } + + /* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. 
+ */ + if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + } else { + writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); + val = readl(fep->hwp + FEC_ECNTRL); + val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); + fec_enet_stop_mode(fep, true); + } + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + + /* We have to keep ENET enabled to have MII interrupt stay working */ + if (fep->quirks & FEC_QUIRK_ENET_MAC && + !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { + writel(2, fep->hwp + FEC_ECNTRL); + writel(rmii_mode, fep->hwp + FEC_R_CNTRL); + } +} + +static void +#if LINUX_VERSION_CODE > KERNEL_VERSION(5,6,0) +fec_timeout(struct net_device *ndev, unsigned int txqueue) +#else +fec_timeout(struct net_device *ndev) +#endif +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + fec_dump(ndev); + + ndev->stats.tx_errors++; + + schedule_work(&fep->tx_timeout_work); +} + +static void fec_enet_timeout_work(struct work_struct *work) +{ + struct fec_enet_private *fep = + container_of(work, struct fec_enet_private, tx_timeout_work); + struct net_device *ndev = fep->netdev; + struct fec_rt_data *frt = &fep->rtnet; + + rtnl_lock(); + if (netif_device_present(ndev) || rtnetif_running(&frt->dev)) { + rtnetif_stop_queue(&frt->dev); + fec_restart(ndev); + rtnetif_wake_queue(&frt->dev); + } + rtnl_unlock(); +} + +static void +fec_rt_tx_queue(struct net_device *ndev, u16 queue_id) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + struct bufdesc *bdp; + unsigned short status; + struct rtskb *skb; + struct fec_enet_priv_tx_q *txq; + int index; + + txq = fep->tx_queue[queue_id]; + + rtdm_lock_get(&frt->lock); + + /* get next bdp of dirty_tx */ + bdp = txq->dirty_tx; + + /* get next bdp of dirty_tx */ + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + + while (bdp != READ_ONCE(txq->bd.cur)) { + /* Order the load of bd.cur and cbd_sc */ + rmb(); + status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); + if (status & BD_ENET_TX_READY) + break; + + index = fec_enet_get_bd_index(bdp, &txq->bd); + + skb = txq->tx_rtbuff[index]; + txq->tx_rtbuff[index] = NULL; + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + bdp->cbd_bufaddr = cpu_to_fec32(0); + if (!skb) + goto skb_done; + + /* Check for errors. */ + if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | + BD_ENET_TX_RL | BD_ENET_TX_UN | + BD_ENET_TX_CSL)) { + ndev->stats.tx_errors++; + if (status & BD_ENET_TX_HB) /* No heartbeat */ + ndev->stats.tx_heartbeat_errors++; + if (status & BD_ENET_TX_LC) /* Late collision */ + ndev->stats.tx_window_errors++; + if (status & BD_ENET_TX_RL) /* Retrans limit */ + ndev->stats.tx_aborted_errors++; + if (status & BD_ENET_TX_UN) /* Underrun */ + ndev->stats.tx_fifo_errors++; + if (status & BD_ENET_TX_CSL) /* Carrier lost */ + ndev->stats.tx_carrier_errors++; + } else { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + } + + /* Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. 
+ */ + if (status & BD_ENET_TX_DEF) + ndev->stats.collisions++; + + dev_kfree_rtskb(skb); +skb_done: + /* Make sure the update to bdp and tx_rtbuff are performed + * before dirty_tx + */ + wmb(); + txq->dirty_tx = bdp; + + /* Update pointer to next buffer descriptor to be transmitted */ + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + + /* Since we have freed up a buffer, the ring is no longer full + */ + if (rtnetif_queue_stopped(&frt->dev)) + rtnetif_wake_queue(&frt->dev); + } + + /* ERR006358: Keep the transmitter going */ + if (bdp != txq->bd.cur && + readl(txq->bd.reg_desc_active) == 0) + writel(0, txq->bd.reg_desc_active); + + rtdm_lock_put(&frt->lock); +} + +static void fec_enet_tx(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + + /* Make sure that AVB queues are processed first. */ + for (i = fep->num_tx_queues - 1; i >= 0; i--) + fec_rt_tx_queue(ndev, i); +} + +static int +fec_rt_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct rtskb *skb) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int off; + + off = ((unsigned long)skb->data) & fep->rx_align; + if (off) + rtskb_reserve(skb, fep->rx_align + 1 - off); + + bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, RTSKB_SIZE - fep->rx_align, DMA_FROM_DEVICE)); + if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { + rtdm_printk_ratelimited("%s: Rx DMA memory map failed\n", + dev_name(&fep->pdev->dev)); + return -ENOMEM; + } + + return 0; +} + +static int +fec_rt_rx_queue(struct net_device *ndev, int budget, u16 queue_id) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + struct fec_enet_priv_rx_q *rxq; + struct bufdesc *bdp; + unsigned short status; + struct rtskb *skb_new, *skb; + ushort pkt_len; + __u8 *data; + int pkt_received = 0; + struct bufdesc_ex *ebdp = NULL; + int index; + bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; + +#ifdef CONFIG_M532x + flush_cache_all(); +#endif + rxq = fep->rx_queue[queue_id]; + + rtdm_lock_get(&frt->lock); + + /* First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = rxq->bd.cur; + + while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { + + if (pkt_received >= budget) + break; + pkt_received++; + + writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); + + /* Check for errors. */ + status ^= BD_ENET_RX_LAST; + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | + BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | + BD_ENET_RX_CL)) { + ndev->stats.rx_errors++; + if (status & BD_ENET_RX_OV) { + /* FIFO overrun */ + ndev->stats.rx_fifo_errors++; + goto rx_processing_done; + } + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH + | BD_ENET_RX_LAST)) { + /* Frame too long or too short. */ + ndev->stats.rx_length_errors++; + if (status & BD_ENET_RX_LAST) + netdev_err(ndev, "rcv is not +last\n"); + } + if (status & BD_ENET_RX_CR) /* CRC Error */ + ndev->stats.rx_crc_errors++; + /* Report late collisions as a frame error. */ + if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) + ndev->stats.rx_frame_errors++; + goto rx_processing_done; + } + + /* Process the incoming frame. 
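+		 */
+
+		/*
+		 * [Editor's note] cbd_datlen includes the 4-byte FCS,
+		 * which is why the code below trims the rtskb with
+		 * rtskb_put(skb, pkt_len - 4). On RACC-capable parts the
+		 * SHIFT16 accelerator (enabled from fec_restart()) also
+		 * prepends two pad bytes so that the IP header lands
+		 * 4-byte aligned; the rtskb_pull(skb, 2) further down
+		 * discards them again.
+		 */
+
+		/*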
*/ + ndev->stats.rx_packets++; + pkt_len = fec16_to_cpu(bdp->cbd_datlen); + ndev->stats.rx_bytes += pkt_len; + + index = fec_enet_get_bd_index(bdp, &rxq->bd); + skb = rxq->rx_rtbuff[index]; + if (skb == NULL) + goto rx_processing_done; + + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + RTSKB_SIZE - fep->rx_align, + DMA_FROM_DEVICE); + + prefetch(skb->data - NET_IP_ALIGN); + rtskb_put(skb, pkt_len - 4); + data = skb->data; + + if (need_swap) + swap_buffer(data, pkt_len); + +#if !defined(CONFIG_M5272) + if (fep->quirks & FEC_QUIRK_HAS_RACC) + data = rtskb_pull(skb, 2); +#endif + + skb->protocol = rt_eth_type_trans(skb, &frt->dev); + + /* Extract the enhanced buffer descriptor */ + if (fep->bufdesc_ex) { + ebdp = (struct bufdesc_ex *)bdp; + if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) { + if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE); + } + } + + skb_new = rtnetdev_alloc_rtskb(&frt->dev, RTSKB_SIZE); + if (unlikely(skb_new == NULL)) + ndev->stats.rx_dropped++; + else { + rtnetif_rx(skb); + rxq->rx_rtbuff[index] = skb_new; + fec_rt_new_rxbdp(ndev, bdp, skb_new); + } + +rx_processing_done: + /* Clear the status flags for this buffer */ + status &= ~BD_ENET_RX_STATS; + + /* Mark the buffer empty */ + status |= BD_ENET_RX_EMPTY; + + if (fep->bufdesc_ex) { + ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); + ebdp->cbd_prot = 0; + ebdp->cbd_bdu = 0; + } + /* Make sure the updates to rest of the descriptor are + * performed before transferring ownership. + */ + wmb(); + bdp->cbd_sc = cpu_to_fec16(status); + + /* Update BD pointer to next entry */ + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); + + /* Doing this here will keep the FEC running while we process + * incoming frames. On a heavily loaded network, we should be + * able to keep up at the expense of system resources. + */ + writel(0, rxq->bd.reg_desc_active); + } + rxq->bd.cur = bdp; + + rtdm_lock_put(&frt->lock); + + if (pkt_received) + rt_mark_stack_mgr(&frt->dev); + + return pkt_received; +} + +static int fec_enet_rx(struct net_device *ndev, int budget) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i, done = 0; + + /* Make sure that AVB queues are processed first. 
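+	 */
+
+	/*
+	 * [Editor's note] Ring 0 is the best-effort queue; on AVB-capable
+	 * silicon (FEC_QUIRK_HAS_AVB) rings 1 and 2 carry the
+	 * credit-based-shaper traffic classes, so walking from
+	 * num_rx_queues - 1 down to 0 drains the time-sensitive rings
+	 * ahead of bulk traffic.
+	 */
+
+	/*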
*/ + for (i = fep->num_rx_queues - 1; i >= 0; i--) + done += fec_rt_rx_queue(ndev, budget - done, i); + + return done; +} + +static bool fec_enet_collect_events(struct fec_enet_private *fep) +{ + uint int_events; + + int_events = readl(fep->hwp + FEC_IEVENT); + + /* Don't clear MDIO events, we poll for those */ + int_events &= ~FEC_ENET_MII; + + writel(int_events, fep->hwp + FEC_IEVENT); + + return int_events != 0; +} + +static int +fec_rt_interrupt(rtdm_irq_t *irqh) +{ + struct net_device *ndev = rtdm_irq_get_arg(irqh, struct net_device); + struct fec_enet_private *fep = netdev_priv(ndev); + irqreturn_t ret = RTDM_IRQ_NONE; + uint int_events = fec_enet_collect_events(fep); + + if (int_events && fep->link) { + /* Interrupts stay unmasked here; RX/TX events are consumed inline. */ + if (int_events & FEC_ENET_RXF) + fec_enet_rx(ndev, RX_RING_SIZE); + if (int_events & FEC_ENET_TXF) + fec_enet_tx(ndev); + ret = RTDM_IRQ_HANDLED; + } + + if (int_events & FEC_ENET_MII) { + rtdm_nrtsig_pend(&fep->rtnet.mdio_sig); + ret = RTDM_IRQ_HANDLED; + } + + return ret; +} + +/* ------------------------------------------------------------------------- */ +static void fec_get_mac(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); + unsigned char *iap, tmpaddr[ETH_ALEN]; + + /* + * try to get mac address in the following order: + * + * 1) module parameter via kernel command line in form + * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 + */ + iap = macaddr; + + /* + * 2) from device tree data + */ + if (!is_valid_ether_addr(iap)) { + struct device_node *np = fep->pdev->dev.of_node; + if (np) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0) + int err = of_get_mac_address(np, tmpaddr); + if (!err) + iap = tmpaddr; +#else + const char *mac = of_get_mac_address(np); + if (!IS_ERR(mac)) + iap = (unsigned char *) mac; +#endif + } + } + + /* + * 3) from flash or fuse (via platform data) + */ + if (!is_valid_ether_addr(iap)) { +#ifdef CONFIG_M5272 + if (FEC_FLASHMAC) + iap = (unsigned char *)FEC_FLASHMAC; +#else + if (pdata) + iap = (unsigned char *)&pdata->mac; +#endif + } + + /* + * 4) FEC mac registers set by bootloader + */ + if (!is_valid_ether_addr(iap)) { + *((__be32 *) &tmpaddr[0]) = + cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); + *((__be16 *) &tmpaddr[4]) = + cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); + iap = &tmpaddr[0]; + } + + /* + * 5) random mac address + */ + if (!is_valid_ether_addr(iap)) { + /* Report it and use a random ethernet address instead */ + dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); + eth_hw_addr_random(ndev); + dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", + ndev->dev_addr); + return; + } + + /* Adjust MAC if using macaddr */ + if (iap == macaddr) { + memcpy(tmpaddr, macaddr, ETH_ALEN); + tmpaddr[ETH_ALEN-1] += fep->dev_id; + eth_hw_addr_set(ndev, tmpaddr); + } else { + eth_hw_addr_set(ndev, iap); + } +} + +/* ------------------------------------------------------------------------- */ + +/* + * Phy section + */ +static void do_adjust_link(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + struct phy_device *phy_dev = ndev->phydev; + int status_change = 0; + + /* + * If the netdev is down, or is going down, we're not interested + * in link state events, so just mark our idea of the link as down + * and ignore the event.
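+ * Link, speed or duplex changes restart the FEC with the RTnet + * queue stopped, so no rtskb is queued against a half-reset + * controller.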
+ */ + if (!rtnetif_running(&frt->dev) || !netif_device_present(ndev)) { + fep->link = 0; + } else if (phy_dev->link) { + if (!fep->link) { + fep->link = phy_dev->link; + status_change = 1; + } + + if (fep->full_duplex != phy_dev->duplex) { + fep->full_duplex = phy_dev->duplex; + status_change = 1; + } + + if (phy_dev->speed != fep->speed) { + fep->speed = phy_dev->speed; + status_change = 1; + } + + /* if any of the above changed restart the FEC */ + if (status_change) { + rtnetif_stop_queue(&frt->dev); + fec_restart(ndev); + rtnetif_wake_queue(&frt->dev); + } + } else { + if (fep->link) { + rtnetif_stop_queue(&frt->dev); + fec_stop(ndev); + rtnetif_wake_queue(&frt->dev); + fep->link = phy_dev->link; + status_change = 1; + } + } + + if (status_change) + phy_print_status(phy_dev); +} + +static void fec_enet_adjust_link(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + do_adjust_link(ndev); + + /* + * PHYLIB sets netif_carrier_on() when the link is up, + * propagate state change to RTnet. + */ + if (netif_carrier_ok(ndev)) { + netdev_info(ndev, "carrier detected\n"); + rtnetif_carrier_on(&fep->rtnet.dev); + } else { + netdev_info(ndev, "carrier lost\n"); + rtnetif_carrier_off(&fep->rtnet.dev); + } +} + +static int fec_enet_mdio_wait(struct fec_enet_private *fep) +{ + uint ievent; + int ret; + + ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, + ievent & FEC_ENET_MII, 2, 30000); + + if (!ret) + writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); + + return ret; +} + +static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; + int ret = 0, frame_start, frame_addr, frame_op; + bool is_c45 = !!(regnum & MII_ADDR_C45); + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + if (is_c45) { + frame_start = FEC_MMFR_ST_C45; + + /* write address */ + frame_addr = (regnum >> 16); + writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + FEC_MMFR_TA | (regnum & 0xFFFF), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) { + netdev_err(fep->netdev, "MDIO address write timeout\n"); + goto out; + } + + frame_op = FEC_MMFR_OP_READ_C45; + + } else { + /* C22 read */ + frame_op = FEC_MMFR_OP_READ; + frame_start = FEC_MMFR_ST; + frame_addr = regnum; + } + + /* start a read op */ + writel(frame_start | frame_op | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) { + netdev_err(fep->netdev, "MDIO read timeout\n"); + goto out; + } + + ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + +out: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; + int ret, frame_start, frame_addr; + bool is_c45 = !!(regnum & MII_ADDR_C45); + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + if (is_c45) { + frame_start = FEC_MMFR_ST_C45; + + /* write address */ + frame_addr = (regnum >> 16); + writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + FEC_MMFR_TA | (regnum & 0xFFFF), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if 
(ret) { + netdev_err(fep->netdev, "MDIO address write timeout\n"); + goto out; + } + } else { + /* C22 write */ + frame_start = FEC_MMFR_ST; + frame_addr = regnum; + } + + /* start a write op */ + writel(frame_start | FEC_MMFR_OP_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | + FEC_MMFR_TA | FEC_MMFR_DATA(value), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + ret = fec_enet_mdio_wait(fep); + if (ret) + netdev_err(fep->netdev, "MDIO write timeout\n"); + +out: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = ndev->phydev; + + if (phy_dev) { + phy_reset_after_clk_enable(phy_dev); + } else if (fep->phy_node) { + /* + * If the PHY still is not bound to the MAC, but there is an + * OF PHY node and a matching PHY device instance already, + * use the OF PHY node to obtain the PHY device instance, + * and then use that PHY device instance when triggering + * the PHY reset. + */ + phy_dev = of_phy_find_device(fep->phy_node); + if (!phy_dev) + return; + phy_reset_after_clk_enable(phy_dev); + put_device(&phy_dev->mdio.dev); + } +} + +static int fec_enet_clk_enable(struct net_device *ndev, bool enable) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + + if (enable) { + ret = clk_prepare_enable(fep->clk_enet_out); + if (ret) + return ret; + + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); + ret = clk_prepare_enable(fep->clk_ptp); + if (ret) { + mutex_unlock(&fep->ptp_clk_mutex); + goto failed_clk_ptp; + } else { + fep->ptp_clk_on = true; + } + mutex_unlock(&fep->ptp_clk_mutex); + } + + ret = clk_prepare_enable(fep->clk_ref); + if (ret) + goto failed_clk_ref; + + fec_enet_phy_reset_after_clk_enable(ndev); + } else { + clk_disable_unprepare(fep->clk_enet_out); + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); + clk_disable_unprepare(fep->clk_ptp); + fep->ptp_clk_on = false; + mutex_unlock(&fep->ptp_clk_mutex); + } + clk_disable_unprepare(fep->clk_ref); + } + + return 0; + +failed_clk_ref: + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); + clk_disable_unprepare(fep->clk_ptp); + fep->ptp_clk_on = false; + mutex_unlock(&fep->ptp_clk_mutex); + } +failed_clk_ptp: + clk_disable_unprepare(fep->clk_enet_out); + + return ret; +} + +static int fec_enet_mii_probe(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = NULL; + char mdio_bus_id[MII_BUS_ID_SIZE]; + char phy_name[MII_BUS_ID_SIZE + 3]; + int phy_id; + int dev_id = fep->dev_id; + + if (fep->phy_node) { + phy_dev = of_phy_connect(ndev, fep->phy_node, + &fec_enet_adjust_link, 0, + fep->phy_interface); + if (!phy_dev) { + netdev_err(ndev, "Unable to connect to phy\n"); + return -ENODEV; + } + } else { + /* check for attached phy */ + for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { + if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) + continue; + if (dev_id--) + continue; + strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + break; + } + + if (phy_id >= PHY_MAX_ADDR) { + netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); + strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + phy_id = 0; + } + + snprintf(phy_name, sizeof(phy_name), + PHY_ID_FMT, mdio_bus_id, phy_id); + phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, + fep->phy_interface); + } + + if (IS_ERR(phy_dev)) { + netdev_err(ndev, "could not attach to PHY\n"); +
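+ /* Only phy_connect() can return ERR_PTR() here; of_phy_connect() + * failures were already caught above as NULL. */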
return PTR_ERR(phy_dev); + } + + /* mask with MAC supported features */ + if (fep->quirks & FEC_QUIRK_HAS_GBIT) { + phy_set_max_speed(phy_dev, 1000); + phy_remove_link_mode(phy_dev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); +#if !defined(CONFIG_M5272) + phy_support_sym_pause(phy_dev); +#endif + } else { + phy_set_max_speed(phy_dev, 100); + } + + fep->link = 0; + fep->full_duplex = 0; + + phy_attached_info(phy_dev); + + return 0; +} + +static int fec_enet_mii_init(struct platform_device *pdev) +{ + static struct mii_bus *fec0_mii_bus; + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + bool suppress_preamble = false; + struct device_node *node; + int err = -ENXIO; + u32 mii_speed, holdtime; + u32 bus_freq; + + /* + * The i.MX28 dual fec interfaces are not equal. + * Here are the differences: + * + * - fec0 supports MII & RMII modes while fec1 only supports RMII + * - fec0 acts as the 1588 time master while fec1 is slave + * - external phys can only be configured by fec0 + * + * That is to say, fec1 cannot work independently. It only works + * when fec0 is working. The reason behind this design is that the + * second interface is added primarily for Switch mode. + * + * Because of the last point above, both phys are attached on fec0 + * mdio interface in board design, and need to be configured by + * fec0 mii_bus. + */ + if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { + /* fec1 uses fec0 mii_bus */ + if (mii_cnt && fec0_mii_bus) { + fep->mii_bus = fec0_mii_bus; + mii_cnt++; + return 0; + } + return -ENOENT; + } + + bus_freq = 2500000; /* 2.5MHz by default */ + node = of_get_child_by_name(pdev->dev.of_node, "mdio"); + if (node) { + of_property_read_u32(node, "clock-frequency", &bus_freq); + suppress_preamble = of_property_read_bool(node, + "suppress-preamble"); + } + + /* + * Set MII speed (= clk_get_rate() / 2 * phy_speed) + * + * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while + * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 + * reference manual has an error on this point, which is corrected + * in the i.MX6Q documentation. + */ + mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); + if (fep->quirks & FEC_QUIRK_ENET_MAC) + mii_speed--; + if (mii_speed > 63) { + dev_err(&pdev->dev, + "fec clock (%lu) too fast to get right mii speed\n", + clk_get_rate(fep->clk_ipg)); + err = -EINVAL; + goto err_out; + } + + /* + * The i.MX28 and i.MX6 types have another field in the MSCR (aka + * MII_SPEED) register that defines the MDIO output hold time. Earlier + * versions are RAZ there, so just ignore the difference and write the + * register always. + * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns. + * HOLDTIME + 1 is the number of clk cycles the fec is holding the + * output. + * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). + * Given that ceil(clkrate / 5000000) <= 64, the calculation for + * holdtime cannot result in a value greater than 3. + */ + holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; + + fep->phy_speed = mii_speed << 1 | holdtime << 8; + + if (suppress_preamble) + fep->phy_speed |= BIT(7); + + if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { + /* Clear MMFR to avoid generating an MII event by writing MSCR.
+ * MII event generation condition: + * - writing MSCR: + * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & + * mscr_reg_data_in[7:0] != 0 + * - writing MMFR: + * - mscr[7:0]_not_zero + */ + writel(0, fep->hwp + FEC_MII_DATA); + } + + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + + /* Clear any pending transaction complete indication */ + writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); + + fep->mii_bus = mdiobus_alloc(); + if (fep->mii_bus == NULL) { + err = -ENOMEM; + goto err_out; + } + + fep->mii_bus->name = "fec_enet_mii_bus"; + fep->mii_bus->read = fec_enet_mdio_read; + fep->mii_bus->write = fec_enet_mdio_write; + snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, fep->dev_id + 1); + fep->mii_bus->priv = fep; + fep->mii_bus->parent = &pdev->dev; + + err = of_mdiobus_register(fep->mii_bus, node); + if (err) + goto err_out_free_mdiobus; + of_node_put(node); + + mii_cnt++; + + /* save fec0 mii_bus */ + if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) + fec0_mii_bus = fep->mii_bus; + + return 0; + +err_out_free_mdiobus: + mdiobus_free(fep->mii_bus); +err_out: + of_node_put(node); + return err; +} + +static void fec_enet_mii_remove(struct fec_enet_private *fep) +{ + if (--mii_cnt == 0) { + mdiobus_unregister(fep->mii_bus); + mdiobus_free(fep->mii_bus); + } +} + +static void fec_enet_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + strlcpy(info->driver, fep->pdev->dev.driver->name, + sizeof(info->driver)); + strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); +} + +static int fec_enet_get_regs_len(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct resource *r; + int s = 0; + + r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); + if (r) + s = resource_size(r); + + return s; +} + +/* List of registers that can safely be read to dump them with ethtool */ +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ + defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) +static __u32 fec_enet_register_version = 2; +static u32 fec_enet_register_offset[] = { + FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, + FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, + FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, + FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, + FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, + FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, + FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, + FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, + FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, + FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, + FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, + FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, + RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, + RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, + RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, + RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, + RMON_T_P_GTE2048, RMON_T_OCTETS, + IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, + IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, + IEEE_T_FDXFC, IEEE_T_OCTETS_OK, + RMON_R_PACKETS, RMON_R_BC_PKT,
RMON_R_MC_PKT, RMON_R_CRC_ALIGN, + RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, + RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, + RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, + RMON_R_P_GTE2048, RMON_R_OCTETS, + IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, + IEEE_R_FDXFC, IEEE_R_OCTETS_OK +}; +#else +static __u32 fec_enet_register_version = 1; +static u32 fec_enet_register_offset[] = { + FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, + FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, + FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, + FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, + FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, + FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, + FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, + FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, + FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 +}; +#endif + +static void fec_enet_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *regbuf) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u32 __iomem *theregs = (u32 __iomem *)fep->hwp; + struct device *dev = &fep->pdev->dev; + u32 *buf = (u32 *)regbuf; + u32 i, off; + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return; + + regs->version = fec_enet_register_version; + + memset(buf, 0, regs->len); + + for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { + off = fec_enet_register_offset[i]; + + if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && + !(fep->quirks & FEC_QUIRK_HAS_FRREG)) + continue; + + off >>= 2; + buf[off] = readl(&theregs[off]); + } + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); +} + +static int fec_enet_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->bufdesc_ex) { + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + if (fep->ptp_clock) + info->phc_index = ptp_clock_index(fep->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; + } else { + return ethtool_op_get_ts_info(ndev, info); + } +} + +#if !defined(CONFIG_M5272) + +static void fec_enet_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; + pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; + pause->rx_pause = pause->tx_pause; +} + +static int fec_enet_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + + if (!ndev->phydev) + return -ENODEV; + + if (pause->tx_pause != pause->rx_pause) { + netdev_info(ndev, + "hardware only supports enabling/disabling both tx and rx\n"); + return -EINVAL; + } + + fep->pause_flag = 0; + + /* tx pause must be same as rx pause */ + fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; + fep->pause_flag |= pause->autoneg ?
FEC_PAUSE_FLAG_AUTONEG : 0; + + phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, + pause->autoneg); + + if (pause->autoneg) { + if (rtnetif_running(&frt->dev)) + fec_stop(ndev); + phy_start_aneg(ndev->phydev); + } + if (rtnetif_running(&frt->dev)) { + rtnetif_stop_queue(&frt->dev); + fec_restart(ndev); + rtnetif_wake_queue(&frt->dev); + } + + return 0; +} + +static const struct fec_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} fec_stats[] = { + /* RMON TX */ + { "tx_dropped", RMON_T_DROP }, + { "tx_packets", RMON_T_PACKETS }, + { "tx_broadcast", RMON_T_BC_PKT }, + { "tx_multicast", RMON_T_MC_PKT }, + { "tx_crc_errors", RMON_T_CRC_ALIGN }, + { "tx_undersize", RMON_T_UNDERSIZE }, + { "tx_oversize", RMON_T_OVERSIZE }, + { "tx_fragment", RMON_T_FRAG }, + { "tx_jabber", RMON_T_JAB }, + { "tx_collision", RMON_T_COL }, + { "tx_64byte", RMON_T_P64 }, + { "tx_65to127byte", RMON_T_P65TO127 }, + { "tx_128to255byte", RMON_T_P128TO255 }, + { "tx_256to511byte", RMON_T_P256TO511 }, + { "tx_512to1023byte", RMON_T_P512TO1023 }, + { "tx_1024to2047byte", RMON_T_P1024TO2047 }, + { "tx_GTE2048byte", RMON_T_P_GTE2048 }, + { "tx_octets", RMON_T_OCTETS }, + + /* IEEE TX */ + { "IEEE_tx_drop", IEEE_T_DROP }, + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, + { "IEEE_tx_1col", IEEE_T_1COL }, + { "IEEE_tx_mcol", IEEE_T_MCOL }, + { "IEEE_tx_def", IEEE_T_DEF }, + { "IEEE_tx_lcol", IEEE_T_LCOL }, + { "IEEE_tx_excol", IEEE_T_EXCOL }, + { "IEEE_tx_macerr", IEEE_T_MACERR }, + { "IEEE_tx_cserr", IEEE_T_CSERR }, + { "IEEE_tx_sqe", IEEE_T_SQE }, + { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, + + /* RMON RX */ + { "rx_packets", RMON_R_PACKETS }, + { "rx_broadcast", RMON_R_BC_PKT }, + { "rx_multicast", RMON_R_MC_PKT }, + { "rx_crc_errors", RMON_R_CRC_ALIGN }, + { "rx_undersize", RMON_R_UNDERSIZE }, + { "rx_oversize", RMON_R_OVERSIZE }, + { "rx_fragment", RMON_R_FRAG }, + { "rx_jabber", RMON_R_JAB }, + { "rx_64byte", RMON_R_P64 }, + { "rx_65to127byte", RMON_R_P65TO127 }, + { "rx_128to255byte", RMON_R_P128TO255 }, + { "rx_256to511byte", RMON_R_P256TO511 }, + { "rx_512to1023byte", RMON_R_P512TO1023 }, + { "rx_1024to2047byte", RMON_R_P1024TO2047 }, + { "rx_GTE2048byte", RMON_R_P_GTE2048 }, + { "rx_octets", RMON_R_OCTETS }, + + /* IEEE RX */ + { "IEEE_rx_drop", IEEE_R_DROP }, + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, + { "IEEE_rx_crc", IEEE_R_CRC }, + { "IEEE_rx_align", IEEE_R_ALIGN }, + { "IEEE_rx_macerr", IEEE_R_MACERR }, + { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, +}; + +#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) + +static void fec_enet_update_ethtool_stats(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); +} + +static void fec_enet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct fec_rt_data *frt = &fep->rtnet; + + if (rtnetif_running(&frt->dev)) + fec_enet_update_ethtool_stats(dev); + + memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); +} + +static void fec_enet_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + fec_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static int fec_enet_get_sset_count(struct net_device 
*dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(fec_stats); + default: + return -EOPNOTSUPP; + } +} + +static void fec_enet_clear_ethtool_stats(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + int i; + + /* Disable MIB statistics counters */ + writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); + + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + writel(0, fep->hwp + fec_stats[i].offset); + + /* Re-enable the MIB statistics counters (clear the disable bit) */ + writel(0, fep->hwp + FEC_MIB_CTRLSTAT); +} + +#else /* !defined(CONFIG_M5272) */ +#define FEC_STATS_SIZE 0 +static inline void fec_enet_update_ethtool_stats(struct net_device *dev) +{ +} + +static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) +{ +} +#endif /* !defined(CONFIG_M5272) */ + +/* The ITR clock source is the enet system clock (clk_ahb). + * The TCTT unit is cycle_ns * 64 cycles, + * so the ICTT value = X us / (cycle_ns * 64). + */ +static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + return us * (fep->itr_clk_rate / 64000) / 1000; +} + +/* Set threshold for interrupt coalescing */ +static void fec_enet_itr_coal_set(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int rx_itr, tx_itr; + + /* Must be greater than zero to avoid unpredictable behavior */ + if (!fep->rx_time_itr || !fep->rx_pkts_itr || + !fep->tx_time_itr || !fep->tx_pkts_itr) + return; + + /* Select enet system clock as Interrupt Coalescing + * timer Clock Source + */ + rx_itr = FEC_ITR_CLK_SEL; + tx_itr = FEC_ITR_CLK_SEL; + + /* set ICFT and ICTT */ + rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); + rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); + tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); + tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); + + rx_itr |= FEC_ITR_EN; + tx_itr |= FEC_ITR_EN; + + writel(tx_itr, fep->hwp + FEC_TXIC0); + writel(rx_itr, fep->hwp + FEC_RXIC0); + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); + } +} + +static int +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) +fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else +fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +#endif +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) + return -EOPNOTSUPP; + + ec->rx_coalesce_usecs = fep->rx_time_itr; + ec->rx_max_coalesced_frames = fep->rx_pkts_itr; + + ec->tx_coalesce_usecs = fep->tx_time_itr; + ec->tx_max_coalesced_frames = fep->tx_pkts_itr; + + return 0; +} + +static int +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) +fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else +fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +#endif +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct device *dev = &fep->pdev->dev; + unsigned int cycle; + + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) + return -EOPNOTSUPP; + + if (ec->rx_max_coalesced_frames > 255) { + dev_err(dev, "Rx coalesced frames exceed hardware limitation\n"); + return -EINVAL; + } + + if
(ec->tx_max_coalesced_frames > 255) { + dev_err(dev, "Tx coalesced frames exceed hardware limitation\n"); + return -EINVAL; + } + + cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); + if (cycle > 0xFFFF) { + dev_err(dev, "Rx coalesced usecs exceed hardware limitation\n"); + return -EINVAL; + } + + cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); + if (cycle > 0xFFFF) { + dev_err(dev, "Tx coalesced usecs exceed hardware limitation\n"); + return -EINVAL; + } + + fep->rx_time_itr = ec->rx_coalesce_usecs; + fep->rx_pkts_itr = ec->rx_max_coalesced_frames; + + fep->tx_time_itr = ec->tx_coalesce_usecs; + fep->tx_pkts_itr = ec->tx_max_coalesced_frames; + + fec_enet_itr_coal_set(ndev); + + return 0; +} + +static void fec_enet_itr_coal_init(struct net_device *ndev) +{ + struct ethtool_coalesce ec; + + ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; + ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + + ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; + ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) + fec_enet_set_coalesce(ndev, &ec, NULL, NULL); +#else + fec_enet_set_coalesce(ndev, &ec); +#endif +} + +static int fec_enet_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + void *data) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = fep->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int fec_enet_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + fep->rx_copybreak = *(u32 *)data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void +fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { + wol->supported = WAKE_MAGIC; + wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ?
WAKE_MAGIC : 0; + } else { + wol->supported = wol->wolopts = 0; + } +} + +static int +fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) + return -EINVAL; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); + if (device_may_wakeup(&ndev->dev)) { + fep->wol_flag |= FEC_WOL_FLAG_ENABLE; + if (fep->irq[0] > 0) + enable_irq_wake(fep->irq[0]); + } else { + fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); + if (fep->irq[0] > 0) + disable_irq_wake(fep->irq[0]); + } + + return 0; +} + +static const struct ethtool_ops fec_enet_ethtool_ops = { +#if LINUX_VERSION_CODE > KERNEL_VERSION(5,7,0) + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, +#endif + .get_drvinfo = fec_enet_get_drvinfo, + .get_regs_len = fec_enet_get_regs_len, + .get_regs = fec_enet_get_regs, + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_coalesce = fec_enet_get_coalesce, + .set_coalesce = fec_enet_set_coalesce, +#ifndef CONFIG_M5272 + .get_pauseparam = fec_enet_get_pauseparam, + .set_pauseparam = fec_enet_set_pauseparam, + .get_strings = fec_enet_get_strings, + .get_ethtool_stats = fec_enet_get_ethtool_stats, + .get_sset_count = fec_enet_get_sset_count, +#endif + .get_ts_info = fec_enet_get_ts_info, + .get_tunable = fec_enet_get_tunable, + .set_tunable = fec_enet_set_tunable, + .get_wol = fec_enet_get_wol, + .set_wol = fec_enet_set_wol, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + struct phy_device *phydev = ndev->phydev; + + if (!rtnetif_running(&frt->dev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + if (fep->bufdesc_ex) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(5,9,0) + bool use_fec_hwts = !phy_has_hwtstamp(phydev); +#else + bool use_fec_hwts = true; +#endif + if (cmd == SIOCSHWTSTAMP) { + if (use_fec_hwts) + return fec_ptp_set(ndev, rq); + fec_ptp_disable_hwts(ndev); + } else if (cmd == SIOCGHWTSTAMP) { + if (use_fec_hwts) + return fec_ptp_get(ndev, rq); + } + } + + return phy_mii_ioctl(phydev, rq, cmd); +} + +static int fec_rt_ioctl(struct rtnet_device *rtdev, struct ifreq *rq, int cmd) +{ + struct fec_enet_private *fep; + + fep = container_of(rtdev, struct fec_enet_private, rtnet.dev); + + return fec_enet_ioctl(fep->netdev, rq, cmd); +} + +static void fec_enet_free_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + void *skb; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + unsigned int q, size; + + for (q = 0; q < fep->num_rx_queues; q++) { + rxq = fep->rx_queue[q]; + bdp = rxq->bd.base; + for (i = 0; i < rxq->bd.ring_size; i++) { + skb = rxq->rx_rtbuff[i]; + if (!skb) + goto skip; + rxq->rx_rtbuff[i] = NULL; + dev_kfree_rtskb(skb); + size = RTSKB_SIZE; + + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + size - fep->rx_align, + DMA_FROM_DEVICE); + skip: + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); + } + } + + for (q = 0; q < fep->num_tx_queues; q++) { + txq = fep->tx_queue[q]; + for (i = 0; i < txq->bd.ring_size; i++) { + kfree(txq->tx_bounce[i]); + txq->tx_bounce[i] = NULL; + skb =
txq->tx_rtbuff[i]; + if (!skb) + continue; + txq->tx_rtbuff[i] = NULL; + dev_kfree_rtskb(skb); + } + } +} + +static void fec_enet_free_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { + txq = fep->tx_queue[i]; + dma_free_coherent(&fep->pdev->dev, + txq->bd.ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, + txq->tso_hdrs_dma); + } + + for (i = 0; i < fep->num_rx_queues; i++) + kfree(fep->rx_queue[i]); + for (i = 0; i < fep->num_tx_queues; i++) + kfree(fep->tx_queue[i]); +} + +static int fec_enet_alloc_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + int ret = 0; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->tx_queue[i] = txq; + txq->bd.ring_size = TX_RING_SIZE; + fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; + + txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; + txq->tx_wake_threshold = + (txq->bd.ring_size - txq->tx_stop_threshold) / 2; + + txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, + txq->bd.ring_size * TSO_HEADER_SIZE, + &txq->tso_hdrs_dma, + GFP_KERNEL); + if (!txq->tso_hdrs) { + ret = -ENOMEM; + goto alloc_failed; + } + } + + for (i = 0; i < fep->num_rx_queues; i++) { + fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), + GFP_KERNEL); + if (!fep->rx_queue[i]) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; + fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; + } + return ret; + +alloc_failed: + fec_enet_free_queue(ndev); + return ret; +} + +static int +fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + unsigned int i; + struct rtskb *rtskb; + struct bufdesc *bdp; + struct fec_enet_priv_rx_q *rxq; + + rxq = fep->rx_queue[queue]; + bdp = rxq->bd.base; + for (i = 0; i < rxq->bd.ring_size; i++) { + rtskb = rtnetdev_alloc_rtskb(&frt->dev, RTSKB_SIZE); + if (!rtskb) + goto err_alloc; + + if (fec_rt_new_rxbdp(ndev, bdp, rtskb)) { + dev_kfree_rtskb(rtskb); + goto err_alloc; + } + rxq->rx_rtbuff[i] = rtskb; + bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); + } + + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); + } + + /* Set the last buffer to wrap. */ + bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + return 0; + + err_alloc: + fec_enet_free_buffers(ndev); + return -ENOMEM; +} + +static int +fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + + txq = fep->tx_queue[queue]; + bdp = txq->bd.base; + for (i = 0; i < txq->bd.ring_size; i++) { + txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); + if (!txq->tx_bounce[i]) + goto err_alloc; + + bdp->cbd_sc = cpu_to_fec16(0); + bdp->cbd_bufaddr = cpu_to_fec32(0); + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); + } + + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + } + + /* Set the last buffer to wrap.
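+ * BD_SC_WRAP makes the DMA engine jump back to the base descriptor + * after this one, closing the ring.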
*/ + bdp = fec_enet_get_prevdesc(bdp, &txq->bd); + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); + + return 0; + + err_alloc: + fec_enet_free_buffers(ndev); + return -ENOMEM; +} + +static int fec_enet_alloc_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < fep->num_rx_queues; i++) + if (fec_enet_alloc_rxq_buffers(ndev, i)) + return -ENOMEM; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fec_enet_alloc_txq_buffers(ndev, i)) + return -ENOMEM; + return 0; +} + +static int +__fec_enet_open(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + bool reset_again; + + ret = pm_runtime_resume_and_get(&fep->pdev->dev); + if (ret < 0) + return ret; + + pinctrl_pm_select_default_state(&fep->pdev->dev); + ret = fec_enet_clk_enable(ndev, true); + if (ret) + goto clk_enable; + + /* During the first fec_enet_open call the PHY isn't probed at this + * point. Therefore the phy_reset_after_clk_enable() call within + * fec_enet_clk_enable() fails. As we need this reset in order to be + * sure the PHY is working correctly, we check if we need to reset + * again later when the PHY is probed. + */ + if (ndev->phydev && ndev->phydev->drv) + reset_again = false; + else + reset_again = true; + + /* I should reset the ring buffers here, but I don't yet know + * a simple way to do that. + */ + + ret = fec_enet_alloc_buffers(ndev); + if (ret) + goto err_enet_alloc; + + /* Init MAC prior to mii bus probe */ + fec_restart(ndev); + + /* Call phy_reset_after_clk_enable() again if it failed during + * fec_enet_clk_enable() above because the PHY wasn't probed yet. + */ + if (reset_again) + fec_enet_phy_reset_after_clk_enable(ndev); + + /* Probe and connect to the PHY when opening the interface */ + ret = fec_enet_mii_probe(ndev); + if (ret) + goto err_enet_mii_probe; + + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_used(); + + phy_start(ndev->phydev); + netif_tx_start_all_queues(ndev); + + device_set_wakeup_enable(&ndev->dev, fep->wol_flag & + FEC_WOL_FLAG_ENABLE); + + return 0; + +err_enet_mii_probe: + fec_enet_free_buffers(ndev); +err_enet_alloc: + fec_enet_clk_enable(ndev, false); +clk_enable: + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + return ret; +} + +/* + * The Linux netdev cannot be opened directly; bring-up goes through + * the RTnet side via fec_rt_open() and __fec_enet_open(). + */ +static int +fec_enet_open(struct net_device *ndev) +{ + return -EBUSY; +} + +static int +fec_rt_open(struct rtnet_device *rtdev) +{ + struct fec_enet_private *fep; + int ret; + + fep = container_of(rtdev, struct fec_enet_private, rtnet.dev); + ret = __fec_enet_open(fep->netdev); + if (ret) + return ret; + + rt_stack_connect(rtdev, &STACK_manager); + rtnetif_start_queue(rtdev); + + return 0; +} + +static int +fec_enet_close(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + phy_stop(ndev->phydev); + + if (netif_device_present(ndev)) { + netif_tx_disable(ndev); + fec_stop(ndev); + } + + phy_disconnect(ndev->phydev); + + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_unused(); + + fec_enet_update_ethtool_stats(ndev); + + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); + + fec_enet_free_buffers(ndev); + + return 0; +} + +static int +fec_rt_close(struct rtnet_device *rtdev) +{ + struct fec_enet_private *fep; + + fep = container_of(rtdev, struct fec_enet_private, rtnet.dev); +
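+ /* Quiesce the RT side first: stop the queue, drop the carrier and + * detach from the stack manager before the Linux half shuts the + * controller down. */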
rtnetif_stop_queue(rtdev); + rtnetif_carrier_off(rtdev); + rt_stack_disconnect(rtdev); + + return fec_enet_close(fep->netdev); +} + +/* Set or clear the multicast filter for this adaptor. + * Skeleton taken from sunlance driver. + * The CPM Ethernet implementation allows Multicast as well as individual + * MAC address filtering. Some of the drivers check to make sure it is + * a group multicast address, and discard those that are not. I guess I + * will do the same for now, but just remove the test if you want + * individual filtering as well (do the upper net layers want or support + * this kind of feature?). + */ + +#define FEC_HASH_BITS 6 /* #bits in hash */ + +static void set_multicast_list(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct netdev_hw_addr *ha; + unsigned int crc, tmp; + unsigned char hash; + unsigned int hash_high = 0, hash_low = 0; + + if (ndev->flags & IFF_PROMISC) { + tmp = readl(fep->hwp + FEC_R_CNTRL); + tmp |= 0x8; + writel(tmp, fep->hwp + FEC_R_CNTRL); + return; + } + + tmp = readl(fep->hwp + FEC_R_CNTRL); + tmp &= ~0x8; + writel(tmp, fep->hwp + FEC_R_CNTRL); + + if (ndev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's + */ + writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); + + return; + } + + /* Add the addresses in hash register */ + netdev_for_each_mc_addr(ha, ndev) { + /* calculate crc32 value of mac address */ + crc = ether_crc_le(ndev->addr_len, ha->addr); + + /* only upper 6 bits (FEC_HASH_BITS) are used + * which point to specific bit in the hash registers + */ + hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; + + if (hash > 31) + hash_high |= 1 << (hash - 32); + else + hash_low |= 1 << hash; + } + + writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); +} + +/* Set a MAC change in hardware. */ +static int +fec_set_mac_address(struct net_device *ndev, void *p) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + struct sockaddr *addr = p; + + if (addr) { + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + eth_hw_addr_set(ndev, addr->sa_data); + } + + /* Add netif status check here to avoid system hang in below case: + * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; + * After ethx down, fec all clocks are gated off and then register + * access causes system hang. 
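+ * The new address is not lost, though: fec_restart() reprograms + * FEC_ADDR_LOW/HIGH from dev_addr when the interface comes back up.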
+ */ + if (!rtnetif_running(&frt->dev)) + return 0; + + writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | + (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), + fep->hwp + FEC_ADDR_LOW); + writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), + fep->hwp + FEC_ADDR_HIGH); + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * fec_poll_controller - FEC Poll controller function + * @dev: The FEC network adapter + * + * Polled functionality used by netconsole and others in non interrupt mode + * + */ +static void fec_poll_controller(struct net_device *dev) +{ + int i; + struct fec_enet_private *fep = netdev_priv(dev); + + for (i = 0; i < FEC_IRQ_NUM; i++) { + if (fep->irq[i] > 0) { + disable_irq(fep->irq[i]); + fec_enet_interrupt(fep->irq[i], dev); + enable_irq(fep->irq[i]); + } + } +} +#endif + +static inline void fec_enet_set_netdev_features(struct net_device *netdev, + netdev_features_t features) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + netdev->features = features; + + /* Receive checksum has been changed */ + if (changed & NETIF_F_RXCSUM) { + if (features & NETIF_F_RXCSUM) + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + else + fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; + } +} + +static int fec_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + struct fec_rt_data *frt = &fep->rtnet; + netdev_features_t changed = features ^ netdev->features; + + if (rtnetif_running(&frt->dev) && changed & NETIF_F_RXCSUM) { + rtnetif_stop_queue(&frt->dev); + fec_stop(netdev); + fec_enet_set_netdev_features(netdev, features); + fec_restart(netdev); + rtnetif_wake_queue(&frt->dev); + } else { + fec_enet_set_netdev_features(netdev, features); + } + + return 0; +} + +static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb) +{ + struct vlan_ethhdr *vhdr; + unsigned short vlan_TCI = 0; + + if (skb->protocol == htons(ETH_P_ALL)) { + vhdr = (struct vlan_ethhdr *)(skb->data); + vlan_TCI = ntohs(vhdr->h_vlan_TCI); + } + + return vlan_TCI; +} + +static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u16 vlan_tag; + + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + return netdev_pick_tx(ndev, skb, NULL); + + vlan_tag = fec_enet_get_raw_vlan_tci(skb); + if (!vlan_tag) + return vlan_tag; + + return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; +} + +static const struct net_device_ops fec_netdev_ops = { + .ndo_open = fec_enet_open, + .ndo_stop = fec_enet_close, + .ndo_start_xmit = fec_enet_start_xmit, + .ndo_select_queue = fec_enet_select_queue, + .ndo_set_rx_mode = set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = fec_timeout, + .ndo_set_mac_address = fec_set_mac_address, + .ndo_do_ioctl = fec_enet_ioctl, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fec_poll_controller, +#endif + .ndo_set_features = fec_set_features, +}; + +static const unsigned short offset_des_active_rxq[] = { + FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 +}; + +static const unsigned short offset_des_active_txq[] = { + FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 +}; + + /* + * XXX: We need to clean up on failure exits here. 
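+ * Note that the descriptor memory below comes from + * dmam_alloc_coherent() and is devres-managed; the queue + * allocations are released through the free_queue_mem path.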
+ * + */ +static int fec_enet_init(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + struct bufdesc *cbd_base; + dma_addr_t bd_dma; + int bd_size; + unsigned int i; + unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : + sizeof(struct bufdesc); + unsigned dsize_log2 = __fls(dsize); + int ret; + + WARN_ON(dsize != (1 << dsize_log2)); +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) + fep->rx_align = 0xf; + fep->tx_align = 0xf; +#else + fep->rx_align = 0x3; + fep->tx_align = 0x3; +#endif + + /* Check mask of the streaming and coherent API */ + ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); + if (ret < 0) { + dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); + return ret; + } + + ret = fec_enet_alloc_queue(ndev); + if (ret) + return ret; + + bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; + + /* Allocate memory for buffer descriptors. */ + cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, + GFP_KERNEL); + if (!cbd_base) { + ret = -ENOMEM; + goto free_queue_mem; + } + + /* Get the Ethernet address */ + fec_get_mac(ndev); + /* make sure MAC we just acquired is programmed into the hw */ + fec_set_mac_address(ndev, NULL); + + memcpy(&frt->dev.dev_addr, ndev->dev_addr, ETH_ALEN); + + /* Set receive and transmit descriptor base. */ + for (i = 0; i < fep->num_rx_queues; i++) { + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; + unsigned size = dsize * rxq->bd.ring_size; + + rxq->bd.qid = i; + rxq->bd.base = cbd_base; + rxq->bd.cur = cbd_base; + rxq->bd.dma = bd_dma; + rxq->bd.dsize = dsize; + rxq->bd.dsize_log2 = dsize_log2; + rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; + bd_dma += size; + cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); + rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); + } + + for (i = 0; i < fep->num_tx_queues; i++) { + struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; + unsigned size = dsize * txq->bd.ring_size; + + txq->bd.qid = i; + txq->bd.base = cbd_base; + txq->bd.cur = cbd_base; + txq->bd.dma = bd_dma; + txq->bd.dsize = dsize; + txq->bd.dsize_log2 = dsize_log2; + txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; + bd_dma += size; + cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); + txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); + } + + + /* The FEC Ethernet specific entries in the device structure */ + ndev->watchdog_timeo = TX_TIMEOUT; + ndev->netdev_ops = &fec_netdev_ops; + ndev->ethtool_ops = &fec_enet_ethtool_ops; + + writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); + + if (fep->quirks & FEC_QUIRK_HAS_VLAN) + /* enable hw VLAN support */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + + if (fep->quirks & FEC_QUIRK_HAS_CSUM) { + ndev->gso_max_segs = FEC_MAX_TSO_SEGS; + + /* enable hw accelerator */ + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + } + + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + fep->tx_align = 0; + fep->rx_align = 0x3f; + } + + ndev->hw_features = ndev->features; + + fec_restart(ndev); + + if (fep->quirks & FEC_QUIRK_MIB_CLEAR) + fec_enet_clear_ethtool_stats(ndev); + else + fec_enet_update_ethtool_stats(ndev); + + return 0; + +free_queue_mem: + fec_enet_free_queue(ndev); + return ret; +} + +#ifdef CONFIG_OF +static int fec_reset_phy(struct platform_device *pdev) +{ + int err, phy_reset; + bool active_high = false; + int 
msec = 1, phy_post_delay = 0; + struct device_node *np = pdev->dev.of_node; + + if (!np) + return 0; + + err = of_property_read_u32(np, "phy-reset-duration", &msec); + /* A sane reset duration should not be longer than 1s */ + if (!err && msec > 1000) + msec = 1; + + phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); + if (phy_reset == -EPROBE_DEFER) + return phy_reset; + else if (!gpio_is_valid(phy_reset)) + return 0; + + err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); + /* valid reset duration should be less than 1s */ + if (!err && phy_post_delay > 1000) + return -EINVAL; + + active_high = of_property_read_bool(np, "phy-reset-active-high"); + + err = devm_gpio_request_one(&pdev->dev, phy_reset, + active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, + "phy-reset"); + if (err) { + dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); + return err; + } + + if (msec > 20) + msleep(msec); + else + usleep_range(msec * 1000, msec * 1000 + 1000); + + gpio_set_value_cansleep(phy_reset, !active_high); + + if (!phy_post_delay) + return 0; + + if (phy_post_delay > 20) + msleep(phy_post_delay); + else + usleep_range(phy_post_delay * 1000, + phy_post_delay * 1000 + 1000); + + return 0; +} +#else /* CONFIG_OF */ +static int fec_reset_phy(struct platform_device *pdev) +{ + /* + * In case of platform probe, the reset has been done + * by machine code. + */ + return 0; +} +#endif /* CONFIG_OF */ + +static void +fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) +{ + struct device_node *np = pdev->dev.of_node; + + *num_tx = *num_rx = 1; + + if (!np || !of_device_is_available(np)) + return; + + /* parse the num of tx and rx queues */ + of_property_read_u32(np, "fsl,num-tx-queues", num_tx); + + of_property_read_u32(np, "fsl,num-rx-queues", num_rx); + + if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { + dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", + *num_tx); + *num_tx = 1; + return; + } + + if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { + dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", + *num_rx); + *num_rx = 1; + return; + } + +} + +static int fec_rt_init(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + struct rtnet_device *rtdev = &frt->dev; + int ret; + + rtdev->open = fec_rt_open; + rtdev->stop = fec_rt_close; + rtdev->do_ioctl = fec_rt_ioctl; + rtdev->hard_start_xmit = fec_rt_start_xmit; + rtdev->get_stats = fec_rt_stats; + rtdev->sysbind = &fep->pdev->dev; + + ret = rt_init_etherdev(rtdev, (RX_RING_SIZE + TX_RING_SIZE) * 2); + if (ret) + return ret; + + rt_rtdev_connect(rtdev, &RTDEV_manager); + rtdev->vers = RTDEV_VERS_2_0; + rtdm_lock_init(&frt->lock); + + ret = rt_register_rtnetdev(rtdev); + if (ret) { + rt_rtdev_disconnect(rtdev); + return ret; + } + + rtnetif_carrier_off(rtdev); + + return 0; +} + +static void fec_rt_destroy(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + struct rtnet_device *rtdev = &frt->dev; + int i; + + for (i = 0; i < fep->irqnr; i++) + rtdm_irq_free(&frt->irq_handle[i]); + + rtdm_nrtsig_destroy(&frt->mdio_sig); + rt_rtdev_disconnect(rtdev); + rt_unregister_rtnetdev(rtdev); + rtdev_destroy(rtdev); +} + +static int fec_enet_get_irq_cnt(struct platform_device *pdev) +{ + int irq_cnt = platform_irq_count(pdev); + + if (irq_cnt > FEC_IRQ_NUM) + irq_cnt = FEC_IRQ_NUM; /* last for pps */ + else if (irq_cnt == 2) + irq_cnt = 1; 
/* last for pps */ + else if (irq_cnt <= 0) + irq_cnt = 1; /* At least 1 irq is needed */ + return irq_cnt; +} + +static int fec_enet_init_stop_mode(struct fec_enet_private *fep, + struct device_node *np) +{ + struct device_node *gpr_np; + u32 out_val[3]; + int ret = 0; + + gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0); + if (!gpr_np) + return 0; + + ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, + ARRAY_SIZE(out_val)); + if (ret) { + dev_dbg(&fep->pdev->dev, "no stop mode property\n"); + return ret; + } + + fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); + if (IS_ERR(fep->stop_gpr.gpr)) { + dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); + ret = PTR_ERR(fep->stop_gpr.gpr); + fep->stop_gpr.gpr = NULL; + goto out; + } + + fep->stop_gpr.reg = out_val[1]; + fep->stop_gpr.bit = out_val[2]; + +out: + of_node_put(gpr_np); + + return ret; +} + +static int +fec_probe(struct platform_device *pdev) +{ + struct fec_enet_private *fep; + struct fec_platform_data *pdata; +#if LINUX_VERSION_CODE > KERNEL_VERSION(5,5,0) + phy_interface_t interface; +#endif + struct net_device *ndev; + int i, irq, ret = 0, eth_id; + const struct of_device_id *of_id; + static int dev_id; + struct device_node *np = pdev->dev.of_node, *phy_node; + int num_tx_qs; + int num_rx_qs; + char irq_name[8]; + int irq_cnt; + struct fec_devinfo *dev_info; + + fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); + + /* Init network device */ + ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + + FEC_STATS_SIZE, num_tx_qs, num_rx_qs); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* setup board info structure */ + fep = netdev_priv(ndev); + fep->pdev = pdev; /* must be set before calling fec_rt_init() */ + + ret = fec_rt_init(ndev); + if (ret) + goto failed_rt_init; + + of_id = of_match_device(fec_dt_ids, &pdev->dev); + if (of_id) + pdev->id_entry = of_id->data; + dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data; + if (dev_info) + fep->quirks = dev_info->quirks; + + fep->netdev = ndev; + fep->num_rx_queues = num_rx_qs; + fep->num_tx_queues = num_tx_qs; + +#if !defined(CONFIG_M5272) + /* default enable pause frame auto negotiation */ + if (fep->quirks & FEC_QUIRK_HAS_GBIT) + fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; +#endif + + /* Select default pin state */ + pinctrl_pm_select_default_state(&pdev->dev); + + fep->hwp = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(fep->hwp)) { + ret = PTR_ERR(fep->hwp); + goto failed_ioremap; + } + + fep->dev_id = dev_id++; + + platform_set_drvdata(pdev, ndev); + + if ((of_machine_is_compatible("fsl,imx6q") || + of_machine_is_compatible("fsl,imx6dl")) && + !of_property_read_bool(np, "fsl,err006687-workaround-present")) + fep->quirks |= FEC_QUIRK_ERR006687; + + if (of_get_property(np, "fsl,magic-packet", NULL)) + fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; + + ret = fec_enet_init_stop_mode(fep, np); + if (ret) + goto failed_stop_mode; + + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!phy_node && of_phy_is_fixed_link(np)) { + ret = of_phy_register_fixed_link(np); + if (ret < 0) { + dev_err(&pdev->dev, + "broken fixed-link specification\n"); + goto failed_phy; + } + phy_node = of_node_get(np); + } + fep->phy_node = phy_node; +#if LINUX_VERSION_CODE > KERNEL_VERSION(5,5,0) + ret = of_get_phy_mode(pdev->dev.of_node, &interface); + if (ret) { +#else + ret = of_get_phy_mode(pdev->dev.of_node); + if (ret < 0) { +#endif + pdata = dev_get_platdata(&pdev->dev); + if (pdata) + fep->phy_interface = pdata->phy; + else +
fep->phy_interface = PHY_INTERFACE_MODE_MII; + } else { +#if LINUX_VERSION_CODE > KERNEL_VERSION(5,5,0) + fep->phy_interface = interface; +#else + fep->phy_interface = ret; +#endif + } + + fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(fep->clk_ipg)) { + ret = PTR_ERR(fep->clk_ipg); + goto failed_clk; + } + + fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(fep->clk_ahb)) { + ret = PTR_ERR(fep->clk_ahb); + goto failed_clk; + } + + fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); + + /* enet_out is optional, depends on board */ + fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); + if (IS_ERR(fep->clk_enet_out)) + fep->clk_enet_out = NULL; + + /* + * We keep the companion PTP driver enabled even when + * operating the NIC in rt mode. The PHC is still available, + * although not providing rt guarantees. + */ + fep->ptp_clk_on = false; + mutex_init(&fep->ptp_clk_mutex); + + /* clk_ref is optional, depends on board */ + fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); + if (IS_ERR(fep->clk_ref)) + fep->clk_ref = NULL; + + fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; + fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); + if (IS_ERR(fep->clk_ptp)) { + fep->clk_ptp = NULL; + fep->bufdesc_ex = false; + } + + ret = fec_enet_clk_enable(ndev, true); + if (ret) + goto failed_clk; + + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk_ahb; + + fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); + if (!IS_ERR(fep->reg_phy)) { + ret = regulator_enable(fep->reg_phy); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable phy regulator: %d\n", ret); + goto failed_regulator; + } + } else { + if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto failed_regulator; + } + fep->reg_phy = NULL; + } + + pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = fec_reset_phy(pdev); + if (ret) + goto failed_reset; + + irq_cnt = fec_enet_get_irq_cnt(pdev); + if (fep->bufdesc_ex) + fec_ptp_init(pdev, irq_cnt); + + ret = fec_enet_init(ndev); + if (ret) + goto failed_init; + + for (i = 0; i < irq_cnt; i++) { + snprintf(irq_name, sizeof(irq_name), "int%d", i); + irq = platform_get_irq_byname_optional(pdev, irq_name); + if (irq < 0) + irq = platform_get_irq(pdev, i); + if (irq < 0) { + ret = irq; + goto failed_irq; + } + ret = rtdm_irq_request(&fep->rtnet.irq_handle[i], irq, + fec_rt_interrupt, 0, ndev->name, ndev); + if (ret) + goto failed_irq; + + fep->irq[i] = irq; + fep->irqnr++; + } + + ret = fec_enet_mii_init(pdev); + if (ret) + goto failed_mii_init; + + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(ndev); + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&pdev->dev); + + eth_id = of_alias_get_id(pdev->dev.of_node, "ethernet"); + if (eth_id >= 0) + sprintf(ndev->name, "rteth%d", eth_id); + + ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN; + + ret = register_netdev(ndev); + if (ret) + goto failed_register; + + device_init_wakeup(&ndev->dev, fep->wol_flag & + FEC_WOL_HAS_MAGIC_PACKET); + + if (fep->bufdesc_ex && fep->ptp_clock) + netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); + + fep->rx_copybreak = COPYBREAK_DEFAULT; + INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); + + 
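+ /* Editor's note: a minimal sketch (illustrative only, simplified names, + * not part of the original patch) of the RTDM interrupt binding used by + * the probe loop above in place of the regular request_irq(): + * + * static int my_rt_handler(rtdm_irq_t *h) + * { + * struct net_device *ndev = rtdm_irq_get_arg(h, struct net_device); + * // service the RX/TX rings from real-time context + * return RTDM_IRQ_HANDLED; + * } + * + * rtdm_irq_request(&handle, irq, my_rt_handler, 0, ndev->name, ndev); + */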
pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; + +failed_register: + fec_enet_mii_remove(fep); +failed_mii_init: +failed_irq: +failed_init: + fec_ptp_stop(pdev); +failed_reset: + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); +failed_regulator: + clk_disable_unprepare(fep->clk_ahb); +failed_clk_ahb: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: + fec_enet_clk_enable(ndev, false); +failed_clk: + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + of_node_put(phy_node); +failed_stop_mode: +failed_phy: + dev_id--; +failed_ioremap: + fec_rt_destroy(ndev); +failed_rt_init: + free_netdev(ndev); + + return ret; +} + +static int +fec_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + struct device_node *np = pdev->dev.of_node; + int ret; + + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) + return ret; + + cancel_work_sync(&fep->tx_timeout_work); + fec_ptp_stop(pdev); + + fec_rt_destroy(ndev); + unregister_netdev(ndev); + fec_enet_mii_remove(fep); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + of_node_put(fep->phy_node); + + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + free_netdev(ndev); + return 0; +} + +static int __maybe_unused fec_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + + rtnl_lock(); + if (rtnetif_running(&frt->dev)) { + if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) + fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; + phy_stop(ndev->phydev); + rtnetif_stop_queue(&frt->dev); + netif_device_detach(ndev); + rtnetif_wake_queue(&frt->dev); + fec_stop(ndev); + fec_enet_clk_enable(ndev, false); + if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + } + rtnl_unlock(); + + if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) + regulator_disable(fep->reg_phy); + + /* The SoC supplies the clock to the PHY: when the clock is disabled, the PHY link goes down. + * The SoC also controls the PHY regulator: when the regulator is disabled, the PHY link goes down. + */ + if (fep->clk_enet_out || fep->reg_phy) + fep->link = 0; + + return 0; +} + +static int __maybe_unused fec_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_rt_data *frt = &fep->rtnet; + int ret; + int val; + + if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { + ret = regulator_enable(fep->reg_phy); + if (ret) + return ret; + } + + rtnl_lock(); + if (rtnetif_running(&frt->dev)) { + ret = fec_enet_clk_enable(ndev, true); + if (ret) { + rtnl_unlock(); + goto failed_clk; + } + if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { + fec_enet_stop_mode(fep, false); + + val = readl(fep->hwp + FEC_ECNTRL); + val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); + fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; + } else { + pinctrl_pm_select_default_state(&fep->pdev->dev); + } + fec_restart(ndev); + rtnetif_stop_queue(&frt->dev); + netif_device_attach(ndev); + rtnetif_wake_queue(&frt->dev); + phy_start(ndev->phydev); + } + rtnl_unlock(); + + return 0; + +failed_clk: + if 
(fep->reg_phy) + regulator_disable(fep->reg_phy); + return ret; +} + +static int __maybe_unused fec_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + + return 0; +} + +static int __maybe_unused fec_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + return ret; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + + return 0; + +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); + return ret; +} + +static const struct dev_pm_ops fec_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) + SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) +}; + +static struct platform_driver fec_driver = { + .driver = { + .name = DRIVER_NAME, + .pm = &fec_pm_ops, + .of_match_table = fec_dt_ids, + }, + .id_table = fec_devtype, + .probe = fec_probe, + .remove = fec_drv_remove, +}; + +module_platform_driver(fec_driver); + +MODULE_ALIAS("platform:"DRIVER_NAME); +MODULE_LICENSE("GPL"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c new file mode 100644 index 0000000..d71eac7 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Fast Ethernet Controller (ENET) PTP driver for MX6x. + * + * Copyright (C) 2012 Freescale Semiconductor, Inc. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/phy.h> +#include <linux/fec.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/of_net.h> + +#include "fec.h" + +/* FEC 1588 register bits */ +#define FEC_T_CTRL_SLAVE 0x00002000 +#define FEC_T_CTRL_CAPTURE 0x00000800 +#define FEC_T_CTRL_RESTART 0x00000200 +#define FEC_T_CTRL_PERIOD_RST 0x00000030 +#define FEC_T_CTRL_PERIOD_EN 0x00000010 +#define FEC_T_CTRL_ENABLE 0x00000001 + +#define FEC_T_INC_MASK 0x0000007f +#define FEC_T_INC_OFFSET 0 +#define FEC_T_INC_CORR_MASK 0x00007f00 +#define FEC_T_INC_CORR_OFFSET 8 + +#define FEC_T_CTRL_PINPER 0x00000080 +#define FEC_T_TF0_MASK 0x00000001 +#define FEC_T_TF0_OFFSET 0 +#define FEC_T_TF1_MASK 0x00000002 +#define FEC_T_TF1_OFFSET 1 +#define FEC_T_TF2_MASK 0x00000004 +#define FEC_T_TF2_OFFSET 2 +#define FEC_T_TF3_MASK 0x00000008 +#define FEC_T_TF3_OFFSET 3 +#define FEC_T_TDRE_MASK 0x00000001 +#define FEC_T_TDRE_OFFSET 0 +#define FEC_T_TMODE_MASK 0x0000003C +#define FEC_T_TMODE_OFFSET 2 +#define FEC_T_TIE_MASK 0x00000040 +#define FEC_T_TIE_OFFSET 6 +#define FEC_T_TF_MASK 0x00000080 +#define FEC_T_TF_OFFSET 7 + +#define FEC_ATIME_CTRL 0x400 +#define FEC_ATIME 0x404 +#define FEC_ATIME_EVT_OFFSET 0x408 +#define 
FEC_ATIME_EVT_PERIOD 0x40c +#define FEC_ATIME_CORR 0x410 +#define FEC_ATIME_INC 0x414 +#define FEC_TS_TIMESTAMP 0x418 + +#define FEC_TGSR 0x604 +#define FEC_TCSR(n) (0x608 + n * 0x08) +#define FEC_TCCR(n) (0x60C + n * 0x08) +#define MAX_TIMER_CHANNEL 3 +#define FEC_TMODE_TOGGLE 0x05 +#define FEC_HIGH_PULSE 0x0F + +#define FEC_CC_MULT (1 << 31) +#define FEC_COUNTER_PERIOD (1 << 31) +#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC +#define FEC_CHANNLE_0 0 +#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0 + +/** + * fec_ptp_enable_pps + * @fep: the fec_enet_private structure handle + * @enable: enable the channel pps output + * + * This function enables the PPS output on the timer channel. + */ +static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) +{ + unsigned long flags; + u32 val, tempval; + struct timespec64 ts; + u64 ns; + val = 0; + + if (fep->pps_enable == enable) + return 0; + + fep->pps_channel = DEFAULT_PPS_CHANNEL; + fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + if (enable) { + /* Clear the capture or output compare interrupt status, if any. + */ + writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel)); + + /* It is recommended to double check that the TMODE field in the + * TCSR register is cleared before the first compare counter + * is written into the TCCR register, so add a double check. + */ + val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + do { + val &= ~(FEC_T_TMODE_MASK); + writel(val, fep->hwp + FEC_TCSR(fep->pps_channel)); + val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + } while (val & FEC_T_TMODE_MASK); + + /* Dummy read to update the counter */ + timecounter_read(&fep->tc); + /* We want to find the first compare event in the next + * second. So we need to know what the ptp time is now and how + * many nanoseconds are left before the next second; that + * remainder is NSEC_PER_SEC - ts.tv_nsec, and adding it to the + * current timer value yields the next second. + */ + tempval = readl(fep->hwp + FEC_ATIME_CTRL); + tempval |= FEC_T_CTRL_CAPTURE; + writel(tempval, fep->hwp + FEC_ATIME_CTRL); + + tempval = readl(fep->hwp + FEC_ATIME); + /* Convert the ptp local counter to 1588 timestamp */ + ns = timecounter_cyc2time(&fep->tc, tempval); + ts = ns_to_timespec64(ns); + + /* The tempval is less than 3 seconds, and so val is less than + * 4 seconds. No overflow in the 32-bit calculation. + */ + val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval; + + /* Need to consider the situation that the current time is + * very close to the second point, which means NSEC_PER_SEC + * - ts.tv_nsec is close to zero (for example 20 ns); since the timer + * is still running while we calculate the first compare event, it is + * possible that the remaining nanoseconds run out before the compare + * counter is calculated and written into the TCCR register. To avoid + * this possibility, we set the compare event to the next of next + * second. The current setting is a 31-bit timer that wraps + * around after about 2 seconds, so it is okay to target the next of + * next second. + */ + val += NSEC_PER_SEC; + + /* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current + * ptp counter, which may cause a 32-bit wrap-around. Since + * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 seconds, + * we can ensure the wrap-around will not cause an issue. An offset + * bigger than fep->cc.mask would be an error. + */
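+ /* Editor's note (worked example, not part of the original patch): + * suppose the captured counter corresponds to ts.tv_nsec = 999999980, + * i.e. 20 ns before the next second. Then val = NSEC_PER_SEC - + * 999999980 + tempval = tempval + 20, and after val += NSEC_PER_SEC it + * becomes tempval + 1000000020, so the first compare event lands about + * 1.00000002 s ahead, safely in the next-of-next second, before being + * masked to the 31-bit counter width below. + */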
+ val &= fep->cc.mask; + writel(val, fep->hwp + FEC_TCCR(fep->pps_channel)); + + /* Calculate the timestamp of the second compare event */ + fep->next_counter = (val + fep->reload_period) & fep->cc.mask; + + /* Enable the compare event when overflow occurs */ + val = readl(fep->hwp + FEC_ATIME_CTRL); + val |= FEC_T_CTRL_PINPER; + writel(val, fep->hwp + FEC_ATIME_CTRL); + + /* Compare channel setting. */ + val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET); + val &= ~(1 << FEC_T_TDRE_OFFSET); + val &= ~(FEC_T_TMODE_MASK); + val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET); + writel(val, fep->hwp + FEC_TCSR(fep->pps_channel)); + + /* Write the second compare event timestamp and calculate + * the third timestamp. Refer to the TCCR register details in the spec. + */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel)); + fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; + } else { + writel(0, fep->hwp + FEC_TCSR(fep->pps_channel)); + } + + fep->pps_enable = enable; + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +/** + * fec_ptp_read - read raw cycle counter (to be used by time counter) + * @cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static u64 fec_ptp_read(const struct cyclecounter *cc) +{ + struct fec_enet_private *fep = + container_of(cc, struct fec_enet_private, cc); + u32 tempval; + + tempval = readl(fep->hwp + FEC_ATIME_CTRL); + tempval |= FEC_T_CTRL_CAPTURE; + writel(tempval, fep->hwp + FEC_ATIME_CTRL); + + if (fep->quirks & FEC_QUIRK_BUG_CAPTURE) + udelay(1); + + return readl(fep->hwp + FEC_ATIME); +} + +/** + * fec_ptp_start_cyclecounter - create the cycle counter from hw + * @ndev: network device + * + * this function initializes the timecounter and cyclecounter + * structures for use in generating a ns counter from the arbitrary + * fixed-point cycle registers in the hardware. + */ +void fec_ptp_start_cyclecounter(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned long flags; + int inc; + + inc = 1000000000 / fep->cycle_speed; + + /* grab the ptp lock */ + spin_lock_irqsave(&fep->tmreg_lock, flags); + + /* 1ns counter */ + writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC); + + /* use 31-bit timer counter */ + writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD); + + writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST, + fep->hwp + FEC_ATIME_CTRL); + + memset(&fep->cc, 0, sizeof(fep->cc)); + fep->cc.read = fec_ptp_read; + fep->cc.mask = CLOCKSOURCE_MASK(31); + fep->cc.shift = 31; + fep->cc.mult = FEC_CC_MULT; + + /* reset the ns time counter */ + timecounter_init(&fep->tc, &fep->cc, 0); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); +} + +/** + * fec_ptp_adjfreq - adjust ptp cycle frequency + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * Adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + * + * Because the ENET hardware frequency adjustment is complex, + * a software method is used instead. + */
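+ /* Editor's note (worked example under an assumed 1 GHz ptp clock, i.e. + * fep->ptp_inc = 1; not part of the original patch): for ppb = 100 the + * search below finds corr_inc = 1 and corr_period = NSEC_PER_SEC / 100 + * = 10000000, so once every 10^7 cycles the hardware applies ptp_inc + * +/- 1 ns instead of ptp_inc, accumulating the requested 100 ns/s + * correction. + */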
+static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + unsigned long flags; + int neg_adj = 0; + u32 i, tmp; + u32 corr_inc, corr_period; + u32 corr_ns; + u64 lhs, rhs; + + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + + if (ppb == 0) + return 0; + + if (ppb < 0) { + ppb = -ppb; + neg_adj = 1; + } + + /* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC; + * Try to find a corr_inc between 1 and fep->ptp_inc that + * meets the adjustment requirement. + */ + lhs = NSEC_PER_SEC; + rhs = (u64)ppb * (u64)fep->ptp_inc; + for (i = 1; i <= fep->ptp_inc; i++) { + if (lhs >= rhs) { + corr_inc = i; + corr_period = div_u64(lhs, rhs); + break; + } + lhs += NSEC_PER_SEC; + } + /* Not found? Fall back to the largest value: double-speed + * correction in every clock step. + */ + if (i > fep->ptp_inc) { + corr_inc = fep->ptp_inc; + corr_period = 1; + } + + if (neg_adj) + corr_ns = fep->ptp_inc - corr_inc; + else + corr_ns = fep->ptp_inc + corr_inc; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; + tmp |= corr_ns << FEC_T_INC_CORR_OFFSET; + writel(tmp, fep->hwp + FEC_ATIME_INC); + corr_period = corr_period > 1 ? corr_period - 1 : corr_period; + writel(corr_period, fep->hwp + FEC_ATIME_CORR); + /* dummy read to update the timer. */ + timecounter_read(&fep->tc); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +/** + * fec_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by + * + * adjust the timer by resetting the timecounter structure. + */ +static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + timecounter_adjtime(&fep->tc, delta); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +/** + * fec_ptp_gettime + * @ptp: the ptp clock structure + * @ts: timespec structure to hold the current time value + * + * read the timecounter and return the correct value in ns, + * after converting it into a struct timespec64. + */ +static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct fec_enet_private *adapter = + container_of(ptp, struct fec_enet_private, ptp_caps); + u64 ns; + unsigned long flags; + + mutex_lock(&adapter->ptp_clk_mutex); + /* Check the ptp clock */ + if (!adapter->ptp_clk_on) { + mutex_unlock(&adapter->ptp_clk_mutex); + return -EINVAL; + } + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * fec_ptp_settime + * @ptp: the ptp clock structure + * @ts: the timespec containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. + */ +static int fec_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + + u64 ns; + unsigned long flags; + u32 counter; + + mutex_lock(&fep->ptp_clk_mutex); + /* Check the ptp clock */ + if (!fep->ptp_clk_on) { + mutex_unlock(&fep->ptp_clk_mutex); + return -EINVAL; + } + + ns = timespec64_to_ns(ts); + /* Get the timer value based on timestamp. + * Update the counter with the masked value. 
+ */ + counter = ns & fep->cc.mask; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + writel(counter, fep->hwp + FEC_ATIME); + timecounter_init(&fep->tc, &fep->cc, ns); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); + return 0; +} + +/** + * fec_ptp_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + */ +static int fec_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + int ret = 0; + + if (rq->type == PTP_CLK_REQ_PPS) { + ret = fec_ptp_enable_pps(fep, on); + + return ret; + } + return -EOPNOTSUPP; +} + +/** + * fec_ptp_disable_hwts - disable hardware time stamping + * @ndev: pointer to net_device + */ +void fec_ptp_disable_hwts(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + fep->hwts_tx_en = 0; + fep->hwts_rx_en = 0; +} + +int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + fep->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + fep->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + fep->hwts_rx_en = 0; + break; + + default: + fep->hwts_rx_en = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct hwtstamp_config config; + + config.flags = 0; + config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config.rx_filter = (fep->hwts_rx_en ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/* + * fec_time_keep - call timecounter_read() every second to avoid a timer + * overrun, because the ENET hardware only supports a 32-bit counter and + * would time out in about 4s + */ +static void fec_time_keep(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); + unsigned long flags; + + mutex_lock(&fep->ptp_clk_mutex); + if (fep->ptp_clk_on) { + spin_lock_irqsave(&fep->tmreg_lock, flags); + timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + } + mutex_unlock(&fep->ptp_clk_mutex); + + schedule_delayed_work(&fep->time_keep, HZ); +} + +/* This function checks the pps event and reloads the timer compare counter. + */
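+ /* Editor's note: a hypothetical userspace sketch (not part of this + * patch) showing how the PTP_CLOCK_PPS events generated below can be + * enabled through the PHC character device, assuming /dev/ptp0 is this + * FEC's clock: + * + * int fd = open("/dev/ptp0", O_RDWR); + * if (fd >= 0) + * ioctl(fd, PTP_ENABLE_PPS, 1); // from <linux/ptp_clock.h> + */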
+static irqreturn_t fec_pps_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct fec_enet_private *fep = netdev_priv(ndev); + u32 val; + u8 channel = fep->pps_channel; + struct ptp_clock_event event; + + val = readl(fep->hwp + FEC_TCSR(channel)); + if (val & FEC_T_TF_MASK) { + /* Write the next-next compare value to the register (not + * the next one; see the spec) + */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(channel)); + do { + writel(val, fep->hwp + FEC_TCSR(channel)); + } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK); + + /* Update the counter */ + fep->next_counter = (fep->next_counter + fep->reload_period) & + fep->cc.mask; + + event.type = PTP_CLOCK_PPS; + ptp_clock_event(fep->ptp_clock, &event); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +/** + * fec_ptp_init + * @pdev: The FEC network adapter + * @irq_idx: the interrupt index + * + * This function performs the required steps for enabling ptp + * support. If ptp support has already been loaded it simply calls the + * cyclecounter init routine and exits. + */ + +void fec_ptp_init(struct platform_device *pdev, int irq_idx) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + int irq; + int ret; + + fep->ptp_caps.owner = THIS_MODULE; + strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name)); + + fep->ptp_caps.max_adj = 250000000; + fep->ptp_caps.n_alarm = 0; + fep->ptp_caps.n_ext_ts = 0; + fep->ptp_caps.n_per_out = 0; + fep->ptp_caps.n_pins = 0; + fep->ptp_caps.pps = 1; + fep->ptp_caps.adjfreq = fec_ptp_adjfreq; + fep->ptp_caps.adjtime = fec_ptp_adjtime; + fep->ptp_caps.gettime64 = fec_ptp_gettime; + fep->ptp_caps.settime64 = fec_ptp_settime; + fep->ptp_caps.enable = fec_ptp_enable; + + fep->cycle_speed = clk_get_rate(fep->clk_ptp); + if (!fep->cycle_speed) { + fep->cycle_speed = NSEC_PER_SEC; + dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n"); + } + fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed; + + spin_lock_init(&fep->tmreg_lock); + + fec_ptp_start_cyclecounter(ndev); + + INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); + + irq = platform_get_irq_byname_optional(pdev, "pps"); + if (irq < 0) + irq = platform_get_irq_optional(pdev, irq_idx); + /* Failure to get an irq is not fatal, + * only the PTP_CLOCK_PPS clock events should stop + */ + if (irq >= 0) { + ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt, + 0, pdev->name, ndev); + if (ret < 0) + dev_warn(&pdev->dev, "request for pps irq failed(%d)\n", + ret); + } + + fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); + if (IS_ERR(fep->ptp_clock)) { + fep->ptp_clock = NULL; + dev_err(&pdev->dev, "ptp_clock_register failed\n"); + } + + schedule_delayed_work(&fep->time_keep, HZ); +} + +void fec_ptp_stop(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + cancel_delayed_work_sync(&fep->time_keep); + if (fep->ptp_clock) + ptp_clock_unregister(fep->ptp_clock); +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile new file mode 100644 index 0000000..49cdf50 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile @@ -0,0 +1,13 @@ +ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include + +obj-$(CONFIG_XENO_DRIVERS_NET_DRV_IGB) += rt_igb.o + +rt_igb-y := \ + e1000_82575.o \ + e1000_i210.o \ + e1000_mac.o \ + e1000_mbx.o 
\ + e1000_nvm.o \ + e1000_phy.o \ + igb_hwmon.o \ + igb_main.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c new file mode 100644 index 0000000..fff9e85 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c @@ -0,0 +1,2891 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2015 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* e1000_82575 + * e1000_82576 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/i2c.h> + +#include "e1000_mac.h" +#include "e1000_82575.h" +#include "e1000_i210.h" + +static s32 igb_get_invariants_82575(struct e1000_hw *); +static s32 igb_acquire_phy_82575(struct e1000_hw *); +static void igb_release_phy_82575(struct e1000_hw *); +static s32 igb_acquire_nvm_82575(struct e1000_hw *); +static void igb_release_nvm_82575(struct e1000_hw *); +static s32 igb_check_for_link_82575(struct e1000_hw *); +static s32 igb_get_cfg_done_82575(struct e1000_hw *); +static s32 igb_init_hw_82575(struct e1000_hw *); +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); +static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); +static s32 igb_reset_hw_82575(struct e1000_hw *); +static s32 igb_reset_hw_82580(struct e1000_hw *); +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_setup_copper_link_82575(struct e1000_hw *); +static s32 igb_setup_serdes_link_82575(struct e1000_hw *); +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); +static void igb_clear_hw_cntrs_82575(struct e1000_hw *); +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, + u16 *); +static s32 igb_get_phy_id_82575(struct e1000_hw *); +static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); +static bool igb_sgmii_active_82575(struct e1000_hw *); +static s32 igb_reset_init_script_82575(struct e1000_hw *); +static s32 igb_read_mac_addr_82575(struct e1000_hw *); +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +static 
s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); +static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; + +/** + * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = rd32(E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = rd32(E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * igb_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + /* Check the copper medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check the other medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } else { + ret_val = igb_check_for_link_82575(hw); + } + + return 0; +} + +/** + * igb_init_phy_params_82575 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 igb_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u32 ctrl_ext; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + if (igb_sgmii_active_82575(hw)) { + phy->ops.reset = igb_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = igb_phy_hw_reset; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + wr32(E1000_CTRL_EXT, ctrl_ext); + igb_reset_mdicnfg_82580(hw); + + if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; + phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; + break; + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = igb_read_phy_reg_gs40g; + phy->ops.write_reg = igb_write_phy_reg_gs40g; + break; + default: + phy->ops.read_reg = igb_read_phy_reg_igp; + phy->ops.write_reg = igb_write_phy_reg_igp; + } + } + + /* set lan id */ + hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> + E1000_STATUS_FUNC_SHIFT; + + /* Set phy->phy_addr and phy->id. */ + ret_val = igb_get_phy_id_82575(hw); + if (ret_val) + return ret_val; + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + if (phy->id != M88E1111_I_PHY_ID) + phy->ops.get_cable_length = + igb_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = igb_get_cable_length_m88; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + /* Check if this PHY is configured for media swap. 
*/ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + igb_check_for_link_media_swap; + } + break; + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.get_phy_info = igb_get_phy_info_igp; + phy->ops.get_cable_length = igb_get_cable_length_igp_2; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.force_speed_duplex = + igb_phy_force_speed_duplex_82580; + phy->ops.get_cable_length = igb_get_cable_length_82580; + phy->ops.get_phy_info = igb_get_phy_info_82580; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u16 size; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = 1 << size; + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+ 16 : 8; + break; + } + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + + /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + nvm->ops.write = igb_write_nvm_spi; + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = igb_validate_nvm_checksum_82580; + nvm->ops.update = igb_update_nvm_checksum_82580; + break; + case e1000_i354: + case e1000_i350: + nvm->ops.validate = igb_validate_nvm_checksum_i350; + nvm->ops.update = igb_update_nvm_checksum_i350; + break; + default: + break; + } + + return 0; +} + +/** + * igb_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + switch (mac->type) { + case e1000_82576: + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + break; + case e1000_82580: + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + break; + case e1000_i350: + case e1000_i354: + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + break; + default: + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + break; + } + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = igb_reset_hw_82580; + else + mac->ops.reset_hw = igb_reset_hw_82575; + + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; + + } else { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; + } + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + /* enable EEE on i350 parts and later parts */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? igb_setup_copper_link_82575 + : igb_setup_serdes_link_82575; + + if (mac->type == e1000_82580) { + switch (hw->device_id) { + /* feature not supported on these IDs */ + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + break; + default: + hw->dev_spec._82575.mas_capable = true; + break; + } + } + return 0; +} + +/** + * igb_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on the SFP module + * compatibility flags retrieved from the SFP ID EEPROM. + **/
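+ /* Editor's note, summarizing the decision implemented below (derived + * from the code; added for clarity, not part of the original patch): + * an SFP/SFF identifier with 1000BASE-LX/SX flags selects + * internal_serdes; 100BASE-FX selects internal_serdes with + * sgmii_active = true; 1000BASE-T selects copper with sgmii_active = + * true; anything else leaves the media type unknown. + */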
+static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + wrfl(); + + /* Read SFP module data */ + while (timeout) { + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == 0) + break; + msleep(100); + timeout--; + } + if (ret_val != 0) + goto out; + + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != 0) + goto out; + + /* Check whether an SFP module is plugged in and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + hw_dbg("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = 0; +out: + /* Restore I2C interface setting */ + wr32(E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + +static s32 igb_get_invariants_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + 
return -E1000_ERR_MAC_INIT; + } + + /* Set media type */ + /* The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to true. + */ + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (igb_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + /* for I2C based SGMII: */ + fallthrough; + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = igb_set_sfp_media_type_82575(hw); + if ((ret_val != 0) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* do not change link mode for 100BaseFX */ + if (dev_spec->eth_flags.e100_base_fx) + break; + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (hw->phy.media_type == e1000_media_type_copper) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + wr32(E1000_CTRL_EXT, ctrl_ext); + + break; + default: + break; + } + + /* mac initialization and operations */ + ret_val = igb_init_mac_params_82575(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igb_init_nvm_params_82575(hw); + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + ret_val = igb_init_nvm_params_i210(hw); + break; + default: + break; + } + + if (ret_val) + goto out; + + /* if part supports SR-IOV then initialize mailbox parameters */ + switch (mac->type) { + case e1000_82576: + case e1000_i350: + igb_init_mbx_params_pf(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igb_init_phy_params_82575(hw); + +out: + return ret_val; +} + +/** + * igb_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igb_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. 
+ **/ +static void igb_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHYs which do and do not use the + * sgmii interface. + **/ +static s32 igb_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + /* Extra read required for some PHY's on i354 */ + if (hw->mac.type == e1000_i354) + igb_get_phy_id(hw); + + /* For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. + */ + if (!(igb_sgmii_active_82575(hw))) { + phy->addr = 1; + ret_val = igb_get_phy_id(hw); + goto out; + } + + if (igb_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = rd32(E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = rd32(E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + ret_val = igb_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + wrfl(); + msleep(300); + + /* The address field in the I2CCMD register is 3 bits and 0 is invalid. 
+ * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == 0) { + hw_dbg("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* At the time of this writing, the M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + hw_dbg("PHY address %u was unreadable\n", phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + goto out; + } else { + ret_val = igb_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + wr32(E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + /* This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + hw_dbg("Soft resetting SGMII attached PHY...\n"); + + /* SFP documentation requires the following to configure the SFP module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + ret_val = igb_acquire_nvm(hw); + + if (ret_val) + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * igb_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_82575(struct e1000_hw *hw) +{ + igb_release_nvm(hw); + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; + + while (i < timeout) { + if (igb_get_hw_semaphore(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); + +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. 
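igb_acquire_swfw_sync_82575() above layers two locks: a hardware semaphore that only protects the read-modify-write of SW_FW_SYNC, and ownership bits inside that register (software claims in the low half of the word, firmware in the high half). A standalone sketch of the retry loop, with hypothetical primitives standing in for igb_get_hw_semaphore() and the register accessors:

#include <stdint.h>

#define TIMEOUT_TRIES 200

int  hw_semaphore_get(void);      /* hypothetical stand-ins for the driver's */
void hw_semaphore_put(void);      /* igb_get/put_hw_semaphore() */
uint32_t sync_reg_read(void);     /* and rd32/wr32(E1000_SW_FW_SYNC) */
void sync_reg_write(uint32_t value);
void delay_ms(unsigned int ms);

/* Acquire a SW/FW resource bit: swmask marks software ownership,
 * swmask << 16 the matching firmware bit. The semaphore is held only
 * long enough to inspect and update the sync register. */
int swfw_sync_acquire(uint16_t mask)
{
	uint32_t swmask = mask, fwmask = (uint32_t)mask << 16;
	int i;

	for (i = 0; i < TIMEOUT_TRIES; i++) {
		if (hw_semaphore_get())
			return -1;   /* cannot even lock the sync register */

		uint32_t sync = sync_reg_read();
		if (!(sync & (swmask | fwmask))) {
			sync_reg_write(sync | swmask);  /* claim the resource */
			hw_semaphore_put();
			return 0;
		}

		/* Resource busy (other software thread or firmware):
		 * back off and retry. */
		hw_semaphore_put();
		delay_ms(5);
	}
	return -1;  /* timed out waiting for the current owner to release */
}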
NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + + while (timeout) { + if (rd32(E1000_EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) + igb_phy_init_script_igp3(hw); + + return 0; +} + +/** + * igb_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = igb_get_speed_and_duplex_copper(hw, speed, + duplex); + + return ret_val; +} + +/** + * igb_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +static s32 igb_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
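igb_get_cfg_done_82575() above is a bounded poll: select the per-port completion bit, spin with a millisecond sleep, and treat a timeout as log-and-continue so that EEPROM-less silicon can still be reset and link. The skeleton of that loop, with hypothetical helpers for the register read and the sleep:

#include <stdint.h>
#include <stdbool.h>

uint32_t mgmt_reg_read(void);   /* hypothetical stand-in for rd32(E1000_EEMNGCTL) */
void sleep_ms_range(unsigned int lo, unsigned int hi);

/* Bounded poll for a "config done" flag; port_mask selects which
 * function's completion bit to watch. */
bool poll_cfg_done(uint32_t port_mask, int tries)
{
	while (tries--) {
		if (mgmt_reg_read() & port_mask)
			return true;
		sleep_ms_range(1, 2);   /* mirrors usleep_range(1000, 2000) */
	}
	return false;   /* caller logs and carries on, as the driver does */
}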
+ */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + } else { + ret_val = igb_check_for_copper_link(hw); + } + + return ret_val; +} + +/** + * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +void igb_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = rd32(E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); +} + +/** + * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs, status; + + /* Set up defaults for the return values of this function */ + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + + /* Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = rd32(E1000_PCS_LSTAT); + + /* The link up bit determines when link is up on autoneg. The sync ok + * gets set once both sides sync up and agree upon link. Stable link + * can be determined by checking for both link up and link sync ok + */ + if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) + *speed = SPEED_1000; + else if (pcs & E1000_PCS_LSTS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = rd32(E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + hw_dbg("2500 Mbs, "); + hw_dbg("Full Duplex\n"); + } + } + + } + + return 0; +} + +/** + * igb_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of fiber serdes, shut down optics and PCS on driver unload + * when management pass thru is not enabled. 
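The PCS decode in igb_get_pcs_speed_and_duplex_82575() above trusts link only when both the link-up and sync-ok bits are set, and only then extracts speed and duplex from dedicated status bits. A compact sketch, with illustrative bit positions standing in for the E1000_PCS_LSTS_* masks:

#include <stdint.h>
#include <stdbool.h>

#define PCS_LINK_OK     (1u << 0)   /* illustrative positions, not the */
#define PCS_SYNC_OK     (1u << 1)   /* real E1000_PCS_LSTS_* values */
#define PCS_SPEED_100   (1u << 2)
#define PCS_SPEED_1000  (1u << 3)
#define PCS_DUPLEX_FULL (1u << 4)

struct link_info { bool up; unsigned int speed; bool full_duplex; };

/* Link is stable only when both sides are up and in sync; otherwise
 * speed and duplex keep their safe zero defaults. */
struct link_info pcs_decode(uint32_t pcs)
{
	struct link_info li = { .up = false, .speed = 0, .full_duplex = false };

	if ((pcs & PCS_LINK_OK) && (pcs & PCS_SYNC_OK)) {
		li.up = true;
		li.speed = (pcs & PCS_SPEED_1000) ? 1000 :
			   (pcs & PCS_SPEED_100)  ? 100 : 10;
		li.full_duplex = !!(pcs & PCS_DUPLEX_FULL);
	}
	return li;
}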
+ **/ +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + if (hw->phy.media_type != e1000_media_type_internal_serdes && + igb_sgmii_active_82575(hw)) + return; + + if (!igb_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = rd32(E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = rd32(E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); + } +} + +/** + * igb_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = igb_set_pcie_completion_timeout(hw); + if (ret_val) + hw_dbg("PCI-E Set completion timeout has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(E1000_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) + igb_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + return ret_val; +} + +/** + * igb_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
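igb_reset_hw_82575() above is mostly about ordering: traffic and bus mastering are quiesced before the reset bit is pulsed, and a missing auto-read completion is logged but deliberately not fatal so EEPROM-less boards can still come up. Pulled out of the register details, the sequence looks like the following sketch (all helper names are hypothetical, one per step of the function):

void quiesce_pcie_master(void);     /* hypothetical step helpers */
void set_completion_timeout(void);
void mask_all_interrupts(void);
void disable_rx_tx(void);
void wait_ms(unsigned int ms);
void pulse_global_reset(void);
int  wait_auto_read_done(void);
int  eeprom_present(void);
void run_reset_init_script(void);
void clear_pending_interrupts(void);
void install_alternate_mac(void);

void mac_reset_sequence(void)
{
	quiesce_pcie_master();       /* avoid hanging a TLP mid-reset */
	set_completion_timeout();    /* raise it above the 500us-1ms default */
	mask_all_interrupts();
	disable_rx_tx();             /* RCTL = 0, TCTL = PSP, then flush */
	wait_ms(10);
	pulse_global_reset();        /* CTRL |= RST */
	(void)wait_auto_read_done(); /* failure logged, not fatal: may just
				      * mean there is no EEPROM */
	if (!eeprom_present())
		run_reset_init_script();
	clear_pending_interrupts();  /* mask again, then read ICR */
	install_alternate_mac();     /* RAR0 override, if the NVM has one */
}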
+ **/ +static s32 igb_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + if ((hw->mac.type >= e1000_i210) && + !(igb_get_flash_presence_i210(hw))) { + ret_val = igb_pll_workaround_i210(hw); + if (ret_val) + return ret_val; + } + + /* Initialize identification LED */ + ret_val = igb_id_led_init(hw); + if (ret_val) { + hw_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + hw_dbg("Initializing the IEEE VLAN\n"); + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + igb_clear_vfta_i350(hw); + else + igb_clear_vfta(hw); + + /* Setup the receive address */ + igb_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igb_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igb_clear_hw_cntrs_82575(hw); + return ret_val; +} + +/** + * igb_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u32 phpm_reg; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + wr32(E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + + ret_val = igb_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msleep(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_i210: + case e1000_phy_m88: + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = igb_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = igb_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: + ret_val = igb_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = igb_copper_link_setup_82580(hw); + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = igb_setup_copper_link(hw); +out: + return ret_val; +} + +/** + * igb_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. 
The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; + s32 ret_val = 0; + u16 data; + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return ret_val; + + + /* On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. + */ + wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present and turn on I2C */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { + /* set both sw defined pins */ + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + /* Set switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg |= E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + } + + reg = rd32(E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + fallthrough; + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + wr32(E1000_CTRL, ctrl_reg); + + /* New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. 
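The link-mode switch above boils the pcs_autoneg decision down to three cases: SGMII always lets the PHY autonegotiate, 1000BASE-KX never does (parallel detect only), and plain SerDes follows the MAC setting unless the NVM compatibility word disables it on 82575/82576. Restated as a standalone predicate (nvm_an_disabled models the E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT test, read elsewhere):

#include <stdbool.h>

enum link_mode { LINK_MODE_SGMII, LINK_MODE_1000BASE_KX, LINK_MODE_SERDES };

bool pcs_autoneg_wanted(enum link_mode mode, bool mac_autoneg,
			bool nvm_an_disabled)
{
	switch (mode) {
	case LINK_MODE_SGMII:
		return true;           /* PHY handles speed/duplex */
	case LINK_MODE_1000BASE_KX:
		return false;          /* parallel detect only */
	default:
		return mac_autoneg && !nvm_an_disabled;
	}
}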
+ */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = rd32(E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + wr32(E1000_PCS_ANADV, anadv_reg); + + hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + wr32(E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) + igb_force_mac_fc(hw); + + return ret_val; +} + +/** + * igb_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. + **/ +static bool igb_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * igb_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. + **/ +static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +{ + if (hw->mac.type == e1000_82575) { + hw_dbg("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); + } + + return 0; +} + +/** + * igb_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. 
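The flow-control switch above maps the requested mode onto just two advertisement bits: symmetric PAUSE and the asymmetric direction bit. The same mapping as a standalone function, with hypothetical values for the E1000_TXCW_* bits:

#include <stdint.h>

#define TXCW_PAUSE   (1u << 7)   /* illustrative positions for the */
#define TXCW_ASM_DIR (1u << 8)   /* E1000_TXCW_PAUSE/ASM_DIR bits */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* full and rx_pause advertise both bits (we can honor received pause
 * frames either way); tx_pause advertises only the asymmetric direction
 * bit; none advertises neither. */
uint32_t fc_advertisement(enum fc_mode mode)
{
	switch (mode) {
	case FC_FULL:
	case FC_RX_PAUSE:
		return TXCW_ASM_DIR | TXCW_PAUSE;
	case FC_TX_PAUSE:
		return TXCW_ASM_DIR;
	default:
		return 0;
	}
}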
+ */ + ret_val = igb_check_alt_mac_addr(hw); + if (ret_val) + goto out; + + ret_val = igb_read_mac_addr(hw); + +out: + return ret_val; +} + +/** + * igb_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +void igb_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) + igb_power_down_phy_copper(hw); +} + +/** + * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + igb_clear_hw_cntrs_base(hw); + + rd32(E1000_PRC64); + rd32(E1000_PRC127); + rd32(E1000_PRC255); + rd32(E1000_PRC511); + rd32(E1000_PRC1023); + rd32(E1000_PRC1522); + rd32(E1000_PTC64); + rd32(E1000_PTC127); + rd32(E1000_PTC255); + rd32(E1000_PTC511); + rd32(E1000_PTC1023); + rd32(E1000_PTC1522); + + rd32(E1000_ALGNERRC); + rd32(E1000_RXERRC); + rd32(E1000_TNCRS); + rd32(E1000_CEXTERR); + rd32(E1000_TSCTC); + rd32(E1000_TSCTFC); + + rd32(E1000_MGTPRC); + rd32(E1000_MGTPDC); + rd32(E1000_MGTPTC); + + rd32(E1000_IAC); + rd32(E1000_ICRXOC); + + rd32(E1000_ICRXPTC); + rd32(E1000_ICRXATC); + rd32(E1000_ICTXPTC); + rd32(E1000_ICTXATC); + rd32(E1000_ICTXQEC); + rd32(E1000_ICTXQMTC); + rd32(E1000_ICRXDMTC); + + rd32(E1000_CBTMPC); + rd32(E1000_HTDPMC); + rd32(E1000_CBRMPC); + rd32(E1000_RPTHC); + rd32(E1000_HGPTC); + rd32(E1000_HTCBDPC); + rd32(E1000_HGORCL); + rd32(E1000_HGORCH); + rd32(E1000_HGOTCL); + rd32(E1000_HGOTCH); + rd32(E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + igb_sgmii_active_82575(hw)) + rd32(E1000_SCVPC); +} + +/** + * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable + * @hw: pointer to the HW structure + * + * After rx enable if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void igb_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + wr32(E1000_RFCTL, rfctl); + + if (hw->mac.type != e1000_82575 || + !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all RX queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(E1000_RXDCTL(i)); + wr32(E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
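igb_clear_hw_cntrs_82575() above works because the statistics registers are clear-on-read: each rd32() both samples and zeroes a counter, so "clearing" is just reading every offset once and discarding the value. A sketch of the same idiom driven by an offset table (the offsets here are placeholders, not real E1000 register addresses):

#include <stdint.h>
#include <stddef.h>

uint32_t stat_reg_read(uint32_t offset);   /* hypothetical rd32() stand-in */

static const uint32_t stat_offsets[] = {
	0x405C, 0x4060, 0x4064,   /* placeholder offsets only */
};

void clear_stats(void)
{
	for (size_t i = 0; i < sizeof(stat_offsets) / sizeof(stat_offsets[0]); i++)
		(void)stat_reg_read(stat_offsets[i]);  /* value intentionally dropped */
}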
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = rd32(E1000_RLPML); + wr32(E1000_RLPML, 0); + + rctl = rd32(E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + wr32(E1000_RCTL, temp_rctl); + wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable RX queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(E1000_RXDCTL(i), rxdctl[i]); + wr32(E1000_RCTL, rctl); + wrfl(); + + wr32(E1000_RLPML, rlpml); + wr32(E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(E1000_ROC); + rd32(E1000_RNBC); + rd32(E1000_MPC); +} + +/** + * igb_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = rd32(E1000_GCR); + s32 ret_val = 0; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* if capabilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + wr32(E1000_GCR, gcr); + return ret_val; +} + +/** + * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = rd32(reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + wr32(reg_offset, reg_val); +} + +/** + * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. 
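The PF exemption in igb_vmdq_set_anti_spoofing_pf() above uses XOR rather than AND-NOT: because the preceding OR guarantees the PF's MAC and VLAN bits are set, XOR-ing flips exactly those two bits back to zero without touching any VF bit. Isolated into a pure function over the register value:

#include <stdint.h>

#define MAX_NUM_VFS     8
#define MAC_SPOOF_MASK  0x000000FFu   /* one bit per pool, MAC checks */
#define VLAN_SPOOF_MASK 0x0000FF00u   /* one bit per pool, VLAN checks */

/* Enable spoof checking for every pool, then carve out the PF's own
 * MAC bit (bit pf) and VLAN bit (bit pf + MAX_NUM_VFS). */
uint32_t antispoof_enable_except_pf(uint32_t reg, int pf)
{
	reg |= MAC_SPOOF_MASK | VLAN_SPOOF_MASK;
	reg ^= (1u << pf) | (1u << (pf + MAX_NUM_VFS));
	return reg;
}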
+ **/
+void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+	u32 dtxswc;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		dtxswc = rd32(E1000_DTXSWC);
+		if (enable)
+			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		else
+			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		wr32(E1000_DTXSWC, dtxswc);
+		break;
+	case e1000_i354:
+	case e1000_i350:
+		dtxswc = rd32(E1000_TXSWC);
+		if (enable)
+			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		else
+			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		wr32(E1000_TXSWC, dtxswc);
+		break;
+	default:
+		/* Currently no other hardware supports loopback */
+		break;
+	}
+}
+
+/**
+ * igb_vmdq_set_replication_pf - enable or disable vmdq replication
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables replication of packets across multiple pools.
+ **/
+void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+	u32 vt_ctl = rd32(E1000_VT_CTL);
+
+	if (enable)
+		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+	else
+		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+	wr32(E1000_VT_CTL, vt_ctl);
+}
+
+/**
+ * igb_read_phy_reg_82580 - Read 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the MDI control register in the PHY at offset and stores the
+ * information read to data.
+ **/
+static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_write_phy_reg_82580 - Write 82580 MDI control register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write to register at offset
+ *
+ * Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ * @hw: pointer to the HW structure
+ *
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * the values found in the EEPROM. This addresses an issue in which these
+ * bits are not restored from EEPROM after reset.
+ **/
+static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 mdicnfg;
+	u16 nvm_data = 0;
+
+	if (hw->mac.type != e1000_82580)
+		goto out;
+	if (!igb_sgmii_active_82575(hw))
+		goto out;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+				   &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	mdicnfg = rd32(E1000_MDICNFG);
+	if (nvm_data & NVM_WORD24_EXT_MDIO)
+		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+	if (nvm_data & NVM_WORD24_COM_MDIO)
+		mdicnfg |= E1000_MDICNFG_COM_MDIO;
+	wr32(E1000_MDICNFG, mdicnfg);
+out:
+	return ret_val;
+}
+
+/**
+ * igb_reset_hw_82580 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets function or entire device (all ports, etc.)
+ * to a known state.
+ **/ +static s32 igb_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + hw->dev_spec._82575.global_device_reset = false; + + /* due to hw errata, global device reset doesn't always + * work on 82580 + */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ + ctrl = rd32(E1000_CTRL); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 11000); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && + hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && + !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + wr32(E1000_CTRL, ctrl); + wrfl(); + + /* Add delay to insure DEV_RST has time to complete */ + if (global_device_reset) + usleep_range(5000, 6000); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + ret_val = igb_reset_mdicnfg_82580(hw); + if (ret_val) + hw_dbg("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 igb_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
+ **/ +static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if checksums compatibility bit is set validate checksums + * for all 4 ports. + */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
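The NVM checksum rule used by both functions above is that every word of a protected region, including the stored checksum word, must sum to 0xBABA modulo 2^16. A software model over an in-memory copy of one region (the region length follows the conventional NVM_CHECKSUM_REG offset, assumed here to be 0x003F):

#include <stdint.h>
#include <stdbool.h>

#define NVM_SUM          0xBABAu
#define NVM_CHECKSUM_REG 0x003F   /* assumed index of the checksum word */

/* Validate: all words of the region, checksum word included, sum to
 * 0xBABA. `words` points at NVM_CHECKSUM_REG + 1 16-bit words. */
bool nvm_region_valid(const uint16_t *words)
{
	uint16_t sum = 0;
	for (int i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += words[i];
	return sum == NVM_SUM;
}

/* Update: recompute the checksum word so the region sums to 0xBABA. */
uint16_t nvm_region_checksum(const uint16_t *words)
{
	uint16_t sum = 0;
	for (int i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += words[i];
	return (uint16_t)(NVM_SUM - sum);
}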
+ **/ +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * __igb_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * igb_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __igb_access_emi_reg(hw, addr, data, true); +} + +/** + * igb_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
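__igb_access_emi_reg() above is the classic indirect-register window: write the target address to one register, then move data through its companion, aborting before the data phase if the address write fails. A sketch with hypothetical PHY accessors and illustrative offsets for the EMIADD/EMIDATA pair:

#include <stdint.h>
#include <stdbool.h>

int phy_reg_write(uint32_t offset, uint16_t value);  /* hypothetical */
int phy_reg_read(uint32_t offset, uint16_t *value);  /* PHY accessors */

#define REG_EMIADD  0x10   /* illustrative offsets for the window pair */
#define REG_EMIDATA 0x11

int emi_access(uint16_t address, uint16_t *data, bool read)
{
	/* Address phase: select the EMI register behind the window. */
	int ret = phy_reg_write(REG_EMIADD, address);
	if (ret)
		return ret;

	/* Data phase: move the value through the data register. */
	return read ? phy_reg_read(REG_EMIDATA, data)
		    : phy_reg_write(REG_EMIDATA, *data);
}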
+ * + **/ +s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + u32 ipcnfg, eeer; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = rd32(E1000_IPCNFG); + eeer = rd32(E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = rd32(E1000_EEE_SU); + + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + wr32(E1000_IPCNFG, ipcnfg); + wr32(E1000_EEER, eeer); + rd32(E1000_IPCNFG); + rd32(E1000_EEER); +out: + + return 0; +} + +/** + * igb_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + if ((hw->phy.media_type != e1000_media_type_copper) || + (phy->id != M88E1543_E_PHY_ID)) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * igb_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. + **/ +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + /* Check if EEE is supported on this device. 
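As the comment above notes, EEE status on the i354 is inferred rather than reported: the PCS latches whether low-power-idle signalling has been seen in either direction, so EEE counts as active if either LPI-received flag is set. The decode on its own, with illustrative bit positions for the two flags:

#include <stdint.h>
#include <stdbool.h>

#define PCS_STATUS_TX_LPI_RCVD (1u << 11)  /* illustrative positions for */
#define PCS_STATUS_RX_LPI_RCVD (1u << 12)  /* the E1000_PCS_STATUS_* bits */

bool eee_active(uint16_t pcs_status)
{
	return (pcs_status & (PCS_STATUS_TX_LPI_RCVD |
			      PCS_STATUS_RX_LPI_RCVD)) != 0;
}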
*/ + if ((hw->phy.media_type != e1000_media_type_copper) || + (phy->id != M88E1543_E_PHY_ID)) + goto out; + + ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +static const u8 e1000_emc_temp_data[4] = { + E1000_EMC_INTERNAL_DATA, + E1000_EMC_DIODE1_DATA, + E1000_EMC_DIODE2_DATA, + E1000_EMC_DIODE3_DATA +}; +static const u8 e1000_emc_therm_limit[4] = { + E1000_EMC_INTERNAL_THERM_LIMIT, + E1000_EMC_DIODE1_THERM_LIMIT, + E1000_EMC_DIODE2_THERM_LIMIT, + E1000_EMC_DIODE3_THERM_LIMIT +}; + +#ifdef CONFIG_IGB_HWMON +/** + * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + if (num_sensors > E1000_MAX_SENSORS) + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) + hw->phy.ops.read_i2c_byte(hw, + e1000_emc_temp_data[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } + return 0; +} + +/** + * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = + (rd32(E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = + (rd32(E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & 
NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return 0; +} + +#endif +static struct e1000_mac_operations e1000_mac_ops_82575 = { + .init_hw = igb_init_hw_82575, + .check_for_link = igb_check_for_link_82575, + .rar_set = igb_rar_set, + .read_mac_addr = igb_read_mac_addr_82575, + .get_speed_and_duplex = igb_get_link_up_info_82575, +#ifdef CONFIG_IGB_HWMON + .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, +#endif +}; + +static struct e1000_phy_operations e1000_phy_ops_82575 = { + .acquire = igb_acquire_phy_82575, + .get_cfg_done = igb_get_cfg_done_82575, + .release = igb_release_phy_82575, + .write_i2c_byte = igb_write_i2c_byte, + .read_i2c_byte = igb_read_i2c_byte, +}; + +static struct e1000_nvm_operations e1000_nvm_ops_82575 = { + .acquire = igb_acquire_nvm_82575, + .read = igb_read_nvm_eerd, + .release = igb_release_nvm_82575, + .write = igb_write_nvm_spi, +}; + +const struct e1000_info e1000_82575_info = { + .get_invariants = igb_get_invariants_82575, + .mac_ops = &e1000_mac_ops_82575, + .phy_ops = &e1000_phy_ops_82575, + .nvm_ops = &e1000_nvm_ops_82575, +}; + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h new file mode 100644 index 0000000..db4e9f4 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h @@ -0,0 +1,280 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 + +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 + + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + __le16 pkt_info; /* RSS type, Packet type */ + __le16 hdr_info; /* Split Head, buf len */ + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +#define 
E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +/* Additional DCA related definitions, note change in position of CPUID */ +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_1588 (1 << 30) + +/* FTQF register bit definitions */ +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 8 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 
0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +#define ALL_QUEUES 0xFFFF + +/* RX packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); +void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); +void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +u16 igb_rxpbs_adjust_82580(u32 data); +s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); +s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); + +#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define E1000_EMC_INTERNAL_DATA 0x00 +#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +#define E1000_EMC_DIODE1_DATA 0x01 +#define E1000_EMC_DIODE1_THERM_LIMIT 0x19 +#define E1000_EMC_DIODE2_DATA 0x23 +#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A +#define E1000_EMC_DIODE3_DATA 0x2A +#define E1000_EMC_DIODE3_THERM_LIMIT 0x30 +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h new file mode 100644 index 0000000..1002cbc --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h @@ -0,0 +1,1018 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Definable Pin 2 */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ +#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ + +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0x00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +/* Interrupt delay cancellation */ +/* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 +/* Interrupt acknowledge Auto-mask */ +/* Clear Interrupt timers after IMS clear */ +/* packet buffer parity error detection enabled */ +/* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+ E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ +/* Enable Neighbor Discovery Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
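To make the PSRCTL packing rule in the usage comment above concrete, here is a minimal sketch (illustration only, not part of the patch) that builds a PSRCTL value from the four buffer sizes. ROUNDUP() is a hypothetical round-up-to-multiple helper assumed for this example; the driver itself does not define one under that name.

/* Hypothetical helper: round x up to the next multiple of m. */
#define ROUNDUP(x, m) ((((x) + (m) - 1) / (m)) * (m))

static u32 psrctl_pack(u32 value0, u32 value1, u32 value2, u32 value3)
{
        u32 psrctl = 0;

        /* BSIZE0 is programmed in 128-byte units, BSIZE1..BSIZE3 in 1 KB
         * units; the shifts move each rounded size into its field.
         */
        psrctl |= (ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
                  E1000_PSRCTL_BSIZE0_MASK;
        psrctl |= (ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
                  E1000_PSRCTL_BSIZE1_MASK;
        psrctl |= (ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
                  E1000_PSRCTL_BSIZE2_MASK;
        psrctl |= (ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
                  E1000_PSRCTL_BSIZE3_MASK;

        return psrctl;
}

With the defaults named in the comment (256, 4096, 4096, 0) this evaluates to 0x00040402.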
+ +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 + +/* FACTPS Definitions */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +/* Defined polarity of Dock/Undock indication in SDP[0] */ +/* Reset both PHY ports, through PHYRST_N pin */ +/* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +/* Initiate an interrupt to manageability engine */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. + */ + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +/* Change in Dock/Undock state. Clear on write '0'. */ +/* Status of Master requests. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +/* BMC external code execution disabled */ + +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +/* Constants used to interpret the masked PCI-X bus speed. 
*/ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_IVRT 0x00000040 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +/* Extended desc bits for Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ + +/* Timestamp in Rx buffer */ +#define E1000_RXPBS_CFG_TS_EN 0x80000000 + +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 
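For orientation, here is a hedged sketch (illustration only, not part of the patch) of how the Device Status bits and speed constants above are typically consumed; rd32() is the register accessor this patch uses elsewhere, E1000_STATUS is assumed to come from e1000_regs.h, and hw_dbg() is defined in e1000_hw.h below.

static void example_report_link(struct e1000_hw *hw)
{
        u32 status = rd32(E1000_STATUS);
        const char *duplex = (status & E1000_STATUS_FD) ? "full" : "half";

        if (!(status & E1000_STATUS_LU)) {
                hw_dbg("link is down\n");
                return;
        }
        /* Speed bits are encoded one-hot; 10 Mb/s is the fallback. */
        if (status & E1000_STATUS_SPEED_1000)
                hw_dbg("link up, %d Mb/s, %s duplex\n", SPEED_1000, duplex);
        else if (status & E1000_STATUS_SPEED_100)
                hw_dbg("link up, %d Mb/s, %s duplex\n", SPEED_100, duplex);
        else
                hw_dbg("link up, %d Mb/s, %s duplex\n", SPEED_10, duplex);
}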
+ +/* Receive Checksum Control */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* PBA constants */ +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_64K 0x0040 /* 64KB */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +/* LAN connected device generates an interrupt */ +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC | \ + E1000_IMS_DOUTSYNC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
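The IMS_ENABLE_MASK above is normally written to the Interrupt Mask Set register in one shot. A minimal sketch (illustration only, not part of the patch), using the wr32() accessor this patch uses elsewhere and assuming the E1000_IMS register offset from e1000_regs.h:

static void example_enable_irqs(struct e1000_hw *hw)
{
        /* Unmask rx timer, tx writeback, rx minimum threshold,
         * rx sequence error, link status change and DMA out-of-sync.
         */
        wr32(E1000_IMS, IMS_ENABLE_MASK);
}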
+ +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ + +/* Extended Interrupt Cause Set */ +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ + + +/* Transmit Descriptor Control */ +/* Enable the counting of descriptors still to be processed. */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* Transmit Config Word */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 +#define E1000_ERR_I2C 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2-millisecond intervals we wait for acquiring MDIO ownership. */ +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestamping */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 + +/* Time Sync Interrupt Cause/Mask Register Bits */ + +#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ +#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ +#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS +#define E1000_TSICR_TXTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 1. */ +#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ +#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. 
*/ +#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. 
*/ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 + +/* PCI Express Control */ +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +/* mPHY Address Control and Data Registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ + +/* mPHY PCS CLK Register */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ +/* mPHY Near End Digital Loopback Override Bit */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 
/* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset */ +#define E1000_I210_FW_VER_OFFSET 328 + + +/* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DATA 16 +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ +#define NVM_VERSION 0x0005 +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10
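The version masks above are sufficient to decode the NVM image version word. A hedged sketch (illustration only, not part of the patch) of the field extraction, in the spirit of the driver's version-reporting path; hw_dbg() and the nvm.ops.read() hook are declared in e1000_hw.h later in this patch:

static void example_print_nvm_version(struct e1000_hw *hw)
{
        u16 fw_version;

        /* Word 0x0005 (NVM_VERSION) holds major/minor in packed fields. */
        if (hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version))
                return;

        hw_dbg("NVM image version %d.%02d\n",
               (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT,
               (fw_version & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT);
}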
+ +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ + +/* length of string needed to store part num */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - Microwire */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIE_DEVICE_CONTROL2 0x28 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
*/ +/* I = Integrated + * E = External + */ +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define M88_VENDOR 0x0141 +#define I210_I_PHY_ID 0x01410C00 +#define M88E1543_E_PHY_ID 0x01410EA0 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +/* 1 = Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel i347-AT4 Registers */ + +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* i347-AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* i347-AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* Marvell 1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* Thermal Sensor */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ + +/* Energy Efficient Ethernet */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability negotiation */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3
+#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +/* DMA Coalescing register fields */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */ + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h new file mode 100644 index 0000000..7bb117d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h @@ -0,0 +1,570 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <rtnet_port.h> + +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 + +#define E1000_REVISION_2 2 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_i210, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 
command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_mbx.h" + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +#ifdef CONFIG_IGB_HWMON + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +#endif + +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +extern const struct e1000_info e1000_82575_info; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum 
e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* Type of flow control */ + enum e1000_fc_mode requested_mode; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +struct rtnet_device *igb_get_hw_dev(struct e1000_hw *hw); +#define hw_dbg(format, arg...) 
\ + rtdev_dbg(igb_get_hw_dev(hw), format, ##arg) + +/* These functions must be implemented by drivers */ +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +#endif /* _E1000_HW_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c new file mode 100644 index 0000000..65d9316 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c @@ -0,0 +1,902 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* e1000_i210 + * e1000_i211 + */ + +#include <linux/types.h> +#include <linux/if_ether.h> + +#include "e1000_hw.h" +#include "e1000_i210.h" + +static s32 igb_update_flash_i210(struct e1000_hw *hw); + +/** + * igb_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + igb_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * igb_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. 
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) +{ + return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_i210(struct e1000_hw *hw) +{ + igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + while (i < timeout) { + if (igb_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to release + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore_i210(hw)) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igb_update_nvm_checksum is not called after this function, the + * Shadow Ram will most likely contain an invalid checksum. + **/ +static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + /* Assume timeout; the polling loop below clears this on success, + * otherwise the timeout check after the loop could never trigger. + */ + ret_val = -E1000_ERR_NVM; + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + wr32(E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + rd32(E1000_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function, the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP.
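+ *
+ * A layout sketch, as implied by the INVM_DWORD_TO_* macros in
+ * e1000_i210.h: each 32-bit iNVM dword packs the record type into
+ * bits 2:0, the word address into bits 15:9 and the 16-bit payload
+ * into bits 31:16. A hypothetical dword of 0x1234A201 would thus
+ * decode to record type 0x1 (word autoload), word address
+ * (0xA201 & 0xFE00) >> 9 = 0x51 and data 0x1234.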
+ **/ +static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + hw_dbg("Read INVM Word 0x%02x = %x\n", + address, *data); + status = 0; + break; + } + } + } + if (status) + hw_dbg("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** + * igb_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @offset: offset (NVM word address) of the word to read + * @words: number of words to read + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. + **/ +static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 words __always_unused, u16 *data) +{ + s32 ret_val = 0; + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val) + hw_dbg("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_INIT_CTRL_4: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_1_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_0_2_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = ID_LED_RESERVED_FFFF; + ret_val = 0; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + hw_dbg("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * igb_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type.
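+ *
+ * Field layout sketch, assuming the masks defined in e1000_i210.h: an
+ * odd version slot occupies bits 12:3 of a record
+ * (E1000_INVM_VER_FIELD_ONE), an even slot bits 22:13
+ * (E1000_INVM_VER_FIELD_TWO), and the extracted version word then
+ * splits into major (bits 9:4, E1000_INVM_MAJOR_MASK) and minor
+ * (bits 3:0, E1000_INVM_MINOR_MASK) parts, so a hypothetical version
+ * word of 0x123 would report major 0x12, minor 0x3.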
+ **/ +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) { + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = 0; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + /* Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = 0; + break; + } + /* Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + } + + if (!status) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = 0; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = 0; + break; + } + } + return status; +} + +/** + * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = 0; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + if (!(hw->nvm.ops.acquire(hw))) { + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igb_read_nvm_eerd; + + status = igb_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * igb_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. 
Then calculates the EEPROM checksum and writes the + value to the EEPROM. Next, commit the EEPROM data onto the Flash. + **/ +static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + if (!(hw->nvm.ops.acquire(hw))) { + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igb_update_flash_i210(hw); + } else { + ret_val = -E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * igb_pool_flash_update_done_i210 - Poll FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = rd32(E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igb_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool igb_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + eec = rd32(E1000_EECD); + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * igb_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +static s32 igb_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; + wr32(E1000_EECD, flup); + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val) + hw_dbg("Flash update time out\n"); + else + hw_dbg("Flash update complete\n"); + +out: + return ret_val; +} + +/** + * igb_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration.
+ **/ +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __igb_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igb_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igb_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igb_init_nvm_params_i210 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 igb_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + struct e1000_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igb_acquire_nvm_i210; + nvm->ops.release = igb_release_nvm_i210; + nvm->ops.valid_led_default = igb_valid_led_default_i210; + + /* NVM Function Pointers */ + if (igb_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = igb_read_nvm_srrd_i210; + nvm->ops.write = igb_write_nvm_srwr_i210; + nvm->ops.validate = igb_validate_nvm_checksum_i210; + nvm->ops.update = igb_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = igb_read_invm_i210; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return ret_val; +} + +/** + * igb_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
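+ *
+ * Rough shape of the sequence below: with extended MDIO access masked
+ * off in MDICNFG, the PHY PLL frequency register is read back; while
+ * it still reads as unconfigured (E1000_PHY_PLL_UNCONF), the internal
+ * PHY is reset, the OTP autoload word is forced through
+ * E1000_EEARBC_I210 and the device is bounced through the D3 power
+ * state via PMCSR, retrying up to E1000_MAX_PLL_TRIES times before
+ * giving up with -E1000_ERR_PHY.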
+ **/ +s32 igb_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = rd32(E1000_WUC); + mdicnfg = rd32(E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + wr32(E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = 0; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + wr32(E1000_CTRL_EXT, ctrl_ext); + + wr32(E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + wr32(E1000_EEARBC_I210, reg_val); + + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + usleep_range(1000, 2000); + pci_word &= ~E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + wr32(E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + wr32(E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + wr32(E1000_MDICNFG, mdicnfg); + return ret_val; +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h new file mode 100644 index 0000000..3442b63 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h @@ -0,0 +1,93 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_pll_workaround_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for i211 device */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c new file mode 100644 index 0000000..500c928 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c @@ -0,0 +1,1607 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include <linux/if_ether.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "e1000_mac.h" + +#include "igb.h" + +static s32 igb_set_default_fc(struct e1000_hw *hw); +static s32 igb_set_fc_watermarks(struct e1000_hw *hw); + +/** + * igb_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 igb_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u32 reg; + u16 pcie_link_status; + + bus->type = e1000_bus_type_pci_express; + + ret_val = igb_read_pcie_cap_reg(hw, + PCI_EXP_LNKSTA, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: + bus->speed = e1000_bus_speed_2500; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT); + } + + reg = rd32(E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; + + return 0; +} + +/** + * igb_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void igb_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + array_wr32(E1000_VFTA, offset, 0); + wrfl(); + } +} + +/** + * igb_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + array_wr32(E1000_VFTA, offset, value); + wrfl(); +} + +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * igb_clear_vfta_i350 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. 
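+ *
+ * Per the errata note above, each filter-table entry is rewritten ten
+ * times before the flush, presumably so that at least one write takes
+ * effect even if a concurrent BMC or DMA query drops some of them.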
+ **/ +void igb_clear_vfta_i350(struct e1000_hw *hw) +{ + u32 offset; + int i; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + for (i = 0; i < 10; i++) + array_wr32(E1000_VFTA, offset, 0); + + wrfl(); + } +} + +/** + * igb_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + int i; + + for (i = 0; i < 10; i++) + array_wr32(E1000_VFTA, offset, value); + + wrfl(); +} + +/** + * igb_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Sets up the receive address registers by setting the base receive address + * register to the device's MAC address and clearing all the other receive + * address registers to 0. + **/ +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = {0}; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igb_vfta_set - enable or disable vlan in VLAN filter table + * @hw: pointer to the HW structure + * @vid: VLAN id to add or remove + * @add: if true add filter, if false remove + * + * Sets or clears a bit in the VLAN filter table array based on the VLAN id + * and whether we are adding or removing the filter + **/ +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) +{ + u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; + u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + u32 vfta; + struct igb_adapter *adapter = hw->back; + s32 ret_val = 0; + + vfta = adapter->shadow_vfta[index]; + + /* bit was set/cleared before we started */ + if ((!!(vfta & mask)) == add) { + ret_val = -E1000_ERR_CONFIG; + } else { + if (add) + vfta |= mask; + else + vfta &= ~mask; + } + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + igb_write_vfta_i350(hw, index, vfta); + else + igb_write_vfta(hw, index, vfta); + adapter->shadow_vfta[index] = vfta; + + return ret_val; +} + +/** + * igb_check_alt_mac_addr - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be set up by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found, it is saved in the hw struct and + * programmed into RAR0 and the function returns success, otherwise the + * function returns an error. + **/ +s32 igb_check_alt_mac_addr(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = 0; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required.
+ */ + if (hw->mac.type >= e1000_82580) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + goto out; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * igb_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +/** + * igb_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. 
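+ *
+ * Worked example, assuming the usual mta_reg_count of 128: the sample
+ * hash 0x563 from igb_hash_mc_addr() below selects register
+ * (0x563 >> 5) & 0x7F = 0x2b and sets bit 0x563 & 0x1F = 3 within it.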
+ */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = array_rd32(E1000_MTA, hash_reg); + + mta |= (1 << hash_bit); + + array_wr32(E1000_MTA, hash_reg, mta); + wrfl(); +} + +/** + * igb_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igb_mta_set() + **/ +static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igb_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
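+ *
+ * "Packed" here means ETH_ALEN (6) bytes per address laid out back to
+ * back with no padding; the loop below simply advances the list
+ * pointer by ETH_ALEN per entry, so e.g. three addresses occupy 18
+ * consecutive bytes.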
+ **/ +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = igb_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} + +/** + * igb_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void igb_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + rd32(E1000_CRCERRS); + rd32(E1000_SYMERRS); + rd32(E1000_MPC); + rd32(E1000_SCC); + rd32(E1000_ECOL); + rd32(E1000_MCC); + rd32(E1000_LATECOL); + rd32(E1000_COLC); + rd32(E1000_DC); + rd32(E1000_SEC); + rd32(E1000_RLEC); + rd32(E1000_XONRXC); + rd32(E1000_XONTXC); + rd32(E1000_XOFFRXC); + rd32(E1000_XOFFTXC); + rd32(E1000_FCRUC); + rd32(E1000_GPRC); + rd32(E1000_BPRC); + rd32(E1000_MPRC); + rd32(E1000_GPTC); + rd32(E1000_GORCL); + rd32(E1000_GORCH); + rd32(E1000_GOTCL); + rd32(E1000_GOTCH); + rd32(E1000_RNBC); + rd32(E1000_RUC); + rd32(E1000_RFC); + rd32(E1000_ROC); + rd32(E1000_RJC); + rd32(E1000_TORL); + rd32(E1000_TORH); + rd32(E1000_TOTL); + rd32(E1000_TOTH); + rd32(E1000_TPR); + rd32(E1000_TPT); + rd32(E1000_MPTC); + rd32(E1000_BPTC); +} + +/** + * igb_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see if the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 igb_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igb_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igb_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner.
+ */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * igb_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 igb_setup_link(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igb_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = igb_set_default_fc(hw); + if (ret_val) + goto out; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(E1000_FCT, FLOW_CONTROL_TYPE); + wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(E1000_FCTTV, hw->fc.pause_time); + + ret_val = igb_set_fc_watermarks(hw); + +out: + + return ret_val; +} + +/** + * igb_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void igb_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = rd32(E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + wr32(E1000_TCTL, tctl); + wrfl(); +} + +/** + * igb_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +static s32 igb_set_fc_watermarks(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames.
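+ *
+ * A behavior sketch, based on the usual FCRTL/FCRTH semantics rather
+ * than anything spelled out in this file: with E1000_FCRTL_XONE set,
+ * the hardware is expected to send an XON frame once the receive
+ * buffer drains back below the low-water mark after a pause.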
+ */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(E1000_FCRTL, fcrtl); + wr32(E1000_FCRTH, fcrth); + + return ret_val; +} + +/** + * igb_set_default_fc - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 igb_set_default_fc(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 lan_offset; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->mac.type == e1000_i350) { + lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + + lan_offset, 1, &nvm_data); + } else { + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, + 1, &nvm_data); + } + + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + +out: + return ret_val; +} + +/** + * igb_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 igb_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = 0; + + ctrl = rd32(E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit and + * receive flow control. + * + * The "Case" statement below enables/disables flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point.
+ */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + wr32(E1000_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igb_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 igb_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = igb_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = igb_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. 
+ * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. 
+ * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == e1000_fc_none) || + (hw->fc.requested_mode == e1000_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner have + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) + && mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = rd32(E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + hw_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto-Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = rd32(E1000_PCS_ANADV); + pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx-only + * handling of PAUSE frames.
In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = rd32(E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + wr32(E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + +out: + return ret_val; +} + +/** + * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igb_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 igb_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. 
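+ * (Annotation added in this port's notes, not in the original source:
+ * the SMBI loop above takes the software half of the semaphore, and
+ * the SWESMBI loop below takes the software/firmware half. A caller
+ * always pairs acquisition with release, e.g.
+ *
+ *	if (!igb_get_hw_semaphore(hw)) {
+ *		... access PHY or NVM registers ...
+ *		igb_put_hw_semaphore(hw);
+ *	}
+ *
+ * so that igb_put_hw_semaphore() drops both bits together.)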
*/ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void igb_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + wr32(E1000_SWSM, swsm); +} + +/** + * igb_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 igb_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = 0; + + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * igb_id_led_init - Initialize LED identification settings + * @hw: pointer to the HW structure + * + * Reads the ID LED configuration from the NVM and precomputes the LEDCTL + * values used for the LED "on" (mode 1) and "off" (mode 2) states. + **/ +s32 igb_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + /* i210 and i211 devices use a different LED mechanism */ + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) + ret_val = igb_valid_led_default_i210(hw, &data); + else + ret_val = igb_valid_led_default(hw, &data); + + if (ret_val) + goto out; + + mac->ledctl_default = rd32(E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on
<< (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return ret_val; +} + +/** + * igb_cleanup_led - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 igb_cleanup_led(struct e1000_hw *hw) +{ + wr32(E1000_LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * igb_blink_led - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs that are set to be on. + **/ +s32 igb_blink_led(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + wr32(E1000_LEDCTL, ledctl_blink); + + return 0; +} + +/** + * igb_led_off - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 igb_led_off(struct e1000_hw *hw) +{ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * igb_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 if successful, else returns -E1000_ERR_MASTER_REQUESTS_PENDING + * (-10) if the master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 igb_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + wr32(E1000_CTRL, ctrl); + + while (timeout) { + if (!(rd32(E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_mdi_setting - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that, when not using auto-negotiation, MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +s32 igb_validate_mdi_setting(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* All MDI settings are supported on 82580 and newer.
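+ * (Annotation added in this port's notes, not in the original source:
+ * on older parts, forcing link parameters while leaving MDI/MDI-X in
+ * automatic crossover mode is invalid, so a forced-link setup would
+ * pick an explicit mode first, e.g.
+ *
+ *	hw->mac.autoneg = 0;
+ *	hw->phy.mdix = 1;	(assumed here: 1 selects fixed MDI)
+ *	igb_validate_mdi_setting(hw);
+ *
+ * otherwise the check below rewrites mdix to 1 and fails with
+ * -E1000_ERR_CONFIG.)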
*/ + if (hw->mac.type >= e1000_82580) + goto out; + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + hw_dbg("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. + **/ +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = 0; + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + wr32(reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + udelay(5); + regvalue = rd32(reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + hw_dbg("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_enable_mng_pass_thru - Enable processing of ARPs + * @hw: pointer to the HW structure + * + * Verifies that the hardware needs to leave the interface enabled so that + * frames can be directed to and from the management interface. + **/ +bool igb_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(E1000_FWSM); + factps = rd32(E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h new file mode 100644 index 0000000..b50d57c --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h @@ -0,0 +1,88 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +#include "e1000_hw.h" + +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_defines.h" +#include "e1000_i210.h" + +/* Functions that should not be called directly from drivers but can be used + * by other files in this 'shared code' + */ +s32 igb_blink_led(struct e1000_hw *hw); +s32 igb_check_for_copper_link(struct e1000_hw *hw); +s32 igb_cleanup_led(struct e1000_hw *hw); +s32 igb_config_fc_after_link_up(struct e1000_hw *hw); +s32 igb_disable_pcie_master(struct e1000_hw *hw); +s32 igb_force_mac_fc(struct e1000_hw *hw); +s32 igb_get_auto_rd_done(struct e1000_hw *hw); +s32 igb_get_bus_info_pcie(struct e1000_hw *hw); +s32 igb_get_hw_semaphore(struct e1000_hw *hw); +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 igb_id_led_init(struct e1000_hw *hw); +s32 igb_led_off(struct e1000_hw *hw); +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 igb_setup_link(struct e1000_hw *hw); +s32 igb_validate_mdi_setting(struct e1000_hw *hw); +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +void igb_clear_hw_cntrs_base(struct e1000_hw *hw); +void igb_clear_vfta(struct e1000_hw *hw); +void igb_clear_vfta_i350(struct e1000_hw *hw); +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); +void igb_config_collision_dist(struct e1000_hw *hw); +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void igb_mta_set(struct e1000_hw *hw, u32 hash_value); +void igb_put_hw_semaphore(struct e1000_hw *hw); +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +s32 igb_check_alt_mac_addr(struct e1000_hw *hw); + +bool igb_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +void e1000_init_function_pointers_82575(struct e1000_hw *hw); + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c new file mode 100644 index 0000000..162cc49 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c @@ -0,0 +1,443 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000_mbx.h" + +/** + * igb_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * igb_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 
0 : -E1000_ERR_MBX; +} + +/** + * igb_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = igb_poll_for_msg(hw, mbx_id); + + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * igb_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = igb_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = 0; + wr32(E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * igb_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * igb_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * igb_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + 
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = rd32(E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + if (vflre & (1 << vf_number)) { + ret_val = 0; + wr32(E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * igb_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + ret_val = 0; + + return ret_val; +} + +/** + * igb_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + igb_check_for_msg_pf(hw, vf_number); + igb_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * igb_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
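+ *
+ * (Usage sketch added in this port's notes, not in the original file:
+ * once the PF has seen VFREQ for, say, VF 0, the generic wrappers at
+ * the top of this file route back into this function through
+ * hw->mbx.ops.read:
+ *
+ *	u32 msgbuf[E1000_VFMAILBOX_SIZE];
+ *
+ *	if (!igb_check_for_msg(hw, 0))
+ *		igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, 0);
+ *
+ * E1000_VFMAILBOX_SIZE is the 16-word buffer size from e1000_mbx.h.)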
+ **/ +static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * igb_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 igb_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = igb_read_mbx_pf; + mbx->ops.write = igb_write_mbx_pf; + mbx->ops.read_posted = igb_read_posted_mbx; + mbx->ops.write_posted = igb_write_posted_mbx; + mbx->ops.check_for_msg = igb_check_for_msg_pf; + mbx->ops.check_for_ack = igb_check_for_ack_pf; + mbx->ops.check_for_rst = igb_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h new file mode 100644 index 0000000..d20af6b --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h @@ -0,0 +1,73 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_hw.h" + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. + * Message ACKs are the value OR'd with 0xF0000000 + */ +/* Messages below OR'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below OR'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /* VF requests to clear VMOLR.ROPE/MPME */ +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 igb_check_for_msg(struct e1000_hw *, u16); +s32 igb_check_for_ack(struct e1000_hw *, u16); +s32 igb_check_for_rst(struct e1000_hw *, u16); +s32 igb_init_mbx_params_pf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c new file mode 100644 index 0000000..a8d0207 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c @@ -0,0 +1,803 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING".
+ * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include <linux/if_ether.h> +#include <linux/delay.h> + +#include "e1000_mac.h" +#include "e1000_nvm.h" + +/** + * igb_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + wr32(E1000_EECD, *eecd); + wrfl(); + udelay(hw->nvm.delay_usec); +} + +/** + * igb_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + wr32(E1000_EECD, *eecd); + wrfl(); + udelay(hw->nvm.delay_usec); +} + +/** + * igb_shift_out_eec_bits - Shift data bits out to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + wr32(E1000_EECD, eecd); + wrfl(); + + udelay(nvm->delay_usec); + + igb_raise_eec_clk(hw, &eecd); + igb_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + wr32(E1000_EECD, eecd); +} + +/** + * igb_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = rd32(E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + igb_raise_eec_clk(hw, &eecd); + + eecd = rd32(E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + igb_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'.
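+ *
+ * (Annotation added in this port's notes: the EERD read path in
+ * igb_read_nvm_eerd() below reduces, for a single word, to
+ *
+ *	wr32(E1000_EERD, (offset << E1000_NVM_RW_ADDR_SHIFT) |
+ *			 E1000_NVM_RW_REG_START);
+ *	if (!igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ))
+ *		word = rd32(E1000_EERD) >> E1000_NVM_RW_REG_DATA;
+ *
+ * i.e. write the address plus the start bit, poll DONE here, then
+ * shift out the data field.)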
+ **/ +static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = rd32(E1000_EERD); + else + reg = rd32(E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igb_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 igb_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = rd32(E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = 0; + + + wr32(E1000_EECD, eecd | E1000_EECD_REQ); + eecd = rd32(E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = rd32(E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return ret_val; +} + +/** + * igb_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void igb_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = rd32(E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + igb_lower_eec_clk(hw, &eecd); + } +} + +/** + * igb_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void igb_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = rd32(E1000_EECD); + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); +} + +/** + * igb_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Sets up the EEPROM for reading and writing. + **/ +static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + s32 ret_val = 0; + u16 timeout = 0; + u8 spi_stat_reg; + + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + wr32(E1000_EECD, eecd); + wrfl(); + udelay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out.
+ */ + while (timeout) { + igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + igb_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + hw_dbg("SPI NVM Status error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_read_nvm_spi - Read EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igb_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset. + */ + for (i = 0; i < words; i++) { + word_in = igb_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + wr32(E1000_EERD, eerd); + ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If igb_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum.
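+ *
+ * (Usage sketch added in this port's notes: a caller changing a single
+ * word would typically pair the write with a checksum update, e.g.
+ *
+ *	u16 word = 0x1234;
+ *
+ *	if (!igb_write_nvm_spi(hw, offset, 1, &word))
+ *		igb_update_nvm_checksum(hw);
+ *
+ * with igb_update_nvm_checksum() defined later in this file.)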
+ **/ +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + igb_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + igb_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + igb_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + igb_standby_nvm(hw); + break; + } + } + usleep_range(1000, 2000); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * igb_read_part_string - Read device part number + * @hw: pointer to the HW structure + * @part_num: pointer to device part number + * @part_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in part_num. 
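+ *
+ * (Worked example added in this port's notes: in the legacy layout,
+ * nvm_data = 0x1234 and pointer = 0x5678 are unpacked nibble by nibble
+ * into part_num[] = {1, 2, 3, 4, 5, 6, '-', 0, 7, 8}, and the
+ * hex-to-ASCII pass below renders that as the string "123456-078".)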
+ **/ +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pointer; + u16 offset; + u16 length; + + if (part_num == NULL) { + hw_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pointer is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + hw_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (part_num_size < 11) { + hw_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pointer */ + part_num[0] = (nvm_data >> 12) & 0xF; + part_num[1] = (nvm_data >> 8) & 0xF; + part_num[2] = (nvm_data >> 4) & 0xF; + part_num[3] = nvm_data & 0xF; + part_num[4] = (pointer >> 12) & 0xF; + part_num[5] = (pointer >> 8) & 0xF; + part_num[6] = '-'; + part_num[7] = 0; + part_num[8] = (pointer >> 4) & 0xF; + part_num[9] = pointer & 0xF; + + /* put a null character on the end of our string */ + part_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (part_num[offset] < 0xA) + part_num[offset] += '0'; + else if (part_num[offset] < 0x10) + part_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg("NVM PBA number section invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if part_num buffer is big enough */ + if (part_num_size < (((u32)length * 2) - 1)) { + hw_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pointer++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + part_num[offset * 2] = (u8)(nvm_data >> 8); + part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + part_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * igb_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. 
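+ *
+ * (Worked example added in this port's notes: the bytes sit
+ * little-endian in the receive address register pair, so for the
+ * address 00:1B:21:AA:BB:CC the hardware holds RAL(0) = 0xAA211B00 and
+ * the low 16 bits of RAH(0) = 0xCCBB; the shifts below simply unpack
+ * them in that order.)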
+ **/ +s32 igb_read_mac_addr(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(E1000_RAH(0)); + rar_low = rd32(E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igb_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igb_validate_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 igb_update_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output structure + * + * unsupported MAC types will return all 0 version structure + **/ +void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers and bits used vary by part and by tool + * used to create the nvm images. Check which data format we have. 
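+ *
+ * (Worked example added in this port's notes: the eep_minor conversion
+ * further down assumes a decimal-coded minor of at most 99. Assuming
+ * NVM_HEX_CONV = 10 and NVM_HEX_TENS = 16, a stored minor of 25 gives
+ * q = 2, hval = 0x20, rem = 5, hence eep_minor = 0x25, which prints as
+ * "25" in the usual major.minor form.)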
+ */ + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + switch (hw->mac.type) { + case e1000_i211: + igb_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(igb_get_flash_presence_i210(hw))) { + igb_read_invm_version(hw, fw_vers); + return; + } + fallthrough; + case e1000_i350: + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h new file mode 100644 index 0000000..cb675be --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h @@ -0,0 +1,57 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_NVM_H_ +#define _E1000_NVM_H_ + +s32 igb_acquire_nvm(struct e1000_hw *hw); +void igb_release_nvm(struct e1000_hw *hw); +s32 igb_read_mac_addr(struct e1000_hw *hw); +s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, + u32 part_num_size); +s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_validate_nvm_checksum(struct e1000_hw *hw); +s32 igb_update_nvm_checksum(struct e1000_hw *hw); + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; +void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers); + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c new file mode 100644 index 0000000..8d74089 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c @@ -0,0 +1,2513 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include <linux/if_ether.h> +#include <linux/delay.h> + +#include "e1000_mac.h" +#include "e1000_phy.h" + +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl); +static s32 igb_wait_autoneg(struct e1000_hw *hw); +static s32 igb_set_master_slave_mode(struct e1000_hw *hw); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, + 104, 109, 114, 118, 121, 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * igb_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 igb_check_reset_block(struct e1000_hw *hw) +{ + u32 manc; + + manc = rd32(E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; +} + +/** + * igb_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 igb_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igb_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +static s32 igb_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.write_reg)) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. 
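+ * As an illustrative note derived from the code below, the MDIC command + * word is assembled as (offset << E1000_MDIC_REG_SHIFT) | + * (phy->addr << E1000_MDIC_PHY_SHIFT) | E1000_MDIC_OP_READ; the MAC then + * runs the MDIO transaction on the PHY's behalf.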
+ **/ +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to write the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI write completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
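+ * The 16-bit payload is returned byte-swapped in I2CCMD, which is why + * the code below swaps the high and low bytes before storing *data.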
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return 0; +} + +/** + * igb_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + /* Prevent overwriting the SFP I2C EEPROM, which is at the A0 address. */ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + hw_dbg("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to write the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C write completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * igb_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in the SFP's resident EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters + * access + **/ +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address, in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. 
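+ * Whether the module database or the diagnostic area is accessed is + * encoded in the offset itself through the E1000_I2CCMD_SFP_DATA_ADDR() + * and E1000_I2CCMD_SFP_DIAG_ADDR() macros named in the header above.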
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + data_local = rd32(E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return 0; +} + +/** + * igb_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Releases any acquired + * semaphores before exiting. + **/ +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Releases any acquired semaphores before exiting. + **/ +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 igb_copper_link_setup_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + if (phy->type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + + /* Enable CRS on TX. This must be set for half-duplex operation. 
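+ * (CRS is the carrier-sense signal; asserting it on transmit is what + * keeps collision detection working in half-duplex mode.)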
*/ + ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; + + ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + if (ret_val) + goto out; + + /* Set MDI/MDIX mode */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 igb_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. 
*/ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) + return 0; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + fallthrough; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and set it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + ret_val = igb_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return 0; +} + +/** + * igb_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 igb_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
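+ * (LPLU, Low Power Link Up, drops the link to a lower speed to save + * power in low-power states; it is deliberately disabled below while the + * driver initializes.)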
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + if (phy->ops.set_d3_lplu_state) + ret_val = phy->ops.set_d3_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D3\n"); + goto out; + } + } + + /* disable lplu d0 during driver init */ + ret_val = phy->ops.set_d0_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D0\n"); + goto out; + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + fallthrough; + default: + break; + } + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on the autoneg advertisement parameter, + * then configures the PHY to advertise the full capability. Sets up the PHY + * for autoneg and restarts the negotiation process with the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 igb_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
+ */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igb_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igb_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igb_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = phy->ops.write_reg(hw, + PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 igb_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. 
+ */ + ret_val = igb_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igb_config_collision_dist(hw); + ret_val = igb_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + hw_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + + hw_dbg("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Reset the phy to commit changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + if (!reset_dsp) + hw_dbg("Link taking longer than expected.\n"); + else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + goto out; + ret_val = igb_phy_reset_dsp(hw); + if (ret_val) + goto out; + } + } + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + } + + if (hw->phy.type != e1000_phy_m88 || + hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == I210_I_PHY_ID) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take effect. + **/ +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = rd32(E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? 
*/ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + hw_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + hw_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + hw_dbg("Forcing 10mb\n"); + } + + igb_config_collision_dist(hw); + + wr32(E1000_CTRL, ctrl); +} + +/** + * igb_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 data; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * igb_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. 
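+ * (A downshift means the PHY fell back to a lower speed than it + * advertised, e.g. 100 Mb/s instead of 1000 Mb/s, typically because of a + * marginal cable.)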
+ **/ +s32 igb_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + ret_val = 0; + goto out; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? true : false; + +out: + return ret_val; +} + +/** + * igb_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 igb_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +static s32 igb_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + +out: + return ret_val; +} + +/** + * igb_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, whichever happens first. + **/ +static s32 igb_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. 
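+ * Callers that need certainty should therefore re-check + * MII_SR_AUTONEG_COMPLETE in PHY_STATUS before relying on the result.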
+ */ + return ret_val; +} + +/** + * igb_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igb_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 igb_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, index, default_page, is_cm; + + switch (hw->phy.id) { + case I210_I_PHY_ID: + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 
100 : 1); + break; + case M88E1543_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + goto out; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + goto out; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + break; + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + goto out; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. 
*/ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * igb_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + hw_dbg("Phy info is only valid for copper media\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) + ? true : false; + + ret_val = igb_check_polarity_m88(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine the cable length, local and remote receiver. 
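+ * (The local/remote receiver status reported here reflects the + * SR_1000T_LOCAL_RX_STATUS and SR_1000T_REMOTE_RX_STATUS bits, i.e. + * whether each end's 1000BASE-T receiver has trained successfully.)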
+ **/ +s32 igb_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_igp(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/writing the control register reset bit to the PHY. + **/ +s32 igb_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_ctrl; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + udelay(1); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 igb_phy_hw_reset(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = igb_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(E1000_CTRL, ctrl); + wrfl(); + + udelay(150); + + phy->ops.release(hw); + + ret_val = phy->ops.get_cfg_done(hw); + +out: + return ret_val; +} + +/** + * igb_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes an Intel Gigabit PHY3 when an EEPROM is not present. 
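+ * The raw register/value writes below stand in for the configuration the + * PHY would normally load from EEPROM; each pair is annotated with its + * intent.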
+ **/
+s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
+{
+	hw_dbg("Running IGP 3 PHY init script\n");
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to TX amplitude in Giga mode */
+	hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
+	/* Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
+	/* Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+	return 0;
+}
+
+/**
+ * igb_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, restore the link to previous settings.
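The init script above is one fixed (register, value) sequence issued write by write. A common alternative shape for such scripts is a table-driven loop, sketched standalone below; the entries are abridged to the first and last writes of the sequence above, and the writer is a stub that only prints what it would program.

#include <stdint.h>
#include <stdio.h>

struct phy_write {
    uint16_t reg;
    uint16_t val;
};

/* Abridged copy of the IGP3 script; remaining entries elided. */
static const struct phy_write igp3_script[] = {
    { 0x2F5B, 0x9018 },  /* enable rise/fall, 10-mode class-A */
    { 0x2F52, 0x0000 },  /* remove replica path filter caps */
    { 0x0000, 0x1340 },  /* restart AN, speed selection 1000 */
};

/* Stub standing in for hw->phy.ops.write_reg. */
static int write_reg(uint16_t reg, uint16_t val)
{
    printf("phy[0x%04x] <= 0x%04x\n", reg, val);
    return 0;
}

int main(void)
{
    size_t i;

    for (i = 0; i < sizeof(igp3_script) / sizeof(igp3_script[0]); i++)
        if (write_reg(igp3_script[i].reg, igp3_script[i].val))
            return 1;
    return 0;
}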
+ **/
+void igb_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * igb_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power when the interface is down and
+ * Wake-on-LAN is not enabled.
+ **/
+void igb_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+	usleep_range(1000, 2000);
+}
+
+/**
+ * igb_check_polarity_82580 - Checks the polarity
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+static s32 igb_check_polarity_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns 0
+ * if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Clear Auto-Crossover to force MDI manually. 82580 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+
+	ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
+
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			hw_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_get_phy_info_82580 - Retrieve I82580 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length, local and remote receiver.
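The two power helpers above differ only in the direction they move MII_CR_POWER_DOWN (bit 11 of the MII control register). A tiny standalone sketch of that toggle, with the register access replaced by a variable; the starting value is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MII_CR_POWER_DOWN 0x0800

static uint16_t phy_ctrl = 0x1140;  /* hypothetical PHY_CONTROL contents */

/* Mirrors igb_power_down_phy_copper / igb_power_up_phy_copper. */
static void set_power(int down)
{
    if (down)
        phy_ctrl |= MII_CR_POWER_DOWN;
    else
        phy_ctrl &= ~MII_CR_POWER_DOWN;
}

int main(void)
{
    set_power(1);
    printf("down: 0x%04x\n", phy_ctrl);
    set_power(0);
    printf("up:   0x%04x\n", phy_ctrl);
    return 0;
}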
+ **/ +s32 igb_get_phy_info_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_82580(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82580_PHY_STATUS2_SPEED_MASK) == + I82580_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_82580 - Determine cable length for 82580 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. + **/ +s32 igb_get_cable_length_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> + I82580_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_gs40g - Write GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to write to + * upper half is page to use. + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igb_read_phy_reg_gs40g - Read GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is page to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. 
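The GS40G accessors here pack two values into their 32-bit offset argument: the page number in the upper 16 bits and the register offset in the lower 16, split with GS40G_PAGE_SHIFT and GS40G_OFFSET_MASK before the page-select write. The split in isolation, using the GS40G_PAGE_2 / GS40G_MAC_REG2 values defined in the header below:

#include <stdint.h>
#include <stdio.h>

#define GS40G_PAGE_SHIFT  16
#define GS40G_OFFSET_MASK 0xFFFF

int main(void)
{
    /* page 2, register 0x15 -- i.e. GS40G_PAGE_2 | GS40G_MAC_REG2 */
    uint32_t offset = (2u << GS40G_PAGE_SHIFT) | 0x15;
    uint16_t page = (uint16_t)(offset >> GS40G_PAGE_SHIFT);
    uint16_t reg  = (uint16_t)(offset & GS40G_OFFSET_MASK);

    printf("page %u, register 0x%02x\n", (unsigned)page, (unsigned)reg);
    return 0;
}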
+ **/ +s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igb_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 igb_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + fallthrough; + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h new file mode 100644 index 0000000..6e7ac2f --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h @@ -0,0 +1,175 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +s32 igb_check_downshift(struct e1000_hw *hw); +s32 igb_check_reset_block(struct e1000_hw *hw); +s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +s32 igb_get_phy_id(struct e1000_hw *hw); +s32 igb_get_phy_info_igp(struct e1000_hw *hw); +s32 igb_get_phy_info_m88(struct e1000_hw *hw); +s32 igb_phy_sw_reset(struct e1000_hw *hw); +s32 igb_phy_hw_reset(struct e1000_hw *hw); +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 igb_setup_copper_link(struct e1000_hw *hw); +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igb_power_up_phy_copper(struct e1000_hw *hw); +void igb_power_down_phy_copper(struct e1000_hw *hw); +s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +s32 igb_get_phy_info_82580(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +s32 igb_get_cable_length_82580(struct e1000_hw *hw); +s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_check_polarity_m88(struct e1000_hw *hw); + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define I82580_ADDR_REG 16 +#define I82580_CFG_REG 22 +#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82580_CTRL_REG 23 +#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) + +/* 82580 specific PHY registers */ +#define I82580_PHY_CTRL_2 18 +#define I82580_PHY_LBK_CTRL 19 +#define I82580_PHY_STATUS_2 26 +#define I82580_PHY_DIAG_STATUS 31 + +/* I82580 PHY Status 2 */ +#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82580_PHY_STATUS2_MDIX 0x0800 
+#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82580 PHY Control 2 */ +#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82580 PHY Diagnostics Status */ +#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +/* Enable flexible speed on link-up */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +/* GS40G - I210 PHY defines */ +#define GS40G_PAGE_SELECT 0x16 +#define GS40G_PAGE_SHIFT 16 +#define GS40G_OFFSET_MASK 0xFFFF +#define GS40G_PAGE_2 0x20000 +#define GS40G_MAC_REG2 0x15 +#define GS40G_MAC_LB 0x4140 +#define GS40G_MAC_SPEED_1G 0X0006 +#define GS40G_COPPER_SPEC 0x0010 +#define GS40G_LINE_LB 0x4000 + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct e1000_sfp_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h new file mode 100644 index 0000000..0d1d140 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h @@ -0,0 +1,427 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ + +/* IEEE 1588 TIMESYNCH */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +#define E1000_SYSTIMR 0x0B6F8 /* 
System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* TX Rate Limit Registers */ +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ + +/* Split and Replication RX Control - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ + +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ + : (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ + : (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ + : (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ + : (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ + : (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ + : (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ + : (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ + : (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ + : (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ + : (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ + : (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ + : (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ + : (0x0E028 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? 
(0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ + : (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ + : (0x0E03C + ((_n) * 0x40))) + +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 
0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +/* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXPTC 0x04104 +/* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICRXATC 0x04108 +/* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C +/* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXATC 0x04110 +/* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQEC 0x04118 +/* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICTXQMTC 0x0411C +/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ +#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ + +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ +/* MSI-X Allocation Register (_i) - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +/* Redirection Table - RW Array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ + +/* VT Registers */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +/* These act per VF so an array friendly macro is used */ +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) + +struct e1000_hw; + +u32 igb_rd32(struct e1000_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!E1000_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igb_rd32(hw, reg)) + +#define wrfl() ((void)rd32(E1000_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + 
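The per-queue convenience macros earlier in this header encode the 82575-family register layout: queues 0-3 live in the legacy bank with a 0x100 stride, while higher queues sit in a second bank with a 0x40 stride. A standalone check of that address arithmetic, with the RDBAL formula copied verbatim from the E1000_RDBAL definition above:

#include <stdio.h>

/* Same expression as E1000_RDBAL(_n) in this header. */
#define RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
                            : (0x0C000 + ((_n) * 0x40)))

int main(void)
{
    int q;

    for (q = 0; q < 8; q++)
        printf("RDBAL(%d) = 0x%05X\n", q, RDBAL(q));
    return 0;
}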
+#define array_rd32(reg, offset) \ + (readl(hw->hw_addr + reg + ((offset) << 2))) + +/* DMA Coalescing registers */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* Energy Efficient Ethernet "EEE" register */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +#define E1000_EEE_SU 0X0E34 /* EEE Setup */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Thermal Sensor Register */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +#define E1000_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h new file mode 100644 index 0000000..4c65e31 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h @@ -0,0 +1,559 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGB_H_ +#define _IGB_H_ + +#include "e1000_mac.h" +#include "e1000_82575.h" + +#include <linux/bitops.h> +#include <linux/if_vlan.h> +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> +#include <linux/pci.h> +#include <linux/mdio.h> + +#include <rtdev.h> + +#undef CONFIG_IGB_HWMON + +struct igb_adapter; + +#define E1000_PCS_CFG_IGN_SD 1 + +/* Interrupt defines */ +#define IGB_START_ITR 648 /* ~6000 ints/sec */ +#define IGB_4K_ITR 980 +#define IGB_20K_ITR 196 +#define IGB_70K_ITR 56 + +/* TX/RX descriptor defines */ +#define IGB_DEFAULT_TXD 256 +#define IGB_DEFAULT_TX_WORK 128 +#define IGB_MIN_TXD 80 +#define IGB_MAX_TXD 4096 + +#define IGB_DEFAULT_RXD 256 +#define IGB_MIN_RXD 80 +#define IGB_MAX_RXD 4096 + +#define IGB_DEFAULT_ITR 3 /* dynamic */ +#define IGB_MAX_ITR_USECS 10000 +#define IGB_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 8 +#define MAX_MSIX_ENTRIES 10 + +/* Transmit and receive queues */ +#define IGB_MAX_RX_QUEUES 8 +#define IGB_MAX_RX_QUEUES_82575 4 +#define IGB_MAX_RX_QUEUES_I211 2 +#define IGB_MAX_TX_QUEUES 8 +#define IGB_MAX_VF_MC_ENTRIES 30 +#define IGB_MAX_VF_FUNCTIONS 8 +#define IGB_MAX_VFTA_ENTRIES 128 +#define IGB_82576_VF_DEV_ID 0x10CA +#define IGB_I350_VF_DEV_ID 0x1520 + +/* NVM version defines */ +#define IGB_MAJOR_MASK 0xF000 +#define IGB_MINOR_MASK 0x0FF0 +#define IGB_BUILD_MASK 0x000F +#define IGB_COMB_VER_MASK 0x00FF +#define IGB_MAJOR_SHIFT 12 +#define IGB_MINOR_SHIFT 4 +#define IGB_COMB_VER_SHFT 8 +#define IGB_NVM_VER_INVALID 0xFFFF +#define IGB_ETRACK_SHIFT 16 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 vlans_enabled; + u32 flags; + unsigned long last_nack; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; +}; + +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ +#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8) +#define IGB_RX_HTHRESH 8 +#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) +#define IGB_TX_HTHRESH 1 +#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4) +#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 
1 : 16) + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_256 256 +#define IGB_RXBUFFER_2048 2048 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define IGB_EEPROM_APME 0x0400 + +#ifndef IGB_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define IGB_MNG_VLAN_NONE -1 + +enum igb_tx_flags { + /* cmd_type flags */ + IGB_TX_FLAGS_VLAN = 0x01, + IGB_TX_FLAGS_TSO = 0x02, + IGB_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGB_TX_FLAGS_IPV4 = 0x10, + IGB_TX_FLAGS_CSUM = 0x20, +}; + +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. + */ +#define IGB_MAX_TXD_PWR 15 +#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP 0x5C +#define IGB_SFF_8472_COMP 0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE 0x4 +#define IGB_SFF_8472_UNSUP 0x00 + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + struct rtskb *skb; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct rtskb *skb; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct rtnet_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device pointer for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; + }; + /* RX */ + struct { + struct igb_rx_queue_stats rx_stats; + u16 rx_buffer_len; + }; + }; +} ____cacheline_internodealigned_in_smp; + +struct igb_q_vector { + struct igb_adapter *adapter; /* backlink */ + int cpu; /* CPU for DCA */ + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + void __iomem *itr_register; + + struct igb_ring_container rx, tx; + + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, + IGB_RING_FLAG_TX_DETECT_HANG +}; + +#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + +/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline int igb_desc_unused(struct igb_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +#ifdef CONFIG_IGB_HWMON + +#define IGB_HWMON_TYPE_LOC 0 +#define IGB_HWMON_TYPE_TEMP 1 +#define IGB_HWMON_TYPE_CAUTION 2 +#define IGB_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; + }; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4]; + unsigned int n_hwmon; + }; +#endif + +#define IGB_N_EXTTS 2 +#define IGB_N_PEROUT 2 +#define IGB_N_SDP 4 +#define IGB_RETA_SIZE 128 + +/* board specific private data structure */ +struct igb_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct rtnet_device *netdev; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; + struct msix_entry msix_entries[MAX_MSIX_ENTRIES]; + rtdm_irq_t msix_irq_handle[MAX_MSIX_ENTRIES]; + rtdm_irq_t irq_handle; + rtdm_nrtsig_t watchdog_nrtsig; + spinlock_t stats64_lock; + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + + /* RX */ + int num_rx_queues; + struct igb_ring *rx_ring[16]; + + u32 max_frame_size; + u32 min_frame_size; + + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + struct work_struct 
reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + unsigned long led_status; + + /* OS defined structs */ + struct pci_dev *pdev; + + struct net_device_stats net_stats; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ + u16 tx_ring_count; + u16 rx_ring_count; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + + unsigned long last_rx_timestamp; + + char fw_version[32]; +#ifdef CONFIG_IGB_HWMON + struct hwmon_buff *igb_hwmon_buff; + bool ets; +#endif + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[IGB_RETA_SIZE]; + + unsigned long link_check_timeout; + int copper_tries; + struct e1000_info ei; + u16 eee_advert; +}; + +#define IGB_FLAG_HAS_MSI (1 << 0) +#define IGB_FLAG_DCA_ENABLED (1 << 1) +#define IGB_FLAG_QUAD_PORT_A (1 << 2) +#define IGB_FLAG_QUEUE_PAIRS (1 << 3) +#define IGB_FLAG_DMAC (1 << 4) +#define IGB_FLAG_PTP (1 << 5) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) +#define IGB_FLAG_WOL_SUPPORTED (1 << 8) +#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9) +#define IGB_FLAG_MEDIA_RESET (1 << 10) +#define IGB_FLAG_MAS_CAPABLE (1 << 11) +#define IGB_FLAG_MAS_ENABLE (1 << 12) +#define IGB_FLAG_HAS_MSIX (1 << 13) +#define IGB_FLAG_EEE (1 << 14) + +/* Media Auto Sense */ +#define IGB_MAS_ENABLE_0 0X0001 +#define IGB_MAS_ENABLE_1 0X0002 +#define IGB_MAS_ENABLE_2 0X0004 +#define IGB_MAS_ENABLE_3 0X0008 + +/* DMA Coalescing defines */ +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ + +#define IGB_82576_TSYNC_SHIFT 19 +#define IGB_TS_HDR_LEN 16 +enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, + __IGB_DOWN, + __IGB_PTP_TX_IN_PROGRESS, +}; + +enum igb_boards { + board_82575, +}; + +extern char igb_driver_name[]; +extern char igb_driver_version[]; + +int igb_up(struct igb_adapter *); +void igb_down(struct igb_adapter *); +void igb_reinit_locked(struct igb_adapter *); +void igb_reset(struct igb_adapter *); +int igb_reinit_queues(struct igb_adapter *); +void igb_write_rss_indir_tbl(struct igb_adapter *); +int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +int igb_setup_tx_resources(struct igb_ring *); +int igb_setup_rx_resources(struct igb_ring *); +void igb_free_tx_resources(struct igb_ring *); +void igb_free_rx_resources(struct igb_ring *); +void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *); +void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +void igb_setup_tctl(struct igb_adapter *); +void igb_setup_rctl(struct igb_adapter *); +netdev_tx_t igb_xmit_frame_ring(struct rtskb *, struct igb_ring *); +void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *); +void igb_alloc_rx_buffers(struct igb_ring *, u16); +void igb_update_stats(struct igb_adapter *); +bool igb_has_link(struct igb_adapter *adapter); +void igb_set_ethtool_ops(struct rtnet_device *); +void igb_power_up_link(struct igb_adapter *); +void igb_set_fw_version(struct igb_adapter *); +void igb_ptp_init(struct igb_adapter *adapter); +void 
igb_ptp_stop(struct igb_adapter *adapter); +void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_rx_hang(struct igb_adapter *adapter); +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct rtskb *skb); +void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, + struct rtskb *skb); +int igb_ptp_set_ts_config(struct rtnet_device *netdev, struct ifreq *ifr); +int igb_ptp_get_ts_config(struct rtnet_device *netdev, struct ifreq *ifr); +#ifdef CONFIG_IGB_HWMON +void igb_sysfs_exit(struct igb_adapter *adapter); +int igb_sysfs_init(struct igb_adapter *adapter); +#endif +static inline s32 igb_reset_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline struct rtnet_device *txring_txq(const struct igb_ring *tx_ring) +{ + return tx_ring->netdev; +} + +#endif /* _IGB_H_ */ diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c new file mode 100644 index 0000000..44b6a68 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c @@ -0,0 +1,249 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "igb.h" +#include "e1000_82575.h" +#include "e1000_hw.h" + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/sysfs.h> +#include <linux/kobject.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/hwmon.h> +#include <linux/pci.h> + +#ifdef CONFIG_IGB_HWMON +static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), +}; + +/* hwmon callback functions */ +static ssize_t igb_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + igb_attr->sensor->location); +} + +static ssize_t igb_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw); + + value = igb_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @ adapter: pointer to the adapter structure + * @ offset: offset in the eeprom sensor data table + * @ type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
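+ * For sensor 0, for example, the four types below produce the sysfs
+ * files temp1_label, temp1_input, temp1_max and temp1_crit.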
+ */
+static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+ unsigned int offset, int type)
+{
+ int rc;
+ unsigned int n_attr;
+ struct hwmon_attr *igb_attr;
+
+ n_attr = adapter->igb_hwmon_buff->n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
+
+ switch (type) {
+ case IGB_HWMON_TYPE_LOC:
+ igb_attr->dev_attr.show = igb_hwmon_show_location;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_label", offset + 1);
+ break;
+ case IGB_HWMON_TYPE_TEMP:
+ igb_attr->dev_attr.show = igb_hwmon_show_temp;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_input", offset + 1);
+ break;
+ case IGB_HWMON_TYPE_CAUTION:
+ igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_max", offset + 1);
+ break;
+ case IGB_HWMON_TYPE_MAX:
+ igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_crit", offset + 1);
+ break;
+ default:
+ rc = -EPERM;
+ return rc;
+ }
+
+ /* These are always the same regardless of type */
+ igb_attr->sensor =
+ &adapter->hw.mac.thermal_sensor_data.sensor[offset];
+ igb_attr->hw = &adapter->hw;
+ igb_attr->dev_attr.store = NULL;
+ igb_attr->dev_attr.attr.mode = S_IRUGO;
+ igb_attr->dev_attr.attr.name = igb_attr->name;
+ sysfs_attr_init(&igb_attr->dev_attr.attr);
+
+ adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+
+ ++adapter->igb_hwmon_buff->n_hwmon;
+
+ return 0;
+}
+
+static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+{
+}
+
+/* called from igb_main.c */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+ igb_sysfs_del_adapter(adapter);
+}
+
+/* called from igb_main.c */
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+ struct hwmon_buff *igb_hwmon;
+ struct i2c_client *client;
+ struct device *hwmon_dev;
+ unsigned int i;
+ int rc = 0;
+
+ /* If this method isn't defined we don't support thermals */
+ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+ goto exit;
+
+ /* Don't create thermal hwmon interface if no sensors present */
+ rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
+ if (rc)
+ goto exit;
+
+ igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+ GFP_KERNEL);
+ if (!igb_hwmon) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ adapter->igb_hwmon_buff = igb_hwmon;
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+ /* Only create hwmon sysfs entries for sensors that have
+ * meaningful data.
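+ * A location of zero means the slot is unpopulated in the
+ * sensor table, so it is skipped here.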
+ */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) + goto exit; + } + + /* init i2c_client */ + client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); + if (client == NULL) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device.\n"); + rc = -ENODEV; + goto exit; + } + adapter->i2c_client = client; + + igb_hwmon->groups[0] = &igb_hwmon->group; + igb_hwmon->group.attrs = igb_hwmon->attrs; + + hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, + client->name, + igb_hwmon, + igb_hwmon->groups); + if (IS_ERR(hwmon_dev)) { + rc = PTR_ERR(hwmon_dev); + goto err; + } + + goto exit; + +err: + igb_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c new file mode 100644 index 0000000..d47c0bb --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c @@ -0,0 +1,5676 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2015 Intel Corporation. + * RTnet port 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com> + * Copyright(c) 2015 Gilles Chanteperdrix <gch@xenomai.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/bitops.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/netdevice.h> +#include <linux/ipv6.h> +#include <linux/slab.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/net_tstamp.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/if.h> +#include <linux/if_vlan.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/if_ether.h> +#include <linux/aer.h> +#include <linux/prefetch.h> +#include <linux/pm_runtime.h> +#include <linux/i2c.h> +#include "igb.h" + +#include <rtnet_port.h> + +// RTNET redefines +#ifdef NETIF_F_TSO +#undef NETIF_F_TSO +#define NETIF_F_TSO 0 +#endif + +#ifdef NETIF_F_TSO6 +#undef NETIF_F_TSO6 +#define NETIF_F_TSO6 0 +#endif + +#ifdef NETIF_F_HW_VLAN_TX +#undef NETIF_F_HW_VLAN_TX +#define NETIF_F_HW_VLAN_TX 0 +#endif + +#ifdef NETIF_F_HW_VLAN_RX +#undef NETIF_F_HW_VLAN_RX +#define NETIF_F_HW_VLAN_RX 0 +#endif + +#ifdef NETIF_F_HW_VLAN_FILTER +#undef NETIF_F_HW_VLAN_FILTER +#define NETIF_F_HW_VLAN_FILTER 0 +#endif + +#ifdef IGB_MAX_TX_QUEUES +#undef IGB_MAX_TX_QUEUES +#define IGB_MAX_TX_QUEUES 1 +#endif + +#ifdef IGB_MAX_RX_QUEUES +#undef IGB_MAX_RX_QUEUES +#define IGB_MAX_RX_QUEUES 1 +#endif + +#ifdef CONFIG_IGB_NAPI +#undef CONFIG_IGB_NAPI +#endif + +#ifdef IGB_HAVE_TX_TIMEOUT +#undef IGB_HAVE_TX_TIMEOUT +#endif + +#ifdef ETHTOOL_GPERMADDR +#undef ETHTOOL_GPERMADDR +#endif + +#ifdef CONFIG_PM +#undef CONFIG_PM +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#ifdef MAX_SKB_FRAGS +#undef MAX_SKB_FRAGS +#define MAX_SKB_FRAGS 1 +#endif + +#ifdef IGB_FRAMES_SUPPORT +#undef IGB_FRAMES_SUPPORT +#endif + +#define MAJ 5 +#define MIN 2 +#define BUILD 18 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\
+__stringify(BUILD) "-k"
+char igb_driver_name[] = "rt_igb";
+char igb_driver_version[] = DRV_VERSION;
+static const char igb_driver_string[] =
+ "Intel(R) Gigabit Ethernet Network Driver";
+static const char igb_copyright[] =
+ "Copyright (c) 2007-2014 Intel Corporation.";
+
+static const struct e1000_info *igb_info_tbl[] = {
+ [board_82575] = &e1000_82575_info,
+};
+
+#define MAX_UNITS 8
+static int InterruptThrottle = 0;
+module_param(InterruptThrottle, int, 0);
+MODULE_PARM_DESC(InterruptThrottle, "Throttle interrupts (boolean, false by default)");
+
+static const struct pci_device_id igb_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
+
+static int igb_setup_all_tx_resources(struct igb_adapter *);
+static int igb_setup_all_rx_resources(struct igb_adapter *);
+static void igb_free_all_tx_resources(struct igb_adapter *);
+static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
+static int igb_probe(struct pci_dev *, const struct pci_device_id *);
+static void igb_remove(struct pci_dev *pdev);
+static int igb_sw_init(struct igb_adapter *);
+static int igb_open(struct rtnet_device *);
+static
int igb_close(struct rtnet_device *); +static void igb_configure(struct igb_adapter *); +static void igb_configure_tx(struct igb_adapter *); +static void igb_configure_rx(struct igb_adapter *); +static void igb_clean_all_tx_rings(struct igb_adapter *); +static void igb_clean_all_rx_rings(struct igb_adapter *); +static void igb_clean_tx_ring(struct igb_ring *); +static void igb_clean_rx_ring(struct igb_ring *); +static void igb_set_rx_mode(struct rtnet_device *); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) +static void igb_update_phy_info(struct timer_list *); +static void igb_watchdog(struct timer_list *); +#else +static void igb_update_phy_info(unsigned long); +static void igb_watchdog(unsigned long); +#endif +static void igb_watchdog_task(struct work_struct *); +static netdev_tx_t igb_xmit_frame(struct rtskb *skb, struct rtnet_device *); +static struct net_device_stats *igb_get_stats(struct rtnet_device *); +static int igb_intr(rtdm_irq_t *irq_handle); +static int igb_intr_msi(rtdm_irq_t *irq_handle); +static void igb_nrtsig_watchdog(rtdm_nrtsig_t *sig, void *data); +static irqreturn_t igb_msix_other(int irq, void *); +static int igb_msix_ring(rtdm_irq_t *irq_handle); +static void igb_poll(struct igb_q_vector *); +static bool igb_clean_tx_irq(struct igb_q_vector *); +static bool igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_ioctl(struct rtnet_device *, struct ifreq *ifr, int cmd); +static void igb_reset_task(struct work_struct *); +static void igb_vlan_mode(struct rtnet_device *netdev, + netdev_features_t features); +static int igb_vlan_rx_add_vid(struct rtnet_device *, __be16, u16); +static void igb_restore_vlan(struct igb_adapter *); +static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int igb_suspend(struct device *); +#endif +static int igb_resume(struct device *); +static int igb_runtime_suspend(struct device *dev); +static int igb_runtime_resume(struct device *dev); +static int igb_runtime_idle(struct device *dev); +static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) +}; +#endif +static void igb_shutdown(struct pci_dev *); +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void igb_netpoll(struct rtnet_device *); +#endif + +static pci_ers_result_t igb_io_error_detected(struct pci_dev *, + pci_channel_state_t); +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); +static void igb_io_resume(struct pci_dev *); + +static const struct pci_error_handlers igb_err_handler = { + .error_detected = igb_io_error_detected, + .slot_reset = igb_io_slot_reset, + .resume = igb_io_resume, +}; + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); + +static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, + .remove = igb_remove, +#ifdef CONFIG_PM + .driver.pm = &igb_pm_ops, +#endif + .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, + .err_handler = &igb_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); +MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int local_debug = -1; 
+module_param_named(debug, local_debug, int, 0);
+MODULE_PARM_DESC(debug, "debug level (0=none,...,16=all)");
+
+struct igb_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN(0), "RDLEN"},
+ {E1000_RDH(0), "RDH"},
+ {E1000_RDT(0), "RDT"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_RDBAL(0), "RDBAL"},
+ {E1000_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL(0), "TDBAL"},
+ {E1000_TDBAH(0), "TDBAH"},
+ {E1000_TDLEN(0), "TDLEN"},
+ {E1000_TDH(0), "TDH"},
+ {E1000_TDT(0), "TDT"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/* igb_regdump - register printout routine */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDLEN(n));
+ break;
+ case E1000_RDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDH(n));
+ break;
+ case E1000_RDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDT(n));
+ break;
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RXDCTL(n));
+ break;
+ case E1000_RDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAL(n));
+ break;
+ case E1000_RDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_RDBAH(n));
+ break;
+ case E1000_TDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAL(n));
+ break;
+ case E1000_TDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDBAH(n));
+ break;
+ case E1000_TDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDLEN(n));
+ break;
+ case E1000_TDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDH(n));
+ break;
+ case E1000_TDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TDT(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(E1000_TXDCTL(n));
+ break;
+ default:
+ pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+ pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
+}
+
+/* igb_dump - Print registers, Tx-rings and Rx-rings */
+static void igb_dump(struct igb_adapter *adapter)
+{
+ struct rtnet_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_reg_info *reginfo;
+ struct igb_ring *tx_ring;
+ union e1000_adv_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct igb_ring *rx_ring;
+ union e1000_adv_rx_desc *rx_desc;
+ u32 staterr;
+ u16 i, n;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ pr_info("Device Name\n");
+ pr_info("%s\n", netdev->name);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ pr_info(" Register Name Value\n");
+ for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igb_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !rtnetif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ for (n = 0; n <
adapter->num_tx_queues; n++) { + struct igb_tx_buffer *buffer_info; + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] " + "[bi->dma ] leng ntw timestamp " + "bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igb_tx_buffer *buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + pr_info("T [0x%03X] %016llX %016llX" + " %p %016llX %p%s\n", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + 14, + true); + } + } + + /* Print RX Rings Summary */ + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info(" %5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] " + "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] 
-----" + "----------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igb_rx_buffer *buffer_info; + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGB_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + } + } + } + +exit: + return; +} + +/** + * igb_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + **/ +struct rtnet_device *igb_get_hw_dev(struct e1000_hw *hw) +{ + struct igb_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * igb_init_module - Driver Registration Routine + * + * igb_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init igb_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", + igb_driver_string, igb_driver_version); + pr_info("%s\n", igb_copyright); + + ret = pci_register_driver(&igb_driver); + return ret; +} + +module_init(igb_init_module); + +/** + * igb_exit_module - Driver Exit Cleanup Routine + * + * igb_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit igb_exit_module(void) +{ + pci_unregister_driver(&igb_driver); +} + +module_exit(igb_exit_module); + +#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) +/** + * igb_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + **/ +static void igb_cache_ring_register(struct igb_adapter *adapter) +{ + int i = 0, j = 0; + u32 rbase_offset = 0; + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* The queues are allocated for virtualization such that VF 0 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. 
+ * In order to avoid collision we start at the first free queue
+ * and continue consuming queues in the same sequence
+ */
+ fallthrough;
+ case e1000_82575:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ fallthrough;
+ default:
+ for (; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = rbase_offset + i;
+ for (; j < adapter->num_tx_queues; j++)
+ adapter->tx_ring[j]->reg_idx = rbase_offset + j;
+ break;
+ }
+}
+
+u32 igb_rd32(struct e1000_hw *hw, u32 reg)
+{
+ struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+ u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
+ u32 value = 0;
+
+ if (E1000_REMOVED(hw_addr))
+ return ~value;
+
+ value = readl(&hw_addr[reg]);
+
+ /* reads should not return all F's */
+ if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+ struct rtnet_device *netdev = igb->netdev;
+ hw->hw_addr = NULL;
+ rtnetif_device_detach(netdev);
+ rtdev_err(netdev, "PCIe link lost, device now detached\n");
+ }
+
+ return value;
+}
+
+/**
+ * igb_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset in IVAR, should be multiple of 8
+ *
+ * This function is intended to handle the writing of the IVAR register
+ * for adapters 82576 and newer. The IVAR table consists of 2 columns,
+ * each containing a cause allocation for an Rx and Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
+ **/
+static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
+ int index, int offset)
+{
+ u32 ivar = array_rd32(E1000_IVAR0, index);
+
+ /* clear any bits that are currently set */
+ ivar &= ~((u32)0xFF << offset);
+
+ /* write vector and valid bit */
+ ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+ array_wr32(E1000_IVAR0, index, ivar);
+}
+
+#define IGB_N0_QUEUE -1
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ int rx_queue = IGB_N0_QUEUE;
+ int tx_queue = IGB_N0_QUEUE;
+ u32 msixbm = 0;
+
+ if (q_vector->rx.ring)
+ rx_queue = q_vector->rx.ring->reg_idx;
+ if (q_vector->tx.ring)
+ tx_queue = q_vector->tx.ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ /* The 82575 assigns vectors using a bitmask, which matches the
+ * bitmask for the EICR/EIMS/EIMC registers. To assign one
+ * or more queues to a vector, we write the appropriate bits
+ * into the MSIXBM register for that vector.
+ */
+ if (rx_queue > IGB_N0_QUEUE)
+ msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
+ if (tx_queue > IGB_N0_QUEUE)
+ msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
+ msixbm |= E1000_EIMS_OTHER;
+ array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+ q_vector->eims_value = msixbm;
+ break;
+ case e1000_82576:
+ /* 82576 uses a table that essentially consists of 2 columns
+ * with 8 rows. The ordering is column-major so we use the
+ * lower 3 bits as the row index, and the 4th bit as the
+ * column offset.
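+ * For example, Rx queue 5 maps to row 5 (5 & 0x7) at bit offset 0,
+ * while Rx queue 9 maps to row 1 (9 & 0x7) at bit offset 16
+ * ((9 & 0x8) << 1).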
+ */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue & 0x7, + (rx_queue & 0x8) << 1); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue & 0x7, + ((tx_queue & 0x8) << 1) + 8); + q_vector->eims_value = 1 << msix_vector; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* On 82580 and newer adapters the scheme is similar to 82576 + * however instead of ordering column-major we have things + * ordered row-major. So we traverse the table by using + * bit 0 as the column offset, and the remaining bits as the + * row index. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = 1 << msix_vector; + break; + default: + BUG(); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igb_configure_msix - Configure MSI-X hardware + * @adapter: board private structure to initialize + * + * igb_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igb_configure_msix(struct igb_adapter *adapter) +{ + u32 tmp; + int i, vector = 0; + struct e1000_hw *hw = &adapter->hw; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: + tmp = rd32(E1000_CTRL_EXT); + /* enable MSI-X PBA support*/ + tmp |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask interrupts upon ICR read. */ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + + wr32(E1000_CTRL_EXT, tmp); + + /* enable msix_other interrupt */ + array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); + adapter->eims_other = E1000_EIMS_OTHER; + + break; + + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. + */ + wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = 1 << vector; + tmp = (vector++ | E1000_IVAR_VALID) << 8; + + wr32(E1000_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igb_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igb_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure to initialize + * + * igb_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
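+ * Vector 0 is bound to igb_msix_other() with a regular Linux
+ * request_irq() for link and management events; each remaining vector
+ * is attached to one q_vector through rtdm_irq_request().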
+ **/ +static int igb_request_msix(struct igb_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + int i, err = 0, vector = 0, free_vector = 0; + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = rtdm_irq_request(&adapter->msix_irq_handle[vector], + adapter->msix_entries[vector].vector, + igb_msix_ring, 0, q_vector->name, q_vector); + if (err) + goto err_free; + } + + igb_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) + rtdm_irq_free(&adapter->msix_irq_handle[free_vector++]); +err_out: + return err; +} + +/** + * igb_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + **/ +static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igb_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igb_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igb_free_q_vector. + **/ +static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* Coming from igb_set_interrupt_capability, the vectors are not yet + * allocated. So, q_vector is NULL so we should stop here. + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; +} + +static void igb_reset_interrupt_capability(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & IGB_FLAG_HAS_MSI) + pci_disable_msi(adapter->pdev); + + while (v_idx--) + igb_reset_q_vector(adapter, v_idx); +} + +/** + * igb_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/ +static void igb_free_q_vectors(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igb_reset_q_vector(adapter, v_idx); + igb_free_q_vector(adapter, v_idx); + } +} + +/** + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: board private structure to initialize + * + * This function resets the device so that it has 0 Rx queues, Tx queues, and + * MSI-X interrupts allocated. + */ +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) +{ + igb_free_q_vectors(adapter); + igb_reset_interrupt_capability(adapter); +} + +/** + * igb_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) +{ + int err; + int numvecs, i; + + if (!msix) + goto msi_only; + adapter->flags |= IGB_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + igb_reset_interrupt_capability(adapter); + + /* If we can't do MSI-X, try MSI */ +msi_only: + adapter->flags &= ~IGB_FLAG_HAS_MSIX; + adapter->rss_queues = 1; + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGB_FLAG_HAS_MSI; +} + +static void igb_add_ring(struct igb_ring *ring, + struct igb_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igb_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
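+ * The rings are carved out of the same allocation as the q_vector, so
+ * a single kzalloc() covers the vector plus its Tx and/or Rx ring.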
+ **/
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ int v_count, int v_idx,
+ int txr_count, int txr_idx,
+ int rxr_count, int rxr_idx)
+{
+ struct igb_q_vector *q_vector;
+ struct igb_ring *ring;
+ int ring_count, size;
+
+ /* igb only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct igb_q_vector) +
+ (sizeof(struct igb_ring) * ring_count);
+
+ /* allocate q_vector and rings */
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
+ else
+ memset(q_vector, 0, size);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+ q_vector->itr_val = IGB_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ /* initialize ITR */
+ if (rxr_count) {
+ /* rx or rx/tx vector */
+ if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+ q_vector->itr_val = adapter->rx_itr_setting;
+ } else {
+ /* tx only vector */
+ if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+ q_vector->itr_val = adapter->tx_itr_setting;
+ }
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igb_add_ring(ring, &q_vector->tx);
+
+ /* For 82575, context index must be unique per ring. */
+ if (adapter->hw.mac.type == e1000_82575)
+ set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ igb_add_ring(ring, &q_vector->rx);
+
+ /* set flag indicating ring supports SCTP checksum offload */
+ if (adapter->hw.mac.type >= e1000_82576)
+ set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
+
+ /* On i350, i354, i210, and i211, loopback VLAN packets
+ * have the tag byte-swapped.
+ */
+ if (adapter->hw.mac.type >= e1000_i350)
+ set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
+
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
+}
+
+
+/**
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
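+ * When fewer vectors than rings are available, the remaining rings are
+ * spread over the vectors with DIV_ROUND_UP() so that each vector ends
+ * up with at most one Tx and one Rx ring.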
+ **/ +static int igb_alloc_q_vectors(struct igb_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igb_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * This function initializes the interrupts and allocates all of the queues. + **/ +static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + + igb_set_interrupt_capability(adapter, msix); + + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igb_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igb_reset_interrupt_capability(adapter); + return err; +} + +/** + * igb_request_irq - initialize interrupts + * @adapter: board private structure to initialize + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
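+ * The order of preference is MSI-X, then MSI, then a legacy shared
+ * interrupt line.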
+ **/ +static int igb_request_irq(struct igb_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + rt_stack_connect(netdev, &STACK_manager); + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + err = igb_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + igb_clear_interrupt_scheme(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); + igb_configure(adapter); + } + + igb_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGB_FLAG_HAS_MSI) { + err = rtdm_irq_request(&adapter->irq_handle, + pdev->irq, igb_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igb_reset_interrupt_capability(adapter); + adapter->flags &= ~IGB_FLAG_HAS_MSI; + } + + err = rtdm_irq_request(&adapter->irq_handle, + pdev->irq, igb_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + dev_err(&pdev->dev, "Error %d getting interrupt\n", + err); + +request_done: + return err; +} + +static void igb_free_irq(struct igb_adapter *adapter) +{ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + rtdm_irq_free(&adapter->msix_irq_handle[vector++]); + } else { + rtdm_irq_free(&adapter->irq_handle); + } +} + +/** + * igb_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void igb_irq_disable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* we need to be careful when disabling interrupts. 
The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 regval = rd32(E1000_EIAM); + + wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); + wr32(E1000_EIMC, adapter->eims_enable_mask); + regval = rd32(E1000_EIAC); + wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(E1000_IAM, 0); + wr32(E1000_IMC, ~0); + wrfl(); + + msleep(10); +} + +/** + * igb_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void igb_irq_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; + u32 regval = rd32(E1000_EIAC); + + wr32(E1000_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(E1000_EIAM); + wr32(E1000_EIAM, regval | adapter->eims_enable_mask); + wr32(E1000_EIMS, adapter->eims_enable_mask); + wr32(E1000_IMS, ims); + } else { + wr32(E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + wr32(E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + } +} + +static void igb_update_mng_vlan(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter table */ + igb_vfta_set(hw, vid, true); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; + } + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) { + /* remove VID from filter table */ + igb_vfta_set(hw, old_vid, false); + } +} + +/** + * igb_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + **/ +static void igb_release_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. 
+ **/ +static void igb_get_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void igb_configure(struct igb_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + int i; + + igb_get_hw_control(adapter); + igb_set_rx_mode(netdev); + + igb_restore_vlan(adapter); + + igb_setup_tctl(adapter); + igb_setup_mrqc(adapter); + igb_setup_rctl(adapter); + + igb_configure_tx(adapter); + igb_configure_rx(adapter); + + igb_rx_fifo_flush_82575(&adapter->hw); + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); + } +} + +/** + * igb_power_up_link - Power up the phy/serdes link + * @adapter: address of board private structure + **/ +void igb_power_up_link(struct igb_adapter *adapter) +{ + igb_reset_phy(&adapter->hw); + + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_up_phy_copper(&adapter->hw); + else + igb_power_up_serdes_link_82575(&adapter->hw); + + igb_setup_link(&adapter->hw); +} + +/** + * igb_power_down_link - Power down the phy/serdes link + * @adapter: address of board private structure + */ +static void igb_power_down_link(struct igb_adapter *adapter) +{ + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_down_phy_copper_82575(&adapter->hw); + else + igb_shutdown_serdes_link_82575(&adapter->hw); +} + +/** + * Detect and switch function for Media Auto Sense + * @adapter: address of the board private structure + **/ +static void igb_check_swap_media(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext, connsw; + bool swap_now = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + connsw = rd32(E1000_CONNSW); + + /* need to live swap if current media is copper and we have fiber/serdes + * to go to. 
+ */ + + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { + swap_now = true; + } else if (!(connsw & E1000_CONNSW_SERDESD)) { + /* copper signal takes time to appear */ + if (adapter->copper_tries < 4) { + adapter->copper_tries++; + connsw |= E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + return; + } else { + adapter->copper_tries = 0; + if ((connsw & E1000_CONNSW_PHYSD) && + (!(connsw & E1000_CONNSW_PHY_PDN))) { + swap_now = true; + connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + } + } + } + + if (!swap_now) + return; + + switch (hw->phy.media_type) { + case e1000_media_type_copper: + rtdev_info(adapter->netdev, + "MAS: changing media to fiber/serdes\n"); + ctrl_ext |= + E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + adapter->copper_tries = 0; + break; + case e1000_media_type_internal_serdes: + case e1000_media_type_fiber: + rtdev_info(adapter->netdev, + "MAS: changing media to copper\n"); + ctrl_ext &= + ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + break; + default: + /* shouldn't get here during regular operation */ + rtdev_err(adapter->netdev, + "AMS: Invalid media type found, returning\n"); + break; + } + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +/** + * igb_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + **/ +int igb_up(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + igb_configure(adapter); + + clear_bit(__IGB_DOWN, &adapter->state); + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(E1000_ICR); + igb_irq_enable(adapter); + + rtnetif_start_queue(adapter->netdev); + + /* start the watchdog. 
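+ * Setting get_link_status forces the first watchdog run to query the
+ * hardware for the real link state instead of using a cached value.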
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + if ((adapter->flags & IGB_FLAG_EEE) && + (!hw->dev_spec._82575.eee_disable)) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + + return 0; +} + +void igb_down(struct igb_adapter *adapter) +{ + struct rtnet_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGB_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = rd32(E1000_RCTL); + wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + rtnetif_stop_queue(netdev); + + /* disable transmits in the hardware */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + wr32(E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 11000); + + igb_irq_disable(adapter); + + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + rtnetif_carrier_off(netdev); + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); + igb_clean_all_tx_rings(adapter); + igb_clean_all_rx_rings(adapter); +} + +void igb_reinit_locked(struct igb_adapter *adapter) +{ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igb_down(adapter); + igb_up(adapter); + clear_bit(__IGB_RESETTING, &adapter->state); +} + +/** igb_enable_mas - Media Autosense re-enable after swap + * + * @adapter: adapter struct + **/ +static void igb_enable_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 connsw = rd32(E1000_CONNSW); + + /* configure for SerDes media detect */ + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_SERDESD))) { + connsw |= E1000_CONNSW_ENRGSRC; + connsw |= E1000_CONNSW_AUTOSENSE_EN; + wr32(E1000_CONNSW, connsw); + wrfl(); + } +} + +void igb_reset(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_fc_info *fc = &hw->fc; + u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. + */ + switch (mac->type) { + case e1000_i350: + case e1000_i354: + case e1000_82580: + pba = rd32(E1000_RXPBS); + pba = igb_rxpbs_adjust_82580(pba); + break; + case e1000_82576: + pba = rd32(E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; + break; + case e1000_82575: + case e1000_i210: + case e1000_i211: + default: + pba = E1000_PBA_34K; + break; + } + + if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && + (mac->type < e1000_82576)) { + /* adjust PBA for jumbo frames */ + wr32(E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. 
+ */ + pba = rd32(E1000_PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + + sizeof(union e1000_adv_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = adapter->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + wr32(E1000_PBA, pba); + } + + /* flow control settings */ + /* The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - 2 * adapter->max_frame_size)); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + /* Allow time for pending master requests to run */ + hw->mac.ops.reset_hw(hw); + wr32(E1000_WUC, 0); + + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + /* need to resetup here after media swap */ + adapter->ei.get_invariants(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } + if ((mac->type == e1000_82575) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_enable_mas(adapter); + } + if (hw->mac.ops.init_hw(hw)) + dev_err(&pdev->dev, "Hardware Error\n"); + + /* Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. + */ + if (!hw->mac.autoneg) + igb_force_mac_fc(hw); + + igb_init_dmac(adapter, pba); +#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* If present, re-initialize the external thermal sensor + * interface. 
+ */ + if (adapter->ets) + mac->ops.init_thermal_sensor_thresh(hw); + } + } +#endif + /* Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + igb_set_eee_i350(hw, true, true); + break; + case e1000_i354: + igb_set_eee_i354(hw, true, true); + break; + default: + break; + } + } + if (!rtnetif_running(adapter->netdev)) + igb_power_down_link(adapter); + + igb_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + + igb_get_phy_info(hw); +} + + +/** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct + **/ +void igb_set_fw_version(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_fw_version fw; + + igb_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (!(igb_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); + break; + } + fallthrough; + default: + /* if option is rom valid, display its version too */ + if (fw.or_valid) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else if (fw.etrack_id != 0X0000) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); + } + break; + } +} + +/** + * igb_init_mas - init Media Autosense feature if enabled in the NVM + * + * @adapter: adapter struct + **/ +static void igb_init_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_data; + + hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); + switch (hw->bus.func) { + case E1000_FUNC_0: + if (eeprom_data & IGB_MAS_ENABLE_0) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + rtdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_1: + if (eeprom_data & IGB_MAS_ENABLE_1) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + rtdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_2: + if (eeprom_data & IGB_MAS_ENABLE_2) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + rtdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_3: + if (eeprom_data & IGB_MAS_ENABLE_3) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + rtdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + default: + /* Shouldn't get here */ + rtdev_err(adapter->netdev, + "MAS: Invalid port configuration, returning\n"); + break; + } +} + +static dma_addr_t igb_map_rtskb(struct rtnet_device *netdev, + struct rtskb *skb) +{ + struct igb_adapter *adapter = netdev->priv; + struct device *dev = &adapter->pdev->dev; + dma_addr_t addr; + + addr = dma_map_single(dev, skb->buf_start, RTSKB_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, addr)) { + dev_err(dev, "DMA map failed\n"); + return RTSKB_UNMAPPED; + } + return addr; +} + +static void igb_unmap_rtskb(struct rtnet_device *netdev, + struct rtskb *skb) +{ + 
struct igb_adapter *adapter = netdev->priv; + struct device *dev = &adapter->pdev->dev; + + dma_unmap_single(dev, skb->buf_dma_addr, RTSKB_SIZE, + DMA_BIDIRECTIONAL); +} + +/** + * igb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct rtnet_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ + const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; + int err, pci_using_dac; + u8 part_str[E1000_PBANUM_LENGTH]; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. + */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), + igb_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + err = -ENOMEM; + netdev = rt_alloc_etherdev(sizeof(*adapter), + 2 * IGB_DEFAULT_RXD + IGB_DEFAULT_TXD); + if (!netdev) + goto err_alloc_etherdev; + + rtdev_alloc_name(netdev, "rteth%d"); + rt_rtdev_connect(netdev, &RTDEV_manager); + + netdev->vers = RTDEV_VERS_2_0; + netdev->sysbind = &pdev->dev; + + pci_set_drvdata(pdev, netdev); + adapter = rtnetdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_iomap(pdev, 0, 0); + if (!hw->hw_addr) + goto err_ioremap; + + netdev->open = igb_open; + netdev->stop = igb_close; + netdev->hard_start_xmit = igb_xmit_frame; + netdev->get_stats = igb_get_stats; + netdev->map_rtskb = igb_map_rtskb; + netdev->unmap_rtskb = igb_unmap_rtskb; + netdev->do_ioctl = igb_ioctl; +#if 0 + netdev->set_multicast_list = igb_set_multi; + netdev->set_mac_address = igb_set_mac; + netdev->change_mtu = igb_change_mtu; + + // No ethtool support for now + igb_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; +#endif + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC, PHY and NVM function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if 
(err) + goto err_sw_init; + + /* setup the private structure */ + err = igb_sw_init(adapter); + if (err) + goto err_sw_init; + + igb_get_bus_info_pcie(hw); + + hw->phy.autoneg_wait_to_complete = false; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = AUTO_ALL_MODES; + hw->phy.disable_polarity_correction = false; + hw->phy.ms_type = e1000_ms_hw_default; + } + + if (igb_check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* features is initialized to 0 in allocation, it might have bits + * set by igb_sw_init so we should use an or instead of an + * assignment. + */ + netdev->features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + +#if 0 + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#endif + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + /* make sure the NVM is good , i211/i210 parts can have special NVM + * that doesn't contain a checksum + */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + break; + default: + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + break; + } + + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* get firmware version for ethtool -i */ + igb_set_fw_version(adapter); + + /* configure RXPBSIZE and TXPBSIZE */ + if (hw->mac.type == e1000_i210) { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) + timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); +#else /* < 4.14 */ + setup_timer(&adapter->watchdog_timer, igb_watchdog, + (unsigned long) adapter); + setup_timer(&adapter->phy_info_timer, igb_update_phy_info, + (unsigned long) adapter); +#endif /* < 4.14 */ + + INIT_WORK(&adapter->reset_task, igb_reset_task); + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); + rtdm_nrtsig_init(&adapter->watchdog_nrtsig, + igb_nrtsig_watchdog, adapter); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0x2f; + + hw->fc.requested_mode = e1000_fc_default; + hw->fc.current_mode = e1000_fc_default; + + igb_validate_mdi_setting(hw); + + /* By default, support wake on port A */ + if (hw->bus.func == 0) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* Check the NVM for wake support on non-port A ports */ + if (hw->mac.type >= e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + 
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == 1) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + if (eeprom_data & IGB_EEPROM_APME) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* now that we have the eeprom settings, apply the special cases where + * the eeprom may be wrong or the board simply won't support wake on + * lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82575GB_QUAD_COPPER: + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + else + adapter->flags |= IGB_FLAG_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + /* If the device can't wake, don't set software support */ + if (!device_can_wakeup(&adapter->pdev->dev)) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + } + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) + adapter->wol |= E1000_WUFC_MAG; + + /* Some vendors want WoL disabled by default, but still supported */ + if ((hw->mac.type == e1000_i350) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGB_FLAG_WOL_SUPPORTED); + + /* reset the hardware with the new settings */ + igb_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); + + strcpy(netdev->name, "rteth%d"); + err = rt_register_rtnetdev(netdev); + if (err) + goto err_release_hw_control; + + /* carrier off reporting is important to ethtool even BEFORE open */ + rtnetif_carrier_off(netdev); + +#ifdef CONFIG_IGB_HWMON + /* Initialize the thermal sensor on i350 devices. */ + if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { + u16 ets_word; + + /* Read the NVM to determine if this i350 device supports an + * external thermal sensor. + */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; + if (igb_sysfs_init(adapter)) + dev_err(&pdev->dev, + "failed to allocate sysfs resources\n"); + } else { + adapter->ets = false; + } +#endif + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + if (hw->dev_spec._82575.mas_capable) + igb_init_mas(adapter); + + dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); + /* print bus type/speed/width info, not applicable to i354 */ + if (hw->mac.type != e1000_i354) { + dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", + netdev->name, + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : + (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : + "unknown"), + ((hw->bus.width == e1000_bus_width_pcie_x4) ? + "Width x4" : + (hw->bus.width == e1000_bus_width_pcie_x2) ? + "Width x2" : + (hw->bus.width == e1000_bus_width_pcie_x1) ? 
+ "Width x1" : "unknown"), netdev->dev_addr); + } + + if ((hw->mac.type >= e1000_i210 || + igb_get_flash_presence_i210(hw))) { + ret_val = igb_read_part_string(hw, part_str, + E1000_PBANUM_LENGTH); + } else { + ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; + } + + if (ret_val) + strcpy(part_str, "Unknown"); + dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); + dev_info(&pdev->dev, + "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", + (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : + (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", + adapter->num_rx_queues, adapter->num_tx_queues); + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ + err = igb_set_eee_i350(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + break; + case e1000_i354: + if ((rd32(E1000_CTRL_EXT) & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + err = igb_set_eee_i354(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + } + break; + default: + break; + } + } + pm_runtime_put_noidle(&pdev->dev); + return 0; + +err_release_hw_control: + igb_release_hw_control(adapter); + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); +err_eeprom: + if (!igb_check_reset_block(hw)) + igb_reset_phy(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); +err_sw_init: + igb_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, hw->hw_addr); +err_ioremap: + rtdev_free(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igb_remove_i2c - Cleanup I2C interface + * @adapter: pointer to adapter structure + **/ +static void igb_remove_i2c(struct igb_adapter *adapter) +{ + /* free the adapter bus structure */ + i2c_del_adapter(&adapter->i2c_adap); +} + +/** + * igb_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igb_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void igb_remove(struct pci_dev *pdev) +{ + struct rtnet_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + rtdev_down(netdev); + igb_down(adapter); + + pm_runtime_get_noresume(&pdev->dev); +#ifdef CONFIG_IGB_HWMON + igb_sysfs_exit(adapter); +#endif + igb_remove_i2c(adapter); + /* The watchdog timer may be rescheduled, so explicitly + * disable watchdog from being rescheduled. + */ + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. 
+	 */
+	igb_release_hw_control(adapter);
+
+	rt_rtdev_disconnect(netdev);
+	rt_unregister_rtnetdev(netdev);
+
+	igb_clear_interrupt_scheme(adapter);
+
+	pci_iounmap(pdev, hw->hw_addr);
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	kfree(adapter->shadow_vfta);
+	rtdev_free(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * igb_probe_vfs - Initialize VF data storage and add VFs to pci config space
+ * @adapter: board private structure to initialize
+ *
+ * This function initializes the VF-specific data storage and then attempts to
+ * allocate the VFs. The reason for ordering it this way is because it is much
+ * more expensive time-wise to disable SR-IOV than it is to allocate and free
+ * the memory for the VFs.
+ **/
+static void igb_probe_vfs(struct igb_adapter *adapter)
+{
+}
+
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 max_rss_queues;
+
+	max_rss_queues = 1;
+	adapter->rss_queues = max_rss_queues;
+
+	/* Determine if we need to pair queues. */
+	switch (hw->mac.type) {
+	case e1000_82575:
+	case e1000_i211:
+		/* Device supports enough interrupts without queue pairing. */
+		break;
+	case e1000_82576:
+		/* If VFs are going to be allocated with RSS queues then we
+		 * should pair the queues in order to conserve interrupts due
+		 * to limited supply.
+		 */
+		fallthrough;
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	default:
+		/* If rss_queues > half of max_rss_queues, pair the queues in
+		 * order to conserve interrupts due to limited supply.
+		 */
+		if (adapter->rss_queues > (max_rss_queues / 2))
+			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+		break;
+	}
+}
+
+/**
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int igb_sw_init(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+	/* set default ring sizes */
+	adapter->tx_ring_count = IGB_DEFAULT_TXD;
+	adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+	/* set default ITR values */
+	if (InterruptThrottle) {
+		adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+		adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+	} else {
+		adapter->rx_itr_setting = IGB_MIN_ITR_USECS;
+		adapter->tx_itr_setting = IGB_MIN_ITR_USECS;
+	}
+
+	/* set default work limits */
+	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+				  VLAN_HLEN;
+	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+	spin_lock_init(&adapter->stats64_lock);
+
+	igb_init_queue_configuration(adapter);
+
+	/* Setup and initialize a copy of the hw vlan table array */
+	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
+				       GFP_ATOMIC);
+
+	/* This call may decrease the number of queues */
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	igb_probe_vfs(adapter);
+
+	/* Explicitly disable IRQ since the NIC can be in any state.
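+	 * Masking everything here keeps a half-initialized adapter from
+	 * raising interrupts before __igb_open() has requested the IRQ.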
*/ + igb_irq_disable(adapter); + + if (hw->mac.type >= e1000_i350) + adapter->flags &= ~IGB_FLAG_DMAC; + + set_bit(__IGB_DOWN, &adapter->state); + return 0; +} + +/** + * igb_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int __igb_open(struct rtnet_device *netdev, bool resuming) +{ + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + + /* disallow open during test */ + if (test_bit(__IGB_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + rtnetif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igb_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igb_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igb_power_up_link(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + igb_configure(adapter); + + err = igb_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as igb_up() */ + clear_bit(__IGB_DOWN, &adapter->state); + + /* Clear any pending interrupts. */ + rd32(E1000_ICR); + + igb_irq_enable(adapter); + + rtnetif_start_queue(netdev); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + /* start the watchdog. */ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + return 0; + +err_req_irq: + igb_release_hw_control(adapter); + igb_power_down_link(adapter); + igb_free_all_rx_resources(adapter); +err_setup_rx: + igb_free_all_tx_resources(adapter); +err_setup_tx: + igb_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +static int igb_open(struct rtnet_device *netdev) +{ + return __igb_open(netdev, false); +} + +/** + * igb_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
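+ *
+ * __igb_close() below is shared with the suspend path: when it is
+ * entered with @suspending set, the pm_runtime reference is left
+ * untouched, mirroring the @resuming handling in __igb_open().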
+ **/ +static int __igb_close(struct rtnet_device *netdev, bool suspending) +{ + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igb_down(adapter); + igb_free_irq(adapter); + + rt_stack_disconnect(netdev); + + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + return 0; +} + +static int igb_close(struct rtnet_device *netdev) +{ + return __igb_close(netdev, false); +} + +/** + * igb_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int igb_setup_tx_resources(struct igb_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_tx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igb_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + **/ +void igb_setup_tctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which is enabled by default on 82575 and 82576 */ + wr32(E1000_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + igb_config_collision_dist(hw); + + /* Enable transmits */ + tctl |= E1000_TCTL_EN; + + wr32(E1000_TCTL, tctl); +} + +/** + * igb_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. 
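+ *
+ * For example (descriptor count assumed for illustration): a ring of
+ * 256 descriptors programs TDLEN with
+ * 256 * sizeof(union e1000_adv_tx_desc) = 256 * 16 = 4096 bytes.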
+ **/ +void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txdctl = 0; + u64 tdba = ring->dma; + int reg_idx = ring->reg_idx; + + /* disable the queue */ + wr32(E1000_TXDCTL(reg_idx), 0); + wrfl(); + mdelay(10); + + wr32(E1000_TDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_tx_desc)); + wr32(E1000_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(E1000_TDBAH(reg_idx), tdba >> 32); + + ring->tail = hw->hw_addr + E1000_TDT(reg_idx); + wr32(E1000_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; + txdctl |= IGB_TX_WTHRESH << 16; + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + wr32(E1000_TXDCTL(reg_idx), txdctl); +} + +/** + * igb_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void igb_configure_tx(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igb_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int igb_setup_rx_resources(struct igb_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int size; + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_rx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igb_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + **/ +static void igb_setup_mrqc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues; + u32 rss_key[10]; + + get_random_bytes(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(E1000_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + break; + default: + break; + } + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + + /* Disable raw packet checksumming so that RSS hash is 
placed in + * descriptor on writeback. No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(E1000_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + /* If VMDq is enabled then we set the appropriate mode for that, else + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue + */ + if (hw->mac.type != e1000_i211) + mrqc |= E1000_MRQC_ENABLE_RSS_4Q; + + wr32(E1000_MRQC, mrqc); +} + +/** + * igb_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +void igb_setup_rctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. + */ + rctl |= E1000_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); + + /* enable LPE to prevent packets larger than max_frame_size */ + rctl |= E1000_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(E1000_RXDCTL(0), 0); + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + wr32(E1000_RCTL, rctl); +} + +/** + * igb_rlpml_set - set maximum receive packet size + * @adapter: board private structure + * + * Configure maximum receivable packet size. 
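+ *
+ * RCTL.LPE (set in igb_setup_rctl()) merely admits long packets;
+ * RLPML supplies the actual bound, so frames longer than
+ * adapter->max_frame_size are dropped by the MAC.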
+ **/ +static void igb_rlpml_set(struct igb_adapter *adapter) +{ + u32 max_frame_size = adapter->max_frame_size; + struct e1000_hw *hw = &adapter->hw; + + wr32(E1000_RLPML, max_frame_size); +} + +static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ + if (hw->mac.type == e1000_i350) { + u32 dvmolr; + + dvmolr = rd32(E1000_DVMOLR(vfn)); + dvmolr |= E1000_DVMOLR_STRVLAN; + wr32(E1000_DVMOLR(vfn), dvmolr); + } + if (aupe) + vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ + + /* clear all bits that might not be set */ + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + + if (adapter->rss_queues > 1) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ + /* for VMDq only allow the VFs and pool 0 to accept broadcast and + * multicast packets + */ + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + + wr32(E1000_VMOLR(vfn), vmolr); +} + +/** + * igb_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. + **/ +void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + + ring->rx_buffer_len = max_t(u32, adapter->max_frame_size, + MAXIMUM_ETHERNET_VLAN_SIZE); + + /* disable the queue */ + wr32(E1000_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(E1000_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(E1000_RDBAH(reg_idx), rdba >> 32); + wr32(E1000_RDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = hw->hw_addr + E1000_RDT(reg_idx); + wr32(E1000_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* set descriptor configuration */ + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + if (hw->mac.type >= e1000_82580) + srrctl |= E1000_SRRCTL_TIMESTAMP; + /* Only set Drop Enable if we are supporting multiple queues */ + if (adapter->num_rx_queues > 1) + srrctl |= E1000_SRRCTL_DROP_EN; + + wr32(E1000_SRRCTL(reg_idx), srrctl); + + /* set filtering for VMDQ pools */ + igb_set_vmolr(adapter, reg_idx & 0x7, true); + + rxdctl |= IGB_RX_PTHRESH; + rxdctl |= IGB_RX_HTHRESH << 8; + rxdctl |= IGB_RX_WTHRESH << 16; + + /* enable receive descriptor fetching */ + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + wr32(E1000_RXDCTL(reg_idx), rxdctl); +} + +/** + * igb_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
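+ *
+ * RAR entry 0 is programmed with the PF MAC address before the rings
+ * are enabled, so the default unicast filter is already valid when
+ * descriptor fetching starts.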
+ **/ +static void igb_configure_rx(struct igb_adapter *adapter) +{ + int i; + + /* set the correct pool for the PF default MAC address in entry 0 */ + igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, 0); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igb_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igb_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void igb_free_tx_resources(struct igb_ring *tx_ring) +{ + igb_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igb_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void igb_free_all_tx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); +} + +void igb_unmap_and_free_tx_resource(struct igb_ring *ring, + struct igb_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + kfree_rtskb(tx_buffer->skb); + tx_buffer->skb = NULL; + } + tx_buffer->next_to_watch = NULL; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * igb_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igb_clean_tx_ring(struct igb_ring *tx_ring) +{ + struct igb_tx_buffer *buffer_info; + unsigned long size; + u16 i; + + if (!tx_ring->tx_buffer_info) + return; + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->tx_buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); + } + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igb_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_tx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igb_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void igb_free_rx_resources(struct igb_ring *rx_ring) +{ + igb_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igb_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void igb_free_all_rx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igb_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static 
void igb_clean_rx_ring(struct igb_ring *rx_ring) +{ + unsigned long size; + u16 i; + + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + if (buffer_info->dma) + buffer_info->dma = 0; + + if (buffer_info->skb) { + kfree_rtskb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * igb_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_rx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igb_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igb_write_mc_addr_list(struct rtnet_device *netdev) +{ + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +#if 0 + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igb_update_mc_addr_list(hw, NULL, 0); + igb_restore_vf_multicasts(adapter); + return 0; + } + + mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igb_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +#else + igb_update_mc_addr_list(hw, NULL, 0); + return 0; +#endif +} + +/** + * igb_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int igb_write_uc_addr_list(struct rtnet_device *netdev) +{ + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = 0; + unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); + int count = 0; + + /* write the addresses in reverse order to avoid write combining */ + for (; rar_entries > 0 ; rar_entries--) { + wr32(E1000_RAH(rar_entries), 0); + wr32(E1000_RAL(rar_entries), 0); + } + wrfl(); + + return count; +} + +/** + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
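+ *
+ * Note: in this RTnet port igb_write_mc_addr_list() above only clears
+ * the MTA (its list-walking code is compiled out), so multicast
+ * reception effectively requires IFF_ALLMULTI or IFF_PROMISC.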
+ **/
+static void igb_set_rx_mode(struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = 0;
+	u32 rctl, vmolr = 0;
+	int count;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = rd32(E1000_RCTL);
+
+	/* clear the affected bits */
+	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+			vmolr |= E1000_VMOLR_MPME;
+		} else {
+			/* Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscuous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			count = igb_write_mc_addr_list(netdev);
+			if (count < 0) {
+				rctl |= E1000_RCTL_MPE;
+				vmolr |= E1000_VMOLR_MPME;
+			} else if (count) {
+				vmolr |= E1000_VMOLR_ROMPE;
+			}
+		}
+		/* Write addresses to available RAR registers, if there is not
+		 * sufficient space to store all the addresses then enable
+		 * unicast promiscuous mode
+		 */
+		count = igb_write_uc_addr_list(netdev);
+		if (count < 0) {
+			rctl |= E1000_RCTL_UPE;
+			vmolr |= E1000_VMOLR_ROPE;
+		}
+		rctl |= E1000_RCTL_VFE;
+	}
+	wr32(E1000_RCTL, rctl);
+
+	/* In order to support SR-IOV and eventually VMDq it is necessary to set
+	 * the VMOLR to enable the appropriate modes.  Without this workaround
+	 * we will have issues with VLAN tag stripping not being done for frames
+	 * that are only arriving because we are the default pool
+	 */
+	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
+		return;
+
+	vmolr |= rd32(E1000_VMOLR(vfn)) &
+		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+	wr32(E1000_VMOLR(vfn), vmolr);
+}
+
+static void igb_check_wvbr(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 wvbr = 0;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+	case e1000_i350:
+		wvbr = rd32(E1000_WVBR);
+		if (!wvbr)
+			return;
+		break;
+	default:
+		break;
+	}
+
+	adapter->wvbr |= wvbr;
+}
+
+#define IGB_STAGGERED_QUEUE_OFFSET 8
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void igb_update_phy_info(struct timer_list *t)
+{
+	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+#else /* < 4.14 */
+static void igb_update_phy_info(unsigned long data)
+{
+	struct igb_adapter *adapter = (struct igb_adapter *) data;
+#endif /* < 4.14 */
+	igb_get_phy_info(&adapter->hw);
+}
+
+/**
+ * igb_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
+ **/
+bool igb_has_link(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	bool link_active = false;
+
+	/* get_link_status is set on LSC (link status) interrupt or
+	 * rx sequence error interrupt.
get_link_status will stay + * false until the e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (!hw->mac.get_link_status) + return true; + fallthrough; + case e1000_media_type_internal_serdes: + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; + default: + case e1000_media_type_unknown: + break; + } + + if (((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) && + (hw->phy.id == I210_I_PHY_ID)) { + if (!rtnetif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) +{ + bool ret = false; + u32 ctrl_ext, thstat; + + /* check for thermal sensor event on i350 copper only */ + if (hw->mac.type == e1000_i350) { + thstat = rd32(E1000_THSTAT); + ctrl_ext = rd32(E1000_CTRL_EXT); + + if ((hw->phy.media_type == e1000_media_type_copper) && + !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) + ret = !!(thstat & event); + } + + return ret; +} + +/** + * igb_check_lvmmc - check for malformed packets received + * and indicated in LVMMC register + * @adapter: pointer to adapter + **/ +static void igb_check_lvmmc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 lvmmc; + + lvmmc = rd32(E1000_LVMMC); + if (lvmmc) { + if (unlikely(net_ratelimit())) { + rtdev_warn(adapter->netdev, + "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", + lvmmc); + } + } +} + +/** + * igb_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0) +static void igb_watchdog(struct timer_list *t) +{ + struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); +#else /* < 4.14 */ +static void igb_watchdog(unsigned long data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; +#endif /* < 4.14 */ + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igb_watchdog_task(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, + struct igb_adapter, + watchdog_task); + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_info *phy = &hw->phy; + struct rtnet_device *netdev = adapter->netdev; + u32 link; + int i; + u32 connsw; + + link = igb_has_link(adapter); + + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == e1000_media_type_copper) { + connsw = rd32(E1000_CONNSW); + if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } + if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { + hw->dev_spec._82575.media_changed = false; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } + /* Cancel scheduled suspend requests. 
*/ + pm_runtime_resume(adapter->pdev->dev.parent); + + if (!rtnetif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(E1000_CTRL); + /* Links status message must follow this format */ + rtdev_info(netdev, + "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : + (ctrl & E1000_CTRL_RFCE) ? "RX" : + (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGB_FLAG_EEE) && + (adapter->link_duplex == HALF_DUPLEX)) { + dev_info(&adapter->pdev->dev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); + adapter->hw.dev_spec._82575.eee_disable = true; + adapter->flags &= ~IGB_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igb_check_downshift(hw); + if (phy->speed_downgraded) + rtdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_LINK_THROTTLE)) + rtdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + rtnetif_carrier_on(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (rtnetif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_PWR_DOWN)) { + rtdev_err(netdev, "The network adapter was stopped because it overheated\n"); + } + + /* Links status message must follow this format */ + rtdev_info(netdev, "igb: %s NIC Link is Down\n", + netdev->name); + rtnetif_carrier_off(netdev); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(adapter->pdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!rtnetif_carrier_ok(netdev) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = adapter->tx_ring[i]; + if (!rtnetif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
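+			 * igb_desc_unused() + 1 equals tx_ring->count only
+			 * on an empty ring, so the test below fires whenever
+			 * at least one descriptor is still outstanding.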
+			 */
+			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+				adapter->tx_timeout_count++;
+				schedule_work(&adapter->reset_task);
+				/* return immediately since reset is imminent */
+				return;
+			}
+		}
+
+		/* Force detection of hung controller every watchdog period */
+		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+	}
+
+	/* Cause software interrupt to ensure Rx ring is cleaned */
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		u32 eics = 0;
+
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			eics |= adapter->q_vector[i]->eims_value;
+		wr32(E1000_EICS, eics);
+	} else {
+		wr32(E1000_ICS, E1000_ICS_RXDMT0);
+	}
+
+	/* Check LVMMC register on i350/i354 only */
+	if ((adapter->hw.mac.type == e1000_i350) ||
+	    (adapter->hw.mac.type == e1000_i354))
+		igb_check_lvmmc(adapter);
+
+	/* Reset the timer */
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + HZ));
+		else
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + 2 * HZ));
+	}
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
+ *
+ * Stores a new ITR value based strictly on packet size.  This
+ * algorithm is less sophisticated than that used in igb_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings.  The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * This functionality is controlled by ethtool's coalescing settings.
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
+ **/
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
+{
+	int new_val = q_vector->itr_val;
+	int avg_wire_size = 0;
+	struct igb_adapter *adapter = q_vector->adapter;
+	unsigned int packets;
+
+	if (!InterruptThrottle)
+		return;
+
+	/* For non-gigabit speeds, just fix the interrupt rate at 4000
+	 * ints/sec - ITR timer value of 120 ticks.
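+	 * (1 s / 4000 = 250 us, i.e. about 120 ticks at the roughly
+	 * 2 us per-tick ITR granularity; approximate figures only.)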
+ */ + if (adapter->link_speed != SPEED_1000) { + new_val = IGB_4K_ITR; + goto set_itr_val; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGB_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGB_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +/** + * igb_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * This functionality is controlled by ethtool's coalescing settings. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
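+ *
+ * Example (illustrative numbers): from low_latency, an interrupt that
+ * saw 4 packets / 6000 bytes stays in low_latency (6000 <= 10000,
+ * 6000/4 = 1500 <= 2000, 4 > 2), while a single 6000-byte packet
+ * (6000/1 > 2000) moves the ring to bulk_latency.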
+ **/ +static void igb_update_itr(struct igb_q_vector *q_vector, + struct igb_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes/packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igb_set_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + if (!InterruptThrottle) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = IGB_4K_ITR; + goto set_itr_now; + } + + igb_update_itr(q_vector, &q_vector->tx); + igb_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. + */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + + +#define IGB_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? 
\ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 igb_tx_cmd_type(struct rtskb *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + + return cmd_type; +} + +static void igb_tx_olinfo_status(struct igb_ring *tx_ring, + union e1000_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; + + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + struct rtnet_device *netdev = tx_ring->netdev; + + rtnetif_stop_queue(netdev); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + rtnetif_wake_queue(netdev); + + tx_ring->tx_stats.restart_queue2++; + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + +static void igb_tx_map(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + const u8 hdr_len) +{ + struct rtskb *skb = first->skb; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + dma_addr_t dma; + unsigned int size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); + u16 i = tx_ring->next_to_use; + + /* first descriptor is also last, set RS and EOP bits */ + cmd_type |= IGB_TXD_DCMD; + tx_desc = IGB_TX_DESC(tx_ring, i); + + igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb->len; + + dma = rtskb_data_dma_addr(skb, 0); + + tx_buffer = first; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + /* set the timestamp */ + first->time_stamp = jiffies; + first->next_to_watch = tx_desc; + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + if (skb->xmit_stamp) + *skb->xmit_stamp = + cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); + /* set next_to_watch value indicating a packet is present */ + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. 
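+	 * DESC_NEEDED reserves room for a worst-case frame plus the gap
+	 * that keeps tail from touching head (see igb_xmit_frame_ring).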
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + + return; +} + +netdev_tx_t igb_xmit_frame_ring(struct rtskb *skb, + struct igb_ring *tx_ring) +{ + struct igb_tx_buffer *first; + u32 tx_flags = 0; + u16 count = 2; + u8 hdr_len = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (igb_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= IGB_TX_FLAGS_IPV4; + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = skb->protocol; + + igb_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; +} + +static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, + struct rtskb *skb) +{ + return adapter->tx_ring[0]; +} + +static netdev_tx_t igb_xmit_frame(struct rtskb *skb, + struct rtnet_device *netdev) +{ + struct igb_adapter *adapter = rtnetdev_priv(netdev); + + if (test_bit(__IGB_DOWN, &adapter->state)) { + kfree_rtskb(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + kfree_rtskb(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + skb = rtskb_padto(skb, 17); + if (!skb) + return NETDEV_TX_OK; + } + + return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); +} + +static void igb_reset_task(struct work_struct *work) +{ + struct igb_adapter *adapter; + adapter = container_of(work, struct igb_adapter, reset_task); + + igb_dump(adapter); + rtdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); +} + +/** + * igb_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats * +igb_get_stats(struct rtnet_device *netdev) +{ + struct igb_adapter *adapter = netdev->priv; + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * igb_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void igb_update_stats(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct net_device_stats *net_stats; + u32 reg, mpc; + int i; + u64 bytes, packets; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
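+	 * (link_speed is 0 while the adapter is down or resetting;
+	 * pci_channel_offline() covers the lost-connection case.)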
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + net_stats = &adapter->net_stats; + bytes = 0; + packets = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + bytes += ring->rx_stats.bytes; + packets += ring->rx_stats.packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + bytes += ring->tx_stats.bytes; + packets += ring->tx_stats.packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(E1000_CRCERRS); + adapter->stats.gprc += rd32(E1000_GPRC); + adapter->stats.gorc += rd32(E1000_GORCL); + rd32(E1000_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(E1000_BPRC); + adapter->stats.mprc += rd32(E1000_MPRC); + adapter->stats.roc += rd32(E1000_ROC); + + adapter->stats.prc64 += rd32(E1000_PRC64); + adapter->stats.prc127 += rd32(E1000_PRC127); + adapter->stats.prc255 += rd32(E1000_PRC255); + adapter->stats.prc511 += rd32(E1000_PRC511); + adapter->stats.prc1023 += rd32(E1000_PRC1023); + adapter->stats.prc1522 += rd32(E1000_PRC1522); + adapter->stats.symerrs += rd32(E1000_SYMERRS); + adapter->stats.sec += rd32(E1000_SEC); + + mpc = rd32(E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(E1000_SCC); + adapter->stats.ecol += rd32(E1000_ECOL); + adapter->stats.mcc += rd32(E1000_MCC); + adapter->stats.latecol += rd32(E1000_LATECOL); + adapter->stats.dc += rd32(E1000_DC); + adapter->stats.rlec += rd32(E1000_RLEC); + adapter->stats.xonrxc += rd32(E1000_XONRXC); + adapter->stats.xontxc += rd32(E1000_XONTXC); + adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); + adapter->stats.xofftxc += rd32(E1000_XOFFTXC); + adapter->stats.fcruc += rd32(E1000_FCRUC); + adapter->stats.gptc += rd32(E1000_GPTC); + adapter->stats.gotc += rd32(E1000_GOTCL); + rd32(E1000_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(E1000_RNBC); + adapter->stats.ruc += rd32(E1000_RUC); + adapter->stats.rfc += rd32(E1000_RFC); + adapter->stats.rjc += rd32(E1000_RJC); + adapter->stats.tor += rd32(E1000_TORH); + adapter->stats.tot += rd32(E1000_TOTH); + adapter->stats.tpr += rd32(E1000_TPR); + + adapter->stats.ptc64 += rd32(E1000_PTC64); + adapter->stats.ptc127 += rd32(E1000_PTC127); + adapter->stats.ptc255 += rd32(E1000_PTC255); + adapter->stats.ptc511 += rd32(E1000_PTC511); + adapter->stats.ptc1023 += rd32(E1000_PTC1023); + adapter->stats.ptc1522 += rd32(E1000_PTC1522); + + adapter->stats.mptc += rd32(E1000_MPTC); + adapter->stats.bptc += rd32(E1000_BPTC); + + adapter->stats.tpt += rd32(E1000_TPT); + adapter->stats.colc += rd32(E1000_COLC); + + adapter->stats.algnerrc += rd32(E1000_ALGNERRC); + /* read internal phy specific stats */ + reg = rd32(E1000_CTRL_EXT); + if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { + adapter->stats.rxerrc += rd32(E1000_RXERRC); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) + adapter->stats.tncrs += rd32(E1000_TNCRS); + } + + adapter->stats.tsctc += rd32(E1000_TSCTC); + adapter->stats.tsctfc += rd32(E1000_TSCTFC); + + 
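+	/* interrupt-cause counters; like the counters above, these
+	 * registers clear on read, hence the accumulation
+	 */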
adapter->stats.iac += rd32(E1000_IAC); + adapter->stats.icrxoc += rd32(E1000_ICRXOC); + adapter->stats.icrxptc += rd32(E1000_ICRXPTC); + adapter->stats.icrxatc += rd32(E1000_ICRXATC); + adapter->stats.ictxptc += rd32(E1000_ICTXPTC); + adapter->stats.ictxatc += rd32(E1000_ICTXATC); + adapter->stats.ictxqec += rd32(E1000_ICTXQEC); + adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(E1000_MGTPTC); + adapter->stats.mgprc += rd32(E1000_MGTPRC); + adapter->stats.mgpdc += rd32(E1000_MGTPDC); + + /* OS2BMC Stats */ + reg = rd32(E1000_MANC); + if (reg & E1000_MANC_EN_BMC2OS) { + adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); + adapter->stats.o2bspc += rd32(E1000_O2BSPC); + adapter->stats.b2ospc += rd32(E1000_B2OSPC); + adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); + } +} + +static void igb_nrtsig_watchdog(rtdm_nrtsig_t *sig, void *data) +{ + struct igb_adapter *adapter = data; + mod_timer(&adapter->watchdog_timer, jiffies + 1); +} + +static void igb_other_handler(struct igb_adapter *adapter, u32 icr, bool root) +{ + struct e1000_hw *hw = &adapter->hw; + + if (icr & E1000_ICR_DRSTA) + rtdm_schedule_nrt_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + /* The DMA Out of Sync is also indication of a spoof event + * in IOV mode. Check the Wrong VM Behavior register to + * see if it is really a spoof event. 
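+		 * (igb_check_wvbr() reads the WVBR register and latches any
+		 * set bits for later reporting.)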
+ */ + igb_check_wvbr(adapter); + } + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (root) + mod_timer(&adapter->watchdog_timer, + jiffies + 1); + else + rtdm_nrtsig_pend(&adapter->watchdog_nrtsig); + } + } +} + +static irqreturn_t igb_msix_other(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct e1000_hw *hw = &adapter->hw; + u32 icr = rd32(E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + + igb_other_handler(adapter, icr, true); + + wr32(E1000_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igb_write_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 itr_val = (q_vector->itr_val + 0x3) & 0x7FFC; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = 0x4; + + if (adapter->hw.mac.type == e1000_82575) + itr_val |= itr_val << 16; + else + itr_val |= E1000_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static int igb_msix_ring(rtdm_irq_t *ih) +{ + struct igb_q_vector *q_vector = + rtdm_irq_get_arg(ih, struct igb_q_vector); + + /* Write the ITR value calculated from the previous interrupt. */ + igb_write_itr(q_vector); + + igb_poll(q_vector); + + return RTDM_IRQ_HANDLED; +} + + +/** + * igb_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static int igb_intr_msi(rtdm_irq_t *ih) +{ + struct igb_adapter *adapter = + rtdm_irq_get_arg(ih, struct igb_adapter); + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + u32 icr = rd32(E1000_ICR); + + igb_write_itr(q_vector); + + igb_other_handler(adapter, icr, false); + + igb_poll(q_vector); + + return RTDM_IRQ_HANDLED; +} + +/** + * igb_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static int igb_intr(rtdm_irq_t *ih) +{ + struct igb_adapter *adapter = + rtdm_irq_get_arg(ih, struct igb_adapter); + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No + * need for the IMC write + */ + u32 icr = rd32(E1000_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igb_write_itr(q_vector); + + igb_other_handler(adapter, icr, false); + + igb_poll(q_vector); + + return RTDM_IRQ_HANDLED; +} + +static void igb_ring_irq_enable(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if (adapter->num_q_vectors == 1) + igb_set_itr(q_vector); + else + igb_update_ring_itr(q_vector); + } + + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) + wr32(E1000_EIMS, q_vector->eims_value); + else + igb_irq_enable(adapter); + } +} + +/** + * igb_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + **/ +static void igb_poll(struct igb_q_vector *q_vector) +{ + if (q_vector->tx.ring) + igb_clean_tx_irq(q_vector); + + if (q_vector->rx.ring) + igb_clean_rx_irq(q_vector, 64); + + igb_ring_irq_enable(q_vector); +} + +/** + * igb_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * + * returns true if ring is completely cleaned + **/ +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *tx_ring = q_vector->tx.ring; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IGB_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGB_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + smp_rmb(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + kfree_rtskb(tx_buffer->skb); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; + q_vector->tx.total_bytes += total_bytes; + 
q_vector->tx.total_packets += total_packets; + + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct e1000_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(E1000_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + rtnetif_stop_queue(tx_ring->netdev); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + rtnetif_carrier_ok(tx_ring->netdev) && + igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (rtnetif_queue_stopped(tx_ring->netdev) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + rtnetif_wake_queue(tx_ring->netdev); + + tx_ring->tx_stats.restart_queue++; + } + } + + return !!budget; +} + +static struct rtskb *igb_fetch_rx_buffer(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + struct igb_rx_buffer *rx_buffer; + struct rtskb *skb; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + skb = rx_buffer->skb; + prefetchw(skb->data); + + /* pull the header of the skb in */ + rtskb_put(skb, le16_to_cpu(rx_desc->wb.upper.length)); + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + + return skb; +} + +static inline void igb_rx_checksum(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct rtskb *skb) +{ + skb->ip_summed = CHECKSUM_NONE; + + /* Ignore Checksum bit is set */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igb_test_staterr(rx_desc, + E1000_RXDEXT_STATERR_TCPE | + E1000_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets, (aka let the stack check the crc32c) + */ + if (!((skb->len == 60) && + test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + ring->rx_stats.csum_err++; + } + /* let the stack verify checksum errors */ + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | + E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(ring->dev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +/** + * igb_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: current socket buffer containing buffer in progress + * + * This function updates next to clean. 
If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGB_RX_DESC(rx_ring, ntc)); + + if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igb_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct rtskb *skb) +{ + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct rtnet_device *netdev = rx_ring->netdev; + if (!(netdev->features & NETIF_F_RXALL)) { + kfree_rtskb(skb); + return true; + } + } + + return false; +} + +/** + * igb_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. 
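+ * In this RTnet port only the checksum status and the protocol field
+ * are populated.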
+ **/ +static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct rtskb *skb) +{ + igb_rx_checksum(rx_ring, rx_desc, skb); + + skb->protocol = rt_eth_type_trans(skb, rx_ring->netdev); +} + +static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) +{ + struct igb_ring *rx_ring = q_vector->rx.ring; + unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = igb_desc_unused(rx_ring); + nanosecs_abs_t time_stamp = rtdm_clock_read(); + struct rtskb *skb; + + while (likely(total_packets < budget)) { + union e1000_adv_rx_desc *rx_desc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGB_RX_BUFFER_WRITE) { + igb_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!rx_desc->wb.upper.status_error) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + rmb(); + + /* retrieve a buffer from the ring */ + skb = igb_fetch_rx_buffer(rx_ring, rx_desc); + skb->time_stamp = time_stamp; + + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igb_is_non_eop(rx_ring, rx_desc)) { + kfree_rtskb(skb); + continue; + } + + /* verify the packet layout is correct */ + if (igb_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + + rtnetif_rx(skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); + + if (total_packets) + rt_mark_stack_mgr(q_vector->adapter->netdev); + + return total_packets < budget; +} + +static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) +{ + struct igb_adapter *adapter = rx_ring->q_vector->adapter; + struct rtskb *skb = bi->skb; + dma_addr_t dma = bi->dma; + + if (dma) + return true; + + if (likely(!skb)) { + skb = rtnetdev_alloc_rtskb(adapter->netdev, + rx_ring->rx_buffer_len + NET_IP_ALIGN); + if (!skb) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + rtskb_reserve(skb, NET_IP_ALIGN); + skb->rtdev = adapter->netdev; + + bi->skb = skb; + bi->dma = rtskb_data_dma_addr(skb, 0); + } + + return true; +} + +/** + * igb_alloc_rx_buffers - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGB_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!igb_alloc_mapped_skb(rx_ring, bi)) + break; + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGB_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +/** + * igb_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int igb_mii_ioctl(struct rtnet_device *netdev, struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * igb_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int igb_ioctl(struct rtnet_device *netdev, struct ifreq *ifr, int cmd) +{ + if (rtdm_in_rt_context()) + return -ENOSYS; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return igb_mii_ioctl(netdev, ifr, cmd); + + default: + return -EOPNOTSUPP; + } +} + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_read_word(adapter->pdev, reg, value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_write_word(adapter->pdev, reg, *value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +static void igb_vlan_mode(struct rtnet_device *netdev, netdev_features_t features) +{ + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* disable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + + igb_rlpml_set(adapter); +} + +static int igb_vlan_rx_add_vid(struct rtnet_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* add the filter since PF can receive vlans w/o entry in vlvf */ + igb_vfta_set(hw, vid, true); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static void igb_restore_vlan(struct igb_adapter *adapter) +{ + u16 vid; + + igb_vlan_mode(adapter->netdev, adapter->netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct rtnet_device *netdev = 
pci_get_drvdata(pdev); + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + rtnetif_device_detach(netdev); + + if (rtnetif_running(netdev)) + __igb_close(netdev, true); + + igb_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + igb_setup_rctl(adapter); + igb_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = rd32(E1000_RCTL); + rctl |= E1000_RCTL_MPE; + wr32(E1000_RCTL, rctl); + } + + ctrl = rd32(E1000_CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC; + wr32(E1000_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igb_disable_pcie_master(hw); + + wr32(E1000_WUC, E1000_WUC_PME_EN); + wr32(E1000_WUFC, wufc); + } else { + wr32(E1000_WUC, 0); + wr32(E1000_WUFC, 0); + } + + *enable_wake = wufc || adapter->en_mng_pt; + if (!*enable_wake) + igb_power_down_link(adapter); + else + igb_power_up_link(adapter); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igb_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int igb_suspend(struct device *dev) +{ + int retval; + bool wake; + struct pci_dev *pdev = to_pci_dev(dev); + + retval = __igb_shutdown(pdev, &wake, 0); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static int igb_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct rtnet_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = rtnetdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "igb: Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
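+	 * (igb_get_hw_control() sets the DRV_LOAD bit in CTRL_EXT.)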
+	 */
+	igb_get_hw_control(adapter);
+
+	wr32(E1000_WUS, ~0);
+
+	if (netdev->flags & IFF_UP) {
+		rtnl_lock();
+		err = __igb_open(netdev, true);
+		rtnl_unlock();
+		if (err)
+			return err;
+	}
+
+	rtnetif_device_attach(netdev);
+	return 0;
+}
+
+static int igb_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	if (!igb_has_link(adapter))
+		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+	return -EBUSY;
+}
+
+static int igb_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int retval;
+	bool wake;
+
+	retval = __igb_shutdown(pdev, &wake, 1);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+static int igb_runtime_resume(struct device *dev)
+{
+	return igb_resume(dev);
+}
+#endif /* CONFIG_PM */
+
+static void igb_shutdown(struct pci_dev *pdev)
+{
+	bool wake;
+
+	__igb_shutdown(pdev, &wake, 0);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+}
+
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+	return 0;
+}
+
+/**
+ * igb_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ **/
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+					      pci_channel_state_t state)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	rtnetif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (rtnetif_running(netdev))
+		igb_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the igb_resume routine.
+ **/
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	pci_ers_result_t result;
+	int err;
+
+	if (pci_enable_device_mem(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+
+		igb_reset(adapter);
+		wr32(E1000_WUS, ~0);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	err = pci_aer_clear_nonfatal_status(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_aer_clear_nonfatal_status failed 0x%0x\n",
+			err);
+		/* non-fatal, continue */
+	}
+
+	return result;
+}
+
+/**
+ * igb_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the igb_resume routine.
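+ * (Called by the PCI error recovery core once igb_io_slot_reset()
+ * has succeeded.)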
+ */ +static void igb_io_resume(struct pci_dev *pdev) +{ + struct rtnet_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = rtnetdev_priv(netdev); + + if (rtnetif_running(netdev)) { + if (igb_up(adapter)) { + dev_err(&pdev->dev, "igb_up failed after reset\n"); + return; + } + } + + rtnetif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); +} + +static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, + u8 qsel) +{ + u32 rar_low, rar_high; + struct e1000_hw *hw = &adapter->hw; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + + if (hw->mac.type == e1000_82575) + rar_high |= E1000_RAH_POOL_1 * qsel; + else + rar_high |= E1000_RAH_POOL_1 << qsel; + + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) +{ + struct e1000_hw *hw = &adapter->hw; + u32 dmac_thr; + u16 hwm; + + if (hw->mac.type > e1000_82580) { + if (adapter->flags & IGB_FLAG_DMAC) { + u32 reg; + + /* force threshold to 0. */ + wr32(E1000_DMCTXTH, 0); + + /* DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. + */ + hwm = 64 * pba - adapter->max_frame_size / 16; + if (hwm < 64 * (pba - 6)) + hwm = 64 * (pba - 6); + reg = rd32(E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); + wr32(E1000_FCRTC, reg); + + /* Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. 
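+			 * For example, with pba = 40 (KB) and 1522-byte
+			 * frames, max_frame_size / 512 = 2, so dmac_thr is
+			 * 40 - 2 = 38, above the pba - 10 floor enforced
+			 * below.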
+ */ + dmac_thr = pba - adapter->max_frame_size / 512; + if (dmac_thr < pba - 10) + dmac_thr = pba - 10; + reg = rd32(E1000_DMACR); + reg &= ~E1000_DMACR_DMACTHR_MASK; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* watchdog timer= +-1000 usec in 32usec intervals */ + reg |= (1000 >> 5); + + /* Disable BMC-to-OS Watchdog Enable */ + if (hw->mac.type != e1000_i354) + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; + + wr32(E1000_DMACR, reg); + + /* no lower threshold to disable + * coalescing(smart fifb)-UTRESH=0 + */ + wr32(E1000_DMCRTRH, 0); + + reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); + + wr32(E1000_DMCTLX, reg); + + /* free space in tx packet buffer to wake from + * DMA coal + */ + wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + + /* make low power state decision controlled + * by DMA coal + */ + reg = rd32(E1000_PCIEMISC); + reg &= ~E1000_PCIEMISC_LX_DECISION; + wr32(E1000_PCIEMISC, reg); + } /* endif adapter->dmac is not disabled */ + } else if (hw->mac.type == e1000_82580) { + u32 reg = rd32(E1000_PCIEMISC); + + wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); + wr32(E1000_DMACR, 0); + } +} + +/** + * igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. + **/ +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; + s32 status; + u16 swfw_mask = 0; + + if (!this_client) + return E1000_ERR_I2C; + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return E1000_ERR_SWFW_SYNC; + + status = i2c_smbus_read_byte_data(this_client, byte_offset); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status < 0) + return E1000_ERR_I2C; + else { + *data = status; + return 0; + } +} + +/** + * igb_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
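+ * Returns 0 on success, E1000_ERR_SWFW_SYNC if the firmware/software
+ * semaphore cannot be taken, or E1000_ERR_I2C on a transfer error.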
+ **/
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+		       u8 dev_addr, u8 data)
+{
+	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+	struct i2c_client *this_client = adapter->i2c_client;
+	s32 status;
+	u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+	if (!this_client)
+		return E1000_ERR_I2C;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+		return E1000_ERR_SWFW_SYNC;
+	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	if (status)
+		return E1000_ERR_I2C;
+	else
+		return 0;
+
+}
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err = 0;
+
+	if (rtnetif_running(netdev))
+		igb_close(netdev);
+
+	igb_reset_interrupt_capability(adapter);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	if (rtnetif_running(netdev))
+		err = igb_open(netdev);
+
+	return err;
+}
+/* igb_main.c */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c
new file mode 100644
index 0000000..26aa66e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c
@@ -0,0 +1,139 @@
+/* loopback.c
+ *
+ * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ * extended by Jose Carlos Billalabeitia and Jan Kiszka
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+
+#include <rtnet_port.h>
+#include <stack_mgr.h>
+
+MODULE_AUTHOR("Maintainer: Jan Kiszka <Jan.Kiszka@web.de>");
+MODULE_DESCRIPTION("RTnet loopback driver");
+MODULE_LICENSE("GPL");
+
+static struct rtnet_device *rt_loopback_dev;
+
+/***
+ * rt_loopback_open
+ * @rtdev
+ */
+static int rt_loopback_open(struct rtnet_device *rtdev)
+{
+	rt_stack_connect(rtdev, &STACK_manager);
+	rtnetif_start_queue(rtdev);
+
+	return 0;
+}
+
+/***
+ * rt_loopback_close
+ * @rtdev
+ */
+static int rt_loopback_close(struct rtnet_device *rtdev)
+{
+	rtnetif_stop_queue(rtdev);
+	rt_stack_disconnect(rtdev);
+
+	return 0;
+}
+
+/***
+ * rt_loopback_xmit - begin packet transmission
+ * @rtskb: packet to be sent
+ * @rtdev: network device to which the packet is sent
+ *
+ */
+static int rt_loopback_xmit(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	/* write transmission stamp - in case any protocol ever gets the idea to
+	   ask the loopback device for this service... */
+	if (rtskb->xmit_stamp)
+		*rtskb->xmit_stamp =
+		    cpu_to_be64(rtdm_clock_read() + *rtskb->xmit_stamp);
+
+	/* make sure that critical fields are re-initialised */
+	rtskb->chain_end = rtskb;
+
+	/* parse the Ethernet header as usual */
+	rtskb->protocol = rt_eth_type_trans(rtskb, rtdev);
+
+	rt_stack_deliver(rtskb);
+
+	return 0;
+}
+
+/***
+ * loopback_init
+ */
+static int __init loopback_init(void)
+{
+	int err;
+	struct rtnet_device *rtdev;
+
+	pr_info("initializing loopback interface...\n");
+
+	if ((rtdev = rt_alloc_etherdev(0, 1)) == NULL)
+		return -ENODEV;
+
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+
+	strcpy(rtdev->name, "rtlo");
+
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->open = &rt_loopback_open;
+	rtdev->stop = &rt_loopback_close;
+	rtdev->hard_start_xmit = &rt_loopback_xmit;
+	rtdev->flags |= IFF_LOOPBACK;
+	rtdev->flags &= ~IFF_BROADCAST;
+	rtdev->features |= NETIF_F_LLTX;
+
+	if ((err = rt_register_rtnetdev(rtdev)) != 0) {
+		rtdev_free(rtdev);
+		return err;
+	}
+
+	rt_loopback_dev = rtdev;
+
+	return 0;
+}
+
+/***
+ * loopback_cleanup
+ */
+static void __exit loopback_cleanup(void)
+{
+	struct rtnet_device *rtdev = rt_loopback_dev;
+
+	pr_info("removing loopback interface...\n");
+
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+
+	rtdev_free(rtdev);
+}
+
+module_init(loopback_init);
+module_exit(loopback_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c
new file mode 100644
index 0000000..77957d9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c
@@ -0,0 +1,1828 @@
+/*
+ * Cadence MACB/GEM Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * RTnet porting by Cristiano Mantovani & Stefano Banzi (Marposs SpA).
+ * Copyright (C) 2014 Gilles Chanteperdrix <gch@xenomai.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/circ_buf.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <rtdev.h>
+#include <rtdm/net.h>
+#include <rtnet_port.h>
+#include <rtskb.h>
+
+#include "rt_macb.h"
+
+#define MACB_RX_BUFFER_SIZE	128
+#define RX_BUFFER_MULTIPLE	64	/* bytes */
+#define RX_RING_SIZE		512	/* must be power of 2 */
+#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
+
+#define TX_RING_SIZE		128	/* must be power of 2 */
+#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
+
+/* level of occupied TX descriptors under which we wake up TX process */
+#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
+
+#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
+				 | MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
+				 | MACB_BIT(ISR_RLE)			\
+				 | MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+
+/*
+ * Graceful stop timeouts in us. We should allow up to
+ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
+ */
+#define MACB_HALT_TIMEOUT	1230
+
+/* Ring buffer accessors */
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+	return index & (TX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
+
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+	return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
+}
+
+void rtmacb_set_hwaddr(struct macb *bp)
+{
+	u32 bottom;
+	u16 top;
+
+	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+	macb_or_gem_writel(bp, SA1B, bottom);
+	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+	macb_or_gem_writel(bp, SA1T, top);
+
+	/* Clear unused address register sets */
+	macb_or_gem_writel(bp, SA2B, 0);
+	macb_or_gem_writel(bp, SA2T, 0);
+	macb_or_gem_writel(bp, SA3B, 0);
+	macb_or_gem_writel(bp, SA3T, 0);
+	macb_or_gem_writel(bp, SA4B, 0);
+	macb_or_gem_writel(bp, SA4T, 0);
+}
+EXPORT_SYMBOL_GPL(rtmacb_set_hwaddr);
+
+void rtmacb_get_hwaddr(struct macb *bp)
+{
+	struct macb_platform_data *pdata;
+	u32 bottom;
+	u16 top;
+	u8 addr[6];
+	int i;
+
+	pdata = dev_get_platdata(&bp->pdev->dev);
+
+	/* Check all 4 address registers for a valid address */
+	for (i = 0; i < 4; i++) {
+		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
+		top = macb_or_gem_readl(bp, SA1T + i * 8);
+
+		if (pdata && pdata->rev_eth_addr) {
+			addr[5] = bottom & 0xff;
+			addr[4] = (bottom >> 8) & 0xff;
+			addr[3] = (bottom >> 16) & 0xff;
+			addr[2] = (bottom >> 24) & 0xff;
+			addr[1] = top & 0xff;
+			addr[0] = (top & 0xff00) >> 8;
+		} else {
+			addr[0] = bottom & 0xff;
+			addr[1] = (bottom >> 8) & 0xff;
+			addr[2] = (bottom >> 16) & 0xff;
+			addr[3] = (bottom >> 24) & 0xff;
+			addr[4] = top & 0xff;
+			addr[5] = (top >> 8) & 0xff;
+		}
+
+		if (is_valid_ether_addr(addr)) {
+			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+			return;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(rtmacb_get_hwaddr);
+
+static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct macb *bp = bus->priv;
+	int value;
+
+	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+			      | MACB_BF(RW, MACB_MAN_READ)
+			      | MACB_BF(PHYA, mii_id)
+			      | MACB_BF(REGA, regnum)
+			      | MACB_BF(CODE, MACB_MAN_CODE)));
+
+	/* wait for end of transfer */
+	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+		cpu_relax();
+
+	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+	return value;
+}
+
+static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+			   u16 value)
+{
+	struct macb *bp = bus->priv;
+
+	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+			      | MACB_BF(RW, MACB_MAN_WRITE)
+			      | MACB_BF(PHYA, mii_id)
+			      | MACB_BF(REGA, regnum)
+			      | MACB_BF(CODE, MACB_MAN_CODE)
+			      | MACB_BF(DATA, value)));
+
+	/* wait for end of transfer */
+	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+		cpu_relax();
+
+	return 0;
+}
+
+/**
+ * macb_set_tx_clk() - Set a clock to a new frequency
+ * @clk: Pointer to the clock to change
+ * @speed: link speed (SPEED_10/100/1000) selecting the target frequency
+ * @dev: Pointer to the struct rtnet_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct rtnet_device *dev)
+{
+	long ferr, rate, rate_rounded;
+
+	switch (speed) {
+	case SPEED_10:
+		rate = 2500000;
+		break;
+	case SPEED_100:
+		rate = 25000000;
+		break;
+	case SPEED_1000:
+		rate = 125000000;
+		break;
+	default:
+		return;
+	}
+
+	rate_rounded = clk_round_rate(clk, rate);
+	if (rate_rounded < 0)
+		return;
+
+	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
+	 * is not satisfied.
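+	 * (rate / 100000 is one 10 ppm step, so the ferr > 5 test below
+	 * fires when the rounded rate is more than 50 ppm off target,
+	 * e.g. more than 1250 Hz away from the 25 MHz SPEED_100 rate.)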
+ */ + ferr = abs(rate_rounded - rate); + ferr = DIV_ROUND_UP(ferr, rate / 100000); + if (ferr > 5) + rtdev_warn(dev, "unable to generate target frequency: %ld Hz\n", + rate); + + if (clk_set_rate(clk, rate_rounded)) + rtdev_err(dev, "adjusting tx_clk failed.\n"); +} + +struct macb_dummy_netdev_priv { + struct rtnet_device *rtdev; +}; + +static void macb_handle_link_change(struct net_device *nrt_dev) +{ + struct macb_dummy_netdev_priv *p = netdev_priv(nrt_dev); + struct rtnet_device *dev = p->rtdev; + struct macb *bp = rtnetdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + unsigned long flags; + + int status_change = 0; + + rtdm_lock_get_irqsave(&bp->lock, flags); + + if (phydev->link) { + if ((bp->speed != phydev->speed) || + (bp->duplex != phydev->duplex)) { + u32 reg; + + reg = macb_readl(bp, NCFGR); + reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); + if (macb_is_gem(bp)) + reg &= ~GEM_BIT(GBE); + + if (phydev->duplex) + reg |= MACB_BIT(FD); + if (phydev->speed == SPEED_100) + reg |= MACB_BIT(SPD); + if (phydev->speed == SPEED_1000) + reg |= GEM_BIT(GBE); + + macb_or_gem_writel(bp, NCFGR, reg); + + bp->speed = phydev->speed; + bp->duplex = phydev->duplex; + status_change = 1; + } + } + + if (phydev->link != bp->link) { + if (!phydev->link) { + bp->speed = 0; + bp->duplex = -1; + } + bp->link = phydev->link; + + status_change = 1; + } + + rtdm_lock_put_irqrestore(&bp->lock, flags); + + if (!IS_ERR(bp->tx_clk)) + macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); + + if (status_change) { + if (phydev->link) { + rtnetif_carrier_on(dev); + rtdev_info(dev, "link up (%d/%s)\n", + phydev->speed, + phydev->duplex == DUPLEX_FULL ? + "Full" : "Half"); + } else { + rtnetif_carrier_off(dev); + rtdev_info(dev, "link down\n"); + } + } +} + +/* based on au1000_eth. c*/ +static int macb_mii_probe(struct rtnet_device *dev) +{ + struct macb *bp = rtnetdev_priv(dev); + struct macb_dummy_netdev_priv *p; + struct macb_platform_data *pdata; + struct phy_device *phydev; + struct net_device *dummy; + int phy_irq; + int ret; + + phydev = phy_find_first(bp->mii_bus); + if (!phydev) { + rtdev_err(dev, "no PHY found\n"); + return -ENXIO; + } + + pdata = dev_get_platdata(&bp->pdev->dev); + if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { + ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); + if (!ret) { + phy_irq = gpio_to_irq(pdata->phy_irq_pin); + phydev->irq = (phy_irq < 0) ? 
PHY_POLL : phy_irq; + } + } + + dummy = alloc_etherdev(sizeof(*p)); + p = netdev_priv(dummy); + p->rtdev = dev; + bp->phy_phony_net_device = dummy; + + /* attach the mac to the phy */ + ret = phy_connect_direct(dummy, phydev, &macb_handle_link_change, + bp->phy_interface); + if (ret) { + rtdev_err(dev, "Could not attach to PHY\n"); + return ret; + } + + /* mask with MAC supported features */ + if (macb_is_gem(bp)) + phydev->supported &= PHY_GBIT_FEATURES; + else + phydev->supported &= PHY_BASIC_FEATURES; + + phydev->advertising = phydev->supported; + + bp->link = 0; + bp->speed = 0; + bp->duplex = -1; + bp->phy_dev = phydev; + + return 0; +} + +int rtmacb_mii_init(struct macb *bp) +{ + struct macb_platform_data *pdata; + struct device_node *np; + int err = -ENXIO, i; + + /* Enable management port */ + macb_writel(bp, NCR, MACB_BIT(MPE)); + + bp->mii_bus = mdiobus_alloc(); + if (bp->mii_bus == NULL) { + err = -ENOMEM; + goto err_out; + } + + bp->mii_bus->name = "MACB_mii_bus"; + bp->mii_bus->read = &macb_mdio_read; + bp->mii_bus->write = &macb_mdio_write; + snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + bp->pdev->name, bp->pdev->id); + bp->mii_bus->priv = bp; + bp->mii_bus->parent = &bp->pdev->dev; + pdata = dev_get_platdata(&bp->pdev->dev); + + bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (!bp->mii_bus->irq) { + err = -ENOMEM; + goto err_out_free_mdiobus; + } + + np = bp->pdev->dev.of_node; + if (np) { + /* try dt phy registration */ + err = of_mdiobus_register(bp->mii_bus, np); + + /* fallback to standard phy registration if no phy were + found during dt phy registration */ + if (!err && !phy_find_first(bp->mii_bus)) { + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *phydev; + + phydev = mdiobus_scan(bp->mii_bus, i); + if (IS_ERR(phydev)) { + err = PTR_ERR(phydev); + break; + } + } + + if (err) + goto err_out_unregister_bus; + } + } else { + for (i = 0; i < PHY_MAX_ADDR; i++) + bp->mii_bus->irq[i] = PHY_POLL; + + if (pdata) + bp->mii_bus->phy_mask = pdata->phy_mask; + + err = mdiobus_register(bp->mii_bus); + } + + if (err) + goto err_out_free_mdio_irq; + + err = macb_mii_probe(bp->dev); + if (err) + goto err_out_unregister_bus; + + return 0; + +err_out_unregister_bus: + mdiobus_unregister(bp->mii_bus); +err_out_free_mdio_irq: + kfree(bp->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(bp->mii_bus); +err_out: + return err; +} +EXPORT_SYMBOL_GPL(rtmacb_mii_init); + +static void macb_update_stats(struct macb *bp) +{ + u32 __iomem *reg = bp->regs + MACB_PFR; + u32 *p = &bp->hw_stats.macb.rx_pause_frames; + u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; + + WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); + + for(; p < end; p++, reg++) + *p += __raw_readl(reg); +} + +static int macb_halt_tx(struct macb *bp) +{ + unsigned long halt_time, timeout; + u32 status; + + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); + + timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); + do { + halt_time = jiffies; + status = macb_readl(bp, TSR); + if (!(status & MACB_BIT(TGO))) + return 0; + + usleep_range(10, 250); + } while (time_before(halt_time, timeout)); + + return -ETIMEDOUT; +} + +static void macb_tx_error_task(struct work_struct *work) +{ + struct macb *bp = container_of(work, struct macb, tx_error_task); + struct macb_tx_skb *tx_skb; + struct rtskb *skb; + unsigned int tail; + + rtdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n", + bp->tx_tail, bp->tx_head); + + /* Make sure nobody is trying to queue up new 
packets */ + rtnetif_stop_queue(bp->dev); + + /* + * Stop transmission now + * (in case we have just queued new packets) + */ + if (macb_halt_tx(bp)) + /* Just complain for now, reinitializing TX path can be good */ + rtdev_err(bp->dev, "BUG: halt tx timed out\n"); + + /* No need for the lock here as nobody will interrupt us anymore */ + + /* + * Treat frames in TX queue including the ones that caused the error. + * Free transmit buffers in upper layer. + */ + for (tail = bp->tx_tail; tail != bp->tx_head; tail++) { + struct macb_dma_desc *desc; + u32 ctrl; + + desc = macb_tx_desc(bp, tail); + ctrl = desc->ctrl; + tx_skb = macb_tx_skb(bp, tail); + skb = tx_skb->skb; + + if (ctrl & MACB_BIT(TX_USED)) { + rtdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", + macb_tx_ring_wrap(tail), skb->data); + bp->stats.tx_packets++; + bp->stats.tx_bytes += skb->len; + } else { + /* + * "Buffers exhausted mid-frame" errors may only happen + * if the driver is buggy, so complain loudly about those. + * Statistics are updated by hardware. + */ + if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) + rtdev_err(bp->dev, + "BUG: TX buffers exhausted mid-frame\n"); + + desc->ctrl = ctrl | MACB_BIT(TX_USED); + } + + dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len, + DMA_TO_DEVICE); + tx_skb->skb = NULL; + dev_kfree_rtskb(skb); + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + /* Reinitialize the TX desc queue */ + macb_writel(bp, TBQP, bp->tx_ring_dma); + /* Make TX ring reflect state of hardware */ + bp->tx_head = bp->tx_tail = 0; + + /* Now we are ready to start transmission again */ + rtnetif_wake_queue(bp->dev); + + /* Housework before enabling TX IRQ */ + macb_writel(bp, TSR, macb_readl(bp, TSR)); + macb_writel(bp, IER, MACB_TX_INT_FLAGS); +} + +static void macb_tx_interrupt(struct macb *bp) +{ + unsigned int tail; + unsigned int head; + u32 status; + + status = macb_readl(bp, TSR); + macb_writel(bp, TSR, status); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(TCOMP)); + + rtdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", + (unsigned long)status); + + head = bp->tx_head; + for (tail = bp->tx_tail; tail != head; tail++) { + struct macb_tx_skb *tx_skb; + struct rtskb *skb; + struct macb_dma_desc *desc; + u32 ctrl; + + desc = macb_tx_desc(bp, tail); + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + ctrl = desc->ctrl; + + if (!(ctrl & MACB_BIT(TX_USED))) + break; + + tx_skb = macb_tx_skb(bp, tail); + skb = tx_skb->skb; + + rtdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", + macb_tx_ring_wrap(tail), skb->data); + dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len, + DMA_TO_DEVICE); + bp->stats.tx_packets++; + bp->stats.tx_bytes += skb->len; + tx_skb->skb = NULL; + dev_kfree_rtskb(skb); + } + + bp->tx_tail = tail; + if (rtnetif_queue_stopped(bp->dev) + && CIRC_CNT(bp->tx_head, bp->tx_tail, + TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH) + rtnetif_wake_queue(bp->dev); +} + +static void gem_rx_refill(struct macb *bp) +{ + unsigned int entry; + struct rtskb *skb; + dma_addr_t paddr; + + while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { + entry = macb_rx_ring_wrap(bp->rx_prepared_head); + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + bp->rx_prepared_head++; + + if (bp->rx_skbuff[entry] == NULL) { + /* allocate rtskb for this free entry in ring */ + skb = rtnetdev_alloc_rtskb(bp->dev, bp->rx_buffer_size); + if (unlikely(skb == NULL)) { + rtdev_err(bp->dev, + "Unable to allocate 
sk_buff\n"); + break; + } + + /* now fill corresponding descriptor entry */ + paddr = dma_map_single(&bp->pdev->dev, skb->data, + bp->rx_buffer_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, paddr)) { + dev_kfree_rtskb(skb); + break; + } + + bp->rx_skbuff[entry] = skb; + + if (entry == RX_RING_SIZE - 1) + paddr |= MACB_BIT(RX_WRAP); + bp->rx_ring[entry].addr = paddr; + bp->rx_ring[entry].ctrl = 0; + + /* properly align Ethernet header */ + rtskb_reserve(skb, NET_IP_ALIGN); + } + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + rtdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", + bp->rx_prepared_head, bp->rx_tail); +} + +/* Mark DMA descriptors from begin up to and not including end as unused */ +static void discard_partial_frame(struct macb *bp, unsigned int begin, + unsigned int end) +{ + unsigned int frag; + + for (frag = begin; frag != end; frag++) { + struct macb_dma_desc *desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + /* + * When this happens, the hardware stats registers for + * whatever caused this is updated, so we don't have to record + * anything. + */ +} + +static int gem_rx(struct macb *bp, int budget, nanosecs_abs_t *time_stamp) +{ + unsigned int len; + unsigned int entry; + struct rtskb *skb; + struct macb_dma_desc *desc; + int count = 0, status; + + status = macb_readl(bp, RSR); + macb_writel(bp, RSR, status); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(RCOMP)); + + while (count < budget) { + u32 addr, ctrl; + + entry = macb_rx_ring_wrap(bp->rx_tail); + desc = &bp->rx_ring[entry]; + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + addr = desc->addr; + ctrl = desc->ctrl; + + if (!(addr & MACB_BIT(RX_USED))) + break; + + bp->rx_tail++; + count++; + + if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { + rtdev_err(bp->dev, + "not whole frame pointed by descriptor\n"); + bp->stats.rx_dropped++; + break; + } + skb = bp->rx_skbuff[entry]; + if (unlikely(!skb)) { + rtdev_err(bp->dev, + "inconsistent Rx descriptor chain\n"); + bp->stats.rx_dropped++; + break; + } + skb->time_stamp = *time_stamp; + /* now everything is ready for receiving packet */ + bp->rx_skbuff[entry] = NULL; + len = MACB_BFEXT(RX_FRMLEN, ctrl); + + rtdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); + + rtskb_put(skb, len); + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); + dma_unmap_single(&bp->pdev->dev, addr, + bp->rx_buffer_size, DMA_FROM_DEVICE); + + skb->protocol = rt_eth_type_trans(skb, bp->dev); + + bp->stats.rx_packets++; + bp->stats.rx_bytes += skb->len; + +#if defined(DEBUG) && defined(VERBOSE_DEBUG) + rtdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", + skb->len, skb->csum); + print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, + skb->mac_header, 16, true); + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, + skb->data, 32, true); +#endif + + rtnetif_rx(skb); + } + + gem_rx_refill(bp); + + return count; +} + +static int macb_rx_frame(struct macb *bp, unsigned int first_frag, + unsigned int last_frag, nanosecs_abs_t *time_stamp) +{ + unsigned int len; + unsigned int frag; + unsigned int offset; + struct rtskb *skb; + struct macb_dma_desc *desc; + + desc = macb_rx_desc(bp, last_frag); + len = MACB_BFEXT(RX_FRMLEN, desc->ctrl); + + rtdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", + macb_rx_ring_wrap(first_frag), + macb_rx_ring_wrap(last_frag), 
len); + + /* + * The ethernet header starts NET_IP_ALIGN bytes into the + * first buffer. Since the header is 14 bytes, this makes the + * payload word-aligned. + * + * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy + * the two padding bytes into the skb so that we avoid hitting + * the slowpath in memcpy(), and pull them off afterwards. + */ + skb = rtnetdev_alloc_rtskb(bp->dev, len + NET_IP_ALIGN); + if (!skb) { + rtdev_notice(bp->dev, "Low memory, packet dropped.\n"); + bp->stats.rx_dropped++; + for (frag = first_frag; ; frag++) { + desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); + if (frag == last_frag) + break; + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + return 1; + } + + offset = 0; + len += NET_IP_ALIGN; + skb->time_stamp = *time_stamp; + rtskb_put(skb, len); + + for (frag = first_frag; ; frag++) { + unsigned int frag_len = bp->rx_buffer_size; + + if (offset + frag_len > len) { + BUG_ON(frag != last_frag); + frag_len = len - offset; + } + memcpy(skb->data + offset, macb_rx_buffer(bp, frag), frag_len); + offset += bp->rx_buffer_size; + desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); + + if (frag == last_frag) + break; + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + __rtskb_pull(skb, NET_IP_ALIGN); + skb->protocol = rt_eth_type_trans(skb, bp->dev); + + bp->stats.rx_packets++; + bp->stats.rx_bytes += skb->len; + rtdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", + skb->len, skb->csum); + rtnetif_rx(skb); + + return 0; +} + +static int macb_rx(struct macb *bp, int budget, nanosecs_abs_t *time_stamp) +{ + int received = 0; + unsigned int tail; + int first_frag = -1; + + for (tail = bp->rx_tail; budget > 0; tail++) { + struct macb_dma_desc *desc = macb_rx_desc(bp, tail); + u32 addr, ctrl; + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + addr = desc->addr; + ctrl = desc->ctrl; + + if (!(addr & MACB_BIT(RX_USED))) + break; + + if (ctrl & MACB_BIT(RX_SOF)) { + if (first_frag != -1) + discard_partial_frame(bp, first_frag, tail); + first_frag = tail; + } + + if (ctrl & MACB_BIT(RX_EOF)) { + int dropped; + BUG_ON(first_frag == -1); + + dropped = macb_rx_frame(bp, first_frag, tail, time_stamp); + first_frag = -1; + if (!dropped) { + received++; + budget--; + } + } + } + + if (first_frag != -1) + bp->rx_tail = first_frag; + else + bp->rx_tail = tail; + + return received; +} + +static int macb_interrupt(rtdm_irq_t *irq_handle) +{ + void *dev_id = rtdm_irq_get_arg(irq_handle, void); + nanosecs_abs_t time_stamp = rtdm_clock_read(); + struct rtnet_device *dev = dev_id; + struct macb *bp = rtnetdev_priv(dev); + unsigned received = 0; + u32 status, ctrl; + + status = macb_readl(bp, ISR); + + if (unlikely(!status)) + return RTDM_IRQ_NONE; + + rtdm_lock_get(&bp->lock); + + while (status) { + /* close possible race with dev_close */ + if (unlikely(!rtnetif_running(dev))) { + macb_writel(bp, IDR, -1); + break; + } + + rtdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status); + + if (status & MACB_BIT(RCOMP)) { + received += bp->macbgem_ops.mog_rx(bp, 100 - received, + &time_stamp); + } + + if (unlikely(status & (MACB_TX_ERR_FLAGS))) { + macb_writel(bp, IDR, MACB_TX_INT_FLAGS); + rtdm_schedule_nrt_work(&bp->tx_error_task); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_TX_ERR_FLAGS); + + break; + } + + if (status & MACB_BIT(TCOMP)) + macb_tx_interrupt(bp); + + /* + * Link change detection isn't possible with RMII, so we'll + * add that if/when we 
get our hands on a full-blown MII PHY. + */ + + if (status & MACB_BIT(RXUBR)) { + ctrl = macb_readl(bp, NCR); + macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); + macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(RXUBR)); + } + + if (status & MACB_BIT(ISR_ROVR)) { + /* We missed at least one packet */ + if (macb_is_gem(bp)) + bp->hw_stats.gem.rx_overruns++; + else + bp->hw_stats.macb.rx_overruns++; + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(ISR_ROVR)); + } + + if (status & MACB_BIT(HRESP)) { + /* + * TODO: Reset the hardware, and maybe move the + * rtdev_err to a lower-priority context as well + * (work queue?) + */ + rtdev_err(dev, "DMA bus error: HRESP not OK\n"); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(HRESP)); + } + + status = macb_readl(bp, ISR); + } + + rtdm_lock_put(&bp->lock); + + if (received) + rt_mark_stack_mgr(dev); + + return RTDM_IRQ_HANDLED; +} + +static int macb_start_xmit(struct rtskb *skb, struct rtnet_device *dev) +{ + struct macb *bp = rtnetdev_priv(dev); + dma_addr_t mapping; + unsigned int len, entry; + struct macb_dma_desc *desc; + struct macb_tx_skb *tx_skb; + u32 ctrl; + unsigned long flags; + +#if defined(DEBUG) && defined(VERBOSE_DEBUG) + rtdev_vdbg(bp->dev, + "start_xmit: len %u head %p data %p tail %p end %p\n", + skb->len, skb->head, skb->data, + rtskb_tail_pointer(skb), rtskb_end_pointer(skb)); + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, 16, true); +#endif + + len = skb->len; + rtdm_lock_get_irqsave(&bp->lock, flags); + + /* This is a hard error, log it. */ + if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) { + rtnetif_stop_queue(dev); + rtdm_lock_put_irqrestore(&bp->lock, flags); + rtdev_err(bp->dev, "BUG! 
Tx Ring full when queue awake!\n"); + rtdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", + bp->tx_head, bp->tx_tail); + return RTDEV_TX_BUSY; + } + + entry = macb_tx_ring_wrap(bp->tx_head); + rtdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); + mapping = dma_map_single(&bp->pdev->dev, skb->data, + len, DMA_TO_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { + dev_kfree_rtskb(skb); + goto unlock; + } + + bp->tx_head++; + tx_skb = &bp->tx_skb[entry]; + tx_skb->skb = skb; + tx_skb->mapping = mapping; + rtdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n", + skb->data, (unsigned long)mapping); + + ctrl = MACB_BF(TX_FRMLEN, len); + ctrl |= MACB_BIT(TX_LAST); + if (entry == (TX_RING_SIZE - 1)) + ctrl |= MACB_BIT(TX_WRAP); + + desc = &bp->tx_ring[entry]; + desc->addr = mapping; + desc->ctrl = ctrl; + + /* Make newly initialized descriptor visible to hardware */ + wmb(); + + rtskb_tx_timestamp(skb); + + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); + + if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) + rtnetif_stop_queue(dev); + +unlock: + rtdm_lock_put_irqrestore(&bp->lock, flags); + + return RTDEV_TX_OK; +} + +static void macb_init_rx_buffer_size(struct macb *bp, size_t size) +{ + if (!macb_is_gem(bp)) { + bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; + } else { + bp->rx_buffer_size = size; + + if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { + rtdev_dbg(bp->dev, + "RX buffer must be multiple of %d bytes, expanding\n", + RX_BUFFER_MULTIPLE); + bp->rx_buffer_size = + roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); + } + } + + rtdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", + bp->dev->mtu, bp->rx_buffer_size); +} + +static void gem_free_rx_buffers(struct macb *bp) +{ + struct rtskb *skb; + struct macb_dma_desc *desc; + dma_addr_t addr; + int i; + + if (!bp->rx_skbuff) + return; + + for (i = 0; i < RX_RING_SIZE; i++) { + skb = bp->rx_skbuff[i]; + + if (skb == NULL) + continue; + + desc = &bp->rx_ring[i]; + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); + dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, + DMA_FROM_DEVICE); + dev_kfree_rtskb(skb); + skb = NULL; + } + + kfree(bp->rx_skbuff); + bp->rx_skbuff = NULL; +} + +static void macb_free_rx_buffers(struct macb *bp) +{ + if (bp->rx_buffers) { + dma_free_coherent(&bp->pdev->dev, + RX_RING_SIZE * bp->rx_buffer_size, + bp->rx_buffers, bp->rx_buffers_dma); + bp->rx_buffers = NULL; + } +} + +static void macb_free_consistent(struct macb *bp) +{ + if (bp->tx_skb) { + kfree(bp->tx_skb); + bp->tx_skb = NULL; + } + bp->macbgem_ops.mog_free_rx_buffers(bp); + if (bp->rx_ring) { + dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, + bp->rx_ring, bp->rx_ring_dma); + bp->rx_ring = NULL; + } + if (bp->tx_ring) { + dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, + bp->tx_ring, bp->tx_ring_dma); + bp->tx_ring = NULL; + } +} + +static int gem_alloc_rx_buffers(struct macb *bp) +{ + int size; + + size = RX_RING_SIZE * sizeof(struct rtskb *); + bp->rx_skbuff = kzalloc(size, GFP_KERNEL); + if (!bp->rx_skbuff) + return -ENOMEM; + else + rtdev_dbg(bp->dev, + "Allocated %d RX struct rtskb entries at %p\n", + RX_RING_SIZE, bp->rx_skbuff); + return 0; +} + +static int macb_alloc_rx_buffers(struct macb *bp) +{ + int size; + + size = RX_RING_SIZE * bp->rx_buffer_size; + bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, + &bp->rx_buffers_dma, GFP_KERNEL); + if (!bp->rx_buffers) + return -ENOMEM; + else + rtdev_dbg(bp->dev, + "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", + size, 
(unsigned long)bp->rx_buffers_dma, bp->rx_buffers); + return 0; +} + +static int macb_alloc_consistent(struct macb *bp) +{ + int size; + + size = TX_RING_SIZE * sizeof(struct macb_tx_skb); + bp->tx_skb = kmalloc(size, GFP_KERNEL); + if (!bp->tx_skb) + goto out_err; + + size = RX_RING_BYTES; + bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, + &bp->rx_ring_dma, GFP_KERNEL); + if (!bp->rx_ring) + goto out_err; + rtdev_dbg(bp->dev, + "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); + + size = TX_RING_BYTES; + bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, + &bp->tx_ring_dma, GFP_KERNEL); + if (!bp->tx_ring) + goto out_err; + rtdev_dbg(bp->dev, + "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); + + if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) + goto out_err; + + return 0; + +out_err: + macb_free_consistent(bp); + return -ENOMEM; +} + +static void gem_init_rings(struct macb *bp) +{ + int i; + + for (i = 0; i < TX_RING_SIZE; i++) { + bp->tx_ring[i].addr = 0; + bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); + } + bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + + bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0; + + gem_rx_refill(bp); +} + +static void macb_init_rings(struct macb *bp) +{ + int i; + dma_addr_t addr; + + addr = bp->rx_buffers_dma; + for (i = 0; i < RX_RING_SIZE; i++) { + bp->rx_ring[i].addr = addr; + bp->rx_ring[i].ctrl = 0; + addr += bp->rx_buffer_size; + } + bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); + + for (i = 0; i < TX_RING_SIZE; i++) { + bp->tx_ring[i].addr = 0; + bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); + } + bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + + bp->rx_tail = bp->tx_head = bp->tx_tail = 0; +} + +static void macb_reset_hw(struct macb *bp) +{ + /* + * Disable RX and TX (XXX: Should we halt the transmission + * more gracefully?) + */ + macb_writel(bp, NCR, 0); + + /* Clear the stats registers (XXX: Update stats first?) */ + macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); + + /* Clear all status flags */ + macb_writel(bp, TSR, -1); + macb_writel(bp, RSR, -1); + + /* Disable all interrupts */ + macb_writel(bp, IDR, -1); + macb_readl(bp, ISR); +} + +static u32 gem_mdc_clk_div(struct macb *bp) +{ + u32 config; + unsigned long pclk_hz = clk_get_rate(bp->pclk); + + if (pclk_hz <= 20000000) + config = GEM_BF(CLK, GEM_CLK_DIV8); + else if (pclk_hz <= 40000000) + config = GEM_BF(CLK, GEM_CLK_DIV16); + else if (pclk_hz <= 80000000) + config = GEM_BF(CLK, GEM_CLK_DIV32); + else if (pclk_hz <= 120000000) + config = GEM_BF(CLK, GEM_CLK_DIV48); + else if (pclk_hz <= 160000000) + config = GEM_BF(CLK, GEM_CLK_DIV64); + else + config = GEM_BF(CLK, GEM_CLK_DIV96); + + return config; +} + +static u32 macb_mdc_clk_div(struct macb *bp) +{ + u32 config; + unsigned long pclk_hz; + + if (macb_is_gem(bp)) + return gem_mdc_clk_div(bp); + + pclk_hz = clk_get_rate(bp->pclk); + if (pclk_hz <= 20000000) + config = MACB_BF(CLK, MACB_CLK_DIV8); + else if (pclk_hz <= 40000000) + config = MACB_BF(CLK, MACB_CLK_DIV16); + else if (pclk_hz <= 80000000) + config = MACB_BF(CLK, MACB_CLK_DIV32); + else + config = MACB_BF(CLK, MACB_CLK_DIV64); + + return config; +} + +/* + * Get the DMA bus width field of the network configuration register that we + * should program. We find the width from decoding the design configuration + * register to find the maximum supported data bus width. 
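+ * A DBWDEF value of 4, 2 or 1 selects a 128-, 64- or 32-bit bus width
+ * respectively, which is exactly what the switch below decodes.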
+ */ +static u32 macb_dbw(struct macb *bp) +{ + if (!macb_is_gem(bp)) + return 0; + + switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { + case 4: + return GEM_BF(DBW, GEM_DBW128); + case 2: + return GEM_BF(DBW, GEM_DBW64); + case 1: + default: + return GEM_BF(DBW, GEM_DBW32); + } +} + +/* + * Configure the receive DMA engine + * - use the correct receive buffer size + * - set the possibility to use INCR16 bursts + * (if not supported by FIFO, it will fallback to default) + * - set both rx/tx packet buffers to full memory size + * These are configurable parameters for GEM. + */ +static void macb_configure_dma(struct macb *bp) +{ + u32 dmacfg; + + if (macb_is_gem(bp)) { + dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); + dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); + dmacfg |= GEM_BF(FBLDO, 16); + dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); + dmacfg &= ~GEM_BIT(ENDIA); + gem_writel(bp, DMACFG, dmacfg); + } +} + +/* + * Configure peripheral capacities according to integration options used + */ +static void macb_configure_caps(struct macb *bp) +{ + if (macb_is_gem(bp)) { + if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0) + bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; + } + rtdev_vdbg(bp->dev, "Capabilities : %X\n", bp->caps); +} + +static void macb_init_hw(struct macb *bp) +{ + u32 config; + + macb_reset_hw(bp); + rtmacb_set_hwaddr(bp); + + config = macb_mdc_clk_div(bp); + config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ + config |= MACB_BIT(PAE); /* PAuse Enable */ + config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ + if (bp->dev->flags & IFF_PROMISC) + config |= MACB_BIT(CAF); /* Copy All Frames */ + if (!(bp->dev->flags & IFF_BROADCAST)) + config |= MACB_BIT(NBC); /* No BroadCast */ + config |= macb_dbw(bp); + macb_writel(bp, NCFGR, config); + bp->speed = SPEED_10; + bp->duplex = DUPLEX_HALF; + + macb_configure_dma(bp); + macb_configure_caps(bp); + + /* Initialize TX and RX buffers */ + macb_writel(bp, RBQP, bp->rx_ring_dma); + macb_writel(bp, TBQP, bp->tx_ring_dma); + + /* Enable TX and RX */ + macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); + + /* Enable interrupts */ + macb_writel(bp, IER, (MACB_RX_INT_FLAGS + | MACB_TX_INT_FLAGS + | MACB_BIT(HRESP))); + +} + +static int macb_open(struct rtnet_device *dev) +{ + struct macb *bp = rtnetdev_priv(dev); + size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; + int err; + + rt_stack_connect(dev, &STACK_manager); + + rtdev_dbg(bp->dev, "open\n"); + + /* carrier starts down */ + rtnetif_carrier_off(dev); + + /* if the phy is not yet register, retry later*/ + if (!bp->phy_dev) + return -EAGAIN; + + /* RX buffers initialization */ + macb_init_rx_buffer_size(bp, bufsz); + + err = macb_alloc_consistent(bp); + if (err) { + rtdev_err(dev, "Unable to allocate DMA memory (error %d)\n", + err); + return err; + } + + bp->macbgem_ops.mog_init_rings(bp); + macb_init_hw(bp); + + /* schedule a link state check */ + phy_start(bp->phy_dev); + + rtnetif_start_queue(dev); + + return 0; +} + +static int macb_close(struct rtnet_device *dev) +{ + struct macb *bp = rtnetdev_priv(dev); + unsigned long flags; + + rtnetif_stop_queue(dev); + + if (bp->phy_dev) + phy_stop(bp->phy_dev); + + rtdm_lock_get_irqsave(&bp->lock, flags); + macb_reset_hw(bp); + rtnetif_carrier_off(dev); + rtdm_lock_put_irqrestore(&bp->lock, flags); + + macb_free_consistent(bp); + + rt_stack_disconnect(dev); + + return 0; +} + +static void gem_update_stats(struct macb *bp) +{ + u32 __iomem *reg = bp->regs + GEM_OTX; + u32 *p 
= &bp->hw_stats.gem.tx_octets_31_0; + u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1; + + for (; p < end; p++, reg++) + *p += __raw_readl(reg); +} + +static struct net_device_stats *gem_get_stats(struct macb *bp) +{ + struct gem_stats *hwstat = &bp->hw_stats.gem; + struct net_device_stats *nstat = &bp->stats; + + gem_update_stats(bp); + + nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + + hwstat->rx_alignment_errors + + hwstat->rx_resource_errors + + hwstat->rx_overruns + + hwstat->rx_oversize_frames + + hwstat->rx_jabbers + + hwstat->rx_undersized_frames + + hwstat->rx_length_field_frame_errors); + nstat->tx_errors = (hwstat->tx_late_collisions + + hwstat->tx_excessive_collisions + + hwstat->tx_underrun + + hwstat->tx_carrier_sense_errors); + nstat->multicast = hwstat->rx_multicast_frames; + nstat->collisions = (hwstat->tx_single_collision_frames + + hwstat->tx_multiple_collision_frames + + hwstat->tx_excessive_collisions); + nstat->rx_length_errors = (hwstat->rx_oversize_frames + + hwstat->rx_jabbers + + hwstat->rx_undersized_frames + + hwstat->rx_length_field_frame_errors); + nstat->rx_over_errors = hwstat->rx_resource_errors; + nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; + nstat->rx_frame_errors = hwstat->rx_alignment_errors; + nstat->rx_fifo_errors = hwstat->rx_overruns; + nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; + nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; + nstat->tx_fifo_errors = hwstat->tx_underrun; + + return nstat; +} + +struct net_device_stats *rtmacb_get_stats(struct rtnet_device *dev) +{ + struct macb *bp = rtnetdev_priv(dev); + struct net_device_stats *nstat = &bp->stats; + struct macb_stats *hwstat = &bp->hw_stats.macb; + + if (macb_is_gem(bp)) + return gem_get_stats(bp); + + /* read stats from hardware */ + macb_update_stats(bp); + + /* Convert HW stats into netdevice stats */ + nstat->rx_errors = (hwstat->rx_fcs_errors + + hwstat->rx_align_errors + + hwstat->rx_resource_errors + + hwstat->rx_overruns + + hwstat->rx_oversize_pkts + + hwstat->rx_jabbers + + hwstat->rx_undersize_pkts + + hwstat->sqe_test_errors + + hwstat->rx_length_mismatch); + nstat->tx_errors = (hwstat->tx_late_cols + + hwstat->tx_excessive_cols + + hwstat->tx_underruns + + hwstat->tx_carrier_errors); + nstat->collisions = (hwstat->tx_single_cols + + hwstat->tx_multiple_cols + + hwstat->tx_excessive_cols); + nstat->rx_length_errors = (hwstat->rx_oversize_pkts + + hwstat->rx_jabbers + + hwstat->rx_undersize_pkts + + hwstat->rx_length_mismatch); + nstat->rx_over_errors = hwstat->rx_resource_errors + + hwstat->rx_overruns; + nstat->rx_crc_errors = hwstat->rx_fcs_errors; + nstat->rx_frame_errors = hwstat->rx_align_errors; + nstat->rx_fifo_errors = hwstat->rx_overruns; + /* XXX: What does "missed" mean? */ + nstat->tx_aborted_errors = hwstat->tx_excessive_cols; + nstat->tx_carrier_errors = hwstat->tx_carrier_errors; + nstat->tx_fifo_errors = hwstat->tx_underruns; + /* Don't know about heartbeat or window errors... 
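+ * tx_heartbeat_errors and tx_window_errors are never written by this
+ * driver, so they keep their initial (normally zero) values.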
*/ + + return nstat; +} +EXPORT_SYMBOL_GPL(rtmacb_get_stats); + +int rtmacb_ioctl(struct rtnet_device *dev, unsigned cmd, void *rq) +{ + struct macb *bp = rtnetdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!rtnetif_running(dev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} +EXPORT_SYMBOL_GPL(rtmacb_ioctl); + +#if defined(CONFIG_OF) +static const struct of_device_id macb_dt_ids[] = { + { .compatible = "cdns,at32ap7000-macb" }, + { .compatible = "cdns,at91sam9260-macb" }, + { .compatible = "cdns,macb" }, + { .compatible = "cdns,pc302-gem" }, + { .compatible = "cdns,gem" }, + { .compatible = "atmel,sama5d3-gem" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, macb_dt_ids); +#endif + +static int __init macb_probe(struct platform_device *pdev) +{ + struct macb_platform_data *pdata; + struct resource *regs; + struct rtnet_device *dev; + struct macb *bp; + struct phy_device *phydev; + u32 config; + int err = -ENXIO; + struct pinctrl *pinctrl; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) + const char *mac; +#endif + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) { + dev_err(&pdev->dev, "no mmio resource defined\n"); + goto err_out; + } + + pinctrl = devm_pinctrl_get_select_default(&pdev->dev); + if (IS_ERR(pinctrl)) { + err = PTR_ERR(pinctrl); + if (err == -EPROBE_DEFER) + goto err_out; + + dev_warn(&pdev->dev, "No pinctrl provided\n"); + } + + err = -ENOMEM; + dev = rt_alloc_etherdev(sizeof(*bp), RX_RING_SIZE * 2 + TX_RING_SIZE); + if (!dev) + goto err_out; + + rtdev_alloc_name(dev, "rteth%d"); + rt_rtdev_connect(dev, &RTDEV_manager); + dev->vers = RTDEV_VERS_2_0; + dev->sysbind = &pdev->dev; + + /* TODO: Actually, we have some interesting features... 
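+ * For the moment no feature flags are advertised at all, hence the
+ * no-op OR with 0 below.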
*/ + dev->features |= 0; + + bp = rtnetdev_priv(dev); + bp->pdev = pdev; + bp->dev = dev; + + rtdm_lock_init(&bp->lock); + INIT_WORK(&bp->tx_error_task, macb_tx_error_task); + + bp->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(bp->pclk)) { + err = PTR_ERR(bp->pclk); + dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); + goto err_out_free_dev; + } + + bp->hclk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(bp->hclk)) { + err = PTR_ERR(bp->hclk); + dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); + goto err_out_free_dev; + } + + bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); + + err = clk_prepare_enable(bp->pclk); + if (err) { + dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); + goto err_out_free_dev; + } + + err = clk_prepare_enable(bp->hclk); + if (err) { + dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); + goto err_out_disable_pclk; + } + + if (!IS_ERR(bp->tx_clk)) { + err = clk_prepare_enable(bp->tx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", + err); + goto err_out_disable_hclk; + } + } + + bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); + if (!bp->regs) { + dev_err(&pdev->dev, "failed to map registers, aborting.\n"); + err = -ENOMEM; + goto err_out_disable_clocks; + } + + dev->irq = platform_get_irq(pdev, 0); + rt_stack_connect(dev, &STACK_manager); + + err = rtdm_irq_request(&bp->irq_handle, dev->irq, macb_interrupt, 0, + dev->name, dev); + if (err) { + dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", + dev->irq, err); + goto err_out_disable_clocks; + } + + dev->open = macb_open; + dev->stop = macb_close; + dev->hard_start_xmit = macb_start_xmit; + dev->do_ioctl = rtmacb_ioctl; + dev->get_stats = rtmacb_get_stats; + + dev->base_addr = regs->start; + + /* setup appropriated routines according to adapter type */ + if (macb_is_gem(bp)) { + bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; + bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; + bp->macbgem_ops.mog_init_rings = gem_init_rings; + bp->macbgem_ops.mog_rx = gem_rx; + } else { + bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; + bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; + bp->macbgem_ops.mog_init_rings = macb_init_rings; + bp->macbgem_ops.mog_rx = macb_rx; + } + + /* Set MII management clock divider */ + config = macb_mdc_clk_div(bp); + config |= macb_dbw(bp); + macb_writel(bp, NCFGR, config); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0) + err = of_get_mac_address(pdev->dev.of_node, bp->dev->dev_addr); + if (err) +#else + mac = of_get_mac_address(pdev->dev.of_node); + if (mac) + memcpy(bp->dev->dev_addr, mac, ETH_ALEN); + else +#endif + rtmacb_get_hwaddr(bp); + + err = of_get_phy_mode(pdev->dev.of_node); + if (err < 0) { + pdata = dev_get_platdata(&pdev->dev); + if (pdata && pdata->is_rmii) + bp->phy_interface = PHY_INTERFACE_MODE_RMII; + else + bp->phy_interface = PHY_INTERFACE_MODE_MII; + } else { + bp->phy_interface = err; + } + + if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII)); + else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) +#if defined(CONFIG_ARCH_AT91) + macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | + MACB_BIT(CLKEN))); +#else + macb_or_gem_writel(bp, USRIO, 0); +#endif + else +#if defined(CONFIG_ARCH_AT91) + macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN)); +#else + macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); +#endif + + err = rt_register_rtnetdev(dev); + if (err) { + dev_err(&pdev->dev, 
"Cannot register net device, aborting.\n"); + goto err_out_irq_free; + } + + err = rtmacb_mii_init(bp); + if (err) + goto err_out_unregister_netdev; + + platform_set_drvdata(pdev, dev); + + rtnetif_carrier_off(dev); + + rtdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n", + macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr, + dev->irq, dev->dev_addr); + + phydev = bp->phy_dev; + rtdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + return 0; + +err_out_unregister_netdev: + rt_unregister_rtnetdev(dev); +err_out_irq_free: + rtdm_irq_free(&bp->irq_handle); +err_out_disable_clocks: + if (!IS_ERR(bp->tx_clk)) + clk_disable_unprepare(bp->tx_clk); +err_out_disable_hclk: + clk_disable_unprepare(bp->hclk); +err_out_disable_pclk: + clk_disable_unprepare(bp->pclk); +err_out_free_dev: + rtdev_free(dev); +err_out: + return err; +} + +static int __exit macb_remove(struct platform_device *pdev) +{ + struct rtnet_device *dev; + struct macb *bp; + + dev = platform_get_drvdata(pdev); + + if (dev) { + bp = rtnetdev_priv(dev); + if (bp->phy_dev) + phy_disconnect(bp->phy_dev); + mdiobus_unregister(bp->mii_bus); + if (bp->phy_phony_net_device) + free_netdev(bp->phy_phony_net_device); + kfree(bp->mii_bus->irq); + rt_rtdev_disconnect(dev); + rtdm_irq_free(&bp->irq_handle); + mdiobus_free(bp->mii_bus); + rt_unregister_rtnetdev(dev); + if (!IS_ERR(bp->tx_clk)) + clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); + rtdev_free(dev); + } + + return 0; +} + +static struct platform_driver macb_driver = { + .remove = __exit_p(macb_remove), + .driver = { + .name = "macb", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(macb_dt_ids), + }, +}; + +static bool found; +static int __init macb_driver_init(void) +{ + found = platform_driver_probe(&macb_driver, macb_probe) == 0; + return 0; +} +module_init(macb_driver_init); + +static void __exit macb_driver_exit(void) +{ + if (found) + platform_driver_unregister(&macb_driver); +} +module_exit(macb_driver_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); +MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); +MODULE_ALIAS("platform:macb"); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile new file mode 100644 index 0000000..4edf7ad --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile @@ -0,0 +1,5 @@ +ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include + +obj-$(CONFIG_XENO_DRIVERS_NET_DRV_MPC52XX_FEC) += rt_mpc52xx_fec.o + +rt_mpc52xx_fec-y := mpc52xx_fec.o diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/README b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/README new file mode 100644 index 0000000..17bc72d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/README @@ -0,0 +1,67 @@ +This is the RTnet driver for the MPC 52xx FEC for the Linux kernel +2.4.25 (DENX linuxppc_2_4_devel tree). Unfortunately, the kernel +requires a small patch to permit kernel modules accessing BESTCOMM +functions (the FEC driver is not supported as module). The patch +linuxppc_2_4_devel-fec.patch is available in this directory. 
+ +Apply the FEC module patch mentioned above and then prepare and +build the Linux kernel and Xenomai as usual: + + $ export CROSS_COMPILE=ppc_6xx- + $ export QUILT_PATCHES=quilt_patches + $ export DESTDIR=/opt/eldk/ppc_6xx + + $ cd linuxppc_2_4_devel + $ export KERNELDIR=$PWD + $ patch -p1 < <path>/linuxppc_2_4_devel-fec.patch + $ make TQM5200_config + + $ cd ../xenomai + $ export XENODIR=$PWD + $ ./scripts/prepare-kernel.sh --linux=../linuxppc_2_4_devel --arch=ppc + + $ cd $KERNELDIR + $ make menuconfig + ... check loadable module support ... + ... exit and save default configuration ... + $ make dep + $ make uImage + $ cp -pv arch/ppc/boot/images/uImage /tftpboot + + $ $XENODIR + $ ./configure --host=ppc-linux --prefix=/usr/xenomai + $ make + $ make install + +This is the build process for the TQM5200-Board using the ELDK 3.1.1. + +The file rt_mpc52xx_fec.h contains a few configuration option. +Please customize them according to your needs (according to your +standard Linux kernel configuration): + + $ cat rt_mpc52xx_fec.h + ... + /* Define board specific options */ + #define CONFIG_RTNET_USE_MDIO + #define CONFIG_RTNET_FEC_GENERIC_PHY + #define CONFIG_RTNET_FEC_LXT971 + #undef CONFIG_RTNET_FEC_DP83847 + +Then configure and cross compile RTnet as shown: + + $ ./configure --host=ppc-linux --with-linux=$KERNELDIR \ + --with-rtext-config=$DESTDIR/usr/xenomai/bin/xeno-config \ + --disable-e1000 --disable-8139 --disable-8139too \ + --enable-mpc52xx-fec --enable-eepro100 \ + --prefix=/usr/xenomai --enable-proxy + $ make + $ make install + +Note that RTnet gets installed into $DESTDIR/usr/xenomai, including +the kernel modules. + +Also be aware that the MPC52xx has only one on-chip ethernet port. +Driver development using a ram-disk based system might be cumbersome. +It is convenient to use a supported PCI ethernet card, e.g. an +EEPRO100, and to mount the root file-system via NFS. + diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c new file mode 100644 index 0000000..5e8f16d --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c @@ -0,0 +1,1985 @@ +/* + * arch/ppc/5xxx_io/fec.c + * + * Driver for the MPC5200 Fast Ethernet Controller + * Support for MPC5100 FEC has been removed, contact the author if you need it + * + * Author: Dale Farnsworth <dfarnsworth@mvista.com> + * + * 2003 (c) MontaVista, Software, Inc. This file is licensed under the terms + * of the GNU General Public License version 2. This program is licensed + * "as is" without any warranty of any kind, whether express or implied. + * + * Ported to RTnet from "linuxppc_2_4_devel/arch/ppc/5xxx_io/fec.c". 
+ * Copyright (c) 2008 Wolfgang Grandegger <wg@denx.de> + */ + +/* #define PARANOID_CHECKS*/ +/* #define MUST_ALIGN_TRANSMIT_DATA*/ +#define MUST_UNALIGN_RECEIVE_DATA +/* #define EXIT_ISR_AT_MEMORY_SQUEEZE*/ +/* #define DISPLAY_WARNINGS*/ + +#ifdef ORIGINAL_CODE +static const char *version = "fec.c v0.2\n"; +#endif /* ORIGINAL_CODE */ + +#include <linux/module.h> + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/interrupt.h> +#include <linux/ptrace.h> +#include <linux/ioport.h> +#include <linux/in.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <asm/system.h> +#include <asm/bitops.h> +#include <linux/spinlock.h> +#include <asm/io.h> +#include <asm/dma.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/crc32.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/skbuff.h> +#include <asm/delay.h> +#include <rtnet_port.h> +#include "rt_mpc52xx_fec.h" +#ifdef CONFIG_UBOOT +#include <asm/ppcboot.h> +#endif + +#ifdef CONFIG_XENO_DRIVERS_NET_FASTROUTE +#error "Fast Routing on MPC5200 ethernet not supported" +#endif + +MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>"); +MODULE_DESCRIPTION("RTnet driver for MPC52xx FEC"); +MODULE_LICENSE("GPL"); + +static unsigned int rx_pool_size = 0; +MODULE_PARM(rx_pool_size, "i"); +MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size"); + +#define printk(fmt,args...) rtdm_printk (fmt ,##args) + +static struct rtnet_device *mpc5xxx_fec_dev; +static int mpc5xxx_fec_interrupt(rtdm_irq_t *irq_handle); +static int mpc5xxx_fec_receive_interrupt(rtdm_irq_t *irq_handle); +static int mpc5xxx_fec_transmit_interrupt(rtdm_irq_t *irq_handle); +static struct net_device_stats *mpc5xxx_fec_get_stats(struct rtnet_device *dev); +#ifdef ORIGINAL_CODE +static void mpc5xxx_fec_set_multicast_list(struct rtnet_device *dev); +#endif /* ORIGINAL_CODE */ +static void mpc5xxx_fec_reinit(struct rtnet_device* dev); +static int mpc5xxx_fec_setup(struct rtnet_device *dev, int reinit); +static int mpc5xxx_fec_cleanup(struct rtnet_device *dev, int reinit); + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +static void mpc5xxx_fec_mii(struct rtnet_device *dev); +#ifdef ORIGINAL_CODE +static int mpc5xxx_fec_ioctl(struct rtnet_device *, struct ifreq *rq, int cmd); +static int mpc5xxx_netdev_ethtool_ioctl(struct rtnet_device *dev, void *useraddr); +#endif /* ORIGINAL_CODE */ +static void mdio_timer_callback(unsigned long data); +static void mii_display_status(struct rtnet_device *dev); +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET +static void mpc5xxx_mdio_callback(uint regval, struct rtnet_device *dev, uint data); +static int mpc5xxx_mdio_read(struct rtnet_device *dev, int phy_id, int location); +#endif + +static void mpc5xxx_fec_update_stat(struct rtnet_device *); + +/* MII processing. We keep this as simple as possible. Requests are + * placed on the list (if there is room). When the request is finished + * by the MII, an optional function may be called. 
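+ * For example, a status-register read whose result is handed to a
+ * parser callback (the phy_cmd tables below queue exactly this):
+ *
+ *	mii_queue(dev, mk_mii_read(MII_REG_SR), mii_parse_sr, 0);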
+ */ +typedef struct mii_list { + uint mii_regval; + void (*mii_func)(uint val, struct rtnet_device *dev, uint data); + struct mii_list *mii_next; + uint mii_data; +} mii_list_t; + +#define NMII 20 +mii_list_t mii_cmds[NMII]; +mii_list_t *mii_free; +mii_list_t *mii_head; +mii_list_t *mii_tail; + +typedef struct mdio_read_data { + u16 regval; + struct task_struct *sleeping_task; +} mdio_read_data_t; + +static int mii_queue(struct rtnet_device *dev, int request, + void (*func)(uint, struct rtnet_device *, uint), uint data); + +/* Make MII read/write commands for the FEC. + * */ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \ + (VAL & 0xffff)) +#define mk_mii_end 0 + +/* Register definitions for the PHY. +*/ + +#define MII_REG_CR 0 /* Control Register */ +#define MII_REG_SR 1 /* Status Register */ +#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */ +#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */ +#define MII_REG_ANAR 4 /* A-N Advertisement Register */ +#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */ +#define MII_REG_ANER 6 /* A-N Expansion Register */ +#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */ +#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */ + +/* values for phy_status */ + +#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */ +#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */ +#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */ +#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */ +#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */ +#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */ +#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */ + +#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */ +#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */ +#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */ +#define PHY_STAT_SPMASK 0xf000 /* mask for speed */ +#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */ +#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */ +#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */ +#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */ + +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + +u8 mpc5xxx_fec_mac_addr[6]; +u8 null_mac[6]; + +#ifdef ORIGINAL_CODE +static void mpc5xxx_fec_tx_timeout(struct rtnet_device *dev) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + + priv->stats.tx_errors++; + + if (!priv->tx_full) + rtnetif_wake_queue(dev); +} +#endif /* ORIGINAL_CODE */ + +static void +mpc5xxx_fec_set_paddr(struct rtnet_device *dev, u8 *mac) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + struct mpc5xxx_fec *fec = priv->fec; + + out_be32(&fec->paddr1, (mac[0]<<24) | (mac[1]<<16) + | (mac[2]<<8) | (mac[3]<<0)); + out_be32(&fec->paddr2, (mac[4]<<24) | (mac[5]<<16) | 0x8808); +} + +#ifdef ORIGINAL_CODE +static int +mpc5xxx_fec_set_mac_address(struct rtnet_device *dev, void *addr) +{ + struct sockaddr *sock = (struct sockaddr *)addr; + + mpc5xxx_fec_set_paddr(dev, sock->sa_data); + return 0; +} +#endif /* ORIGINAL_CODE */ + +/* This function is called to start or restart the FEC during a link + * change. This happens on fifo errors or when switching between half + * and full duplex. 
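+ * and full duplex.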
+ */ +static void +mpc5xxx_fec_restart(struct rtnet_device *dev, int duplex) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + struct mpc5xxx_fec *fec = priv->fec; + u32 rcntrl; + u32 tcntrl; + int i; + +#if MPC5xxx_FEC_DEBUG > 1 + printk("mpc5xxx_fec_restart\n"); +#endif + out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & 0x700000); + out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & 0x700000); + out_be32(&fec->reset_cntrl, 0x1000000); + + /* Whack a reset. We should wait for this. */ + out_be32(&fec->ecntrl, MPC5xxx_FEC_ECNTRL_RESET); + for (i = 0; i < MPC5xxx_FEC_RESET_DELAY; ++i) { + if ((in_be32(&fec->ecntrl) & MPC5xxx_FEC_ECNTRL_RESET) == 0) + break; + udelay(1); + } + if (i == MPC5xxx_FEC_RESET_DELAY) + printk ("FEC Reset timeout!\n"); + + /* Set station address. */ + out_be32(&fec->paddr1, *(u32 *)&dev->dev_addr[0]); + out_be32(&fec->paddr2, + ((*(u16 *)&dev->dev_addr[4]) << 16) | 0x8808); + +#ifdef ORIGINAL_CODE + mpc5xxx_fec_set_multicast_list(dev); +#endif /* ORIGINAL_CODE */ + + rcntrl = MPC5xxx_FEC_RECV_BUFFER_SIZE << 16; /* max frame length */ + rcntrl |= MPC5xxx_FEC_RCNTRL_FCE; +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + rcntrl |= MPC5xxx_FEC_RCNTRL_MII_MODE; +#endif + if (duplex) + tcntrl = MPC5xxx_FEC_TCNTRL_FDEN; /* FD enable */ + else { + rcntrl |= MPC5xxx_FEC_RCNTRL_DRT; + tcntrl = 0; + } + out_be32(&fec->r_cntrl, rcntrl); + out_be32(&fec->x_cntrl, tcntrl); + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + /* Set MII speed. */ + out_be32(&fec->mii_speed, priv->phy_speed); +#endif + + priv->full_duplex = duplex; +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + priv->duplex_change = 0; +#endif +#if MPC5xxx_FEC_DEBUG > 4 + printk("%s: duplex set to %d\n", dev->name, priv->full_duplex); +#endif + + /* Clear any outstanding interrupt. */ + out_be32(&fec->ievent, 0xffffffff); /* clear intr events */ + + /* Enable interrupts we wish to service. + */ +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + out_be32(&fec->imask, 0xf0fe0000); /* enable all intr but tfint */ +#else + out_be32(&fec->imask, 0xf07e0000); /* enable all intr but tfint */ +#endif + + /* And last, enable the transmit and receive processing. + */ + out_be32(&fec->ecntrl, MPC5xxx_FEC_ECNTRL_ETHER_EN); + out_be32(&fec->r_des_active, 0x01000000); + + /* The tx ring is no longer full. 
*/ + if (priv->tx_full) + { + priv->tx_full = 0; + rtnetif_wake_queue(dev); + } +} + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +static void +mpc5xxx_fec_mii(struct rtnet_device *dev) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + struct mpc5xxx_fec *fec = priv->fec; + mii_list_t *mip; + uint mii_reg; + + mii_reg = in_be32(&fec->mii_data); + + if ((mip = mii_head) == NULL) { + printk("MII and no head!\n"); + return; + } +#if MPC5xxx_FEC_DEBUG > 4 + printk("mpc5xxx_fec_mii %08x %08x %08x\n", + mii_reg, (u32)mip->mii_func, mip->mii_data); +#endif + + if (mip->mii_func != NULL) + (*(mip->mii_func))(mii_reg, dev, mip->mii_data); + + mii_head = mip->mii_next; + mip->mii_next = mii_free; + mii_free = mip; + + if ((mip = mii_head) != NULL) + out_be32(&fec->mii_data, mip->mii_regval); +} + +static int +mii_queue(struct rtnet_device *dev, int regval, void (*func)(uint, struct rtnet_device *, uint), uint data) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + struct mpc5xxx_fec *fec = priv->fec; + rtdm_lockctx_t context; + mii_list_t *mip; + int retval; + +#if MPC5xxx_FEC_DEBUG > 4 + printk("mii_queue: %08x %08x %08x\n", regval, (u32)func, data); +#endif + + /* Add PHY address to register command. + */ + regval |= priv->phy_addr << 23; + + retval = 0; + + rtdm_lock_get_irqsave(&priv->lock, context); + + if ((mip = mii_free) != NULL) { + mii_free = mip->mii_next; + mip->mii_regval = regval; + mip->mii_func = func; + mip->mii_next = NULL; + mip->mii_data = data; + if (mii_head) { + mii_tail->mii_next = mip; + mii_tail = mip; + } else { + mii_head = mii_tail = mip; + out_be32(&fec->mii_data, regval); + } + } else + retval = 1; + + rtdm_lock_put_irqrestore(&priv->lock, context); + + return retval; +} + +static void mii_do_cmd(struct rtnet_device *dev, const phy_cmd_t *c) +{ + int k; + + if (!c) + return; + + for (k = 0; (c+k)->mii_data != mk_mii_end; k++) + mii_queue(dev, (c+k)->mii_data, (c+k)->funct, 0); +} + +static void mii_parse_sr(uint mii_reg, struct rtnet_device *dev, uint data) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + uint s = priv->phy_status; + + s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC); + + if (mii_reg & 0x0004) + s |= PHY_STAT_LINK; + if (mii_reg & 0x0010) + s |= PHY_STAT_FAULT; + if (mii_reg & 0x0020) + s |= PHY_STAT_ANC; + + priv->phy_status = s; + priv->link = (s & PHY_STAT_LINK) ? 1 : 0; +} + +static void mii_parse_cr(uint mii_reg, struct rtnet_device *dev, uint data) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + uint s = priv->phy_status; + + s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP); + + if (mii_reg & 0x1000) + s |= PHY_CONF_ANE; + if (mii_reg & 0x4000) + s |= PHY_CONF_LOOP; + + priv->phy_status = s; +} + +static void mii_parse_anar(uint mii_reg, struct rtnet_device *dev, uint data) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + uint s = priv->phy_status; + + s &= ~(PHY_CONF_SPMASK); + + if (mii_reg & 0x0020) + s |= PHY_CONF_10HDX; + if (mii_reg & 0x0040) + s |= PHY_CONF_10FDX; + if (mii_reg & 0x0080) + s |= PHY_CONF_100HDX; + if (mii_reg & 0x0100) + s |= PHY_CONF_100FDX; + + priv->phy_status = s; +} + +/* ------------------------------------------------------------------------- */ +/* Generic PHY support. Should work for all PHYs, but does not support link + * change interrupts. 
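+ * Link changes are instead picked up by the periodic
+ * mdio_timer_callback below, which runs the ack_int command set
+ * (a BMSR read parsed by mii_parse_sr) once per second.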
+ */ +#ifdef CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY + +static phy_info_t phy_info_generic = { + 0x00000000, /* 0-->match any PHY */ + "GENERIC", + + (const phy_cmd_t []) { /* config */ + /* advertise only half-duplex capabilities */ + { mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_HALF), + mii_parse_anar }, + + /* enable auto-negotiation */ + { mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup */ + /* restart auto-negotiation */ + { mk_mii_write(MII_BMCR, (BMCR_ANENABLE | BMCR_ANRESTART)), + NULL }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + /* We don't actually use the ack_int table with a generic + * PHY, but putting a reference to mii_parse_sr here keeps + * us from getting a compiler warning about unused static + * functions in the case where we only compile in generic + * PHY support. + */ + { mk_mii_read(MII_BMSR), mii_parse_sr }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown */ + { mk_mii_end, } + }, +}; +#endif /* CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY */ + +/* ------------------------------------------------------------------------- */ +/* The Level one LXT971 is used on some of my custom boards */ + +#ifdef CONFIG_XENO_DRIVERS_NET_FEC_LXT971 + +/* register definitions for the 971 */ + +#define MII_LXT971_PCR 16 /* Port Control Register */ +#define MII_LXT971_SR2 17 /* Status Register 2 */ +#define MII_LXT971_IER 18 /* Interrupt Enable Register */ +#define MII_LXT971_ISR 19 /* Interrupt Status Register */ +#define MII_LXT971_LCR 20 /* LED Control Register */ +#define MII_LXT971_TCR 30 /* Transmit Control Register */ + +static void mii_parse_lxt971_sr2(uint mii_reg, struct rtnet_device *dev, uint data) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + uint s = priv->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + if (mii_reg & 0x4000) { + if (mii_reg & 0x0200) + s |= PHY_STAT_100FDX; + else + s |= PHY_STAT_100HDX; + } + else { + if (mii_reg & 0x0200) + s |= PHY_STAT_10FDX; + else + s |= PHY_STAT_10HDX; + } + if (mii_reg & 0x0008) + s |= PHY_STAT_FAULT; + + /* Record the new full_duplex value only if the link is up + * (so we don't bother restarting the driver on duplex + * changes when the link is down). + */ + if (priv->link) { + int prev_duplex = priv->full_duplex; + priv->full_duplex = ((mii_reg & 0x0200) != 0); + if (priv->full_duplex != prev_duplex) { + /* trigger a restart with changed duplex */ + priv->duplex_change = 1; +#if MPC5xxx_FEC_DEBUG > 1 + printk("%s: duplex change: %s\n", + dev->name, priv->full_duplex ? "full" : "half"); +#endif + } + } + priv->phy_status = s; +} + +static phy_info_t phy_info_lxt971 = { + 0x0001378e, + "LXT971", + + (const phy_cmd_t []) { /* config */ +#ifdef MPC5100_FIX10HDX + { mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10 Mbps, HD */ +#else +/* { mk_mii_write(MII_REG_ANAR, 0x0A1), NULL }, *//* 10/100, HD */ + { mk_mii_write(MII_REG_ANAR, 0x01E1), NULL }, /* 10/100, FD */ +#endif + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup - enable interrupts */ + { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL }, + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ + + /* Somehow does the 971 tell me that the link is down + * the first read after power-up. 
+ * read here to get a valid value in ack_int */ + + { mk_mii_read(MII_REG_SR), mii_parse_sr }, +#if defined(CONFIG_UC101) + { mk_mii_write(MII_LXT971_LCR, 0x4122), NULL }, /* LED settings */ +#endif + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + /* find out the current status */ + + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, + + /* we only need to read ISR to acknowledge */ + + { mk_mii_read(MII_LXT971_ISR), NULL }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_write(MII_LXT971_IER, 0x0000), NULL }, + { mk_mii_end, } + }, +}; + +#endif /* CONFIG_XENO_DRIVERS_NET_FEC_LXT971 */ + +/* ----------------------------------------------------------------- */ +/* The National Semiconductor DP83847 is used on a INKA 4X0 board */ +/* ----------------------------------------------------------------- */ + +#ifdef CONFIG_XENO_DRIVERS_NET_FEC_DP83847 + +/* Register definitions */ +#define MII_DP83847_PHYSTS 0x10 /* PHY Status Register */ + +static void mii_parse_dp83847_physts(uint mii_reg, struct rtnet_device *dev, uint data) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + uint s = priv->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + if (mii_reg & 0x2) { + if (mii_reg & 0x4) + s |= PHY_STAT_10FDX; + else + s |= PHY_STAT_10HDX; + } + else { + if (mii_reg & 0x4) + s |= PHY_STAT_100FDX; + else + s |= PHY_STAT_100HDX; + } + if (mii_reg & 0x40) + s |= PHY_STAT_FAULT; + + priv->full_duplex = ((mii_reg & 0x4) != 0); + + priv->phy_status = s; +} + +static phy_info_t phy_info_dp83847 = { + 0x020005c3, + "DP83847", + + (const phy_cmd_t []) { /* config */ + { mk_mii_write(MII_REG_ANAR, 0x01E1), NULL }, /* Auto-Negociation Register Control set to */ + /* auto-negociate 10/100MBps, Half/Full duplex */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup */ + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* Enable and Restart Auto-Negotiation */ + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_DP83847_PHYSTS), mii_parse_dp83847_physts }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_DP83847_PHYSTS), mii_parse_dp83847_physts }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_end, } + } +}; + +#endif /* CONFIG_XENO_DRIVERS_NET_FEC_DP83847 */ + +static phy_info_t *phy_info[] = { + +#ifdef CONFIG_XENO_DRIVERS_NET_FEC_LXT971 + &phy_info_lxt971, +#endif /* CONFIG_XENO_DRIVERS_NET_FEC_LXT971 */ + +#ifdef CONFIG_XENO_DRIVERS_NET_FEC_DP83847 + &phy_info_dp83847, +#endif /* CONFIG_XENO_DRIVERS_NET_FEC_DP83847 */ + +#ifdef CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY + /* Generic PHY support. This must be the last PHY in the table. + * It will be used to support any PHY that doesn't match a previous + * entry in the table. 
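+ * (Its id field is 0, which mii_discover_phy3 treats as a match-any
+ * wildcard.)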
+ */ + &phy_info_generic, +#endif /* CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY */ + + NULL +}; + +static void mii_display_config(struct rtnet_device *dev) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + uint s = priv->phy_status; + + printk("%s: config: auto-negotiation ", dev->name); + + if (s & PHY_CONF_ANE) + printk("on"); + else + printk("off"); + + if (s & PHY_CONF_100FDX) + printk(", 100FDX"); + if (s & PHY_CONF_100HDX) + printk(", 100HDX"); + if (s & PHY_CONF_10FDX) + printk(", 10FDX"); + if (s & PHY_CONF_10HDX) + printk(", 10HDX"); + if (!(s & PHY_CONF_SPMASK)) + printk(", No speed/duplex selected?"); + + if (s & PHY_CONF_LOOP) + printk(", loopback enabled"); + + printk(".\n"); + + priv->sequence_done = 1; +} + +static void mii_queue_config(uint mii_reg, struct rtnet_device *dev, uint data) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + + priv->phy_task.routine = (void *)mii_display_config; + priv->phy_task.data = dev; + schedule_task(&priv->phy_task); +} + + +phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config }, + { mk_mii_end, } }; + + +/* Read remainder of PHY ID. +*/ +static void +mii_discover_phy3(uint mii_reg, struct rtnet_device *dev, uint data) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + int i; + + priv->phy_id |= (mii_reg & 0xffff); + + for (i = 0; phy_info[i]; i++) { + if (phy_info[i]->id == (priv->phy_id >> 4) || !phy_info[i]->id) + break; + if (phy_info[i]->id == 0) /* check generic entry */ + break; + } + + if (!phy_info[i]) + panic("%s: PHY id 0x%08x is not supported!\n", + dev->name, priv->phy_id); + + priv->phy = phy_info[i]; + priv->phy_id_done = 1; + + printk("%s: Phy @ 0x%x, type %s (0x%08x)\n", + dev->name, priv->phy_addr, priv->phy->name, priv->phy_id); +#if defined(CONFIG_UC101) + mii_do_cmd(dev, priv->phy->startup); +#endif +} + +/* Scan all of the MII PHY addresses looking for someone to respond + * with a valid ID. This usually happens quickly. + */ +static void +mii_discover_phy(uint mii_reg, struct rtnet_device *dev, uint data) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + uint phytype; + +#if MPC5xxx_FEC_DEBUG > 4 + printk("mii_discover_phy\n"); +#endif + + if ((phytype = (mii_reg & 0xffff)) != 0xffff) { + /* Got first part of ID, now get remainder. + */ + priv->phy_id = phytype << 16; + mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3, 0); + } else { + priv->phy_addr++; + if (priv->phy_addr < 32) + mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), + mii_discover_phy, 0); + else + printk("fec: No PHY device found.\n"); + } +} + +static void +mpc5xxx_fec_link_up(struct rtnet_device *dev) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)(dev->priv); + + printk("mpc5xxx_fec_link_up: link_up=%d\n", priv->link_up); +#ifdef ORIGINAL_CODE + priv->link_up = 0; +#endif /* ORIGINAL_CODE */ + mii_display_status(dev); + if (priv->duplex_change) { +#if MPC5xxx_FEC_DEBUG > 1 + printk("%s: restarting with %s duplex...\n", + dev->name, priv->full_duplex ? "full" : "half"); +#endif + mpc5xxx_fec_restart(dev, priv->full_duplex); + } +} + +/* + * Execute the ack_int command set and schedules next timer call back. 
+ */
+static void mdio_timer_callback(unsigned long data)
+{
+	struct rtnet_device *dev = (struct rtnet_device *)data;
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)(dev->priv);
+	mii_do_cmd(dev, priv->phy->ack_int);
+
+	if (priv->link_up) {
+#ifdef ORIGINAL_CODE
+		priv->link_up_task.routine = (void *)mpc5xxx_fec_link_up;
+		priv->link_up_task.data = dev;
+		schedule_task(&priv->link_up_task);
+#else
+		mpc5xxx_fec_link_up(dev);
+		return;
+#endif /* ORIGINAL_CODE */
+	}
+	/* Reschedule in 1 second */
+	priv->phy_timer_list.expires = jiffies + (1000 * HZ / 1000);
+	add_timer(&priv->phy_timer_list);
+}
+
+/*
+ * Displays the current status of the PHY.
+ */
+static void mii_display_status(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = dev->priv;
+	uint s = priv->phy_status;
+
+	printk("%s: status: ", dev->name);
+
+	if (!priv->link) {
+		printk("link down");
+	} else {
+		printk("link up");
+
+		switch(s & PHY_STAT_SPMASK) {
+		case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
+		case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
+		case PHY_STAT_10FDX: printk(", 10 Mbps Full Duplex"); break;
+		case PHY_STAT_10HDX: printk(", 10 Mbps Half Duplex"); break;
+		default:
+			printk(", Unknown speed/duplex");
+		}
+
+		if (s & PHY_STAT_ANC)
+			printk(", auto-negotiation complete");
+	}
+
+	if (s & PHY_STAT_FAULT)
+		printk(", remote fault");
+
+	printk(".\n");
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+
+#define RFIFO_DATA	0xf0003184
+#define TFIFO_DATA	0xf00031a4
+
+/*
+ * Initialize FEC receive task.
+ * Returns task number of FEC receive task.
+ * Returns -1 on failure
+ */
+int
+mpc5xxx_fec_rx_task_setup(int num_bufs, int maxbufsize)
+{
+	static TaskSetupParamSet_t params;
+	int tasknum;
+
+	params.NumBD = num_bufs;
+	params.Size.MaxBuf = maxbufsize;
+	params.StartAddrSrc = RFIFO_DATA;
+	params.IncrSrc = 0;
+	params.SzSrc = 4;
+	params.IncrDst = 4;
+	params.SzDst = 4;
+
+	tasknum = TaskSetup(TASK_FEC_RX, &params);
+
+	/* clear pending interrupt bits */
+	TaskIntClear(tasknum);
+
+	return tasknum;
+}
+
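+/* For illustration only (not part of this patch): typical use of the
+ * helper above, as done in mpc5xxx_fec_setup() further down:
+ *
+ *	priv->r_tasknum = mpc5xxx_fec_rx_task_setup(MPC5xxx_FEC_RBD_NUM,
+ *					MPC5xxx_FEC_RECV_BUFFER_SIZE_BC);
+ *	TaskBDReset(priv->r_tasknum);
+ *	// ... assign one rtskb per buffer descriptor via TaskBDAssign() ...
+ *	TaskStart(priv->r_tasknum, TASK_AUTOSTART_ENABLE,
+ *		  priv->r_tasknum, TASK_INTERRUPT_ENABLE);
+ *
+ * The source of the BestComm receive task is the FEC RX FIFO port at
+ * RFIFO_DATA (IncrSrc = 0 presumably meaning a fixed register address,
+ * read 4 bytes at a time), while the destination pointer advances
+ * through the receive buffer (IncrDst = 4).
+ */
+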
+/*
+ * Initialize FEC transmit task.
+ * Returns task number of FEC transmit task.
+ * Returns -1 on failure
+ */
+int
+mpc5xxx_fec_tx_task_setup(int num_bufs)
+{
+	static TaskSetupParamSet_t params;
+	int tasknum;
+
+	params.NumBD = num_bufs;
+	params.IncrSrc = 4;
+	params.SzSrc = 4;
+	params.StartAddrDst = TFIFO_DATA;
+	params.IncrDst = 0;
+	params.SzDst = 4;
+
+	tasknum = TaskSetup(TASK_FEC_TX, &params);
+
+	/* clear pending interrupt bits */
+	TaskIntClear(tasknum);
+
+	return tasknum;
+}
+
+
+
+#ifdef PARANOID_CHECKS
+static volatile int tx_fifo_cnt, tx_fifo_ipos, tx_fifo_opos;
+static volatile int rx_fifo_opos;
+#endif
+
+static struct rtskb *tx_fifo_skb[MPC5xxx_FEC_TBD_NUM];
+static struct rtskb *rx_fifo_skb[MPC5xxx_FEC_RBD_NUM];
+static BDIdx mpc5xxx_bdi_tx = 0;
+
+
+static int
+mpc5xxx_fec_setup(struct rtnet_device *dev, int reinit)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_xlb *xlb = (struct mpc5xxx_xlb *)MPC5xxx_XLB;
+	struct rtskb *skb;
+	int i;
+	struct mpc5xxx_rbuf *rbuf;
+	struct mpc5xxx_fec *fec = priv->fec;
+	u32 u32_value;
+	u16 u16_value;
+
+#if MPC5xxx_FEC_DEBUG > 1
+	printk("mpc5xxx_fec_setup\n");
+#endif
+
+	mpc5xxx_fec_set_paddr(dev, dev->dev_addr);
+
+	/*
+	 * Initialize receive queue
+	 */
+	priv->r_tasknum = mpc5xxx_fec_rx_task_setup(MPC5xxx_FEC_RBD_NUM,
+					MPC5xxx_FEC_RECV_BUFFER_SIZE_BC);
+	TaskBDReset(priv->r_tasknum);
+	for(i=0;i<MPC5xxx_FEC_RBD_NUM;i++) {
+		BDIdx bdi_a;
+		if(!reinit) {
+			skb = dev_alloc_rtskb(sizeof *rbuf, dev);
+			if (skb == 0)
+				goto eagain;
+#ifdef MUST_UNALIGN_RECEIVE_DATA
+			rtskb_reserve(skb,2);
+#endif
+			rbuf = (struct mpc5xxx_rbuf *)rtskb_put(skb, sizeof *rbuf);
+			rx_fifo_skb[i]=skb;
+		}
+		else {
+			skb=rx_fifo_skb[i];
+			rbuf = (struct mpc5xxx_rbuf *)skb->data;
+		}
+		bdi_a = TaskBDAssign(priv->r_tasknum,
+				(void*)virt_to_phys((void *)&rbuf->data),
+				0, sizeof *rbuf, MPC5xxx_FEC_RBD_INIT);
+		if(bdi_a<0)
+			panic("mpc5xxx_fec_setup: error while TaskBDAssign, err=%i\n",(int)bdi_a);
+	}
+#ifdef PARANOID_CHECKS
+	rx_fifo_opos = 0;
+#endif
+
+	/*
+	 * Initialize transmit queue
+	 */
+	if(!reinit) {
+		priv->t_tasknum = mpc5xxx_fec_tx_task_setup(MPC5xxx_FEC_TBD_NUM);
+		TaskBDReset(priv->t_tasknum);
+		mpc5xxx_bdi_tx = 0;
+		for(i=0;i<MPC5xxx_FEC_TBD_NUM;i++) tx_fifo_skb[i]=0;
+#ifdef PARANOID_CHECKS
+		tx_fifo_cnt = tx_fifo_ipos = tx_fifo_opos = 0;
+#endif
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	if (reinit) {
+		if (!priv->sequence_done) {
+			if (!priv->phy) {
+				printk("mpc5xxx_fec_setup: PHY not configured\n");
+				return -ENODEV;	/* No PHY we understand */
+			}
+
+			mii_do_cmd(dev, priv->phy->config);
+			mii_do_cmd(dev, phy_cmd_config); /* display configuration */
+			while(!priv->sequence_done)
+				schedule();
+
+			mii_do_cmd(dev, priv->phy->startup);
+		}
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	dev->irq = MPC5xxx_FEC_IRQ;
+	priv->r_irq = MPC5xxx_SDMA_IRQ_BASE + priv->r_tasknum;
+	priv->t_irq = MPC5xxx_SDMA_IRQ_BASE + priv->t_tasknum;
+
+	if ((i = rtdm_irq_request(&priv->irq_handle, dev->irq,
+				  mpc5xxx_fec_interrupt, 0,
+				  "rteth_err", dev))) {
+		printk(KERN_ERR "FEC interrupt allocation failed\n");
+		return i;
+	}
+
+	if ((i = rtdm_irq_request(&priv->r_irq_handle, priv->r_irq,
+				  mpc5xxx_fec_receive_interrupt, 0,
+				  "rteth_recv", dev))) {
+		printk(KERN_ERR "FEC receive task interrupt allocation failed\n");
+		return i;
+	}
+
+	if ((i = rtdm_irq_request(&priv->t_irq_handle, priv->t_irq,
+				  mpc5xxx_fec_transmit_interrupt, 0,
+				  "rteth_xmit", dev))) {
+		printk(KERN_ERR "FEC transmit task interrupt allocation failed\n");
+		return i;
+	}
+
+	rt_stack_connect(dev, &STACK_manager);
+
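+	/* Configure the Ethernet pins via GPIO port_config: MII mode for an
+	 * MDIO-managed PHY, or 7-wire mode otherwise (see the #ifdef below). */
+	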
u32_value = in_be32(&priv->gpio->port_config); +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + u32_value |= 0x00050000; /* 100MBit with MD */ +#else + u32_value |= 0x00020000; /* 10MBit with 7-wire */ +#endif + out_be32(&priv->gpio->port_config, u32_value); + + } + + out_be32(&fec->op_pause, 0x00010020); /* change to 0xffff0020 ??? */ + out_be32(&fec->rfifo_cntrl, 0x0f240000); + out_be32(&fec->rfifo_alarm, 0x0000030c); + out_be32(&fec->tfifo_cntrl, 0x0f240000); + out_be32(&fec->tfifo_alarm, 0x00000100); + out_be32(&fec->x_wmrk, 0x3); /* xmit fifo watermark = 256 */ + out_be32(&fec->xmit_fsm, 0x03000000); /* enable crc generation */ + out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */ + out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */ + +#ifdef CONFIG_MPC5200 + /* Disable COMM Bus Prefetch */ + u16_value = in_be16(&priv->sdma->PtdCntrl); + u16_value |= 1; + out_be16(&priv->sdma->PtdCntrl, u16_value); + + /* Disable (or enable?) BestComm XLB address snooping */ + out_be32(&xlb->config, in_be32(&xlb->config) | MPC5200B_XLB_CONF_BSDIS); +#endif + + if(!reinit) { +#if !defined(CONFIG_XENO_DRIVERS_NET_USE_MDIO) + mpc5xxx_fec_restart (dev, 0); /* always use half duplex mode only */ +#else +#ifdef CONFIG_UBOOT + extern unsigned char __res[]; + bd_t *bd = (bd_t *)__res; +#define MPC5xxx_IPBFREQ bd->bi_ipbfreq +#else +#define MPC5xxx_IPBFREQ CONFIG_PPC_5xxx_IPBFREQ +#endif + + for (i=0; i<NMII-1; i++) + mii_cmds[i].mii_next = &mii_cmds[i+1]; + mii_free = mii_cmds; + + priv->phy_speed = (((MPC5xxx_IPBFREQ >> 20) / 5) << 1); + + /*mpc5xxx_fec_restart (dev, 0);*/ /* half duplex, negotiate speed */ + mpc5xxx_fec_restart (dev, 1); /* full duplex, negotiate speed */ + + /* Queue up command to detect the PHY and initialize the + * remainder of the interface. + */ + priv->phy_id_done = 0; + priv->phy_addr = 0; + mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy, 0); + + priv->old_status = 0; + + /* + * Read MIB counters in order to reset them, + * then zero all the stats fields in memory + */ + mpc5xxx_fec_update_stat(dev); + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + if (reinit) { + if (!priv->sequence_done) { + if (!priv->phy) { + printk("mpc5xxx_fec_open: PHY not configured\n"); + return -ENODEV; /* No PHY we understand */ + } + + mii_do_cmd(dev, priv->phy->config); + mii_do_cmd(dev, phy_cmd_config); /* display configuration */ + while(!priv->sequence_done) + schedule(); + + mii_do_cmd(dev, priv->phy->startup); + + /* + * Currently, MII link interrupts are not supported, + * so start the 100 msec timer to monitor the link up event. 
+ */ + init_timer(&priv->phy_timer_list); + + priv->phy_timer_list.expires = jiffies + (100 * HZ / 1000); + priv->phy_timer_list.data = (unsigned long)dev; + priv->phy_timer_list.function = mdio_timer_callback; + add_timer(&priv->phy_timer_list); + + printk("%s: Waiting for the link to be up...\n", dev->name); + while (priv->link == 0) { + schedule(); + } + mii_display_status(dev); + if (priv->full_duplex == 0) { /* FD is not negotiated, restart the fec in HD */ + mpc5xxx_fec_restart(dev, 0); + } + } + } +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ +#endif + } + else { + mpc5xxx_fec_restart (dev, 0); + } + + rtnetif_start_queue(dev); + + TaskStart(priv->r_tasknum, TASK_AUTOSTART_ENABLE, + priv->r_tasknum, TASK_INTERRUPT_ENABLE); + + if(reinit) { + TaskStart(priv->t_tasknum, TASK_AUTOSTART_ENABLE, + priv->t_tasknum, TASK_INTERRUPT_ENABLE); + } + + return 0; + +eagain: + printk("mpc5xxx_fec_setup: failed\n"); + for (i=0; i<MPC5xxx_FEC_RBD_NUM; i++) { + skb = rx_fifo_skb[i]; + if (skb == 0) + break; + dev_kfree_rtskb(skb); + } + TaskBDReset(priv->r_tasknum); + + return -EAGAIN; +} + +static int +mpc5xxx_fec_open(struct rtnet_device *dev) +{ + return mpc5xxx_fec_setup(dev,0); +} + +/* This will only be invoked if your driver is _not_ in XOFF state. + * What this means is that you need not check it, and that this + * invariant will hold if you make sure that the netif_*_queue() + * calls are done at the proper times. + */ +static int +mpc5xxx_fec_hard_start_xmit(struct rtskb *skb, struct rtnet_device *dev) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + rtdm_lockctx_t context; + int pad; + short length; + BDIdx bdi_a; + +#if MPC5xxx_FEC_DEBUG > 4 + printk("mpc5xxx_fec_hard_start_xmit:\n"); + printk("dev %08x, priv %08x, skb %08x\n", + (u32)dev, (u32)priv, (u32)skb); +#endif +#if MPC5xxx_FEC_DEBUG > 0 + if (fec_start_status(&priv->t_queue) & MPC5xxx_FEC_TBD_TFD) + panic("MPC5xxx transmit queue overrun\n"); +#endif + + length = skb->len; +#ifdef MUST_ALIGN_TRANSMIT_DATA + pad = (int)skb->data & 3; + if (pad) { + void *old_data = skb->data; + rtskb_push(skb, pad); + memcpy(skb->data, old_data, length); + rtskb_trim(skb, length); + } +#endif + /* Zero out up to the minimum length ethernet packet size, + * so we don't inadvertently expose sensitive data + */ + pad = ETH_ZLEN - skb->len; + if (pad > 0) { + skb = rtskb_padto(skb, ETH_ZLEN); + if (skb == 0) { + printk("rtskb_padto failed\n"); + return 0; + } + length += pad; + } + + flush_dcache_range((u32)skb->data, (u32)skb->data + length); + + rtdm_lock_get_irqsave(&priv->lock, context); + + bdi_a = TaskBDAssign(priv->t_tasknum,(void*)virt_to_phys((void *)skb->data), + NULL,length,MPC5xxx_FEC_TBD_INIT); + +#ifdef PARANOID_CHECKS + /* check for other errors during assignment*/ + if((bdi_a<0)||(bdi_a>=MPC5xxx_FEC_TBD_NUM)) + panic("mpc5xxx_fec_hard_start_xmit: error while TaskBDAssign, err=%i\n",(int)bdi_a); + + /* sanity check: bdi must always equal tx_fifo_ipos*/ + if(bdi_a!=tx_fifo_ipos) + panic("bdi_a!=tx_fifo_ipos: %i, %i\n",(int)bdi_a,tx_fifo_ipos); + + tx_fifo_cnt++; + tx_fifo_ipos++; + if(tx_fifo_ipos==MPC5xxx_FEC_TBD_NUM) tx_fifo_ipos=0; + + /* check number of BDs in use*/ + if(TaskBDInUse(priv->t_tasknum)!=tx_fifo_cnt) + panic("TaskBDInUse != tx_fifo_cnt: %i %i\n",TaskBDInUse(priv->t_tasknum),tx_fifo_cnt); +#endif + + tx_fifo_skb[bdi_a]=skb; + +#ifdef ORIGINAL_CODE + dev->trans_start = jiffies; +#endif /* ORIGINAL_CODE */ + + /* Get and patch time stamp just before the transmission */ + if (skb->xmit_stamp) + 
*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	TaskStart(priv->t_tasknum, TASK_AUTOSTART_ENABLE, priv->t_tasknum, TASK_INTERRUPT_ENABLE);
+
+	if(TaskBDInUse(priv->t_tasknum)==MPC5xxx_FEC_TBD_NUM) {
+		priv->tx_full = 1;
+		rtnetif_stop_queue(dev);
+	}
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	return 0;
+}
+
+/* This handles SDMA transmit task interrupts
+ */
+static int
+mpc5xxx_fec_transmit_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	BDIdx bdi_r;
+
+	rtdm_lock_get(&priv->lock);
+
+	while(TaskBDInUse(priv->t_tasknum)) {
+
+		/* release BD */
+		bdi_r = TaskBDRelease(priv->t_tasknum);
+
+		/* we are done if we can't release any more BDs */
+		if(bdi_r==TASK_ERR_BD_BUSY) break;
+/*		if(bdi_r<0) break;*/
+
+#ifdef PARANOID_CHECKS
+		/* check for other errors during release */
+		if((bdi_r<0)||(bdi_r>=MPC5xxx_FEC_TBD_NUM))
+			panic("mpc5xxx_fec_transmit_interrupt: error while TaskBDRelease, err=%i\n",(int)bdi_r);
+
+		tx_fifo_cnt--;
+		tx_fifo_opos++;
+		if(tx_fifo_opos==MPC5xxx_FEC_TBD_NUM) tx_fifo_opos=0;
+
+		/* sanity check: bdi_r must always equal tx_fifo_opos */
+		if(bdi_r!=tx_fifo_opos) {
+			panic("bdi_r!=tx_fifo_opos: %i, %i\n",(int)bdi_r,tx_fifo_opos);
+		}
+
+		/* check number of BDs in use */
+		if(TaskBDInUse(priv->t_tasknum)!=tx_fifo_cnt)
+			panic("TaskBDInUse != tx_fifo_cnt: %i %i\n",TaskBDInUse(priv->t_tasknum),tx_fifo_cnt);
+#endif
+
+		if((tx_fifo_skb[mpc5xxx_bdi_tx])==0)
+			panic("skb confusion in tx\n");
+
+		dev_kfree_rtskb(tx_fifo_skb[mpc5xxx_bdi_tx]);
+		tx_fifo_skb[mpc5xxx_bdi_tx]=0;
+
+		mpc5xxx_bdi_tx = bdi_r;
+
+		if(TaskBDInUse(priv->t_tasknum)<MPC5xxx_FEC_TBD_NUM/2)
+			priv->tx_full = 0;
+
+	}
+
+	if (rtnetif_queue_stopped(dev) && !priv->tx_full)
+		rtnetif_wake_queue(dev);
+
+	rtdm_lock_put(&priv->lock);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static BDIdx mpc5xxx_bdi_rx = 0;
+
+static int
+mpc5xxx_fec_receive_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct rtskb *skb;
+	struct rtskb *nskb;
+	struct mpc5xxx_rbuf *rbuf;
+	struct mpc5xxx_rbuf *nrbuf;
+	u32 status;
+	int length;
+	BDIdx bdi_a, bdi_r;
+	int discard = 0;
+	int dropped = 0;
+	int packets = 0;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	while(1) {
+
+		/* release BD */
+		bdi_r = TaskBDRelease(priv->r_tasknum);
+
+		/* we are done if we can't release any more BDs */
+		if(bdi_r==TASK_ERR_BD_BUSY) break;
+
+#ifdef PARANOID_CHECKS
+		/* check for other errors during release */
+		if((bdi_r<0)||(bdi_r>=MPC5xxx_FEC_RBD_NUM))
+			panic("mpc5xxx_fec_receive_interrupt: error while TaskBDRelease, err=%i\n",(int)bdi_r);
+
+		rx_fifo_opos++;
+		if(rx_fifo_opos==MPC5xxx_FEC_RBD_NUM) rx_fifo_opos=0;
+
+		if(bdi_r != rx_fifo_opos)
+			panic("bdi_r != rx_fifo_opos: %i, %i\n",bdi_r, rx_fifo_opos);
+#endif
+
+		/* get BD status in order to determine length */
+		status = TaskGetBD(priv->r_tasknum,mpc5xxx_bdi_rx)->Status;
+
+		/* determine packet length and pointer to socket buffer / actual data */
+		skb = rx_fifo_skb[mpc5xxx_bdi_rx];
+		length = (status & 0xffff) - 4;
+		rbuf = (struct mpc5xxx_rbuf *)skb->data;
+
+#ifndef EXIT_ISR_AT_MEMORY_SQUEEZE
+		/* in case of a memory squeeze, we just drop all packets,
+		 * because subsequent allocations will also fail. */
+		if(discard!=3) {
+#endif
+
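+		/* Note on the BD status word just read: per the
+		 * MPC5xxx_FEC_FRAME_* definitions in rt_mpc52xx_fec.h, the low
+		 * 16 bits hold the received frame length including the 4-byte
+		 * CRC (hence "length = (status & 0xffff) - 4" above), and the
+		 * error mask 0x00370000 tested below equals
+		 * FRAME_LG | FRAME_NO | FRAME_CR | FRAME_OV | FRAME_TR, i.e.
+		 * length, alignment, CRC, overrun and truncation errors. */
+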
+		/* check for frame errors */
+		if(status&0x00370000) {
+			/* frame error, drop */
+#ifdef DISPLAY_WARNINGS
+			if(status&MPC5xxx_FEC_FRAME_LG)
+				printk("%s: Frame length error, dropping packet (status=0x%08x)\n",dev->name,status);
+			if(status&MPC5xxx_FEC_FRAME_NO)
+				printk("%s: Non-octet aligned frame error, dropping packet (status=0x%08x)\n",dev->name,status);
+			if(status&MPC5xxx_FEC_FRAME_CR)
+				printk("%s: Frame CRC error, dropping packet (status=0x%08x)\n",dev->name,status);
+			if(status&MPC5xxx_FEC_FRAME_OV)
+				printk("%s: FIFO overrun error, dropping packet (status=0x%08x)\n",dev->name,status);
+			if(status&MPC5xxx_FEC_FRAME_TR)
+				printk("%s: Frame truncated error, dropping packet (status=0x%08x)\n",dev->name,status);
+#endif
+			discard=1;
+		}
+		else if (length>(MPC5xxx_FEC_RECV_BUFFER_SIZE-4)) {
+			/* packet too big, drop */
+#ifdef DISPLAY_WARNINGS
+			printk("%s: Frame too big, dropping packet (length=%i)\n",dev->name,length);
+#endif
+			discard=2;
+		}
+		else {
+			/* allocate replacement skb */
+			nskb = dev_alloc_rtskb(sizeof *nrbuf, dev);
+			if (nskb == NULL) {
+				/* memory squeeze, drop */
+				discard=3;
+				dropped++;
+			}
+			else {
+				discard=0;
+			}
+		}
+
+#ifndef EXIT_ISR_AT_MEMORY_SQUEEZE
+		}
+		else {
+			dropped++;
+		}
+#endif
+
+		if (discard) {
+			priv->stats.rx_dropped++;
+			nrbuf = (struct mpc5xxx_rbuf *)skb->data;
+		}
+		else {
+#ifdef MUST_UNALIGN_RECEIVE_DATA
+			rtskb_reserve(nskb,2);
+#endif
+			nrbuf = (struct mpc5xxx_rbuf *)rtskb_put(nskb, sizeof *nrbuf);
+
+			/* only invalidate the number of bytes in dcache actually received */
+#ifdef MUST_UNALIGN_RECEIVE_DATA
+			invalidate_dcache_range((u32)rbuf - 2, (u32)rbuf + length);
+#else
+			invalidate_dcache_range((u32)rbuf, (u32)rbuf + length);
+#endif
+			rtskb_trim(skb, length);
+			skb->protocol = rt_eth_type_trans(skb, dev);
+			skb->time_stamp = time_stamp;
+			rtnetif_rx(skb);
+			packets++;
+#ifdef ORIGINAL_CODE
+			dev->last_rx = jiffies;
+#endif /* ORIGINAL_CODE */
+			rx_fifo_skb[mpc5xxx_bdi_rx] = nskb;
+		}
+
+		/* Assign new socket buffer to BD */
+		bdi_a = TaskBDAssign(priv->r_tasknum, (void*)virt_to_phys((void *)&nrbuf->data),
+				0, sizeof *nrbuf, MPC5xxx_FEC_RBD_INIT);
+
+#ifdef PARANOID_CHECKS
+		/* check for errors during assignment */
+		if((bdi_a<0)||(bdi_a>=MPC5xxx_FEC_RBD_NUM))
+			panic("mpc5xxx_fec_receive_interrupt: error while TaskBDAssign, err=%i\n",(int)bdi_a);
+
+		/* check if Assign/Release sequence numbers are ok */
+		if(((bdi_a+1)%MPC5xxx_FEC_RBD_NUM) != bdi_r)
+			panic("bdi_a+1 != bdi_r: %i %i\n",(int)((bdi_a+1)%MPC5xxx_FEC_RBD_NUM),(int)bdi_r);
+#endif
+
+		mpc5xxx_bdi_rx = bdi_r;
+
+#ifdef EXIT_ISR_AT_MEMORY_SQUEEZE
+		/* if we couldn't get memory for a new socket buffer, then it
+		 * doesn't make sense to proceed. */
+		if (discard==3)
+			break;
+#endif
+
+	}
+
+#ifdef DISPLAY_WARNINGS
+	if(dropped) {
+		printk("%s: Memory squeeze, dropped %i packets\n",dev->name,dropped);
+	}
+#endif
+	TaskStart(priv->r_tasknum, TASK_AUTOSTART_ENABLE, priv->r_tasknum, TASK_INTERRUPT_ENABLE);
+
+	if (packets > 0)
+		rt_mark_stack_mgr(dev);
+	return RTDM_IRQ_HANDLED;
+}
+
+
+static void
+mpc5xxx_fec_reinit(struct rtnet_device *dev)
+{
+	int retval;
+	printk("mpc5xxx_fec_reinit\n");
+	mpc5xxx_fec_cleanup(dev,1);
+	retval=mpc5xxx_fec_setup(dev,1);
+	if(retval) panic("reinit failed\n");
+}
+
+
+static int
+mpc5xxx_fec_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	int ievent;
+
+#if MPC5xxx_FEC_DEBUG > 4
+	
printk("mpc5xxx_fec_interrupt:\n"); +#endif + + ievent = in_be32(&fec->ievent); + out_be32(&fec->ievent, ievent); /* clear pending events */ + + if (ievent & (MPC5xxx_FEC_IEVENT_RFIFO_ERROR | + MPC5xxx_FEC_IEVENT_XFIFO_ERROR)) { + if (ievent & MPC5xxx_FEC_IEVENT_RFIFO_ERROR) + printk(KERN_WARNING "MPC5xxx_FEC_IEVENT_RFIFO_ERROR\n"); + if (ievent & MPC5xxx_FEC_IEVENT_XFIFO_ERROR) + printk(KERN_WARNING "MPC5xxx_FEC_IEVENT_XFIFO_ERROR\n"); + mpc5xxx_fec_reinit(dev); + } + else if (ievent & MPC5xxx_FEC_IEVENT_MII) { +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + mpc5xxx_fec_mii(dev); +#else + printk("%s[%d] %s: unexpected MPC5xxx_FEC_IEVENT_MII\n", + __FILE__, __LINE__, __FUNCTION__); +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + } + + return RTDM_IRQ_HANDLED; +} + +static int +mpc5xxx_fec_cleanup(struct rtnet_device *dev, int reinit) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + struct mpc5xxx_fec *fec = priv->fec; + unsigned long timeout; + int i; + + priv->open_time = 0; +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + priv->sequence_done = 0; +#endif + + rtnetif_stop_queue(dev); + + /* Wait for rx queue to drain */ + if(!reinit) { + timeout = jiffies + 2*HZ; + while (TaskBDInUse(priv->t_tasknum) && (jiffies < timeout)) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ/10); + } + } + + /* Disable FEC interrupts */ + out_be32(&fec->imask, 0x0); + + /* Stop FEC */ + out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~0x2); + + /* Disable the rx and tx queues. */ + TaskStop(priv->r_tasknum); + TaskStop(priv->t_tasknum); + + /* Release irqs */ + if(!reinit) { + rtdm_irq_disable(&priv->irq_handle); + rtdm_irq_disable(&priv->r_irq_handle); + rtdm_irq_disable(&priv->t_irq_handle); + rtdm_irq_free(&priv->irq_handle); + rtdm_irq_free(&priv->r_irq_handle); + rtdm_irq_free(&priv->t_irq_handle); + rt_stack_disconnect(dev); + } + + /* Free rx Buffers */ + if(!reinit) { + for (i=0; i<MPC5xxx_FEC_RBD_NUM; i++) { + dev_kfree_rtskb(rx_fifo_skb[i]); + } + } + + mpc5xxx_fec_get_stats(dev); + + return 0; +} + +static int +mpc5xxx_fec_close(struct rtnet_device *dev) +{ + int ret = mpc5xxx_fec_cleanup(dev,0); + return ret; +} + +/* + * Get the current statistics. + * This may be called with the card open or closed. 
+ */ +static struct net_device_stats *mpc5xxx_fec_get_stats(struct rtnet_device *dev) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + struct net_device_stats *stats = &priv->stats; + struct mpc5xxx_fec *fec = priv->fec; + + stats->rx_bytes = in_be32(&fec->rmon_r_octets); + stats->rx_packets = in_be32(&fec->rmon_r_packets); + stats->rx_errors = stats->rx_packets - ( + in_be32(&fec->ieee_r_frame_ok) + + in_be32(&fec->rmon_r_mc_pkt)); + stats->tx_bytes = in_be32(&fec->rmon_t_octets); + stats->tx_packets = in_be32(&fec->rmon_t_packets); + stats->tx_errors = stats->tx_packets - ( + in_be32(&fec->ieee_t_frame_ok) + + in_be32(&fec->rmon_t_col) + + in_be32(&fec->ieee_t_1col) + + in_be32(&fec->ieee_t_mcol) + + in_be32(&fec->ieee_t_def)); + stats->multicast = in_be32(&fec->rmon_r_mc_pkt); + stats->collisions = in_be32(&fec->rmon_t_col); + + /* detailed rx_errors: */ + stats->rx_length_errors = in_be32(&fec->rmon_r_undersize) + + in_be32(&fec->rmon_r_oversize) + + in_be32(&fec->rmon_r_frag) + + in_be32(&fec->rmon_r_jab); + stats->rx_over_errors = in_be32(&fec->r_macerr); + stats->rx_crc_errors = in_be32(&fec->ieee_r_crc); + stats->rx_frame_errors = in_be32(&fec->ieee_r_align); + stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop); + stats->rx_missed_errors = in_be32(&fec->rmon_r_drop); + + /* detailed tx_errors: */ + stats->tx_aborted_errors = 0; + stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr); + stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop) + + in_be32(&fec->ieee_t_macerr); + stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe); + stats->tx_window_errors = in_be32(&fec->ieee_t_lcol); + + return stats; +} + +static void +mpc5xxx_fec_update_stat(struct rtnet_device *dev) +{ + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + struct net_device_stats *stats = &priv->stats; + struct mpc5xxx_fec *fec = priv->fec; + + out_be32(&fec->mib_control, MPC5xxx_FEC_MIB_DISABLE); + memset_io(&fec->rmon_t_drop, 0, + (u32)&fec->reserved10 - (u32)&fec->rmon_t_drop); + out_be32(&fec->mib_control, 0); + memset(stats, 0, sizeof *stats); + mpc5xxx_fec_get_stats(dev); +} + +#ifdef ORIGINAL_CODE +/* + * Set or clear the multicast filter for this adaptor. 
+ */
+static void
+mpc5xxx_fec_set_multicast_list(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	u32 u32_value;
+
+	if (dev->flags & IFF_PROMISC) {
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		u32_value = in_be32(&fec->r_cntrl);
+		u32_value |= MPC5xxx_FEC_RCNTRL_PROM;
+		out_be32(&fec->r_cntrl, u32_value);
+	}
+	else if (dev->flags & IFF_ALLMULTI) {
+		u32_value = in_be32(&fec->r_cntrl);
+		u32_value &= ~MPC5xxx_FEC_RCNTRL_PROM;
+		out_be32(&fec->r_cntrl, u32_value);
+		out_be32(&fec->gaddr1, 0xffffffff);
+		out_be32(&fec->gaddr2, 0xffffffff);
+	}
+	else {
+		u32 crc;
+		int i;
+		struct dev_mc_list *dmi;
+		u32 gaddr1 = 0x00000000;
+		u32 gaddr2 = 0x00000000;
+
+		/* the upper 6 bits of the address CRC select one bit in the
+		 * 64-bit group hash spread across gaddr1/gaddr2 */
+		dmi = dev->mc_list;
+		for (i=0; i<dev->mc_count; i++) {
+			crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
+			if (crc >= 32)
+				gaddr1 |= 1 << (crc-32);
+			else
+				gaddr2 |= 1 << crc;
+			dmi = dmi->next;
+		}
+		out_be32(&fec->gaddr1, gaddr1);
+		out_be32(&fec->gaddr2, gaddr2);
+	}
+}
+#endif /* ORIGINAL_CODE */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET
+static void mpc5xxx_mdio_callback(uint regval, struct rtnet_device *dev, uint data)
+{
+	mdio_read_data_t* mrd = (mdio_read_data_t *)data;
+	mrd->regval = 0xFFFF & regval;
+	wake_up_process(mrd->sleeping_task);
+}
+
+static int mpc5xxx_mdio_read(struct rtnet_device *dev, int phy_id, int location)
+{
+	uint retval;
+	mdio_read_data_t* mrd = (mdio_read_data_t *)kmalloc(sizeof(*mrd),
+							    GFP_KERNEL);
+
+	mrd->sleeping_task = current;
+	set_current_state(TASK_INTERRUPTIBLE);
+	mii_queue(dev, mk_mii_read(location),
+		  mpc5xxx_mdio_callback, (unsigned int) mrd);
+	schedule();
+
+	retval = mrd->regval;
+
+	kfree(mrd);
+
+	return retval;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+static void mpc5xxx_mdio_write(struct rtnet_device *dev, int phy_id, int location, int value)
+{
+	mii_queue(dev, mk_mii_write(location, value), NULL, 0);
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET */
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifdef ORIGINAL_CODE
+static int
+mpc5xxx_netdev_ethtool_ioctl(struct rtnet_device *dev, void *useraddr)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	struct mpc5xxx_fec_priv *private = (struct mpc5xxx_fec_priv *)dev->priv;
+#endif
+	u32 ethcmd;
+
+	if (copy_from_user(&ethcmd, useraddr, sizeof ethcmd))
+		return -EFAULT;
+
+	switch (ethcmd) {
+
+	/* Get driver info */
+	case ETHTOOL_GDRVINFO:{
+			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+			strncpy(info.driver, "gt64260",
+				sizeof info.driver - 1);
+			strncpy(info.version, version,
+				sizeof info.version - 1);
+			if (copy_to_user(useraddr, &info, sizeof info))
+				return -EFAULT;
+			return 0;
+		}
+	/* get settings */
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	case ETHTOOL_GSET:{
+			struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+			spin_lock_irq(&private->lock);
+			mii_ethtool_gset(&private->mii_if, &ecmd);
+			spin_unlock_irq(&private->lock);
+			if (copy_to_user(useraddr, &ecmd, sizeof ecmd))
+				return -EFAULT;
+			return 0;
+		}
+	/* set settings */
+	case ETHTOOL_SSET:{
+			int r;
+			struct ethtool_cmd ecmd;
+			if (copy_from_user(&ecmd, useraddr, sizeof ecmd))
+				return -EFAULT;
+			spin_lock_irq(&private->lock);
+			r = mii_ethtool_sset(&private->mii_if, &ecmd);
+			spin_unlock_irq(&private->lock);
+			return r;
+		}
+	/* restart autonegotiation */
+	case ETHTOOL_NWAY_RST:{
+			return
mii_nway_restart(&private->mii_if); + } + /* get link status */ + case ETHTOOL_GLINK:{ + struct ethtool_value edata = { ETHTOOL_GLINK }; + edata.data = mii_link_ok(&private->mii_if); + if (copy_to_user(useraddr, &edata, sizeof edata)) + return -EFAULT; + return 0; + } +#endif + /* get message-level */ + case ETHTOOL_GMSGLVL:{ + struct ethtool_value edata = { ETHTOOL_GMSGLVL }; + edata.data = 0; /* XXX */ + if (copy_to_user(useraddr, &edata, sizeof edata)) + return -EFAULT; + return 0; + } + /* set message-level */ + case ETHTOOL_SMSGLVL:{ + struct ethtool_value edata; + if (copy_from_user(&edata, useraddr, sizeof edata)) + return -EFAULT; +/* debug = edata.data; *//* XXX */ + return 0; + } + } + return -EOPNOTSUPP; +} + +static int +mpc5xxx_fec_ioctl(struct rtnet_device *dev, struct ifreq *rq, int cmd) +{ +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX + struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data; + int phy = dev->base_addr & 0x1f; +#endif + int retval; + + switch (cmd) { + case SIOCETHTOOL: + retval = mpc5xxx_netdev_ethtool_ioctl( + dev, (void *) rq->ifr_data); + break; + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */ + data->phy_id = phy; + /* Fall through */ + + case SIOCGMIIREG: /* Read MII PHY register. */ + case SIOCDEVPRIVATE + 1: /* for binary compat, remove in 2.5 */ + data->val_out = + mpc5xxx_mdio_read(dev, data->phy_id&0x1f, + data->reg_num&0x1f); + retval = 0; + break; + + case SIOCSMIIREG: /* Write MII PHY register. */ + case SIOCDEVPRIVATE + 2: /* for binary compat, remove in 2.5 */ + if (!capable(CAP_NET_ADMIN)) { + retval = -EPERM; + } else { + mpc5xxx_mdio_write(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f, data->val_in); + retval = 0; + } + break; +#endif + + default: + retval = -EOPNOTSUPP; + break; + } + return retval; +} + +static void __init +mpc5xxx_fec_str2mac(char *str, unsigned char *mac) +{ + int i; + u64 val64; + + val64 = simple_strtoull(str, NULL, 16); + + for (i = 0; i < 6; i++) + mac[5-i] = val64 >> (i*8); +} + +static int __init +mpc5xxx_fec_mac_setup(char *mac_address) +{ + mpc5xxx_fec_str2mac(mac_address, mpc5xxx_fec_mac_addr); + return 0; +} + +__setup("mpc5xxx_mac=", mpc5xxx_fec_mac_setup); +#endif /* ORIGINAL_CODE */ + +static int __init +mpc5xxx_fec_init(void) +{ + struct mpc5xxx_fec *fec; + struct rtnet_device *dev; + struct mpc5xxx_fec_priv *priv; + int err = 0; + +#if MPC5xxx_FEC_DEBUG > 1 + printk("mpc5xxx_fec_init\n"); +#endif + + if (!rx_pool_size) + rx_pool_size = MPC5xxx_FEC_RBD_NUM * 2; + + dev = rt_alloc_etherdev(sizeof(*priv), rx_pool_size + MPC5xxx_FEC_TBD_NUM); + if (!dev) + return -EIO; + rtdev_alloc_name(dev, "rteth%d"); + memset(dev->priv, 0, sizeof(*priv)); + rt_rtdev_connect(dev, &RTDEV_manager); + dev->vers = RTDEV_VERS_2_0; + + + mpc5xxx_fec_dev = dev; + priv = (struct mpc5xxx_fec_priv *)dev->priv; +#if MPC5xxx_FEC_DEBUG > 1 + printk("fec_priv %08x\n", (u32)priv); +#endif + priv->fec = fec = (struct mpc5xxx_fec *)MPC5xxx_FEC; + priv->gpio = (struct mpc5xxx_gpio *)MPC5xxx_GPIO; + priv->sdma = (struct mpc5xxx_sdma *)MPC5xxx_SDMA; + + rtdm_lock_init(&priv->lock); + dev->open = mpc5xxx_fec_open; + dev->stop = mpc5xxx_fec_close; + dev->hard_start_xmit = mpc5xxx_fec_hard_start_xmit; + //FIXME dev->hard_header = &rt_eth_header; + dev->get_stats = mpc5xxx_fec_get_stats; +#ifdef ORIGINAL_CODE + dev->do_ioctl = mpc5xxx_fec_ioctl; + dev->set_mac_address = mpc5xxx_fec_set_mac_address; 
+ dev->set_multicast_list = mpc5xxx_fec_set_multicast_list; + + dev->tx_timeout = mpc5xxx_fec_tx_timeout; + dev->watchdog_timeo = MPC5xxx_FEC_WATCHDOG_TIMEOUT; +#endif /* ORIGINAL_CODE */ + dev->flags &= ~IFF_RUNNING; + + if ((err = rt_register_rtnetdev(dev))) + goto abort; + +#ifdef CONFIG_XENO_DRIVERS_NET_FASTROUTE + dev->accept_fastpath = mpc5xxx_fec_accept_fastpath; +#endif + if (memcmp(mpc5xxx_fec_mac_addr, null_mac, 6) != 0) + memcpy(dev->dev_addr, mpc5xxx_fec_mac_addr, 6); + else { + *(u32 *)&dev->dev_addr[0] = in_be32(&fec->paddr1); + *(u16 *)&dev->dev_addr[4] = in_be16((u16*)&fec->paddr2); + } + + /* + * Read MIB counters in order to reset them, + * then zero all the stats fields in memory + */ + mpc5xxx_fec_update_stat(dev); + + return 0; + +abort: + rtdev_free(dev); + + return err; +} + +static void __exit +mpc5xxx_fec_uninit(void) +{ + struct rtnet_device *dev = mpc5xxx_fec_dev; + struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv; + + rt_stack_disconnect(dev); + rt_unregister_rtnetdev(dev); + rt_rtdev_disconnect(dev); + printk("%s: unloaded\n", dev->name); + rtdev_free(dev); + dev->priv = NULL; +} + +static int __init +mpc5xxx_fec_module_init(void) +{ + return mpc5xxx_fec_init(); +} + +static void __exit +mpc5xxx_fec_module_exit(void) +{ + mpc5xxx_fec_uninit(); +} + +module_init(mpc5xxx_fec_module_init); +module_exit(mpc5xxx_fec_module_exit); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h new file mode 100644 index 0000000..db21607 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h @@ -0,0 +1,428 @@ +/* + * arch/ppc/5xxx_io/fec.h + * + * Header file for the MPC5xxx Fast Ethernet Controller driver + * + * Author: Dale Farnsworth <dfarnsworth@mvista.com> + * + * Copyright 2003 MontaVista Software + * + * 2003 (c) MontaVista, Software, Inc. This file is licensed under the terms + * of the GNU General Public License version 2. This program is licensed + * "as is" without any warranty of any kind, whether express or implied. 
+ */ + +#ifndef __RT_MPC52XX_FEC_H_ +#define __RT_MPC52XX_FEC_H_ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/mii.h> +#include <linux/skbuff.h> +#include <asm/mpc5xxx.h> +#include <bestcomm_api.h> + +/* Define board specific options */ +#define CONFIG_XENO_DRIVERS_NET_USE_MDIO +#define CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY +#define CONFIG_XENO_DRIVERS_NET_FEC_LXT971 +#undef CONFIG_XENO_DRIVERS_NET_FEC_DP83847 + +/* Tunable constants */ +#define MPC5xxx_FEC_RECV_BUFFER_SIZE 1518 /* max receive packet size */ +#define MPC5xxx_FEC_RECV_BUFFER_SIZE_BC 2048 /* max receive packet size */ +#define MPC5xxx_FEC_TBD_NUM 256 /* max transmit packets */ +#define MPC5xxx_FEC_RBD_NUM 256 /* max receive packets */ + +struct mpc5xxx_fec { + volatile u32 fec_id; /* FEC + 0x000 */ + volatile u32 ievent; /* FEC + 0x004 */ + volatile u32 imask; /* FEC + 0x008 */ + + volatile u32 reserved0[1]; /* FEC + 0x00C */ + volatile u32 r_des_active; /* FEC + 0x010 */ + volatile u32 x_des_active; /* FEC + 0x014 */ + volatile u32 r_des_active_cl; /* FEC + 0x018 */ + volatile u32 x_des_active_cl; /* FEC + 0x01C */ + volatile u32 ivent_set; /* FEC + 0x020 */ + volatile u32 ecntrl; /* FEC + 0x024 */ + + volatile u32 reserved1[6]; /* FEC + 0x028-03C */ + volatile u32 mii_data; /* FEC + 0x040 */ + volatile u32 mii_speed; /* FEC + 0x044 */ + volatile u32 mii_status; /* FEC + 0x048 */ + + volatile u32 reserved2[5]; /* FEC + 0x04C-05C */ + volatile u32 mib_data; /* FEC + 0x060 */ + volatile u32 mib_control; /* FEC + 0x064 */ + + volatile u32 reserved3[6]; /* FEC + 0x068-7C */ + volatile u32 r_activate; /* FEC + 0x080 */ + volatile u32 r_cntrl; /* FEC + 0x084 */ + volatile u32 r_hash; /* FEC + 0x088 */ + volatile u32 r_data; /* FEC + 0x08C */ + volatile u32 ar_done; /* FEC + 0x090 */ + volatile u32 r_test; /* FEC + 0x094 */ + volatile u32 r_mib; /* FEC + 0x098 */ + volatile u32 r_da_low; /* FEC + 0x09C */ + volatile u32 r_da_high; /* FEC + 0x0A0 */ + + volatile u32 reserved4[7]; /* FEC + 0x0A4-0BC */ + volatile u32 x_activate; /* FEC + 0x0C0 */ + volatile u32 x_cntrl; /* FEC + 0x0C4 */ + volatile u32 backoff; /* FEC + 0x0C8 */ + volatile u32 x_data; /* FEC + 0x0CC */ + volatile u32 x_status; /* FEC + 0x0D0 */ + volatile u32 x_mib; /* FEC + 0x0D4 */ + volatile u32 x_test; /* FEC + 0x0D8 */ + volatile u32 fdxfc_da1; /* FEC + 0x0DC */ + volatile u32 fdxfc_da2; /* FEC + 0x0E0 */ + volatile u32 paddr1; /* FEC + 0x0E4 */ + volatile u32 paddr2; /* FEC + 0x0E8 */ + volatile u32 op_pause; /* FEC + 0x0EC */ + + volatile u32 reserved5[4]; /* FEC + 0x0F0-0FC */ + volatile u32 instr_reg; /* FEC + 0x100 */ + volatile u32 context_reg; /* FEC + 0x104 */ + volatile u32 test_cntrl; /* FEC + 0x108 */ + volatile u32 acc_reg; /* FEC + 0x10C */ + volatile u32 ones; /* FEC + 0x110 */ + volatile u32 zeros; /* FEC + 0x114 */ + volatile u32 iaddr1; /* FEC + 0x118 */ + volatile u32 iaddr2; /* FEC + 0x11C */ + volatile u32 gaddr1; /* FEC + 0x120 */ + volatile u32 gaddr2; /* FEC + 0x124 */ + volatile u32 random; /* FEC + 0x128 */ + volatile u32 rand1; /* FEC + 0x12C */ + volatile u32 tmp; /* FEC + 0x130 */ + + volatile u32 reserved6[3]; /* FEC + 0x134-13C */ + volatile u32 fifo_id; /* FEC + 0x140 */ + volatile u32 x_wmrk; /* FEC + 0x144 */ + volatile u32 fcntrl; /* FEC + 0x148 */ + volatile u32 r_bound; /* FEC + 0x14C */ + volatile u32 r_fstart; /* FEC + 0x150 */ + volatile u32 r_count; /* FEC + 0x154 */ + volatile u32 r_lag; /* FEC + 0x158 */ + volatile u32 r_read; /* FEC + 0x15C */ + volatile u32 
r_write; /* FEC + 0x160 */ + volatile u32 x_count; /* FEC + 0x164 */ + volatile u32 x_lag; /* FEC + 0x168 */ + volatile u32 x_retry; /* FEC + 0x16C */ + volatile u32 x_write; /* FEC + 0x170 */ + volatile u32 x_read; /* FEC + 0x174 */ + + volatile u32 reserved7[2]; /* FEC + 0x178-17C */ + volatile u32 fm_cntrl; /* FEC + 0x180 */ + volatile u32 rfifo_data; /* FEC + 0x184 */ + volatile u32 rfifo_status; /* FEC + 0x188 */ + volatile u32 rfifo_cntrl; /* FEC + 0x18C */ + volatile u32 rfifo_lrf_ptr; /* FEC + 0x190 */ + volatile u32 rfifo_lwf_ptr; /* FEC + 0x194 */ + volatile u32 rfifo_alarm; /* FEC + 0x198 */ + volatile u32 rfifo_rdptr; /* FEC + 0x19C */ + volatile u32 rfifo_wrptr; /* FEC + 0x1A0 */ + volatile u32 tfifo_data; /* FEC + 0x1A4 */ + volatile u32 tfifo_status; /* FEC + 0x1A8 */ + volatile u32 tfifo_cntrl; /* FEC + 0x1AC */ + volatile u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */ + volatile u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */ + volatile u32 tfifo_alarm; /* FEC + 0x1B8 */ + volatile u32 tfifo_rdptr; /* FEC + 0x1BC */ + volatile u32 tfifo_wrptr; /* FEC + 0x1C0 */ + + volatile u32 reset_cntrl; /* FEC + 0x1C4 */ + volatile u32 xmit_fsm; /* FEC + 0x1C8 */ + + volatile u32 reserved8[3]; /* FEC + 0x1CC-1D4 */ + volatile u32 rdes_data0; /* FEC + 0x1D8 */ + volatile u32 rdes_data1; /* FEC + 0x1DC */ + volatile u32 r_length; /* FEC + 0x1E0 */ + volatile u32 x_length; /* FEC + 0x1E4 */ + volatile u32 x_addr; /* FEC + 0x1E8 */ + volatile u32 cdes_data; /* FEC + 0x1EC */ + volatile u32 status; /* FEC + 0x1F0 */ + volatile u32 dma_control; /* FEC + 0x1F4 */ + volatile u32 des_cmnd; /* FEC + 0x1F8 */ + volatile u32 data; /* FEC + 0x1FC */ + + volatile u32 rmon_t_drop; /* FEC + 0x200 */ + volatile u32 rmon_t_packets; /* FEC + 0x204 */ + volatile u32 rmon_t_bc_pkt; /* FEC + 0x208 */ + volatile u32 rmon_t_mc_pkt; /* FEC + 0x20C */ + volatile u32 rmon_t_crc_align; /* FEC + 0x210 */ + volatile u32 rmon_t_undersize; /* FEC + 0x214 */ + volatile u32 rmon_t_oversize; /* FEC + 0x218 */ + volatile u32 rmon_t_frag; /* FEC + 0x21C */ + volatile u32 rmon_t_jab; /* FEC + 0x220 */ + volatile u32 rmon_t_col; /* FEC + 0x224 */ + volatile u32 rmon_t_p64; /* FEC + 0x228 */ + volatile u32 rmon_t_p65to127; /* FEC + 0x22C */ + volatile u32 rmon_t_p128to255; /* FEC + 0x230 */ + volatile u32 rmon_t_p256to511; /* FEC + 0x234 */ + volatile u32 rmon_t_p512to1023; /* FEC + 0x238 */ + volatile u32 rmon_t_p1024to2047; /* FEC + 0x23C */ + volatile u32 rmon_t_p_gte2048; /* FEC + 0x240 */ + volatile u32 rmon_t_octets; /* FEC + 0x244 */ + volatile u32 ieee_t_drop; /* FEC + 0x248 */ + volatile u32 ieee_t_frame_ok; /* FEC + 0x24C */ + volatile u32 ieee_t_1col; /* FEC + 0x250 */ + volatile u32 ieee_t_mcol; /* FEC + 0x254 */ + volatile u32 ieee_t_def; /* FEC + 0x258 */ + volatile u32 ieee_t_lcol; /* FEC + 0x25C */ + volatile u32 ieee_t_excol; /* FEC + 0x260 */ + volatile u32 ieee_t_macerr; /* FEC + 0x264 */ + volatile u32 ieee_t_cserr; /* FEC + 0x268 */ + volatile u32 ieee_t_sqe; /* FEC + 0x26C */ + volatile u32 t_fdxfc; /* FEC + 0x270 */ + volatile u32 ieee_t_octets_ok; /* FEC + 0x274 */ + + volatile u32 reserved9[2]; /* FEC + 0x278-27C */ + volatile u32 rmon_r_drop; /* FEC + 0x280 */ + volatile u32 rmon_r_packets; /* FEC + 0x284 */ + volatile u32 rmon_r_bc_pkt; /* FEC + 0x288 */ + volatile u32 rmon_r_mc_pkt; /* FEC + 0x28C */ + volatile u32 rmon_r_crc_align; /* FEC + 0x290 */ + volatile u32 rmon_r_undersize; /* FEC + 0x294 */ + volatile u32 rmon_r_oversize; /* FEC + 0x298 */ + volatile u32 rmon_r_frag; /* FEC + 0x29C */ + volatile u32 
rmon_r_jab; /* FEC + 0x2A0 */ + + volatile u32 rmon_r_resvd_0; /* FEC + 0x2A4 */ + + volatile u32 rmon_r_p64; /* FEC + 0x2A8 */ + volatile u32 rmon_r_p65to127; /* FEC + 0x2AC */ + volatile u32 rmon_r_p128to255; /* FEC + 0x2B0 */ + volatile u32 rmon_r_p256to511; /* FEC + 0x2B4 */ + volatile u32 rmon_r_p512to1023; /* FEC + 0x2B8 */ + volatile u32 rmon_r_p1024to2047; /* FEC + 0x2BC */ + volatile u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */ + volatile u32 rmon_r_octets; /* FEC + 0x2C4 */ + volatile u32 ieee_r_drop; /* FEC + 0x2C8 */ + volatile u32 ieee_r_frame_ok; /* FEC + 0x2CC */ + volatile u32 ieee_r_crc; /* FEC + 0x2D0 */ + volatile u32 ieee_r_align; /* FEC + 0x2D4 */ + volatile u32 r_macerr; /* FEC + 0x2D8 */ + volatile u32 r_fdxfc; /* FEC + 0x2DC */ + volatile u32 ieee_r_octets_ok; /* FEC + 0x2E0 */ + + volatile u32 reserved10[6]; /* FEC + 0x2E4-2FC */ + + volatile u32 reserved11[64]; /* FEC + 0x300-3FF */ +}; + +#define MPC5xxx_FEC_MIB_DISABLE 0x80000000 + +#define MPC5xxx_FEC_IEVENT_HBERR 0x80000000 +#define MPC5xxx_FEC_IEVENT_BABR 0x40000000 +#define MPC5xxx_FEC_IEVENT_BABT 0x20000000 +#define MPC5xxx_FEC_IEVENT_GRA 0x10000000 +#define MPC5xxx_FEC_IEVENT_TFINT 0x08000000 +#define MPC5xxx_FEC_IEVENT_MII 0x00800000 +#define MPC5xxx_FEC_IEVENT_LATE_COL 0x00200000 +#define MPC5xxx_FEC_IEVENT_COL_RETRY_LIM 0x00100000 +#define MPC5xxx_FEC_IEVENT_XFIFO_UN 0x00080000 +#define MPC5xxx_FEC_IEVENT_XFIFO_ERROR 0x00040000 +#define MPC5xxx_FEC_IEVENT_RFIFO_ERROR 0x00020000 + +#define MPC5xxx_FEC_IMASK_HBERR 0x80000000 +#define MPC5xxx_FEC_IMASK_BABR 0x40000000 +#define MPC5xxx_FEC_IMASK_BABT 0x20000000 +#define MPC5xxx_FEC_IMASK_GRA 0x10000000 +#define MPC5xxx_FEC_IMASK_MII 0x00800000 +#define MPC5xxx_FEC_IMASK_LATE_COL 0x00200000 +#define MPC5xxx_FEC_IMASK_COL_RETRY_LIM 0x00100000 +#define MPC5xxx_FEC_IMASK_XFIFO_UN 0x00080000 +#define MPC5xxx_FEC_IMASK_XFIFO_ERROR 0x00040000 +#define MPC5xxx_FEC_IMASK_RFIFO_ERROR 0x00020000 + +#define MPC5xxx_FEC_RCNTRL_MAX_FL_SHIFT 16 +#define MPC5xxx_FEC_RCNTRL_LOOP 0x01 +#define MPC5xxx_FEC_RCNTRL_DRT 0x02 +#define MPC5xxx_FEC_RCNTRL_MII_MODE 0x04 +#define MPC5xxx_FEC_RCNTRL_PROM 0x08 +#define MPC5xxx_FEC_RCNTRL_BC_REJ 0x10 +#define MPC5xxx_FEC_RCNTRL_FCE 0x20 + +#define MPC5xxx_FEC_TCNTRL_GTS 0x00000001 +#define MPC5xxx_FEC_TCNTRL_HBC 0x00000002 +#define MPC5xxx_FEC_TCNTRL_FDEN 0x00000004 +#define MPC5xxx_FEC_TCNTRL_TFC_PAUSE 0x00000008 +#define MPC5xxx_FEC_TCNTRL_RFC_PAUSE 0x00000010 + +#define MPC5xxx_FEC_ECNTRL_RESET 0x00000001 +#define MPC5xxx_FEC_ECNTRL_ETHER_EN 0x00000002 + +#define MPC5xxx_FEC_RESET_DELAY 50 /* uS */ + + +/* Receive & Transmit Buffer Descriptor definitions */ +struct mpc5xxx_fec_bd { + volatile u32 status; + volatile u32 data; +}; + +/* Receive data buffer format */ +struct mpc5xxx_rbuf { + u8 data[MPC5xxx_FEC_RECV_BUFFER_SIZE_BC]; +}; + +struct fec_queue { + volatile struct mpc5xxx_fec_bd *bd_base; + struct rtskb **skb_base; + u16 last_index; + u16 start_index; + u16 finish_index; +}; + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +#define MII_ADVERTISE_HALF (ADVERTISE_100HALF | ADVERTISE_10HALF | \ + ADVERTISE_CSMA) + +#define MII_ADVERTISE_ALL (ADVERTISE_100FULL | ADVERTISE_10FULL | \ + MII_ADVERTISE_HALF) +#ifdef PHY_INTERRUPT +#define MII_ADVERTISE_DEFAULT MII_ADVERTISE_ALL +#else +#define MII_ADVERTISE_DEFAULT MII_ADVERTISE_HALF +#endif + +typedef struct { + uint mii_data; + void (*funct)(uint mii_reg, struct rtnet_device *dev, uint data); +} phy_cmd_t; + +typedef struct { + uint id; + char *name; + + const phy_cmd_t *config; + const 
phy_cmd_t *startup; + const phy_cmd_t *ack_int; + const phy_cmd_t *shutdown; +} phy_info_t; +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + +struct mpc5xxx_fec_priv { + int full_duplex; + int tx_full; + int r_tasknum; + int t_tasknum; + int r_irq; + int t_irq; + rtdm_irq_t irq_handle; + rtdm_irq_t r_irq_handle; + rtdm_irq_t t_irq_handle; + u32 last_transmit_time; + u32 last_receive_time; + struct mpc5xxx_fec *fec; + struct mpc5xxx_sram_fec *sram; + struct mpc5xxx_gpio *gpio; + struct mpc5xxx_sdma *sdma; + struct fec_queue r_queue; + struct rtskb *rskb[MPC5xxx_FEC_RBD_NUM]; + struct fec_queue t_queue; + struct rtskb *tskb[MPC5xxx_FEC_TBD_NUM]; + rtdm_lock_t lock; + unsigned long open_time; + struct net_device_stats stats; +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + uint phy_id; + uint phy_id_done; + uint phy_status; + uint phy_speed; + phy_info_t *phy; + struct tq_struct phy_task; + volatile uint sequence_done; + uint link; + uint phy_addr; + + struct tq_struct link_up_task; + int duplex_change; + int link_up; + + struct timer_list phy_timer_list; + u16 old_status; +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ +}; + +struct mpc5xxx_sram_fec { + volatile struct mpc5xxx_fec_bd tbd[MPC5xxx_FEC_TBD_NUM]; + volatile struct mpc5xxx_fec_bd rbd[MPC5xxx_FEC_RBD_NUM]; +}; + +#define MPC5xxx_FEC_RBD_READY 0x40000000 +#define MPC5xxx_FEC_RBD_RFD 0x08000000 /* receive frame done */ + +#define MPC5xxx_FEC_RBD_INIT MPC5xxx_FEC_RBD_READY + +#define MPC5xxx_FEC_TBD_READY 0x40000000 +#define MPC5xxx_FEC_TBD_TFD 0x08000000 /* transmit frame done */ +#define MPC5xxx_FEC_TBD_INT 0x04000000 /* Interrupt */ + +#define MPC5xxx_FEC_TBD_INIT (MPC5xxx_FEC_TBD_INT | MPC5xxx_FEC_TBD_TFD | \ + MPC5xxx_FEC_TBD_READY) + + + +/* MII-related definitions */ +#define MPC5xxx_FEC_MII_DATA_ST 0x40000000 /* Start frame */ +#define MPC5xxx_FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */ +#define MPC5xxx_FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */ +#define MPC5xxx_FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */ +#define MPC5xxx_FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */ +#define MPC5xxx_FEC_MII_DATA_TA 0x00020000 /* Turnaround */ +#define MPC5xxx_FEC_MII_DATA_DATAMSK 0x00000fff /* PHY data mask */ + +#define MPC5xxx_FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */ +#define MPC5xxx_FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */ + +#define MPC5xxx_FEC_MII_SPEED (5 * 2) + +const char mpc5xxx_fec_name[] = "eth0"; + +struct mibCounters { + unsigned int byteReceived; + unsigned int byteSent; + unsigned int framesReceived; + unsigned int framesSent; + unsigned int totalByteReceived; + unsigned int totalFramesReceived; + unsigned int broadcastFramesReceived; + unsigned int multicastFramesReceived; + unsigned int cRCError; + unsigned int oversizeFrames; + unsigned int fragments; + unsigned int jabber; + unsigned int collision; + unsigned int lateCollision; + unsigned int frames64; + unsigned int frames65_127; + unsigned int frames128_255; + unsigned int frames256_511; + unsigned int frames512_1023; + unsigned int frames1024_MaxSize; + unsigned int macRxError; + unsigned int droppedFrames; + unsigned int outMulticastFrames; + unsigned int outBroadcastFrames; + unsigned int undersizeFrames; +}; + +#define MPC5xxx_FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000) + + +#define MPC5xxx_FEC_FRAME_LAST 0x08000000 /* Last */ +#define MPC5xxx_FEC_FRAME_M 0x01000000 /* M? 
*/
+#define MPC5xxx_FEC_FRAME_BC	0x00800000	/* Broadcast */
+#define MPC5xxx_FEC_FRAME_MC	0x00400000	/* Multicast */
+#define MPC5xxx_FEC_FRAME_LG	0x00200000	/* Length error */
+#define MPC5xxx_FEC_FRAME_NO	0x00100000	/* Non-octet aligned frame error */
+#define MPC5xxx_FEC_FRAME_CR	0x00040000	/* CRC frame error */
+#define MPC5xxx_FEC_FRAME_OV	0x00020000	/* Overrun error */
+#define MPC5xxx_FEC_FRAME_TR	0x00010000	/* Truncated error */
+
+
+
+#endif /* __RT_MPC52XX_FEC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c
new file mode 100644
index 0000000..5167dd7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c
@@ -0,0 +1,2235 @@
+/*
+ * Fast Ethernet Controller (FCC) driver for Motorola MPC8260.
+ * Copyright (c) 2000 MontaVista Software, Inc. Dan Malek (dmalek@jlc.net)
+ *
+ * This version of the driver is a combination of the 8xx fec and
+ * 8260 SCC Ethernet drivers. This version has some additional
+ * configuration options, which should probably be moved out of
+ * here. This driver currently works for the EST SBC8260,
+ * SBS Diablo/BCM, Embedded Planet RPX6, TQM8260, and others.
+ *
+ * Right now, I am very wasteful with the buffers. I allocate memory
+ * pages and then divide them into 2K frame buffers. This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets. Since this is a cache coherent processor and CPM,
+ * I could also preallocate SKB's and use them directly on the interface.
+ *
+ * Ported to RTnet from "linuxppc_2_4_devel/arch/ppc/8260_io/fcc_enet.c".
+ * Copyright (c) 2003 Wolfgang Grandegger (wg@denx.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/immap_8260.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8260.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/cpm_8260.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#error "MDIO for PHY configuration is not yet supported!"
+#endif
+
+#include <rtnet_port.h>
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet driver for the MPC8260 FCC Ethernet");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size = 0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+static unsigned int rtnet_fcc = 1;
+MODULE_PARM(rtnet_fcc, "i");
+MODULE_PARM_DESC(rtnet_fcc, "FCCx port for RTnet (default=1)");
+
+#define RT_DEBUG(fmt,args...)
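+/* For illustration only (not part of this patch): RT_DEBUG is compiled
+ * out above.  When debugging, one could redefine it along these lines,
+ * assuming the RTDM printk wrapper is acceptable in the calling context:
+ *
+ *	#define RT_DEBUG(fmt, args...)	rtdm_printk(fmt, ##args)
+ */
+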
+
+/* The transmitter timeout
+ */
+#define TX_TIMEOUT	(2*HZ)
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* Forward declarations of some structures to support different PHYs */
+
+typedef struct {
+	uint mii_data;
+	void (*funct)(uint mii_reg, struct net_device *dev);
+} phy_cmd_t;
+
+typedef struct {
+	uint id;
+	char *name;
+
+	const phy_cmd_t *config;
+	const phy_cmd_t *startup;
+	const phy_cmd_t *ack_int;
+	const phy_cmd_t *shutdown;
+} phy_info_t;
+
+/* Register definitions for the PHY. */
+
+#define MII_REG_CR	0	/* Control Register */
+#define MII_REG_SR	1	/* Status Register */
+#define MII_REG_PHYIR1	2	/* PHY Identification Register 1 */
+#define MII_REG_PHYIR2	3	/* PHY Identification Register 2 */
+#define MII_REG_ANAR	4	/* A-N Advertisement Register */
+#define MII_REG_ANLPAR	5	/* A-N Link Partner Ability Register */
+#define MII_REG_ANER	6	/* A-N Expansion Register */
+#define MII_REG_ANNPTR	7	/* A-N Next Page Transmit Register */
+#define MII_REG_ANLPRNPR 8	/* A-N Link Partner Received Next Page Reg. */
+
+/* values for phy_status */
+
+#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
+#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
+#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
+#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
+#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
+#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
+#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */
+
+#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
+#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
+#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete */
+#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
+#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected */
+#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected */
+#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
+#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+/* The number of Tx and Rx buffers. These are allocated from the page
+ * pool. The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter. We just use
+ * the skbuffer directly.
+ */
+#define FCC_ENET_RX_PAGES	16
+#define FCC_ENET_RX_FRSIZE	2048
+#define FCC_ENET_RX_FRPPG	(PAGE_SIZE / FCC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(FCC_ENET_RX_FRPPG * FCC_ENET_RX_PAGES)
+#define TX_RING_SIZE		16	/* Must be power of two */
+#define TX_RING_MOD_MASK	15	/*   for this to work */
+
+/* The FCC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE		1518
+#define PKT_MINBUF_SIZE		64
+
+/* Maximum input DMA size. Must (should?) be a multiple of 4.
+*/
+#define PKT_MAXDMA_SIZE		1520
+
+/* Maximum input buffer size. Must be a multiple of 32.
+*/ +#define PKT_MAXBLR_SIZE 1536 + +static int fcc_enet_open(struct rtnet_device *rtev); +static int fcc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev); +static int fcc_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp); +static int fcc_enet_interrupt(rtdm_irq_t *irq_handle); +static int fcc_enet_close(struct rtnet_device *dev); + +static struct net_device_stats *fcc_enet_get_stats(struct rtnet_device *rtdev); +#ifdef ORIGINAL_VERSION +static void set_multicast_list(struct net_device *dev); +static int fcc_enet_set_mac_address(struct net_device *dev, void *addr); +#endif /* ORIGINAL_VERSION */ + +static void fcc_restart(struct rtnet_device *rtdev, int duplex); + +/* These will be configurable for the FCC choice. + * Multiple ports can be configured. There is little choice among the + * I/O pins to the PHY, except the clocks. We will need some board + * dependent clock selection. + * Why in the hell did I put these inside #ifdef's? I dunno, maybe to + * help show what pins are used for each device. + */ + +/* I/O Pin assignment for FCC1. I don't yet know the best way to do this, + * but there is little variation among the choices. + */ +#define PA1_COL ((uint)0x00000001) +#define PA1_CRS ((uint)0x00000002) +#define PA1_TXER ((uint)0x00000004) +#define PA1_TXEN ((uint)0x00000008) +#define PA1_RXDV ((uint)0x00000010) +#define PA1_RXER ((uint)0x00000020) +#define PA1_TXDAT ((uint)0x00003c00) +#define PA1_RXDAT ((uint)0x0003c000) +#define PA1_PSORA0 (PA1_RXDAT | PA1_TXDAT) +#define PA1_PSORA1 (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \ + PA1_RXDV | PA1_RXER) +#define PA1_DIRA0 (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV) +#define PA1_DIRA1 (PA1_TXDAT | PA1_TXEN | PA1_TXER) + +/* CLK12 is receive, CLK11 is transmit. These are board specific. +*/ +#define PC_F1RXCLK ((uint)0x00000800) +#define PC_F1TXCLK ((uint)0x00000400) +#if defined(CONFIG_PM826) +#ifndef CONFIG_RTAI_RTNET_DB_CR826_J30x_ON +#define CMX1_CLK_ROUTE ((uint)0x35000000) +#define CMX1_CLK_MASK ((uint)0x7f000000) +#else +#define CMX1_CLK_ROUTE ((uint)0x37000000) +#define CMX1_CLK_MASK ((uint)0x7f000000) +#endif +#elif defined(CONFIG_CPU86) +#define CMX1_CLK_ROUTE ((uint)0x37000000) +#define CMX1_CLK_MASK ((uint)0x7f000000) +#else +#define CMX1_CLK_ROUTE ((uint)0x3e000000) +#define CMX1_CLK_MASK ((uint)0xff000000) +#endif /* CONFIG_PM826 */ + +/* I/O Pin assignment for FCC2. I don't yet know the best way to do this, + * but there is little variation among the choices. + */ +#define PB2_TXER ((uint)0x00000001) +#define PB2_RXDV ((uint)0x00000002) +#define PB2_TXEN ((uint)0x00000004) +#define PB2_RXER ((uint)0x00000008) +#define PB2_COL ((uint)0x00000010) +#define PB2_CRS ((uint)0x00000020) +#define PB2_TXDAT ((uint)0x000003c0) +#define PB2_RXDAT ((uint)0x00003c00) +#define PB2_PSORB0 (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \ + PB2_RXER | PB2_RXDV | PB2_TXER) +#define PB2_PSORB1 (PB2_TXEN) +#define PB2_DIRB0 (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV) +#define PB2_DIRB1 (PB2_TXDAT | PB2_TXEN | PB2_TXER) + +/* CLK13 is receive, CLK14 is transmit. These are board dependent. +*/ +#define PC_F2RXCLK ((uint)0x00001000) +#define PC_F2TXCLK ((uint)0x00002000) +#define CMX2_CLK_ROUTE ((uint)0x00250000) +#define CMX2_CLK_MASK ((uint)0x00ff0000) + +/* I/O Pin assignment for FCC3. I don't yet know the best way to do this, + * but there is little variation among the choices. 
+ */ +#define PB3_RXDV ((uint)0x00004000) +#define PB3_RXER ((uint)0x00008000) +#define PB3_TXER ((uint)0x00010000) +#define PB3_TXEN ((uint)0x00020000) +#define PB3_COL ((uint)0x00040000) +#define PB3_CRS ((uint)0x00080000) +#define PB3_TXDAT ((uint)0x0f000000) +#define PB3_RXDAT ((uint)0x00f00000) +#define PB3_PSORB0 (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \ + PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN) +#define PB3_PSORB1 (0) +#define PB3_DIRB0 (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV) +#define PB3_DIRB1 (PB3_TXDAT | PB3_TXEN | PB3_TXER) + +/* CLK15 is receive, CLK16 is transmit. These are board dependent. +*/ +#ifdef CONFIG_IPHASE4539 +#define PC_F3RXCLK ((uint)0x00002000) /* CLK 14 is receive */ +#define PC_F3TXCLK ((uint)0x00008000) /* CLK 16 is transmit */ +#define CMX3_CLK_ROUTE ((uint)0x00002f00) +#define CMX3_CLK_MASK ((uint)0x00007f00) +#else +#define PC_F3RXCLK ((uint)0x00004000) +#define PC_F3TXCLK ((uint)0x00008000) +#define CMX3_CLK_ROUTE ((uint)0x00003700) +#define CMX3_CLK_MASK ((uint)0x0000ff00) +#endif + +/* MII status/control serial interface. +*/ +#define IOP_PORT_OFF(f) ((uint)(&((iop8260_t *)0)->iop_p##f)) +#define IOP_PORT(x) IOP_PORT_OFF(dir##x) + +#define IOP_DIR(b,p) *((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(dira)-IOP_PORT_OFF(dira)))) +#define IOP_PAR(b,p) *((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(para)-IOP_PORT_OFF(dira)))) +#define IOP_SOR(b,p) *((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(sora)-IOP_PORT_OFF(dira)))) +#define IOP_ODR(b,p) *((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(odra)-IOP_PORT_OFF(dira)))) +#define IOP_DAT(b,p) *((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(data)-IOP_PORT_OFF(dira)))) + +#if defined(CONFIG_TQM8260) +/* TQM8260 has MDIO and MDCK on PC30 and PC31 respectively */ +#define MII_MDIO ((uint)0x00000002) +#define MII_MDCK ((uint)0x00000001) +#elif defined (CONFIG_PM826) +#ifndef CONFIG_RTAI_RTNET_DB_CR826_J30x_ON +#define MII_MDIO ((uint)0x00000080) /* MDIO on PC24 */ +#define MII_MDCK ((uint)0x00000100) /* MDCK on PC23 */ +#else +#define MII_MDIO ((uint)0x00000100) /* MDIO on PA23 */ +#define MII_MDCK ((uint)0x00000200) /* MDCK on PA22 */ +#define MII_PORT IOP_PORT(a) +#endif /* CONFIG_RTAI_RTNET_DB_CR826_J30x_ON */ +#elif defined (CONFIG_IPHASE4539) +#define MII_MDIO ((uint)0x00000080) /* MDIO on PC24 */ +#define MII_MDCK ((uint)0x00000100) /* MDCK on PC23 */ +#else +#define MII_MDIO ((uint)0x00000004) +#define MII_MDCK ((uint)0x00000100) +#endif + +# if defined(CONFIG_TQM8260) +#define MII_MDIO2 MII_MDIO +#define MII_MDCK2 MII_MDCK +#elif defined(CONFIG_EST8260) || defined(CONFIG_ADS8260) +#define MII_MDIO2 ((uint)0x00400000) +#define MII_MDCK2 ((uint)0x00200000) +#elif defined(CONFIG_PM826) +#define MII_MDIO2 ((uint)0x00000040) /* MDIO on PA25 */ +#define MII_MDCK2 ((uint)0x00000080) /* MDCK on PA24 */ +#define MII_PORT2 IOP_PORT(a) +#else +#define MII_MDIO2 ((uint)0x00000002) +#define MII_MDCK2 ((uint)0x00000080) +#endif + +# if defined(CONFIG_TQM8260) +#define MII_MDIO3 MII_MDIO +#define MII_MDCK3 MII_MDCK +#else +#define MII_MDIO3 ((uint)0x00000001) +#define MII_MDCK3 ((uint)0x00000040) +#endif + +#ifndef MII_PORT +#define MII_PORT IOP_PORT(c) +#endif + +#ifndef MII_PORT2 +#define MII_PORT2 IOP_PORT(c) +#endif + +#ifndef MII_PORT3 +#define MII_PORT3 IOP_PORT(c) +#endif + +/* A table of information for supporting FCCs. This does two things. + * First, we know how many FCCs we have and they are always externally + * numbered from zero. Second, it holds control register and I/O + * information that could be different among board designs. 
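+ *
+ * (fec_enet_init() and fcc_enet_cleanup() below walk this table and bind
+ * only the entry whose index equals rtnet_fcc - 1, i.e. the single FCC
+ * selected for RTnet by the rtnet_fcc module parameter.)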
+ */ +typedef struct fcc_info { + uint fc_fccnum; + uint fc_cpmblock; + uint fc_cpmpage; + uint fc_proff; + uint fc_interrupt; + uint fc_trxclocks; + uint fc_clockroute; + uint fc_clockmask; + uint fc_mdio; + uint fc_mdck; + uint fc_port; + struct rtnet_device *rtdev; +} fcc_info_t; + +static fcc_info_t fcc_ports[] = { + { 0, CPM_CR_FCC1_SBLOCK, CPM_CR_FCC1_PAGE, PROFF_FCC1, SIU_INT_FCC1, + (PC_F1RXCLK | PC_F1TXCLK), CMX1_CLK_ROUTE, CMX1_CLK_MASK, + MII_MDIO, MII_MDCK, MII_PORT }, + { 1, CPM_CR_FCC2_SBLOCK, CPM_CR_FCC2_PAGE, PROFF_FCC2, SIU_INT_FCC2, + (PC_F2RXCLK | PC_F2TXCLK), CMX2_CLK_ROUTE, CMX2_CLK_MASK, + MII_MDIO2, MII_MDCK2, MII_PORT2 }, + { 2, CPM_CR_FCC3_SBLOCK, CPM_CR_FCC3_PAGE, PROFF_FCC3, SIU_INT_FCC3, + (PC_F3RXCLK | PC_F3TXCLK), CMX3_CLK_ROUTE, CMX3_CLK_MASK, + MII_MDIO3, MII_MDCK3, MII_PORT3 }, +}; + +/* The FCC buffer descriptors track the ring buffers. The rx_bd_base and + * tx_bd_base always point to the base of the buffer descriptors. The + * cur_rx and cur_tx point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct fcc_enet_private { + /* The addresses of a Tx/Rx-in-place packets/buffers. */ + struct rtskb *tx_skbuff[TX_RING_SIZE]; + ushort skb_cur; + ushort skb_dirty; + + /* CPM dual port RAM relative addresses. + */ + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t *tx_bd_base; + cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ + cbd_t *dirty_tx; /* The ring entries to be free()ed. */ + volatile fcc_t *fccp; + volatile fcc_enet_t *ep; + struct net_device_stats stats; + uint tx_full; + rtdm_lock_t lock; + rtdm_irq_t irq_handle; + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + uint phy_id; + uint phy_id_done; + uint phy_status; + phy_info_t *phy; + struct tq_struct phy_task; + + uint sequence_done; + + uint phy_addr; +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + int link; + int old_link; + int full_duplex; + + fcc_info_t *fip; +}; + +static void init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep, + volatile immap_t *immap); +static void init_fcc_startup(fcc_info_t *fip, struct rtnet_device *rtdev); +static void init_fcc_ioports(fcc_info_t *fip, volatile iop8260_t *io, + volatile immap_t *immap); +static void init_fcc_param(fcc_info_t *fip, struct rtnet_device *rtdev, + volatile immap_t *immap); + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +static int mii_queue(struct net_device *dev, int request, void (*func)(uint, struct net_device *)); +static uint mii_send_receive(fcc_info_t *fip, uint cmd); + +static void fcc_stop(struct net_device *dev); + +/* Make MII read/write commands for the FCC. +*/ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \ + (VAL & 0xffff)) +#define mk_mii_end 0 +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + +static int +fcc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev) +{ + struct fcc_enet_private *cep = (struct fcc_enet_private *)rtdev->priv; + volatile cbd_t *bdp; + rtdm_lockctx_t context; + + RT_DEBUG(__FUNCTION__": ...\n"); + + if (!cep->link) { + /* Link is down or autonegotiation is in progress. */ + return 1; + } + + /* Fill in a Tx ring entry */ + bdp = cep->cur_tx; + +#ifndef final_version + if (bdp->cbd_sc & BD_ENET_TX_READY) { + /* Ooops. 
All transmit buffers are full. Bail out.
+ * This should not happen, since cep->tx_full should be set.
+ */
+ rtdm_printk("%s: tx queue full!\n", rtdev->name);
+ return 1;
+ }
+#endif
+
+ /* Clear all of the status flags. */
+ bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+
+ /* If the frame is short, tell CPM to pad it. */
+ if (skb->len <= ETH_ZLEN)
+ bdp->cbd_sc |= BD_ENET_TX_PAD;
+ else
+ bdp->cbd_sc &= ~BD_ENET_TX_PAD;
+
+ /* Set buffer length and buffer pointer. */
+ bdp->cbd_datlen = skb->len;
+ bdp->cbd_bufaddr = __pa(skb->data);
+
+ /* Save skb pointer. */
+ cep->tx_skbuff[cep->skb_cur] = skb;
+
+ cep->stats.tx_bytes += skb->len;
+ cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
+
+ rtdm_lock_get_irqsave(&cep->lock, context);
+
+ /* Get and patch time stamp just before the transmission */
+ if (skb->xmit_stamp)
+ *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+ /* Send it on its way. Tell CPM it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+
+#ifdef ORIGINAL_VERSION
+ dev->trans_start = jiffies;
+#endif
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+ bdp = cep->tx_bd_base;
+ else
+ bdp++;
+
+ if (bdp->cbd_sc & BD_ENET_TX_READY) {
+ rtnetif_stop_queue(rtdev);
+ cep->tx_full = 1;
+ }
+
+ cep->cur_tx = (cbd_t *)bdp;
+
+ rtdm_lock_put_irqrestore(&cep->lock, context);
+
+ return 0;
+}
+
+
+#ifdef ORIGINAL_VERSION
+static void
+fcc_enet_timeout(struct net_device *dev)
+{
+ struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;
+
+ printk("%s: transmit timed out.\n", dev->name);
+ cep->stats.tx_errors++;
+#ifndef final_version
+ {
+ int i;
+ cbd_t *bdp;
+ printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
+ cep->cur_tx, cep->tx_full ? " (full)" : "",
+ cep->cur_rx);
+ bdp = cep->tx_bd_base;
+ printk(" Tx @base %p :\n", bdp);
+ for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
+ printk("%04x %04x %08x\n",
+ bdp->cbd_sc,
+ bdp->cbd_datlen,
+ bdp->cbd_bufaddr);
+ bdp = cep->rx_bd_base;
+ printk(" Rx @base %p :\n", bdp);
+ for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
+ printk("%04x %04x %08x\n",
+ bdp->cbd_sc,
+ bdp->cbd_datlen,
+ bdp->cbd_bufaddr);
+ }
+#endif
+ if (!cep->tx_full)
+ netif_wake_queue(dev);
+}
+#endif /* ORIGINAL_VERSION */
+
+/* The interrupt handler. */
+static int fcc_enet_interrupt(rtdm_irq_t *irq_handle)
+{
+ struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+ int packets = 0;
+ struct fcc_enet_private *cep;
+ volatile cbd_t *bdp;
+ ushort int_events;
+ int must_restart;
+ nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+
+ cep = (struct fcc_enet_private *)rtdev->priv;
+
+ /* Get the interrupt events that caused us to be here.
+ */
+ int_events = cep->fccp->fcc_fcce;
+ cep->fccp->fcc_fcce = int_events;
+ must_restart = 0;
+
+ /* Handle receive event in its own function.
+ */
+ if (int_events & FCC_ENET_RXF) {
+ fcc_enet_rx(rtdev, &packets, &time_stamp);
+ }
+
+ /* Check for a transmit error. The manual is a little unclear
+ * about this, so the debug code stays in until I get it figured
+ * out. It appears that if TXE is set, then TXB is not set. However,
+ * if carrier sense is lost during frame transmission, the TXE
+ * bit is set, "and continues the buffer transmission normally."
+ * I don't know if "normally" implies TXB is set when the buffer
+ * descriptor is closed.....trial and error :-).
+ */ + + /* Transmit OK, or non-fatal error. Update the buffer descriptors. + */ + if (int_events & (FCC_ENET_TXE | FCC_ENET_TXB)) { + rtdm_lock_get(&cep->lock); + bdp = cep->dirty_tx; + while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) { + if ((bdp==cep->cur_tx) && (cep->tx_full == 0)) + break; + + if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ + cep->stats.tx_heartbeat_errors++; + if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ + cep->stats.tx_window_errors++; + if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ + cep->stats.tx_aborted_errors++; + if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ + cep->stats.tx_fifo_errors++; + if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ + cep->stats.tx_carrier_errors++; + + + /* No heartbeat or Lost carrier are not really bad errors. + * The others require a restart transmit command. + */ + if (bdp->cbd_sc & + (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { + must_restart = 1; + cep->stats.tx_errors++; + } + + cep->stats.tx_packets++; + + /* Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. + */ + if (bdp->cbd_sc & BD_ENET_TX_DEF) + cep->stats.collisions++; + + /* Free the sk buffer associated with this last transmit. */ + dev_kfree_rtskb(cep->tx_skbuff[cep->skb_dirty]); + cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK; + + /* Update pointer to next buffer descriptor to be transmitted. */ + if (bdp->cbd_sc & BD_ENET_TX_WRAP) + bdp = cep->tx_bd_base; + else + bdp++; + + /* I don't know if we can be held off from processing these + * interrupts for more than one frame time. I really hope + * not. In such a case, we would now want to check the + * currently available BD (cur_tx) and determine if any + * buffers between the dirty_tx and cur_tx have also been + * sent. We would want to process anything in between that + * does not have BD_ENET_TX_READY set. + */ + + /* Since we have freed up a buffer, the ring is no longer + * full. + */ + if (cep->tx_full) { + cep->tx_full = 0; + if (rtnetif_queue_stopped(rtdev)) + rtnetif_wake_queue(rtdev); + } + + cep->dirty_tx = (cbd_t *)bdp; + } + + if (must_restart) { + volatile cpm8260_t *cp; + + /* Some transmit errors cause the transmitter to shut + * down. We now issue a restart transmit. Since the + * errors close the BD and update the pointers, the restart + * _should_ pick up without having to reset any of our + * pointers either. Also, To workaround 8260 device erratum + * CPM37, we must disable and then re-enable the transmitter + * following a Late Collision, Underrun, or Retry Limit error. + */ + cep->fccp->fcc_gfmr &= ~FCC_GFMR_ENT; +#ifdef ORIGINAL_VERSION + udelay(10); /* wait a few microseconds just on principle */ +#endif + cep->fccp->fcc_gfmr |= FCC_GFMR_ENT; + + cp = cpmp; + cp->cp_cpcr = + mk_cr_cmd(cep->fip->fc_cpmpage, cep->fip->fc_cpmblock, + 0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG; + while (cp->cp_cpcr & CPM_CR_FLG); // looks suspicious - how long may it take? + } + rtdm_lock_put(&cep->lock); + } + + /* Check for receive busy, i.e. packets coming but no place to + * put them. + */ + if (int_events & FCC_ENET_BSY) { + cep->stats.rx_dropped++; + } + + if (packets > 0) + rt_mark_stack_mgr(rtdev); + return RTDM_IRQ_HANDLED; +} + +/* During a receive, the cur_rx points to the current incoming buffer. + * When we update through the ring, if the next incoming buffer has + * not been given to the system, we just set the empty indicator, + * effectively tossing the packet. 
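+ *
+ * In outline, the receive loop below does the following, with deliver()
+ * standing in for the error accounting and rtskb copy/handoff logic:
+ *
+ *     while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
+ *         deliver(bdp);
+ *         bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+ *         bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+ *         bdp = (bdp->cbd_sc & BD_ENET_RX_WRAP) ? cep->rx_bd_base : bdp + 1;
+ *     }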
+ */ +static int +fcc_enet_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp) +{ + struct fcc_enet_private *cep; + volatile cbd_t *bdp; + struct rtskb *skb; + ushort pkt_len; + + RT_DEBUG(__FUNCTION__": ...\n"); + + cep = (struct fcc_enet_private *)rtdev->priv; + + /* First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = cep->cur_rx; + +for (;;) { + if (bdp->cbd_sc & BD_ENET_RX_EMPTY) + break; + +#ifndef final_version + /* Since we have allocated space to hold a complete frame, both + * the first and last indicators should be set. + */ + if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) != + (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) + rtdm_printk("CPM ENET: rcv is not first+last\n"); +#endif + + /* Frame too long or too short. */ + if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) + cep->stats.rx_length_errors++; + if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */ + cep->stats.rx_frame_errors++; + if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */ + cep->stats.rx_crc_errors++; + if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */ + cep->stats.rx_crc_errors++; + if (bdp->cbd_sc & BD_ENET_RX_CL) /* Late Collision */ + cep->stats.rx_frame_errors++; + + if (!(bdp->cbd_sc & + (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | BD_ENET_RX_CR + | BD_ENET_RX_OV | BD_ENET_RX_CL))) + { + /* Process the incoming frame. */ + cep->stats.rx_packets++; + + /* Remove the FCS from the packet length. */ + pkt_len = bdp->cbd_datlen - 4; + cep->stats.rx_bytes += pkt_len; + + /* This does 16 byte alignment, much more than we need. */ + skb = rtnetdev_alloc_rtskb(rtdev, pkt_len); + + if (skb == NULL) { + rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name); + cep->stats.rx_dropped++; + } + else { + rtskb_put(skb,pkt_len); /* Make room */ + memcpy(skb->data, + (unsigned char *)__va(bdp->cbd_bufaddr), + pkt_len); + skb->protocol=rt_eth_type_trans(skb,rtdev); + skb->time_stamp = *time_stamp; + rtnetif_rx(skb); + (*packets)++; + } + } + + /* Clear the status flags for this buffer. */ + bdp->cbd_sc &= ~BD_ENET_RX_STATS; + + /* Mark the buffer empty. */ + bdp->cbd_sc |= BD_ENET_RX_EMPTY; + + /* Update BD pointer to next entry. */ + if (bdp->cbd_sc & BD_ENET_RX_WRAP) + bdp = cep->rx_bd_base; + else + bdp++; + + } + cep->cur_rx = (cbd_t *)bdp; + + return 0; +} + +static int +fcc_enet_close(struct rtnet_device *rtdev) +{ + /* Don't know what to do yet. */ + rtnetif_stop_queue(rtdev); + + return 0; +} + +static struct net_device_stats *fcc_enet_get_stats(struct rtnet_device *rtdev) +{ + struct fcc_enet_private *cep = (struct fcc_enet_private *)rtdev->priv; + + return &cep->stats; +} + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + +/* NOTE: Most of the following comes from the FEC driver for 860. The + * overall structure of MII code has been retained (as it's proved stable + * and well-tested), but actual transfer requests are processed "at once" + * instead of being queued (there's no interrupt-driven MII transfer + * mechanism, one has to toggle the data/clock bits manually). + */ +static int +mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *)) +{ + struct fcc_enet_private *fep; + int retval, tmp; + + /* Add PHY address to register command. 
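+ * mk_mii_read()/mk_mii_write() already place the 5-bit register number
+ * in bits 22:18 of the command word, so the 5-bit PHY address belongs
+ * in bits 27:23, hence the << 23 below.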
*/ + fep = dev->priv; + regval |= fep->phy_addr << 23; + + retval = 0; + + tmp = mii_send_receive(fep->fip, regval); + if (func) + func(tmp, dev); + + return retval; +} + +static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c) +{ + int k; + + if(!c) + return; + + for(k = 0; (c+k)->mii_data != mk_mii_end; k++) + mii_queue(dev, (c+k)->mii_data, (c+k)->funct); +} + +static void mii_parse_sr(uint mii_reg, struct net_device *dev) +{ + volatile struct fcc_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC); + + if (mii_reg & 0x0004) + s |= PHY_STAT_LINK; + if (mii_reg & 0x0010) + s |= PHY_STAT_FAULT; + if (mii_reg & 0x0020) + s |= PHY_STAT_ANC; + + fep->phy_status = s; + fep->link = (s & PHY_STAT_LINK) ? 1 : 0; +} + +static void mii_parse_cr(uint mii_reg, struct net_device *dev) +{ + volatile struct fcc_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP); + + if (mii_reg & 0x1000) + s |= PHY_CONF_ANE; + if (mii_reg & 0x4000) + s |= PHY_CONF_LOOP; + + fep->phy_status = s; +} + +static void mii_parse_anar(uint mii_reg, struct net_device *dev) +{ + volatile struct fcc_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_CONF_SPMASK); + + if (mii_reg & 0x0020) + s |= PHY_CONF_10HDX; + if (mii_reg & 0x0040) + s |= PHY_CONF_10FDX; + if (mii_reg & 0x0080) + s |= PHY_CONF_100HDX; + if (mii_reg & 0x00100) + s |= PHY_CONF_100FDX; + + fep->phy_status = s; +} + +/* Some boards don't have the MDIRQ line connected (PM826 is such a board) */ + +static void mii_waitfor_anc(uint mii_reg, struct net_device *dev) +{ + struct fcc_enet_private *fep; + int regval; + int i; + + fep = dev->priv; + regval = mk_mii_read(MII_REG_SR) | (fep->phy_addr << 23); + + for (i = 0; i < 1000; i++) + { + if (mii_send_receive(fep->fip, regval) & 0x20) + return; + udelay(10000); + } + + printk("%s: autonegotiation timeout\n", dev->name); +} + +/* ------------------------------------------------------------------------- */ +/* The Level one LXT970 is used by many boards */ + +#ifdef CONFIG_FCC_LXT970 + +#define MII_LXT970_MIRROR 16 /* Mirror register */ +#define MII_LXT970_IER 17 /* Interrupt Enable Register */ +#define MII_LXT970_ISR 18 /* Interrupt Status Register */ +#define MII_LXT970_CONFIG 19 /* Configuration Register */ +#define MII_LXT970_CSR 20 /* Chip Status Register */ + +static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev) +{ + volatile struct fcc_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + if (mii_reg & 0x0800) { + if (mii_reg & 0x1000) + s |= PHY_STAT_100FDX; + else + s |= PHY_STAT_100HDX; + } else { + if (mii_reg & 0x1000) + s |= PHY_STAT_10FDX; + else + s |= PHY_STAT_10HDX; + } + + fep->phy_status = s; +} + +static phy_info_t phy_info_lxt970 = { + 0x07810000, + "LXT970", + + (const phy_cmd_t []) { /* config */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup - enable interrupts */ + { mk_mii_write(MII_LXT970_IER, 0x0002), NULL }, + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + /* read SR and ISR to acknowledge */ + + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_LXT970_ISR), NULL }, + + /* find out the current status */ + + { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr }, + { mk_mii_end, } + }, + (const 
phy_cmd_t []) { /* shutdown - disable interrupts */
+ { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
+ { mk_mii_end, }
+ },
+};
+
+#endif /* CONFIG_FCC_LXT970 */
+
+/* ------------------------------------------------------------------------- */
+/* The Level One LXT971 is used on some of my custom boards */
+
+#ifdef CONFIG_FCC_LXT971
+
+/* register definitions for the 971 */
+
+#define MII_LXT971_PCR 16 /* Port Control Register */
+#define MII_LXT971_SR2 17 /* Status Register 2 */
+#define MII_LXT971_IER 18 /* Interrupt Enable Register */
+#define MII_LXT971_ISR 19 /* Interrupt Status Register */
+#define MII_LXT971_LCR 20 /* LED Control Register */
+#define MII_LXT971_TCR 30 /* Transmit Control Register */
+
+/*
+ * I had some nice ideas of running the MDIO faster...
+ * The 971 should support 8MHz and I tried it, but things acted really
+ * weird, so 2.5 MHz ought to be enough for anyone...
+ */
+
+static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
+{
+ volatile struct fcc_enet_private *fep = dev->priv;
+ uint s = fep->phy_status;
+
+ s &= ~(PHY_STAT_SPMASK);
+
+ if (mii_reg & 0x4000) {
+ if (mii_reg & 0x0200)
+ s |= PHY_STAT_100FDX;
+ else
+ s |= PHY_STAT_100HDX;
+ } else {
+ if (mii_reg & 0x0200)
+ s |= PHY_STAT_10FDX;
+ else
+ s |= PHY_STAT_10HDX;
+ }
+ if (mii_reg & 0x0008)
+ s |= PHY_STAT_FAULT;
+
+ fep->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt971 = {
+ 0x0001378e,
+ "LXT971",
+
+ (const phy_cmd_t []) { /* config */
+// { mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10 Mbps, HD */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup - enable interrupts */
+ { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+
+ /* Somehow the 971 reports the link as down on the first
+ * read after power-up, so read SR here once to get a valid
+ * value in ack_int */
+
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+#ifdef CONFIG_PM826
+ { mk_mii_read(MII_REG_SR), mii_waitfor_anc },
+#endif
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* ack_int */
+ /* find out the current status */
+
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
+
+ /* we only need to read ISR to acknowledge */
+
+ { mk_mii_read(MII_LXT971_ISR), NULL },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown - disable interrupts */
+ { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
+ { mk_mii_end, }
+ },
+};
+
+#endif /* CONFIG_FCC_LXT971 */
+
+
+/* ------------------------------------------------------------------------- */
+/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
+
+#ifdef CONFIG_FCC_QS6612
+
+/* register definitions */
+
+#define MII_QS6612_MCR 17 /* Mode Control Register */
+#define MII_QS6612_FTR 27 /* Factory Test Register */
+#define MII_QS6612_MCO 28 /* Misc. Control Register */
+#define MII_QS6612_ISR 29 /* Interrupt Source Register */
+#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
+#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg.
*/ + +static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev) +{ + volatile struct fcc_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + switch((mii_reg >> 2) & 7) { + case 1: s |= PHY_STAT_10HDX; break; + case 2: s |= PHY_STAT_100HDX; break; + case 5: s |= PHY_STAT_10FDX; break; + case 6: s |= PHY_STAT_100FDX; break; + } + + fep->phy_status = s; +} + +static phy_info_t phy_info_qs6612 = { + 0x00181440, + "QS6612", + + (const phy_cmd_t []) { /* config */ +// { mk_mii_write(MII_REG_ANAR, 0x061), NULL }, /* 10 Mbps */ + + /* The PHY powers up isolated on the RPX, + * so send a command to allow operation. + */ + + { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL }, + + /* parse cr and anar to get some info */ + + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup - enable interrupts */ + { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL }, + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + + /* we need to read ISR, SR and ANER to acknowledge */ + + { mk_mii_read(MII_QS6612_ISR), NULL }, + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_REG_ANER), NULL }, + + /* read pcr to get info */ + + { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL }, + { mk_mii_end, } + }, +}; + + +#endif /* CONFIG_FCC_QS6612 */ + +/* ------------------------------------------------------------------------- */ +/* The AMD Am79C873 PHY is on PM826 */ + +#ifdef CONFIG_FCC_AMD79C873 + +#define MII_79C873_IER 17 /* Interrupt Enable Register */ +#define MII_79C873_DR 18 /* Diagnostic Register */ + +static void mii_parse_79c873_cr(uint mii_reg, struct net_device *dev) +{ + volatile struct fcc_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + if (mii_reg & 0x2000) { + if (mii_reg & 0x0100) + s |= PHY_STAT_100FDX; + else + s |= PHY_STAT_100HDX; + } else { + if (mii_reg & 0x0100) + s |= PHY_STAT_10FDX; + else + s |= PHY_STAT_10HDX; + } + + fep->phy_status = s; +} + +static phy_info_t phy_info_79c873 = { + 0x00181b80, + "AMD79C873", + + (const phy_cmd_t []) { /* config */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup */ + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ +#ifdef CONFIG_PM826 + { mk_mii_read(MII_REG_SR), mii_waitfor_anc }, +#endif + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + /* read SR twice: to acknowledge and to get link status */ + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + + /* find out the current link parameters */ + + { mk_mii_read(MII_REG_CR), mii_parse_79c873_cr }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_write(MII_79C873_IER, 0x0000), NULL }, + { mk_mii_end, } + }, +}; + +#endif /* CONFIG_FCC_AMD79C873 */ + + +/* ------------------------------------------------------------------------- */ +/* The Davicom DM9131 is used on the HYMOD board */ + +#ifdef CONFIG_FCC_DM9131 + +/* register definitions */ + +#define MII_DM9131_ACR 16 /* Aux. Config Register */ +#define MII_DM9131_ACSR 17 /* Aux. 
Config/Status Register */ +#define MII_DM9131_10TCSR 18 /* 10BaseT Config/Status Reg. */ +#define MII_DM9131_INTR 21 /* Interrupt Register */ +#define MII_DM9131_RECR 22 /* Receive Error Counter Reg. */ +#define MII_DM9131_DISCR 23 /* Disconnect Counter Register */ + +static void mii_parse_dm9131_acsr(uint mii_reg, struct net_device *dev) +{ + volatile struct fcc_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + switch ((mii_reg >> 12) & 0xf) { + case 1: s |= PHY_STAT_10HDX; break; + case 2: s |= PHY_STAT_10FDX; break; + case 4: s |= PHY_STAT_100HDX; break; + case 8: s |= PHY_STAT_100FDX; break; + } + + fep->phy_status = s; +} + +static phy_info_t phy_info_dm9131 = { + 0x00181b80, + "DM9131", + + (const phy_cmd_t []) { /* config */ + /* parse cr and anar to get some info */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup - enable interrupts */ + { mk_mii_write(MII_DM9131_INTR, 0x0002), NULL }, + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + + /* we need to read INTR, SR and ANER to acknowledge */ + + { mk_mii_read(MII_DM9131_INTR), NULL }, + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_REG_ANER), NULL }, + + /* read acsr to get info */ + + { mk_mii_read(MII_DM9131_ACSR), mii_parse_dm9131_acsr }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_write(MII_DM9131_INTR, 0x0f00), NULL }, + { mk_mii_end, } + }, +}; + + +#endif /* CONFIG_FEC_DM9131 */ + + +static phy_info_t *phy_info[] = { + +#ifdef CONFIG_FCC_LXT970 + &phy_info_lxt970, +#endif /* CONFIG_FCC_LXT970 */ + +#ifdef CONFIG_FCC_LXT971 + &phy_info_lxt971, +#endif /* CONFIG_FCC_LXT971 */ + +#ifdef CONFIG_FCC_QS6612 + &phy_info_qs6612, +#endif /* CONFIG_FCC_QS6612 */ + +#ifdef CONFIG_FCC_DM9131 + &phy_info_dm9131, +#endif /* CONFIG_FCC_DM9131 */ + +#ifdef CONFIG_FCC_AMD79C873 + &phy_info_79c873, +#endif /* CONFIG_FCC_AMD79C873 */ + + NULL +}; + +static void mii_display_status(struct net_device *dev) +{ + volatile struct fcc_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + if (!fep->link && !fep->old_link) { + /* Link is still down - don't print anything */ + return; + } + + printk("%s: status: ", dev->name); + + if (!fep->link) { + printk("link down"); + } else { + printk("link up"); + + switch(s & PHY_STAT_SPMASK) { + case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break; + case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break; + case PHY_STAT_10FDX: printk(", 10 Mbps Full Duplex"); break; + case PHY_STAT_10HDX: printk(", 10 Mbps Half Duplex"); break; + default: + printk(", Unknown speed/duplex"); + } + + if (s & PHY_STAT_ANC) + printk(", auto-negotiation complete"); + } + + if (s & PHY_STAT_FAULT) + printk(", remote fault"); + + printk(".\n"); +} + +static void mii_display_config(struct net_device *dev) +{ + volatile struct fcc_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + printk("%s: config: auto-negotiation ", dev->name); + + if (s & PHY_CONF_ANE) + printk("on"); + else + printk("off"); + + if (s & PHY_CONF_100FDX) + printk(", 100FDX"); + if (s & PHY_CONF_100HDX) + printk(", 100HDX"); + if (s & PHY_CONF_10FDX) + printk(", 10FDX"); + if (s & PHY_CONF_10HDX) + printk(", 10HDX"); + if (!(s & PHY_CONF_SPMASK)) + printk(", No speed/duplex selected?"); + + if (s & PHY_CONF_LOOP) + printk(", loopback 
enabled"); + + printk(".\n"); + + fep->sequence_done = 1; +} + +static void mii_relink(struct net_device *dev) +{ + struct fcc_enet_private *fep = dev->priv; + int duplex; + + fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; + mii_display_status(dev); + fep->old_link = fep->link; + + if (fep->link) { + duplex = 0; + if (fep->phy_status + & (PHY_STAT_100FDX | PHY_STAT_10FDX)) + duplex = 1; + fcc_restart(dev, duplex); + } else { + fcc_stop(dev); + } +} + +static void mii_queue_relink(uint mii_reg, struct net_device *dev) +{ + struct fcc_enet_private *fep = dev->priv; + + fep->phy_task.routine = (void *)mii_relink; + fep->phy_task.data = dev; + schedule_task(&fep->phy_task); +} + +static void mii_queue_config(uint mii_reg, struct net_device *dev) +{ + struct fcc_enet_private *fep = dev->priv; + + fep->phy_task.routine = (void *)mii_display_config; + fep->phy_task.data = dev; + schedule_task(&fep->phy_task); +} + + + +phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_REG_CR), mii_queue_relink }, + { mk_mii_end, } }; +phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config }, + { mk_mii_end, } }; + + +/* Read remainder of PHY ID. +*/ +static void +mii_discover_phy3(uint mii_reg, struct net_device *dev) +{ + struct fcc_enet_private *fep; + int i; + + fep = dev->priv; + fep->phy_id |= (mii_reg & 0xffff); + + for(i = 0; phy_info[i]; i++) + if(phy_info[i]->id == (fep->phy_id >> 4)) + break; + + if(!phy_info[i]) + panic("%s: PHY id 0x%08x is not supported!\n", + dev->name, fep->phy_id); + + fep->phy = phy_info[i]; + + printk("%s: Phy @ 0x%x, type %s (0x%08x)\n", + dev->name, fep->phy_addr, fep->phy->name, fep->phy_id); +} + +/* Scan all of the MII PHY addresses looking for someone to respond + * with a valid ID. This usually happens quickly. + */ +static void +mii_discover_phy(uint mii_reg, struct net_device *dev) +{ + struct fcc_enet_private *fep; + uint phytype; + + fep = dev->priv; + + if ((phytype = (mii_reg & 0xfff)) != 0xfff && phytype != 0) { + + /* Got first part of ID, now get remainder. */ + fep->phy_id = phytype << 16; + mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3); + } else { + fep->phy_addr++; + if (fep->phy_addr < 32) { + mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), + mii_discover_phy); + } else { + printk("FCC: No PHY device found.\n"); + } + } +} + +/* This interrupt occurs when the PHY detects a link change. */ +#if !defined (CONFIG_PM826) +static void +mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs) +{ + struct net_device *dev = dev_id; + struct fcc_enet_private *fep = dev->priv; + + mii_do_cmd(dev, fep->phy->ack_int); + mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ +} +#endif /* !CONFIG_PM826 */ + +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + +#ifdef ORIGINAL_VERSION +/* Set or clear the multicast filter for this adaptor. + * Skeleton taken from sunlance driver. + * The CPM Ethernet implementation allows Multicast as well as individual + * MAC address filtering. Some of the drivers check to make sure it is + * a group multicast address, and discard those that are not. I guess I + * will do the same for now, but just remove the test if you want + * individual filtering as well (do the upper net layers want or support + * this kind of feature?). 
+ */ +static void +set_multicast_list(struct net_device *dev) +{ + struct fcc_enet_private *cep; + struct dev_mc_list *dmi; + u_char *mcptr, *tdptr; + volatile fcc_enet_t *ep; + int i, j; + + cep = (struct fcc_enet_private *)dev->priv; + +return; + /* Get pointer to FCC area in parameter RAM. + */ + ep = (fcc_enet_t *)dev->base_addr; + + if (dev->flags&IFF_PROMISC) { + + /* Log any net taps. */ + printk("%s: Promiscuous mode enabled.\n", dev->name); + cep->fccp->fcc_fpsmr |= FCC_PSMR_PRO; + } else { + + cep->fccp->fcc_fpsmr &= ~FCC_PSMR_PRO; + + if (dev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's. + */ + ep->fen_gaddrh = 0xffffffff; + ep->fen_gaddrl = 0xffffffff; + } + else { + /* Clear filter and add the addresses in the list. + */ + ep->fen_gaddrh = 0; + ep->fen_gaddrl = 0; + + dmi = dev->mc_list; + + for (i=0; i<dev->mc_count; i++) { + + /* Only support group multicast for now. + */ + if (!(dmi->dmi_addr[0] & 1)) + continue; + + /* The address in dmi_addr is LSB first, + * and taddr is MSB first. We have to + * copy bytes MSB first from dmi_addr. + */ + mcptr = (u_char *)dmi->dmi_addr + 5; + tdptr = (u_char *)&ep->fen_taddrh; + for (j=0; j<6; j++) + *tdptr++ = *mcptr--; + + /* Ask CPM to run CRC and set bit in + * filter mask. + */ + cpmp->cp_cpcr = mk_cr_cmd(cep->fip->fc_cpmpage, + cep->fip->fc_cpmblock, 0x0c, + CPM_CR_SET_GADDR) | CPM_CR_FLG; + udelay(10); + while (cpmp->cp_cpcr & CPM_CR_FLG); + } + } + } +} + + +/* Set the individual MAC address. + */ +int fcc_enet_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr= (struct sockaddr *) p; + struct fcc_enet_private *cep; + volatile fcc_enet_t *ep; + unsigned char *eap; + int i; + + cep = (struct fcc_enet_private *)(dev->priv); + ep = cep->ep; + + if (netif_running(dev)) + return -EBUSY; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + eap = (unsigned char *) &(ep->fen_paddrh); + for (i=5; i>=0; i--) + *eap++ = addr->sa_data[i]; + + return 0; +} +#endif /* ORIGINAL_VERSION */ + + +/* Initialize the CPM Ethernet on FCC. + */ +int __init fec_enet_init(void) +{ + struct rtnet_device *rtdev = NULL; + struct fcc_enet_private *cep; + fcc_info_t *fip; + int i, np; + volatile immap_t *immap; + volatile iop8260_t *io; + + immap = (immap_t *)IMAP_ADDR; /* and to internal registers */ + io = &immap->im_ioport; + + for (np = 0, fip = fcc_ports; + np < sizeof(fcc_ports) / sizeof(fcc_info_t); + np++, fip++) { + + /* Skip FCC ports not used for RTnet. + */ + if (np != rtnet_fcc - 1) continue; + + /* Allocate some private information and create an Ethernet device instance. + */ + if (!rx_pool_size) + rx_pool_size = RX_RING_SIZE * 2; + + rtdev = rt_alloc_etherdev(sizeof(struct fcc_enet_private), + rx_pool_size + TX_RING_SIZE); + if (rtdev == NULL) { + printk(KERN_ERR "fcc_enet: Could not allocate ethernet device.\n"); + return -1; + } + rtdev_alloc_name(rtdev, "rteth%d"); + rt_rtdev_connect(rtdev, &RTDEV_manager); + rtdev->vers = RTDEV_VERS_2_0; + + cep = (struct fcc_enet_private *)rtdev->priv; + rtdm_lock_init(&cep->lock); + cep->fip = fip; + fip->rtdev = rtdev; /* need for cleanup */ + + init_fcc_shutdown(fip, cep, immap); + init_fcc_ioports(fip, io, immap); + init_fcc_param(fip, rtdev, immap); + + rtdev->base_addr = (unsigned long)(cep->ep); + + /* The CPM Ethernet specific entries in the device + * structure. 
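+ * These bind the RTnet entry points: open/stop are invoked on interface
+ * up/down (e.g. via rtifconfig), hard_start_xmit transmits an rtskb, and
+ * hard_header/get_stats supply the Ethernet framing and statistics hooks.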
+ */ + rtdev->open = fcc_enet_open; + rtdev->hard_start_xmit = fcc_enet_start_xmit; + rtdev->stop = fcc_enet_close; + rtdev->hard_header = &rt_eth_header; + rtdev->get_stats = fcc_enet_get_stats; + + if ((i = rt_register_rtnetdev(rtdev))) { + rtdm_irq_disable(&cep->irq_handle); + rtdm_irq_free(&cep->irq_handle); + rtdev_free(rtdev); + return i; + } + init_fcc_startup(fip, rtdev); + + printk("%s: FCC%d ENET Version 0.4, %02x:%02x:%02x:%02x:%02x:%02x\n", + rtdev->name, fip->fc_fccnum + 1, + rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2], + rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]); + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + /* Queue up command to detect the PHY and initialize the + * remainder of the interface. + */ + cep->phy_addr = 0; + mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy); +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + } + + return 0; +} + +/* Make sure the device is shut down during initialization. +*/ +static void __init +init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep, + volatile immap_t *immap) +{ + volatile fcc_enet_t *ep; + volatile fcc_t *fccp; + + /* Get pointer to FCC area in parameter RAM. + */ + ep = (fcc_enet_t *)(&immap->im_dprambase[fip->fc_proff]); + + /* And another to the FCC register area. + */ + fccp = (volatile fcc_t *)(&immap->im_fcc[fip->fc_fccnum]); + cep->fccp = fccp; /* Keep the pointers handy */ + cep->ep = ep; + + /* Disable receive and transmit in case someone left it running. + */ + fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT); +} + +/* Initialize the I/O pins for the FCC Ethernet. +*/ +static void __init +init_fcc_ioports(fcc_info_t *fip, volatile iop8260_t *io, + volatile immap_t *immap) +{ + + /* FCC1 pins are on port A/C. FCC2/3 are port B/C. + */ + if (fip->fc_proff == PROFF_FCC1) { + /* Configure port A and C pins for FCC1 Ethernet. + */ + io->iop_pdira &= ~PA1_DIRA0; + io->iop_pdira |= PA1_DIRA1; + io->iop_psora &= ~PA1_PSORA0; + io->iop_psora |= PA1_PSORA1; + io->iop_ppara |= (PA1_DIRA0 | PA1_DIRA1); + } + if (fip->fc_proff == PROFF_FCC2) { + /* Configure port B and C pins for FCC Ethernet. + */ + io->iop_pdirb &= ~PB2_DIRB0; + io->iop_pdirb |= PB2_DIRB1; + io->iop_psorb &= ~PB2_PSORB0; + io->iop_psorb |= PB2_PSORB1; + io->iop_pparb |= (PB2_DIRB0 | PB2_DIRB1); + } + if (fip->fc_proff == PROFF_FCC3) { + /* Configure port B and C pins for FCC Ethernet. + */ + io->iop_pdirb &= ~PB3_DIRB0; + io->iop_pdirb |= PB3_DIRB1; + io->iop_psorb &= ~PB3_PSORB0; + io->iop_psorb |= PB3_PSORB1; + io->iop_pparb |= (PB3_DIRB0 | PB3_DIRB1); + } + + /* Port C has clocks...... + */ + io->iop_psorc &= ~(fip->fc_trxclocks); + io->iop_pdirc &= ~(fip->fc_trxclocks); + io->iop_pparc |= fip->fc_trxclocks; + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + /* ....and the MII serial clock/data. + */ +#ifndef CONFIG_PM826 + IOP_DAT(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck); + IOP_ODR(io,fip->fc_port) &= ~(fip->fc_mdio | fip->fc_mdck); +#endif /* CONFIG_PM826 */ + IOP_DIR(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck); + IOP_PAR(io,fip->fc_port) &= ~(fip->fc_mdio | fip->fc_mdck); +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + /* Configure Serial Interface clock routing. + * First, clear all FCC bits to zero, + * then set the ones we want. 
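+ * (CMXFCR routes the clocks of all FCCs in a single register, so only
+ * this port's field, fc_clockmask, is cleared; a plain assignment would
+ * clobber the clock routing of the other FCC ports.)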
+ */ + immap->im_cpmux.cmx_fcr &= ~(fip->fc_clockmask); + immap->im_cpmux.cmx_fcr |= fip->fc_clockroute; +} + +static void __init +init_fcc_param(fcc_info_t *fip, struct rtnet_device *rtdev, + volatile immap_t *immap) +{ + unsigned char *eap; + unsigned long mem_addr; + bd_t *bd; + int i, j; + struct fcc_enet_private *cep; + volatile fcc_enet_t *ep; + volatile cbd_t *bdp; + volatile cpm8260_t *cp; + + cep = (struct fcc_enet_private *)rtdev->priv; + ep = cep->ep; + cp = cpmp; + + bd = (bd_t *)__res; + + /* Zero the whole thing.....I must have missed some individually. + * It works when I do this. + */ + memset((char *)ep, 0, sizeof(fcc_enet_t)); + + /* Allocate space for the buffer descriptors in the DP ram. + * These are relative offsets in the DP ram address space. + * Initialize base addresses for the buffer descriptors. + */ + cep->rx_bd_base = (cbd_t *)m8260_cpm_hostalloc(sizeof(cbd_t) * RX_RING_SIZE, 8); + ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base); + cep->tx_bd_base = (cbd_t *)m8260_cpm_hostalloc(sizeof(cbd_t) * TX_RING_SIZE, 8); + ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base); + + cep->dirty_tx = cep->cur_tx = cep->tx_bd_base; + cep->cur_rx = cep->rx_bd_base; + + ep->fen_genfcc.fcc_rstate = (CPMFCR_GBL | CPMFCR_EB) << 24; + ep->fen_genfcc.fcc_tstate = (CPMFCR_GBL | CPMFCR_EB) << 24; + + /* Set maximum bytes per receive buffer. + * It must be a multiple of 32. + */ + ep->fen_genfcc.fcc_mrblr = PKT_MAXBLR_SIZE; + + /* Allocate space in the reserved FCC area of DPRAM for the + * internal buffers. No one uses this space (yet), so we + * can do this. Later, we will add resource management for + * this area. + */ + mem_addr = CPM_FCC_SPECIAL_BASE + (fip->fc_fccnum * 128); + ep->fen_genfcc.fcc_riptr = mem_addr; + ep->fen_genfcc.fcc_tiptr = mem_addr+32; + ep->fen_padptr = mem_addr+64; + memset((char *)(&(immap->im_dprambase[(mem_addr+64)])), 0x88, 32); + + ep->fen_genfcc.fcc_rbptr = 0; + ep->fen_genfcc.fcc_tbptr = 0; + ep->fen_genfcc.fcc_rcrc = 0; + ep->fen_genfcc.fcc_tcrc = 0; + ep->fen_genfcc.fcc_res1 = 0; + ep->fen_genfcc.fcc_res2 = 0; + + ep->fen_camptr = 0; /* CAM isn't used in this driver */ + + /* Set CRC preset and mask. + */ + ep->fen_cmask = 0xdebb20e3; + ep->fen_cpres = 0xffffffff; + + ep->fen_crcec = 0; /* CRC Error counter */ + ep->fen_alec = 0; /* alignment error counter */ + ep->fen_disfc = 0; /* discard frame counter */ + ep->fen_retlim = 15; /* Retry limit threshold */ + ep->fen_pper = 0; /* Normal persistence */ + + /* Clear hash filter tables. + */ + ep->fen_gaddrh = 0; + ep->fen_gaddrl = 0; + ep->fen_iaddrh = 0; + ep->fen_iaddrl = 0; + + /* Clear the Out-of-sequence TxBD. + */ + ep->fen_tfcstat = 0; + ep->fen_tfclen = 0; + ep->fen_tfcptr = 0; + + ep->fen_mflr = PKT_MAXBUF_SIZE; /* maximum frame length register */ + ep->fen_minflr = PKT_MINBUF_SIZE; /* minimum frame length register */ + + /* Set Ethernet station address. + * + * This is supplied in the board information structure, so we + * copy that into the controller. 
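+ * Note the byte order: the copy loops below start with dev_addr[5] and
+ * work down to dev_addr[0], i.e. the controller expects the station
+ * address stored lowest-order byte first in fen_paddrh/m/l.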
+ */ + eap = (unsigned char *)&(ep->fen_paddrh); +#if defined(CONFIG_CPU86) || defined(CONFIG_TQM8260) + /* + * TQM8260 and CPU86 use sequential MAC addresses + */ + *eap++ = rtdev->dev_addr[5] = bd->bi_enetaddr[5] + fip->fc_fccnum; + for (i=4; i>=0; i--) { + *eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i]; + } +#elif defined(CONFIG_PM826) + *eap++ = rtdev->dev_addr[5] = bd->bi_enetaddr[5] + fip->fc_fccnum + 1; + for (i=4; i>=0; i--) { + *eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i]; + } +#else + /* + * So, far we have only been given one Ethernet address. We make + * it unique by toggling selected bits in the upper byte of the + * non-static part of the address (for the second and third ports, + * the first port uses the address supplied as is). + */ + for (i=5; i>=0; i--) { + if (i == 3 && fip->fc_fccnum != 0) { + rtdev->dev_addr[i] = bd->bi_enetaddr[i]; + rtdev->dev_addr[i] ^= (1 << (7 - fip->fc_fccnum)); + *eap++ = dev->dev_addr[i]; + } + else { + *eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i]; + } + } +#endif + + ep->fen_taddrh = 0; + ep->fen_taddrm = 0; + ep->fen_taddrl = 0; + + ep->fen_maxd1 = PKT_MAXDMA_SIZE; /* maximum DMA1 length */ + ep->fen_maxd2 = PKT_MAXDMA_SIZE; /* maximum DMA2 length */ + + /* Clear stat counters, in case we ever enable RMON. + */ + ep->fen_octc = 0; + ep->fen_colc = 0; + ep->fen_broc = 0; + ep->fen_mulc = 0; + ep->fen_uspc = 0; + ep->fen_frgc = 0; + ep->fen_ospc = 0; + ep->fen_jbrc = 0; + ep->fen_p64c = 0; + ep->fen_p65c = 0; + ep->fen_p128c = 0; + ep->fen_p256c = 0; + ep->fen_p512c = 0; + ep->fen_p1024c = 0; + + ep->fen_rfthr = 0; /* Suggested by manual */ + ep->fen_rfcnt = 0; + ep->fen_cftype = 0; + + /* Now allocate the host memory pages and initialize the + * buffer descriptors. + */ + bdp = cep->tx_bd_base; + for (i=0; i<TX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. + */ + bdp->cbd_sc = 0; + bdp->cbd_datlen = 0; + bdp->cbd_bufaddr = 0; + bdp++; + } + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + bdp = cep->rx_bd_base; + for (i=0; i<FCC_ENET_RX_PAGES; i++) { + + /* Allocate a page. + */ + mem_addr = __get_free_page(GFP_KERNEL); + + /* Initialize the BD for every fragment in the page. + */ + for (j=0; j<FCC_ENET_RX_FRPPG; j++) { + bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR; + bdp->cbd_datlen = 0; + bdp->cbd_bufaddr = __pa(mem_addr); + mem_addr += FCC_ENET_RX_FRSIZE; + bdp++; + } + } + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + /* Let's re-initialize the channel now. We have to do it later + * than the manual describes because we have just now finished + * the BD initialization. + */ + cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock, 0x0c, + CPM_CR_INIT_TRX) | CPM_CR_FLG; + while (cp->cp_cpcr & CPM_CR_FLG); + + cep->skb_cur = cep->skb_dirty = 0; +} + +/* Let 'er rip. +*/ +static void __init +init_fcc_startup(fcc_info_t *fip, struct rtnet_device *rtdev) +{ + volatile fcc_t *fccp; + struct fcc_enet_private *cep; + + cep = (struct fcc_enet_private *)rtdev->priv; + fccp = cep->fccp; + + fccp->fcc_fcce = 0xffff; /* Clear any pending events */ + + /* Enable interrupts for transmit error, complete frame + * received, and any transmit buffer we have also set the + * interrupt flag. + */ + fccp->fcc_fccm = (FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB); + + rt_stack_connect(rtdev, &STACK_manager); + + /* Install our interrupt handler. 
+ */ + if (rtdm_irq_request(&cep->irq_handle, fip->fc_interrupt, + fcc_enet_interrupt, 0, "rt_mpc8260_fcc_enet", rtdev)) { + printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq); + rtdev_free(rtdev); + return; + } + + +#if defined (CONFIG_XENO_DRIVERS_NET_USE_MDIO) && !defined (CONFIG_PM826) +# ifndef PHY_INTERRUPT +# error Want to use MDIO, but PHY_INTERRUPT not defined! +# endif + if (request_8xxirq(PHY_INTERRUPT, mii_link_interrupt, 0, + "mii", dev) < 0) + printk("Can't get MII IRQ %d\n", PHY_INTERRUPT); +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO, CONFIG_PM826 */ + + /* Set GFMR to enable Ethernet operating mode. + */ +#ifndef CONFIG_EST8260 + fccp->fcc_gfmr = (FCC_GFMR_TCI | FCC_GFMR_MODE_ENET); +#else + fccp->fcc_gfmr = FCC_GFMR_MODE_ENET; +#endif + + /* Set sync/delimiters. + */ + fccp->fcc_fdsr = 0xd555; + + /* Set protocol specific processing mode for Ethernet. + * This has to be adjusted for Full Duplex operation after we can + * determine how to detect that. + */ + fccp->fcc_fpsmr = FCC_PSMR_ENCRC; + +#ifdef CONFIG_ADS8260 + /* Enable the PHY. + */ + ads_csr_addr[1] |= BCSR1_FETH_RST; /* Remove reset */ + ads_csr_addr[1] &= ~BCSR1_FETHIEN; /* Enable */ +#endif + +#if defined(CONFIG_XENO_DRIVERS_NET_USE_MDIO) || defined(CONFIG_TQM8260) + /* start in full duplex mode, and negotiate speed */ + fcc_restart (rtdev, 1); +#else + /* start in half duplex mode */ + fcc_restart (rtdev, 0); +#endif +} + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +/* MII command/status interface. + * I'm not going to describe all of the details. You can find the + * protocol definition in many other places, including the data sheet + * of most PHY parts. + * I wonder what "they" were thinking (maybe weren't) when they leave + * the I2C in the CPM but I have to toggle these bits...... + * + * Timing is a critical, especially on faster CPU's ... + */ +#define MDIO_DELAY 5 + +#define FCC_MDIO(bit) do { \ + udelay(MDIO_DELAY); \ + if (bit) \ + IOP_DAT(io,fip->fc_port) |= fip->fc_mdio; \ + else \ + IOP_DAT(io,fip->fc_port) &= ~fip->fc_mdio; \ +} while(0) + +#define FCC_MDC(bit) do { \ + udelay(MDIO_DELAY); \ + if (bit) \ + IOP_DAT(io,fip->fc_port) |= fip->fc_mdck; \ + else \ + IOP_DAT(io,fip->fc_port) &= ~fip->fc_mdck; \ +} while(0) + +static uint +mii_send_receive(fcc_info_t *fip, uint cmd) +{ + uint retval; + int read_op, i, off; + volatile immap_t *immap; + volatile iop8260_t *io; + + immap = (immap_t *)IMAP_ADDR; + io = &immap->im_ioport; + + IOP_DIR(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck); + + read_op = ((cmd & 0xf0000000) == 0x60000000); + + /* Write preamble + */ + for (i = 0; i < 32; i++) + { + FCC_MDC(0); + FCC_MDIO(1); + FCC_MDC(1); + } + + /* Write data + */ + for (i = 0, off = 31; i < (read_op ? 
14 : 32); i++, --off)
+ {
+ FCC_MDC(0);
+ FCC_MDIO((cmd >> off) & 0x00000001);
+ FCC_MDC(1);
+ }
+
+ retval = cmd;
+
+ if (read_op)
+ {
+ retval >>= 16;
+
+ FCC_MDC(0);
+ IOP_DIR(io,fip->fc_port) &= ~fip->fc_mdio;
+ FCC_MDC(1);
+ FCC_MDC(0);
+
+ for (i = 0, off = 15; i < 16; i++, off--)
+ {
+ FCC_MDC(1);
+ udelay(MDIO_DELAY);
+ retval <<= 1;
+ if (IOP_DAT(io,fip->fc_port) & fip->fc_mdio)
+ retval++;
+ FCC_MDC(0);
+ }
+ }
+
+ IOP_DIR(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck);
+
+ for (i = 0; i < 32; i++)
+ {
+ FCC_MDC(0);
+ FCC_MDIO(1);
+ FCC_MDC(1);
+ }
+
+ return retval;
+}
+
+static void
+fcc_stop(struct net_device *dev)
+{
+ volatile fcc_t *fccp;
+ struct fcc_enet_private *fcp;
+
+ fcp = (struct fcc_enet_private *)(dev->priv);
+ fccp = fcp->fccp;
+
+ /* Disable transmit/receive */
+ fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT);
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+static void
+fcc_restart(struct rtnet_device *rtdev, int duplex)
+{
+ volatile fcc_t *fccp;
+ struct fcc_enet_private *fcp;
+
+ fcp = (struct fcc_enet_private *)rtdev->priv;
+ fccp = fcp->fccp;
+
+ if (duplex)
+ fccp->fcc_fpsmr |= (FCC_PSMR_FDE | FCC_PSMR_LPB);
+ else
+ fccp->fcc_fpsmr &= ~(FCC_PSMR_FDE | FCC_PSMR_LPB);
+
+ /* Enable transmit/receive */
+ fccp->fcc_gfmr |= FCC_GFMR_ENR | FCC_GFMR_ENT;
+}
+
+static int
+fcc_enet_open(struct rtnet_device *rtdev)
+{
+ struct fcc_enet_private *fep = rtdev->priv;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+ fep->sequence_done = 0;
+ fep->link = 0;
+
+ if (fep->phy) {
+ mii_do_cmd(dev, fep->phy->ack_int);
+ mii_do_cmd(dev, fep->phy->config);
+ mii_do_cmd(dev, phy_cmd_config); /* display configuration */
+ while(!fep->sequence_done)
+ schedule();
+
+ mii_do_cmd(dev, fep->phy->startup);
+#ifdef CONFIG_PM826
+ /* Read the autonegotiation results */
+ mii_do_cmd(dev, fep->phy->ack_int);
+ mii_do_cmd(dev, phy_cmd_relink);
+#endif /* CONFIG_PM826 */
+ rtnetif_start_queue(rtdev);
+ return 0; /* Success */
+ }
+ return -ENODEV; /* No PHY we understand */
+#else
+ fep->link = 1;
+ rtnetif_start_queue(rtdev);
+ return 0; /* Always succeed */
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+}
+
+static void __exit fcc_enet_cleanup(void)
+{
+ struct rtnet_device *rtdev;
+ volatile immap_t *immap = (immap_t *)IMAP_ADDR;
+ struct fcc_enet_private *cep;
+ fcc_info_t *fip;
+ int np;
+
+ for (np = 0, fip = fcc_ports;
+ np < sizeof(fcc_ports) / sizeof(fcc_info_t);
+ np++, fip++) {
+
+ /* Skip FCC ports not used for RTnet. */
+ if (np != rtnet_fcc - 1) continue;
+
+ rtdev = fip->rtdev;
+ cep = (struct fcc_enet_private *)rtdev->priv;
+
+ rtdm_irq_disable(&cep->irq_handle);
+ rtdm_irq_free(&cep->irq_handle);
+
+ init_fcc_shutdown(fip, cep, immap);
+ printk("%s: cleanup incomplete (m8260_cpm_dpfree does not exist)!\n",
+ rtdev->name);
+ rt_stack_disconnect(rtdev);
+ rt_unregister_rtnetdev(rtdev);
+ rt_rtdev_disconnect(rtdev);
+
+ printk("%s: unloaded\n", rtdev->name);
+ rtdev_free(rtdev);
+ }
+}
+
+module_init(fec_enet_init);
+module_exit(fcc_enet_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c
new file mode 100644
index 0000000..7fb0fcf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c
@@ -0,0 +1,1073 @@
+/*
+ * BK Id: SCCS/s.enet.c 1.24 01/19/02 03:07:14 dan
+ */
+/*
+ * Ethernet driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * I copied the basic skeleton from the lance driver, because I did not
+ * know how to write the Linux driver, but I did know how the LANCE worked.
+ *
+ * This version of the driver is somewhat selectable for the different
+ * processor/board combinations. It works for the boards I know about
+ * now, and should be easily modified to include others. Some of the
+ * configuration information is contained in <asm/commproc.h> and the
+ * remainder is here.
+ *
+ * Buffer descriptors are kept in the CPM dual port RAM, and the frame
+ * buffers are in the host memory.
+ *
+ * Right now, I am very wasteful with the buffers. I allocate memory
+ * pages and then divide them into 2K frame buffers. This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Ported to RTnet.
+ * Copyright (c) 2003 Wolfgang Grandegger (wg@denx.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+#include <asm/commproc.h>
+
+#include <rtnet_port.h>
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet MPC8xx SCC Ethernet driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size = 0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+static unsigned int rtnet_scc = 1; /* SCC1 */
+MODULE_PARM(rtnet_scc, "i");
+MODULE_PARM_DESC(rtnet_scc, "SCCx port for RTnet, x=1..3 (default=1)");
+
+#define RT_DEBUG(fmt,args...)
+
+/*
+ * Theory of Operation
+ *
+ * The MPC8xx CPM performs the Ethernet processing on SCC1. It can use
+ * an arbitrary number of buffers on byte boundaries, but must have at
+ * least two receive buffers to prevent constant overrun conditions.
+ *
+ * The buffer descriptors are allocated from the CPM dual port memory
+ * with the data buffers allocated from host memory, just like all other
+ * serial communication protocols. The host memory buffers are allocated
+ * from the free page pool, and then divided into smaller receive and
+ * transmit buffers. The size of the buffers should be a power of two,
+ * since that nicely divides the page. This creates a ring buffer
+ * structure similar to the LANCE and other controllers.
+ *
+ * Like the LANCE driver:
+ * The driver runs as two independent, single-threaded flows of control. One
+ * is the send-packet routine, which enforces single-threaded use by the
+ * cep->tx_busy flag. The other thread is the interrupt handler, which is
+ * single threaded by the hardware and other software.
+ *
+ * The send packet thread has partial control over the Tx ring and the
+ * 'cep->tx_busy' flag. It sets the tx_busy flag whenever it's queuing a Tx
+ * packet. If the next queue slot is empty, it clears the tx_busy flag when
+ * finished; otherwise it sets the 'lp->tx_full' flag.
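+ *
+ * A sketch of that handshake as implemented by the xmit and interrupt
+ * paths below (tx_full is the field actually used here):
+ *
+ *     xmit: queue BD; if (next BD still READY) { stop queue; tx_full = 1; }
+ *     irq:  reap finished BDs; if (tx_full) { tx_full = 0; wake queue; }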
+ * + * The MBX has a control register external to the MPC8xx that has some + * control of the Ethernet interface. Information is in the manual for + * your board. + * + * The RPX boards have an external control/status register. Consult the + * programming documents for details unique to your board. + * + * For the TQM8xx(L) modules, there is no control register interface. + * All functions are directly controlled using I/O pins. See <asm/commproc.h>. + */ + +/* The transmitter timeout + */ +#define TX_TIMEOUT (2*HZ) + +/* The number of Tx and Rx buffers. These are allocated from the page + * pool. The code may assume these are power of two, so it is best + * to keep them that size. + * We don't need to allocate pages for the transmitter. We just use + * the skbuffer directly. + */ +#define CPM_ENET_RX_PAGES 4 +#define CPM_ENET_RX_FRSIZE 2048 +#define CPM_ENET_RX_FRPPG (PAGE_SIZE / CPM_ENET_RX_FRSIZE) +#define RX_RING_SIZE (CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES) +#define TX_RING_SIZE 8 /* Must be power of two */ +#define TX_RING_MOD_MASK 7 /* for this to work */ + +/* The CPM stores dest/src/type, data, and checksum for receive packets. + */ +#define PKT_MAXBUF_SIZE 1518 +#define PKT_MINBUF_SIZE 64 +#define PKT_MAXBLR_SIZE 1520 + +/* The CPM buffer descriptors track the ring buffers. The rx_bd_base and + * tx_bd_base always point to the base of the buffer descriptors. The + * cur_rx and cur_tx point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct scc_enet_private { + /* The addresses of a Tx/Rx-in-place packets/buffers. */ + struct rtskb *tx_skbuff[TX_RING_SIZE]; + ushort skb_cur; + ushort skb_dirty; + + /* CPM dual port RAM relative addresses. + */ + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t *tx_bd_base; + cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ + cbd_t *dirty_tx; /* The ring entries to be free()ed. */ + scc_t *sccp; + + /* Virtual addresses for the receive buffers because we can't + * do a __va() on them anymore. + */ + unsigned char *rx_vaddr[RX_RING_SIZE]; + struct net_device_stats stats; + uint tx_full; + rtdm_lock_t lock; + rtdm_irq_t irq_handle; +}; + +static int scc_enet_open(struct rtnet_device *rtdev); +static int scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev); +static int scc_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp); +static int scc_enet_interrupt(rtdm_irq_t *irq_handle); +static int scc_enet_close(struct rtnet_device *rtdev); + +static struct net_device_stats *scc_enet_get_stats(struct rtnet_device *rtdev); +#ifdef ORIGINAL_VERSION +static void set_multicast_list(struct net_device *dev); +#endif + +#ifndef ORIGINAL_VERSION +static struct rtnet_device *rtdev_root = NULL; +#endif + +/* Typically, 860(T) boards use SCC1 for Ethernet, and other 8xx boards + * use SCC2. Some even may use SCC3. + * This is easily extended if necessary. + * These values are set when the driver is initialized. + */ +static int CPM_CR_ENET; +static int PROFF_ENET; +static int SCC_ENET; +static int CPMVEC_ENET; + +static int +scc_enet_open(struct rtnet_device *rtdev) +{ + /* I should reset the ring buffers here, but I don't yet know + * a simple way to do that. 
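+	 * Until then, opening the device only enables the transmit queue;
+	 * the rings themselves are set up once in scc_enet_init().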
+ */ + rtnetif_start_queue(rtdev); + + return 0; /* Always succeed */ +} + +static int +scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev) +{ + struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv; + volatile cbd_t *bdp; + rtdm_lockctx_t context; + + + RT_DEBUG(__FUNCTION__": ...\n"); + + /* Fill in a Tx ring entry */ + bdp = cep->cur_tx; + +#ifndef final_version + if (bdp->cbd_sc & BD_ENET_TX_READY) { + /* Ooops. All transmit buffers are full. Bail out. + * This should not happen, since cep->tx_busy should be set. + */ + rtdm_printk("%s: tx queue full!.\n", rtdev->name); + return 1; + } +#endif + + /* Clear all of the status flags. + */ + bdp->cbd_sc &= ~BD_ENET_TX_STATS; + + /* If the frame is short, tell CPM to pad it. + */ + if (skb->len <= ETH_ZLEN) + bdp->cbd_sc |= BD_ENET_TX_PAD; + else + bdp->cbd_sc &= ~BD_ENET_TX_PAD; + + /* Set buffer length and buffer pointer. + */ + bdp->cbd_datlen = skb->len; + bdp->cbd_bufaddr = __pa(skb->data); + + /* Save skb pointer. + */ + cep->tx_skbuff[cep->skb_cur] = skb; + + cep->stats.tx_bytes += skb->len; + cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK; + + /* Prevent interrupts from changing the Tx ring from underneath us. */ + // *** RTnet *** + rtdm_lock_get_irqsave(&cep->lock, context); + + /* Get and patch time stamp just before the transmission */ + if (skb->xmit_stamp) + *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); + + /* Push the data cache so the CPM does not get stale memory + * data. + */ + flush_dcache_range((unsigned long)(skb->data), + (unsigned long)(skb->data + skb->len)); + + + /* Send it on its way. Tell CPM its ready, interrupt when done, + * its the last BD of the frame, and to put the CRC on the end. + */ + bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC); + + /* If this was the last BD in the ring, start at the beginning again. + */ + if (bdp->cbd_sc & BD_ENET_TX_WRAP) + bdp = cep->tx_bd_base; + else + bdp++; + + if (bdp->cbd_sc & BD_ENET_TX_READY) { + rtnetif_stop_queue(rtdev); + cep->tx_full = 1; + } + + cep->cur_tx = (cbd_t *)bdp; + + // *** RTnet *** + rtdm_lock_put_irqrestore(&cep->lock, context); + + return 0; +} + +#ifdef ORIGINAL_VERSION +static void +scc_enet_timeout(struct net_device *dev) +{ + struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv; + + printk("%s: transmit timed out.\n", dev->name); + cep->stats.tx_errors++; +#ifndef final_version + { + int i; + cbd_t *bdp; + printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n", + cep->cur_tx, cep->tx_full ? " (full)" : "", + cep->cur_rx); + bdp = cep->tx_bd_base; + for (i = 0 ; i < TX_RING_SIZE; i++, bdp++) + printk("%04x %04x %08x\n", + bdp->cbd_sc, + bdp->cbd_datlen, + bdp->cbd_bufaddr); + bdp = cep->rx_bd_base; + for (i = 0 ; i < RX_RING_SIZE; i++, bdp++) + printk("%04x %04x %08x\n", + bdp->cbd_sc, + bdp->cbd_datlen, + bdp->cbd_bufaddr); + } +#endif + if (!cep->tx_full) + netif_wake_queue(dev); +} +#endif /* ORIGINAL_VERSION */ + +/* The interrupt handler. + * This is called from the CPM handler, not the MPC core interrupt. + */ +static int scc_enet_interrupt(rtdm_irq_t *irq_handle) +{ + struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); + int packets = 0; + struct scc_enet_private *cep; + volatile cbd_t *bdp; + ushort int_events; + int must_restart; + nanosecs_abs_t time_stamp = rtdm_clock_read(); + + + cep = (struct scc_enet_private *)rtdev->priv; + + /* Get the interrupt events that caused us to be here. 
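+	 * Writing the captured value back to SCCE below acknowledges
+	 * (clears) exactly the events just read, so events arriving in
+	 * between are not lost.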
+ */ + int_events = cep->sccp->scc_scce; + cep->sccp->scc_scce = int_events; + must_restart = 0; + + /* Handle receive event in its own function. + */ + if (int_events & SCCE_ENET_RXF) { + scc_enet_rx(rtdev, &packets, &time_stamp); + } + + /* Check for a transmit error. The manual is a little unclear + * about this, so the debug code until I get it figured out. It + * appears that if TXE is set, then TXB is not set. However, + * if carrier sense is lost during frame transmission, the TXE + * bit is set, "and continues the buffer transmission normally." + * I don't know if "normally" implies TXB is set when the buffer + * descriptor is closed.....trial and error :-). + */ + + /* Transmit OK, or non-fatal error. Update the buffer descriptors. + */ + if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) { + rtdm_lock_get(&cep->lock); + bdp = cep->dirty_tx; + while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) { + RT_DEBUG(__FUNCTION__": Tx ok\n"); + if ((bdp==cep->cur_tx) && (cep->tx_full == 0)) + break; + + if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ + cep->stats.tx_heartbeat_errors++; + if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ + cep->stats.tx_window_errors++; + if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ + cep->stats.tx_aborted_errors++; + if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ + cep->stats.tx_fifo_errors++; + if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ + cep->stats.tx_carrier_errors++; + + + /* No heartbeat or Lost carrier are not really bad errors. + * The others require a restart transmit command. + */ + if (bdp->cbd_sc & + (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { + must_restart = 1; + cep->stats.tx_errors++; + } + + cep->stats.tx_packets++; + + /* Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. + */ + if (bdp->cbd_sc & BD_ENET_TX_DEF) + cep->stats.collisions++; + + /* Free the sk buffer associated with this last transmit. + */ + dev_kfree_rtskb(cep->tx_skbuff[cep->skb_dirty]); + cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK; + + /* Update pointer to next buffer descriptor to be transmitted. + */ + if (bdp->cbd_sc & BD_ENET_TX_WRAP) + bdp = cep->tx_bd_base; + else + bdp++; + + /* I don't know if we can be held off from processing these + * interrupts for more than one frame time. I really hope + * not. In such a case, we would now want to check the + * currently available BD (cur_tx) and determine if any + * buffers between the dirty_tx and cur_tx have also been + * sent. We would want to process anything in between that + * does not have BD_ENET_TX_READY set. + */ + + /* Since we have freed up a buffer, the ring is no longer + * full. + */ + if (cep->tx_full) { + cep->tx_full = 0; + if (rtnetif_queue_stopped(rtdev)) + rtnetif_wake_queue(rtdev); + } + + cep->dirty_tx = (cbd_t *)bdp; + } + + if (must_restart) { + volatile cpm8xx_t *cp; + + /* Some transmit errors cause the transmitter to shut + * down. We now issue a restart transmit. Since the + * errors close the BD and update the pointers, the restart + * _should_ pick up without having to reset any of our + * pointers either. + */ + cp = cpmp; + cp->cp_cpcr = + mk_cr_cmd(CPM_CR_ENET, CPM_CR_RESTART_TX) | CPM_CR_FLG; + while (cp->cp_cpcr & CPM_CR_FLG); + } + rtdm_lock_put(&cep->lock); + } + + /* Check for receive busy, i.e. packets coming but no place to + * put them. This "can't happen" because the receive interrupt + * is tossing previous frames. 
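+	 * If it happens anyway, it is only accounted for in rx_dropped;
+	 * reception should resume by itself once empty descriptors are
+	 * handed back to the controller.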
+ */ + if (int_events & SCCE_ENET_BSY) { + cep->stats.rx_dropped++; + rtdm_printk("CPM ENET: BSY can't happen.\n"); + } + + if (packets > 0) + rt_mark_stack_mgr(rtdev); + return RTDM_IRQ_HANDLED; +} + +/* During a receive, the cur_rx points to the current incoming buffer. + * When we update through the ring, if the next incoming buffer has + * not been given to the system, we just set the empty indicator, + * effectively tossing the packet. + */ +static int +scc_enet_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp) +{ + struct scc_enet_private *cep; + volatile cbd_t *bdp; + ushort pkt_len; + struct rtskb *skb; + + RT_DEBUG(__FUNCTION__": ...\n"); + + cep = (struct scc_enet_private *)rtdev->priv; + + /* First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = cep->cur_rx; + + for (;;) { + + if (bdp->cbd_sc & BD_ENET_RX_EMPTY) + break; + +#ifndef final_version + /* Since we have allocated space to hold a complete frame, both + * the first and last indicators should be set. + */ + if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) != + (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) + rtdm_printk("CPM ENET: rcv is not first+last\n"); +#endif + + /* Frame too long or too short. + */ + if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) + cep->stats.rx_length_errors++; + if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */ + cep->stats.rx_frame_errors++; + if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */ + cep->stats.rx_crc_errors++; + if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */ + cep->stats.rx_crc_errors++; + + /* Report late collisions as a frame error. + * On this error, the BD is closed, but we don't know what we + * have in the buffer. So, just drop this frame on the floor. + */ + if (bdp->cbd_sc & BD_ENET_RX_CL) { + cep->stats.rx_frame_errors++; + } + else { + + /* Process the incoming frame. + */ + cep->stats.rx_packets++; + pkt_len = bdp->cbd_datlen; + cep->stats.rx_bytes += pkt_len; + + /* This does 16 byte alignment, much more than we need. + * The packet length includes FCS, but we don't want to + * include that when passing upstream as it messes up + * bridging applications. + */ + skb = rtnetdev_alloc_rtskb(rtdev, pkt_len-4); + if (skb == NULL) { + rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name); + cep->stats.rx_dropped++; + } + else { + rtskb_put(skb,pkt_len-4); /* Make room */ + memcpy(skb->data, + cep->rx_vaddr[bdp - cep->rx_bd_base], + pkt_len-4); + skb->protocol=rt_eth_type_trans(skb,rtdev); + skb->time_stamp = *time_stamp; + rtnetif_rx(skb); + (*packets)++; + } + } + + /* Clear the status flags for this buffer. + */ + bdp->cbd_sc &= ~BD_ENET_RX_STATS; + + /* Mark the buffer empty. + */ + bdp->cbd_sc |= BD_ENET_RX_EMPTY; + + /* Update BD pointer to next entry. + */ + if (bdp->cbd_sc & BD_ENET_RX_WRAP) + bdp = cep->rx_bd_base; + else + bdp++; + + } + cep->cur_rx = (cbd_t *)bdp; + + return 0; +} + +static int +scc_enet_close(struct rtnet_device *rtdev) +{ + /* Don't know what to do yet. + */ + rtnetif_stop_queue(rtdev); + + return 0; +} + +static struct net_device_stats *scc_enet_get_stats(struct rtnet_device *rtdev) +{ + struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv; + + return &cep->stats; +} + +#ifdef ORIGINAL_VERSION +/* Set or clear the multicast filter for this adaptor. + * Skeleton taken from sunlance driver. + * The CPM Ethernet implementation allows Multicast as well as individual + * MAC address filtering. 
Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not. I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+	struct scc_enet_private *cep;
+	struct dev_mc_list *dmi;
+	u_char *mcptr, *tdptr;
+	volatile scc_enet_t *ep;
+	int i, j;
+	cep = (struct scc_enet_private *)dev->priv;
+
+	/* Get pointer to SCC area in parameter RAM.
+	 */
+	ep = (scc_enet_t *)dev->base_addr;
+
+	if (dev->flags&IFF_PROMISC) {
+
+		/* Log any net taps. */
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		cep->sccp->scc_pmsr |= SCC_PMSR_PRO;
+	} else {
+
+		cep->sccp->scc_pmsr &= ~SCC_PMSR_PRO;
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Catch all multicast addresses, so set the
+			 * filter to all 1's.
+			 */
+			ep->sen_gaddr1 = 0xffff;
+			ep->sen_gaddr2 = 0xffff;
+			ep->sen_gaddr3 = 0xffff;
+			ep->sen_gaddr4 = 0xffff;
+		}
+		else {
+			/* Clear filter and add the addresses in the list.
+			 */
+			ep->sen_gaddr1 = 0;
+			ep->sen_gaddr2 = 0;
+			ep->sen_gaddr3 = 0;
+			ep->sen_gaddr4 = 0;
+
+			dmi = dev->mc_list;
+
+			for (i=0; i<dev->mc_count; i++) {
+
+				/* Only support group multicast for now.
+				 */
+				if (!(dmi->dmi_addr[0] & 1))
+					continue;
+
+				/* The address in dmi_addr is LSB first,
+				 * and taddr is MSB first. We have to
+				 * copy bytes MSB first from dmi_addr.
+				 */
+				mcptr = (u_char *)dmi->dmi_addr + 5;
+				tdptr = (u_char *)&ep->sen_taddrh;
+				for (j=0; j<6; j++)
+					*tdptr++ = *mcptr--;
+
+				/* Ask CPM to run CRC and set bit in
+				 * filter mask.
+				 */
+				cpmp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_SET_GADDR) | CPM_CR_FLG;
+				/* this delay is necessary here -- Cort */
+				udelay(10);
+				while (cpmp->cp_cpcr & CPM_CR_FLG);
+			}
+		}
+	}
+}
+#endif /* ORIGINAL_VERSION */
+
+/* Initialize the CPM Ethernet on SCC. If EPPC-Bug loaded us, or performed
+ * some other network I/O, a whole bunch of this has already been set up.
+ * It is no big deal if we do it again, we just have to disable the
+ * transmit and receive to make sure we don't catch the CPM with some
+ * inconsistent control information.
+ */
+int __init scc_enet_init(void)
+{
+	struct rtnet_device *rtdev = NULL;
+	struct scc_enet_private *cep;
+	int i, j, k;
+	unsigned char *eap, *ba;
+	dma_addr_t mem_addr;
+	bd_t *bd;
+	volatile cbd_t *bdp;
+	volatile cpm8xx_t *cp;
+	volatile scc_t *sccp;
+	volatile scc_enet_t *ep;
+	volatile immap_t *immap;
+
+	cp = cpmp;	/* Get pointer to Communication Processor */
+
+	immap = (immap_t *)(mfspr(IMMR) & 0xFFFF0000);	/* and to internal registers */
+
+	bd = (bd_t *)__res;
+
+	/* Configure the SCC parameters (this has formerly been done
+	 * by macro definitions).
+	 */
+	switch (rtnet_scc) {
+	case 3:
+		CPM_CR_ENET = CPM_CR_CH_SCC3;
+		PROFF_ENET = PROFF_SCC3;
+		SCC_ENET = 2;		/* Index, not number! */
+		CPMVEC_ENET = CPMVEC_SCC3;
+		break;
+	case 2:
+		CPM_CR_ENET = CPM_CR_CH_SCC2;
+		PROFF_ENET = PROFF_SCC2;
+		SCC_ENET = 1;		/* Index, not number! */
+		CPMVEC_ENET = CPMVEC_SCC2;
+		break;
+	case 1:
+		CPM_CR_ENET = CPM_CR_CH_SCC1;
+		PROFF_ENET = PROFF_SCC1;
+		SCC_ENET = 0;		/* Index, not number! */
+		CPMVEC_ENET = CPMVEC_SCC1;
+		break;
+	default:
+		printk(KERN_ERR "enet: SCC%d doesn't exist (check rtnet_scc)\n", rtnet_scc);
+		return -1;
+	}
+
+	/* Allocate some private information and create an Ethernet device instance.
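+	 * The rx_pool_size module parameter defaults to twice the ring
+	 * size, presumably to leave headroom for packets still queued in
+	 * the stack.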
+ */ + if (!rx_pool_size) + rx_pool_size = RX_RING_SIZE * 2; + rtdev = rtdev_root = rt_alloc_etherdev(sizeof(struct scc_enet_private), + rx_pool_size + TX_RING_SIZE); + if (rtdev == NULL) { + printk(KERN_ERR "enet: Could not allocate ethernet device.\n"); + return -1; + } + rtdev_alloc_name(rtdev, "rteth%d"); + rt_rtdev_connect(rtdev, &RTDEV_manager); + rtdev->vers = RTDEV_VERS_2_0; + + cep = (struct scc_enet_private *)rtdev->priv; + rtdm_lock_init(&cep->lock); + + /* Get pointer to SCC area in parameter RAM. + */ + ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]); + + /* And another to the SCC register area. + */ + sccp = (volatile scc_t *)(&cp->cp_scc[SCC_ENET]); + cep->sccp = (scc_t *)sccp; /* Keep the pointer handy */ + + /* Disable receive and transmit in case EPPC-Bug started it. + */ + sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + + /* Cookbook style from the MPC860 manual..... + * Not all of this is necessary if EPPC-Bug has initialized + * the network. + * So far we are lucky, all board configurations use the same + * pins, or at least the same I/O Port for these functions..... + * It can't last though...... + */ + +#if (defined(PA_ENET_RXD) && defined(PA_ENET_TXD)) + /* Configure port A pins for Txd and Rxd. + */ + immap->im_ioport.iop_papar |= (PA_ENET_RXD | PA_ENET_TXD); + immap->im_ioport.iop_padir &= ~(PA_ENET_RXD | PA_ENET_TXD); + immap->im_ioport.iop_paodr &= ~PA_ENET_TXD; +#elif (defined(PB_ENET_RXD) && defined(PB_ENET_TXD)) + /* Configure port B pins for Txd and Rxd. + */ + immap->im_cpm.cp_pbpar |= (PB_ENET_RXD | PB_ENET_TXD); + immap->im_cpm.cp_pbdir &= ~(PB_ENET_RXD | PB_ENET_TXD); + immap->im_cpm.cp_pbodr &= ~PB_ENET_TXD; +#else +#error Exactly ONE pair of PA_ENET_[RT]XD, PB_ENET_[RT]XD must be defined +#endif + +#if defined(PC_ENET_LBK) + /* Configure port C pins to disable External Loopback + */ + immap->im_ioport.iop_pcpar &= ~PC_ENET_LBK; + immap->im_ioport.iop_pcdir |= PC_ENET_LBK; + immap->im_ioport.iop_pcso &= ~PC_ENET_LBK; + immap->im_ioport.iop_pcdat &= ~PC_ENET_LBK; /* Disable Loopback */ +#endif /* PC_ENET_LBK */ + + /* Configure port C pins to enable CLSN and RENA. + */ + immap->im_ioport.iop_pcpar &= ~(PC_ENET_CLSN | PC_ENET_RENA); + immap->im_ioport.iop_pcdir &= ~(PC_ENET_CLSN | PC_ENET_RENA); + immap->im_ioport.iop_pcso |= (PC_ENET_CLSN | PC_ENET_RENA); + + /* Configure port A for TCLK and RCLK. + */ + immap->im_ioport.iop_papar |= (PA_ENET_TCLK | PA_ENET_RCLK); + immap->im_ioport.iop_padir &= ~(PA_ENET_TCLK | PA_ENET_RCLK); + + /* Configure Serial Interface clock routing. + * First, clear all SCC bits to zero, then set the ones we want. + */ + cp->cp_sicr &= ~SICR_ENET_MASK; + cp->cp_sicr |= SICR_ENET_CLKRT; + + /* Manual says set SDDR, but I can't find anything with that + * name. I think it is a misprint, and should be SDCR. This + * has already been set by the communication processor initialization. + */ + + /* Allocate space for the buffer descriptors in the DP ram. + * These are relative offsets in the DP ram address space. + * Initialize base addresses for the buffer descriptors. + */ + i = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE); + ep->sen_genscc.scc_rbase = i; + cep->rx_bd_base = (cbd_t *)&cp->cp_dpmem[i]; + + i = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE); + ep->sen_genscc.scc_tbase = i; + cep->tx_bd_base = (cbd_t *)&cp->cp_dpmem[i]; + + cep->dirty_tx = cep->cur_tx = cep->tx_bd_base; + cep->cur_rx = cep->rx_bd_base; + + /* Issue init Rx BD command for SCC. + * Manual says to perform an Init Rx parameters here. 
We have + * to perform both Rx and Tx because the SCC may have been + * already running. + * In addition, we have to do it later because we don't yet have + * all of the BD control/status set properly. + cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_RX) | CPM_CR_FLG; + while (cp->cp_cpcr & CPM_CR_FLG); + */ + + /* Initialize function code registers for big-endian. + */ + ep->sen_genscc.scc_rfcr = SCC_EB; + ep->sen_genscc.scc_tfcr = SCC_EB; + + /* Set maximum bytes per receive buffer. + * This appears to be an Ethernet frame size, not the buffer + * fragment size. It must be a multiple of four. + */ + ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE; + + /* Set CRC preset and mask. + */ + ep->sen_cpres = 0xffffffff; + ep->sen_cmask = 0xdebb20e3; + + ep->sen_crcec = 0; /* CRC Error counter */ + ep->sen_alec = 0; /* alignment error counter */ + ep->sen_disfc = 0; /* discard frame counter */ + + ep->sen_pads = 0x8888; /* Tx short frame pad character */ + ep->sen_retlim = 15; /* Retry limit threshold */ + + ep->sen_maxflr = PKT_MAXBUF_SIZE; /* maximum frame length register */ + ep->sen_minflr = PKT_MINBUF_SIZE; /* minimum frame length register */ + + ep->sen_maxd1 = PKT_MAXBLR_SIZE; /* maximum DMA1 length */ + ep->sen_maxd2 = PKT_MAXBLR_SIZE; /* maximum DMA2 length */ + + /* Clear hash tables. + */ + ep->sen_gaddr1 = 0; + ep->sen_gaddr2 = 0; + ep->sen_gaddr3 = 0; + ep->sen_gaddr4 = 0; + ep->sen_iaddr1 = 0; + ep->sen_iaddr2 = 0; + ep->sen_iaddr3 = 0; + ep->sen_iaddr4 = 0; + + /* Set Ethernet station address. + */ + eap = (unsigned char *)&(ep->sen_paddrh); +#ifdef CONFIG_FEC_ENET + /* We need a second MAC address if FEC is used by Linux */ + for (i=5; i>=0; i--) + *eap++ = rtdev->dev_addr[i] = (bd->bi_enetaddr[i] | + (i==3 ? 0x80 : 0)); +#else + for (i=5; i>=0; i--) + *eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i]; +#endif + + ep->sen_pper = 0; /* 'cause the book says so */ + ep->sen_taddrl = 0; /* temp address (LSB) */ + ep->sen_taddrm = 0; + ep->sen_taddrh = 0; /* temp address (MSB) */ + + /* Now allocate the host memory pages and initialize the + * buffer descriptors. + */ + bdp = cep->tx_bd_base; + for (i=0; i<TX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. + */ + bdp->cbd_sc = 0; + bdp->cbd_bufaddr = 0; + bdp++; + } + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + bdp = cep->rx_bd_base; + k = 0; + for (i=0; i<CPM_ENET_RX_PAGES; i++) { + + /* Allocate a page. + */ + ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, &mem_addr); + + /* Initialize the BD for every fragment in the page. + */ + for (j=0; j<CPM_ENET_RX_FRPPG; j++) { + bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR; + bdp->cbd_bufaddr = mem_addr; + cep->rx_vaddr[k++] = ba; + mem_addr += CPM_ENET_RX_FRSIZE; + ba += CPM_ENET_RX_FRSIZE; + bdp++; + } + } + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + /* Let's re-initialize the channel now. We have to do it later + * than the manual describes because we have just now finished + * the BD initialization. + */ + cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_TRX) | CPM_CR_FLG; + while (cp->cp_cpcr & CPM_CR_FLG); + + cep->skb_cur = cep->skb_dirty = 0; + + sccp->scc_scce = 0xffff; /* Clear any pending events */ + + /* Enable interrupts for transmit error, complete frame + * received, and any transmit buffer we have also set the + * interrupt flag. + */ + sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB); + + /* Install our interrupt handler. 
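+	 * The handler is registered through RTDM, so it runs in the
+	 * real-time (primary) domain rather than as a regular Linux
+	 * interrupt.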
+ */ + rtdev->irq = CPM_IRQ_OFFSET + CPMVEC_ENET; + rt_stack_connect(rtdev, &STACK_manager); + if ((i = rtdm_irq_request(&cep->irq_handle, rtdev->irq, + scc_enet_interrupt, 0, "rt_mpc8xx_enet", rtdev))) { + printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq); + rtdev_free(rtdev); + return i; + } + + + /* Set GSMR_H to enable all normal operating modes. + * Set GSMR_L to enable Ethernet to MC68160. + */ + sccp->scc_gsmrh = 0; + sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET); + + /* Set sync/delimiters. + */ + sccp->scc_dsr = 0xd555; + + /* Set processing mode. Use Ethernet CRC, catch broadcast, and + * start frame search 22 bit times after RENA. + */ + sccp->scc_pmsr = (SCC_PMSR_ENCRC | SCC_PMSR_NIB22); + + /* It is now OK to enable the Ethernet transmitter. + * Unfortunately, there are board implementation differences here. + */ +#if (!defined (PB_ENET_TENA) && defined (PC_ENET_TENA)) + immap->im_ioport.iop_pcpar |= PC_ENET_TENA; + immap->im_ioport.iop_pcdir &= ~PC_ENET_TENA; +#elif ( defined (PB_ENET_TENA) && !defined (PC_ENET_TENA)) + cp->cp_pbpar |= PB_ENET_TENA; + cp->cp_pbdir |= PB_ENET_TENA; +#else +#error Configuration Error: define exactly ONE of PB_ENET_TENA, PC_ENET_TENA +#endif + +#if defined(CONFIG_RPXLITE) || defined(CONFIG_RPXCLASSIC) + /* And while we are here, set the configuration to enable ethernet. + */ + *((volatile uint *)RPX_CSR_ADDR) &= ~BCSR0_ETHLPBK; + *((volatile uint *)RPX_CSR_ADDR) |= + (BCSR0_ETHEN | BCSR0_COLTESTDIS | BCSR0_FULLDPLXDIS); +#endif + +#ifdef CONFIG_BSEIP + /* BSE uses port B and C for PHY control. + */ + cp->cp_pbpar &= ~(PB_BSE_POWERUP | PB_BSE_FDXDIS); + cp->cp_pbdir |= (PB_BSE_POWERUP | PB_BSE_FDXDIS); + cp->cp_pbdat |= (PB_BSE_POWERUP | PB_BSE_FDXDIS); + + immap->im_ioport.iop_pcpar &= ~PC_BSE_LOOPBACK; + immap->im_ioport.iop_pcdir |= PC_BSE_LOOPBACK; + immap->im_ioport.iop_pcso &= ~PC_BSE_LOOPBACK; + immap->im_ioport.iop_pcdat &= ~PC_BSE_LOOPBACK; +#endif + +#ifdef CONFIG_FADS + cp->cp_pbpar |= PB_ENET_TENA; + cp->cp_pbdir |= PB_ENET_TENA; + + /* Enable the EEST PHY. + */ + *((volatile uint *)BCSR1) &= ~BCSR1_ETHEN; +#endif + + rtdev->base_addr = (unsigned long)ep; + + /* The CPM Ethernet specific entries in the device structure. */ + rtdev->open = scc_enet_open; + rtdev->hard_start_xmit = scc_enet_start_xmit; + rtdev->stop = scc_enet_close; + rtdev->hard_header = &rt_eth_header; + rtdev->get_stats = scc_enet_get_stats; + + if (!rx_pool_size) + rx_pool_size = RX_RING_SIZE * 2; + + if ((i = rt_register_rtnetdev(rtdev))) { + printk(KERN_ERR "Couldn't register rtdev\n"); + rtdm_irq_disable(&cep->irq_handle); + rtdm_irq_free(&cep->irq_handle); + rtdev_free(rtdev); + return i; + } + + /* And last, enable the transmit and receive processing. 
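+	 * Setting ENR/ENT in GSMR_L is what actually starts the
+	 * controller, so everything above has to be in place before this
+	 * point.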
+ */ + sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT); + + printk("%s: CPM ENET Version 0.2 on SCC%d, irq %d, addr %02x:%02x:%02x:%02x:%02x:%02x\n", + rtdev->name, SCC_ENET+1, rtdev->irq, + rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2], + rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]); + + return 0; +} + +static void __exit scc_enet_cleanup(void) +{ + struct rtnet_device *rtdev = rtdev_root; + struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv; + volatile cpm8xx_t *cp = cpmp; + volatile scc_enet_t *ep; + + if (rtdev) { + rtdm_irq_disable(&cep->irq_handle); + rtdm_irq_free(&cep->irq_handle); + + ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]); + m8xx_cpm_dpfree(ep->sen_genscc.scc_rbase); + m8xx_cpm_dpfree(ep->sen_genscc.scc_tbase); + + rt_stack_disconnect(rtdev); + rt_unregister_rtnetdev(rtdev); + rt_rtdev_disconnect(rtdev); + + printk("%s: unloaded\n", rtdev->name); + rtdev_free(rtdev); + rtdev_root = NULL; + } +} + +module_init(scc_enet_init); +module_exit(scc_enet_cleanup); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c new file mode 100644 index 0000000..e57f85a --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c @@ -0,0 +1,2341 @@ +/* + * BK Id: SCCS/s.fec.c 1.30 09/11/02 14:55:08 paulus + */ +/* + * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. + * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) + * + * This version of the driver is specific to the FADS implementation, + * since the board contains control registers external to the processor + * for the control of the LevelOne LXT970 transceiver. The MPC860T manual + * describes connections using the internal parallel port I/O, which + * is basically all of Port D. + * + * Includes support for the following PHYs: QS6612, LXT970, LXT971/2. + * + * Right now, I am very wasteful with the buffers. I allocate memory + * pages and then divide them into 2K frame buffers. This way I know I + * have buffers large enough to hold one frame within one buffer descriptor. + * Once I get this working, I will use 64 or 128 byte CPM buffers, which + * will be much more memory efficient and will easily handle lots of + * small packets. + * + * Much better multiple PHY support by Magnus Damm. + * Copyright (c) 2000 Ericsson Radio Systems AB. + * + * Make use of MII for PHY control configurable. + * Some fixes. + * Copyright (c) 2000-2002 Wolfgang Denk, DENX Software Engineering. + * + * Fixes for tx_full condition and relink when using MII. + * Support for AMD AM79C874 added. + * Thomas Lange, thomas@corelatus.com + * + * Added code for Multicast support, Frederic Goddeeris, Paul Geerinckx + * Copyright (c) 2002 Siemens Atea + * + * Ported to RTnet from "linuxppc_2_4_devel/arch/ppc/8xx_io/fec.c". 
+ * Copyright (c) 2003 Wolfgang Grandegger (wg@denx.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/uaccess.h>
+
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/commproc.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#error "MDIO for PHY configuration is not yet supported!"
+#endif
+
+#include <rtnet_port.h>
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet driver for the MPC8xx FEC Ethernet");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size = 0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+#define RT_DEBUG(fmt,args...)
+
+/* multicast support
+ */
+/* #define DEBUG_MULTICAST */
+
+/* CRC polynomial used by the FEC for the multicast group filtering
+ */
+#define FEC_CRC_POLY   0x04C11DB7
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* Forward declarations of some structures to support different PHYs
+*/
+
+typedef struct {
+	uint mii_data;
+	void (*funct)(uint mii_reg, struct net_device *dev, uint data);
+} phy_cmd_t;
+
+typedef struct {
+	uint id;
+	char *name;
+
+	const phy_cmd_t *config;
+	const phy_cmd_t *startup;
+	const phy_cmd_t *ack_int;
+	const phy_cmd_t *shutdown;
+} phy_info_t;
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+/* The number of Tx and Rx buffers. These are allocated from the page
+ * pool. The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter. We just use
+ * the skbuffer directly.
+ */
+#define FEC_ENET_RX_PAGES	4
+#define FEC_ENET_RX_FRSIZE	2048
+#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define TX_RING_SIZE		8	/* Must be power of two */
+#define TX_RING_MOD_MASK	7	/*   for this to work */
+
+/* Interrupt events/masks.
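+ * One bit per event in the FEC IEVENT register; the interrupt handler
+ * clears an event by writing its bit back to the register.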
+*/ +#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ +#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ +#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ +#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ +#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */ +#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ +#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */ +#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ +#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ +#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ + +/* +*/ +#define FEC_ECNTRL_PINMUX 0x00000004 +#define FEC_ECNTRL_ETHER_EN 0x00000002 +#define FEC_ECNTRL_RESET 0x00000001 + +#define FEC_RCNTRL_BC_REJ 0x00000010 +#define FEC_RCNTRL_PROM 0x00000008 +#define FEC_RCNTRL_MII_MODE 0x00000004 +#define FEC_RCNTRL_DRT 0x00000002 +#define FEC_RCNTRL_LOOP 0x00000001 + +#define FEC_TCNTRL_FDEN 0x00000004 +#define FEC_TCNTRL_HBC 0x00000002 +#define FEC_TCNTRL_GTS 0x00000001 + +/* Delay to wait for FEC reset command to complete (in us) +*/ +#define FEC_RESET_DELAY 50 + +/* The FEC stores dest/src/type, data, and checksum for receive packets. + */ +#define PKT_MAXBUF_SIZE 1518 +#define PKT_MINBUF_SIZE 64 +#define PKT_MAXBLR_SIZE 1520 + +/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and + * tx_bd_base always point to the base of the buffer descriptors. The + * cur_rx and cur_tx point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct fec_enet_private { + /* The addresses of a Tx/Rx-in-place packets/buffers. */ + struct rtskb *tx_skbuff[TX_RING_SIZE]; + ushort skb_cur; + ushort skb_dirty; + + /* CPM dual port RAM relative addresses. + */ + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t *tx_bd_base; + cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ + cbd_t *dirty_tx; /* The ring entries to be free()ed. */ + + /* Virtual addresses for the receive buffers because we can't + * do a __va() on them anymore. 
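+	 * (They are DMA-consistent allocations whose mapping is not part
+	 * of the kernel linear map, hence this separate lookup table.)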
+ */ + unsigned char *rx_vaddr[RX_RING_SIZE]; + + struct net_device_stats stats; + uint tx_full; + rtdm_lock_t lock; + rtdm_irq_t irq_handle; + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + uint phy_id; + uint phy_id_done; + uint phy_status; + uint phy_speed; + phy_info_t *phy; + struct tq_struct phy_task; + + uint sequence_done; + + uint phy_addr; + + struct timer_list phy_timer_list; + u16 old_status; +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + int link; + int old_link; + int full_duplex; + +}; + +static int fec_enet_open(struct rtnet_device *rtev); +static int fec_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev); +static void fec_enet_tx(struct rtnet_device *rtdev); +static void fec_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp); +static int fec_enet_interrupt(rtdm_irq_t *irq_handle); +static int fec_enet_close(struct rtnet_device *dev); +static void fec_restart(struct rtnet_device *rtdev, int duplex); +static void fec_stop(struct rtnet_device *rtdev); +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +static void fec_enet_mii(struct net_device *dev); +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ +static struct net_device_stats *fec_enet_get_stats(struct rtnet_device *rtdev); +#ifdef ORIGINAL_VERSION +static void set_multicast_list(struct net_device *dev); +#endif /* ORIGINAL_VERSION */ + +static struct rtnet_device *rtdev_root = NULL; /* for cleanup */ + +static ushort my_enet_addr[3]; + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr); + +static void mdio_callback(uint regval, struct net_device *dev, uint data); +static int mdio_read(struct net_device *dev, int phy_id, int location); + +#if defined(CONFIG_FEC_DP83846A) +static void mdio_timer_callback(unsigned long data); +#endif /* CONFIG_FEC_DP83846A */ + +/* MII processing. We keep this as simple as possible. Requests are + * placed on the list (if there is room). When the request is finished + * by the MII, an optional function may be called. + */ +typedef struct mii_list { + uint mii_regval; + void (*mii_func)(uint val, struct net_device *dev, uint data); + struct mii_list *mii_next; + uint mii_data; +} mii_list_t; + +#define NMII 20 +mii_list_t mii_cmds[NMII]; +mii_list_t *mii_free; +mii_list_t *mii_head; +mii_list_t *mii_tail; + +typedef struct mdio_read_data { + u16 regval; + struct task_struct *sleeping_task; +} mdio_read_data_t; + +static int mii_queue(struct net_device *dev, int request, + void (*func)(uint, struct net_device *, uint), uint data); +static void mii_queue_relink(uint mii_reg, struct net_device *dev, uint data); + +/* Make MII read/write commands for the FEC. +*/ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \ + (VAL & 0xffff)) +#define mk_mii_end 0 +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + +/* Transmitter timeout. +*/ +#define TX_TIMEOUT (2*HZ) + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +/* Register definitions for the PHY. 
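+ * Registers 0..8 below are the IEEE-standard MII set; registers 16 and
+ * up are vendor-specific and are defined per PHY further down.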
+*/ + +#define MII_REG_CR 0 /* Control Register */ +#define MII_REG_SR 1 /* Status Register */ +#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */ +#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */ +#define MII_REG_ANAR 4 /* A-N Advertisement Register */ +#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */ +#define MII_REG_ANER 6 /* A-N Expansion Register */ +#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */ +#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */ + +/* values for phy_status */ + +#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */ +#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */ +#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */ +#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */ +#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */ +#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */ +#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */ + +#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */ +#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */ +#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */ +#define PHY_STAT_SPMASK 0xf000 /* mask for speed */ +#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */ +#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */ +#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */ +#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */ +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + +static int +fec_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev) +{ + struct fec_enet_private *fep; + volatile fec_t *fecp; + volatile cbd_t *bdp; + rtdm_lockctx_t context; + + + RT_DEBUG(__FUNCTION__": ...\n"); + + fep = rtdev->priv; + fecp = (volatile fec_t*)rtdev->base_addr; + + if (!fep->link) { + /* Link is down or autonegotiation is in progress. */ + return 1; + } + + /* Fill in a Tx ring entry */ + bdp = fep->cur_tx; + +#ifndef final_version + if (bdp->cbd_sc & BD_ENET_TX_READY) { + /* Ooops. All transmit buffers are full. Bail out. + * This should not happen, since dev->tbusy should be set. + */ + rtdm_printk("%s: tx queue full!.\n", rtdev->name); + return 1; + } +#endif + + /* Clear all of the status flags. + */ + bdp->cbd_sc &= ~BD_ENET_TX_STATS; + + /* Set buffer length and buffer pointer. + */ + bdp->cbd_bufaddr = __pa(skb->data); + bdp->cbd_datlen = skb->len; + + /* Save skb pointer. + */ + fep->tx_skbuff[fep->skb_cur] = skb; + + fep->stats.tx_bytes += skb->len; + fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; + + rtdm_lock_get_irqsave(&fep->lock, context); + + /* Get and patch time stamp just before the transmission */ + if (skb->xmit_stamp) + *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); + + /* Push the data cache so the CPM does not get stale memory + * data. + */ + flush_dcache_range((unsigned long)skb->data, + (unsigned long)skb->data + skb->len); + + /* Send it on its way. Tell FEC its ready, interrupt when done, + * its the last BD of the frame, and to put the CRC on the end. + */ + + bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR + | BD_ENET_TX_LAST | BD_ENET_TX_TC); + + //rtdev->trans_start = jiffies; + + /* Trigger transmission start */ + fecp->fec_x_des_active = 0x01000000; + + /* If this was the last BD in the ring, start at the beginning again. 
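+	 * The WRAP bit was set on the last descriptor when the rings were
+	 * initialized, so driver and hardware walk the ring the same way.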
+ */
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
+		bdp = fep->tx_bd_base;
+	} else {
+		bdp++;
+	}
+
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		rtnetif_stop_queue(rtdev);
+		fep->tx_full = 1;
+	}
+
+	fep->cur_tx = (cbd_t *)bdp;
+
+	rtdm_lock_put_irqrestore(&fep->lock, context);
+
+	return 0;
+}
+
+#ifdef ORIGINAL_VERSION
+static void
+fec_timeout(struct net_device *dev)
+{
+	struct fec_enet_private *fep = dev->priv;
+
+	if (fep->link || fep->old_link) {
+		/* Link status changed - print timeout message */
+		printk("%s: transmit timed out.\n", dev->name);
+	}
+
+	fep->stats.tx_errors++;
+#ifndef final_version
+	if (fep->link) {
+		int i;
+		cbd_t *bdp;
+
+		printk ("Ring data dump: "
+			"cur_tx %p%s dirty_tx %p cur_rx %p\n",
+			fep->cur_tx,
+			fep->tx_full ? " (full)" : "",
+			fep->dirty_tx,
+			fep->cur_rx);
+
+		bdp = fep->tx_bd_base;
+		printk(" tx: %u buffers\n", TX_RING_SIZE);
+		for (i = 0 ; i < TX_RING_SIZE; i++) {
+			printk("  %08x: %04x %04x %08x\n",
+			       (uint) bdp,
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+			bdp++;
+		}
+
+		bdp = fep->rx_bd_base;
+		printk(" rx: %lu buffers\n", RX_RING_SIZE);
+		for (i = 0 ; i < RX_RING_SIZE; i++) {
+			printk("  %08x: %04x %04x %08x\n",
+			       (uint) bdp,
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+			bdp++;
+		}
+	}
+#endif
+	if (!fep->tx_full) {
+		netif_wake_queue(dev);
+	}
+}
+#endif /* ORIGINAL_VERSION */
+
+/* The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static int fec_enet_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	volatile fec_t *fecp;
+	uint int_events;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+
+	fecp = (volatile fec_t*)rtdev->base_addr;
+
+	/* Get the interrupt events that caused us to be here.
+	 */
+	while ((int_events = fecp->fec_ievent) != 0) {
+		fecp->fec_ievent = int_events;
+		if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR |
+				   FEC_ENET_BABT | FEC_ENET_EBERR)) != 0) {
+			rtdm_printk("FEC ERROR %x\n", int_events);
+		}
+
+		/* Handle receive event in its own function.
+		 */
+		if (int_events & FEC_ENET_RXF) {
+			fec_enet_rx(rtdev, &packets, &time_stamp);
+		}
+
+		/* Transmit OK, or non-fatal error.  Update the buffer
+		   descriptors.  FEC handles all errors, we just discover
+		   them as part of the transmit process.
+		 */
+		if (int_events & FEC_ENET_TXF) {
+			fec_enet_tx(rtdev);
+		}
+
+		if (int_events & FEC_ENET_MII) {
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+			fec_enet_mii(dev);
+#else
+			rtdm_printk("%s[%d] %s: unexpected FEC_ENET_MII event\n",
+				    __FILE__,__LINE__,__FUNCTION__);
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+		}
+
+	}
+
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+
+static void
+fec_enet_tx(struct rtnet_device *rtdev)
+{
+	struct rtskb *skb;
+	struct fec_enet_private *fep = rtdev->priv;
+	volatile cbd_t *bdp;
+	rtdm_lock_get(&fep->lock);
+	bdp = fep->dirty_tx;
+
+	while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) {
+		if (bdp == fep->cur_tx && fep->tx_full == 0) break;
+
+		skb = fep->tx_skbuff[fep->skb_dirty];
+		/* Check for errors.
*/ + if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | + BD_ENET_TX_RL | BD_ENET_TX_UN | + BD_ENET_TX_CSL)) { + fep->stats.tx_errors++; + if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ + fep->stats.tx_heartbeat_errors++; + if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ + fep->stats.tx_window_errors++; + if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ + fep->stats.tx_aborted_errors++; + if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ + fep->stats.tx_fifo_errors++; + if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ + fep->stats.tx_carrier_errors++; + } else { + fep->stats.tx_packets++; + } + +#ifndef final_version + if (bdp->cbd_sc & BD_ENET_TX_READY) + rtdm_printk("HEY! Enet xmit interrupt and TX_READY.\n"); +#endif + /* Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. + */ + if (bdp->cbd_sc & BD_ENET_TX_DEF) + fep->stats.collisions++; + + /* Free the sk buffer associated with this last transmit. + */ + dev_kfree_rtskb(skb); + fep->tx_skbuff[fep->skb_dirty] = NULL; + fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; + + /* Update pointer to next buffer descriptor to be transmitted. + */ + if (bdp->cbd_sc & BD_ENET_TX_WRAP) + bdp = fep->tx_bd_base; + else + bdp++; + + /* Since we have freed up a buffer, the ring is no longer + * full. + */ + if (fep->tx_full) { + fep->tx_full = 0; + if (rtnetif_queue_stopped(rtdev)) + rtnetif_wake_queue(rtdev); + } + } + fep->dirty_tx = (cbd_t *)bdp; + rtdm_lock_put(&fep->lock); +} + + +/* During a receive, the cur_rx points to the current incoming buffer. + * When we update through the ring, if the next incoming buffer has + * not been given to the system, we just set the empty indicator, + * effectively tossing the packet. + */ +static void +fec_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp) +{ + struct fec_enet_private *fep; + volatile fec_t *fecp; + volatile cbd_t *bdp; + struct rtskb *skb; + ushort pkt_len; + __u8 *data; + + fep = rtdev->priv; + fecp = (volatile fec_t*)rtdev->base_addr; + + /* First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = fep->cur_rx; + +while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) { + +#ifndef final_version + /* Since we have allocated space to hold a complete frame, + * the last indicator should be set. + */ + if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0) + rtdm_printk("FEC ENET: rcv is not +last\n"); +#endif + + /* Check for errors. */ + if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | + BD_ENET_RX_CR | BD_ENET_RX_OV)) { + fep->stats.rx_errors++; + if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { + /* Frame too long or too short. */ + fep->stats.rx_length_errors++; + } + if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */ + fep->stats.rx_frame_errors++; + if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */ + fep->stats.rx_crc_errors++; + if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */ + fep->stats.rx_crc_errors++; + } + + /* Report late collisions as a frame error. + * On this error, the BD is closed, but we don't know what we + * have in the buffer. So, just drop this frame on the floor. + */ + if (bdp->cbd_sc & BD_ENET_RX_CL) { + fep->stats.rx_errors++; + fep->stats.rx_frame_errors++; + goto rx_processing_done; + } + + /* Process the incoming frame. 
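+	 * Note that cbd_datlen includes the 4-byte FCS, which is stripped
+	 * below before the frame is handed to the stack.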
+ */ + fep->stats.rx_packets++; + pkt_len = bdp->cbd_datlen; + fep->stats.rx_bytes += pkt_len; + data = fep->rx_vaddr[bdp - fep->rx_bd_base]; + + /* This does 16 byte alignment, exactly what we need. + * The packet length includes FCS, but we don't want to + * include that when passing upstream as it messes up + * bridging applications. + */ + skb = rtnetdev_alloc_rtskb(rtdev, pkt_len-4); + + if (skb == NULL) { + rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name); + fep->stats.rx_dropped++; + } else { + rtskb_put(skb,pkt_len-4); /* Make room */ + memcpy(skb->data, data, pkt_len-4); + skb->protocol=rt_eth_type_trans(skb,rtdev); + skb->time_stamp = *time_stamp; + rtnetif_rx(skb); + (*packets)++; + } +rx_processing_done: + + /* Clear the status flags for this buffer. + */ + bdp->cbd_sc &= ~BD_ENET_RX_STATS; + + /* Mark the buffer empty. + */ + bdp->cbd_sc |= BD_ENET_RX_EMPTY; + + /* Update BD pointer to next entry. + */ + if (bdp->cbd_sc & BD_ENET_RX_WRAP) + bdp = fep->rx_bd_base; + else + bdp++; + + /* Doing this here will keep the FEC running while we process + * incoming frames. On a heavily loaded network, we should be + * able to keep up at the expense of system resources. + */ + fecp->fec_r_des_active = 0x01000000; + } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */ + fep->cur_rx = (cbd_t *)bdp; + +} + + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +static void +fec_enet_mii(struct net_device *dev) +{ + struct fec_enet_private *fep; + volatile fec_t *ep; + mii_list_t *mip; + uint mii_reg; + + fep = (struct fec_enet_private *)dev->priv; + ep = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec); + mii_reg = ep->fec_mii_data; + + if ((mip = mii_head) == NULL) { + printk("MII and no head!\n"); + return; + } + + if (mip->mii_func != NULL) + (*(mip->mii_func))(mii_reg, dev, mip->mii_data); + + mii_head = mip->mii_next; + mip->mii_next = mii_free; + mii_free = mip; + + if ((mip = mii_head) != NULL) { + ep->fec_mii_data = mip->mii_regval; + } +} + +static int +mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *, uint), uint data) +{ + struct fec_enet_private *fep; + unsigned long flags; + mii_list_t *mip; + int retval; + + /* Add PHY address to register command. + */ + fep = dev->priv; + regval |= fep->phy_addr << 23; + + retval = 0; + + save_flags(flags); + cli(); + + if ((mip = mii_free) != NULL) { + mii_free = mip->mii_next; + mip->mii_regval = regval; + mip->mii_func = func; + mip->mii_next = NULL; + mip->mii_data = data; + if (mii_head) { + mii_tail->mii_next = mip; + mii_tail = mip; + } else { + mii_head = mii_tail = mip; + (&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec))->fec_mii_data = regval; + } + } else { + retval = 1; + } + + restore_flags(flags); + + return(retval); +} + +static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c) +{ + int k; + + if(!c) + return; + + for(k = 0; (c+k)->mii_data != mk_mii_end; k++) + mii_queue(dev, (c+k)->mii_data, (c+k)->funct, 0); +} + +static void mii_parse_sr(uint mii_reg, struct net_device *dev, uint data) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC); + + if (mii_reg & 0x0004) + s |= PHY_STAT_LINK; + if (mii_reg & 0x0010) + s |= PHY_STAT_FAULT; + if (mii_reg & 0x0020) + s |= PHY_STAT_ANC; + + fep->phy_status = s; + fep->link = (s & PHY_STAT_LINK) ? 
1 : 0; +} + +static void mii_parse_cr(uint mii_reg, struct net_device *dev, uint data) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP); + + if (mii_reg & 0x1000) + s |= PHY_CONF_ANE; + if (mii_reg & 0x4000) + s |= PHY_CONF_LOOP; + + fep->phy_status = s; +} + +static void mii_parse_anar(uint mii_reg, struct net_device *dev, uint data) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_CONF_SPMASK); + + if (mii_reg & 0x0020) + s |= PHY_CONF_10HDX; + if (mii_reg & 0x0040) + s |= PHY_CONF_10FDX; + if (mii_reg & 0x0080) + s |= PHY_CONF_100HDX; + if (mii_reg & 0x0100) + s |= PHY_CONF_100FDX; + + fep->phy_status = s; +} + +/* ------------------------------------------------------------------------- */ +/* The Level one LXT970 is used by many boards */ + +#ifdef CONFIG_FEC_LXT970 + +#define MII_LXT970_MIRROR 16 /* Mirror register */ +#define MII_LXT970_IER 17 /* Interrupt Enable Register */ +#define MII_LXT970_ISR 18 /* Interrupt Status Register */ +#define MII_LXT970_CONFIG 19 /* Configuration Register */ +#define MII_LXT970_CSR 20 /* Chip Status Register */ + +static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev, uint data) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + if (mii_reg & 0x0800) { + if (mii_reg & 0x1000) + s |= PHY_STAT_100FDX; + else + s |= PHY_STAT_100HDX; + } + else { + if (mii_reg & 0x1000) + s |= PHY_STAT_10FDX; + else + s |= PHY_STAT_10HDX; + } + + fep->phy_status = s; +} + +static phy_info_t phy_info_lxt970 = { + 0x07810000, + "LXT970", + + (const phy_cmd_t []) { /* config */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup - enable interrupts */ + { mk_mii_write(MII_LXT970_IER, 0x0002), NULL }, + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + /* read SR and ISR to acknowledge */ + + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_LXT970_ISR), NULL }, + + /* find out the current status */ + + { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_write(MII_LXT970_IER, 0x0000), NULL }, + { mk_mii_end, } + }, +}; + +#endif /* CONFIG_FEC_LXT970 */ + +/* ------------------------------------------------------------------------- */ +/* The Level one LXT971 is used on some of my custom boards */ + +#ifdef CONFIG_FEC_LXT971 + +/* register definitions for the 971 */ + +#define MII_LXT971_PCR 16 /* Port Control Register */ +#define MII_LXT971_SR2 17 /* Status Register 2 */ +#define MII_LXT971_IER 18 /* Interrupt Enable Register */ +#define MII_LXT971_ISR 19 /* Interrupt Status Register */ +#define MII_LXT971_LCR 20 /* LED Control Register */ +#define MII_LXT971_TCR 30 /* Transmit Control Register */ + +/* + * I had some nice ideas of running the MDIO faster... + * The 971 should support 8MHz and I tried it, but things acted really + * weird, so 2.5 MHz ought to be enough for anyone... 
+ */ + +static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev, uint data) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + if (mii_reg & 0x4000) { + if (mii_reg & 0x0200) + s |= PHY_STAT_100FDX; + else + s |= PHY_STAT_100HDX; + } + else { + if (mii_reg & 0x0200) + s |= PHY_STAT_10FDX; + else + s |= PHY_STAT_10HDX; + } + if (mii_reg & 0x0008) + s |= PHY_STAT_FAULT; + + fep->phy_status = s; +} + +static phy_info_t phy_info_lxt971 = { + 0x0001378e, + "LXT971", + + (const phy_cmd_t []) { /* config */ +// { mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10 Mbps, HD */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup - enable interrupts */ + { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL }, + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ + + /* Somehow does the 971 tell me that the link is down + * the first read after power-up. + * read here to get a valid value in ack_int */ + + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + /* find out the current status */ + + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, + + /* we only need to read ISR to acknowledge */ + + { mk_mii_read(MII_LXT971_ISR), NULL }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_write(MII_LXT971_IER, 0x0000), NULL }, + { mk_mii_end, } + }, +}; + +#endif /* CONFIG_FEC_LXT971 */ + + +/* ------------------------------------------------------------------------- */ +/* The Quality Semiconductor QS6612 is used on the RPX CLLF */ + +#ifdef CONFIG_FEC_QS6612 + +/* register definitions */ + +#define MII_QS6612_MCR 17 /* Mode Control Register */ +#define MII_QS6612_FTR 27 /* Factory Test Register */ +#define MII_QS6612_MCO 28 /* Misc. Control Register */ +#define MII_QS6612_ISR 29 /* Interrupt Source Register */ +#define MII_QS6612_IMR 30 /* Interrupt Mask Register */ +#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */ + +static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev, uint data) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + switch((mii_reg >> 2) & 7) { + case 1: s |= PHY_STAT_10HDX; break; + case 2: s |= PHY_STAT_100HDX; break; + case 5: s |= PHY_STAT_10FDX; break; + case 6: s |= PHY_STAT_100FDX; break; + } + + fep->phy_status = s; +} + +static phy_info_t phy_info_qs6612 = { + 0x00181440, + "QS6612", + + (const phy_cmd_t []) { /* config */ +// { mk_mii_write(MII_REG_ANAR, 0x061), NULL }, /* 10 Mbps */ + + /* The PHY powers up isolated on the RPX, + * so send a command to allow operation. 
+ */ + + { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL }, + + /* parse cr and anar to get some info */ + + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup - enable interrupts */ + { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL }, + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + + /* we need to read ISR, SR and ANER to acknowledge */ + + { mk_mii_read(MII_QS6612_ISR), NULL }, + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_REG_ANER), NULL }, + + /* read pcr to get info */ + + { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL }, + { mk_mii_end, } + }, +}; + +#endif /* CONFIG_FEC_QS6612 */ + +/* ------------------------------------------------------------------------- */ +/* The Advanced Micro Devices AM79C874 is used on the ICU862 */ + +#ifdef CONFIG_FEC_AM79C874 + +/* register definitions for the 79C874 */ + +#define MII_AM79C874_MFR 16 /* Miscellaneous Features Register */ +#define MII_AM79C874_ICSR 17 /* Interrupt Control/Status Register */ +#define MII_AM79C874_DR 18 /* Diagnostic Register */ +#define MII_AM79C874_PMLR 19 /* Power Management & Loopback Register */ +#define MII_AM79C874_MCR 21 /* Mode Control Register */ +#define MII_AM79C874_DC 23 /* Disconnect Counter */ +#define MII_AM79C874_REC 24 /* Receiver Error Counter */ + +static void mii_parse_amd79c874_dr(uint mii_reg, struct net_device *dev, uint data) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + /* Register 18: Bit 10 is data rate, 11 is Duplex */ + switch ((mii_reg >> 10) & 3) { + case 0: s |= PHY_STAT_10HDX; break; + case 1: s |= PHY_STAT_100HDX; break; + case 2: s |= PHY_STAT_10FDX; break; + case 3: s |= PHY_STAT_100FDX; break; + } + + fep->phy_status = s; +} + +static phy_info_t phy_info_amd79c874 = { + 0x00022561, + "AM79C874", + + (const phy_cmd_t []) { /* config */ +// { mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10 Mbps, HD */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup - enable interrupts */ + { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL }, + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + /* find out the current status */ + + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_AM79C874_DR), mii_parse_amd79c874_dr }, + + /* we only need to read ICSR to acknowledge */ + + { mk_mii_read(MII_AM79C874_ICSR), NULL }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL }, + { mk_mii_end, } + }, +}; + +#endif /* CONFIG_FEC_AM79C874 */ + +/* -------------------------------------------------------------------- */ +/* The National Semiconductor DP83843BVJE is used on a Mediatrix board */ +/* -------------------------------------------------------------------- */ + +#ifdef CONFIG_FEC_DP83843 + +/* Register definitions */ +#define MII_DP83843_PHYSTS 0x10 /* PHY Status Register */ +#define MII_DP83843_MIPSCR 0x11 /* Specific Status Register */ +#define MII_DP83843_MIPGSR 0x12 /* Generic Status Register */ + +static void 
mii_parse_dp83843_physts(uint mii_reg, struct net_device *dev, uint data) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + s &= ~(PHY_STAT_SPMASK); + + if (mii_reg & 0x0002) + { + if (mii_reg & 0x0004) + s |= PHY_STAT_10FDX; + else + s |= PHY_STAT_10HDX; + } + else + { + if (mii_reg & 0x0004) + s |= PHY_STAT_100FDX; + else + s |= PHY_STAT_100HDX; + } + + fep->phy_status = s; +} + +static phy_info_t phy_info_dp83843 = { + 0x020005c1, + "DP83843BVJE", + + (const phy_cmd_t []) { /* config */ + { mk_mii_write(MII_REG_ANAR, 0x01E1), NULL }, /* Auto-Negotiation Register Control set to */ + /* auto-negotiate 10/100 Mbps, Half/Full duplex */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup */ + { mk_mii_write(MII_DP83843_MIPSCR, 0x0002), NULL }, /* Enable interrupts */ + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* Enable and Restart Auto-Negotiation */ + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_DP83843_PHYSTS), mii_parse_dp83843_physts }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + { mk_mii_read(MII_DP83843_MIPGSR), NULL }, /* Acknowledge interrupts */ + { mk_mii_read(MII_REG_SR), mii_parse_sr }, /* Find out the current status */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_DP83843_PHYSTS), mii_parse_dp83843_physts }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_end, } + } +}; + +#endif /* CONFIG_FEC_DP83843 */ + + +/* ----------------------------------------------------------------- */ +/* The National Semiconductor DP83846A is used on a Mediatrix board */ +/* ----------------------------------------------------------------- */ + +#ifdef CONFIG_FEC_DP83846A + +/* Register definitions */ +#define MII_DP83846A_PHYSTS 0x10 /* PHY Status Register */ + +static void mii_parse_dp83846a_physts(uint mii_reg, struct net_device *dev, uint data) +{ + volatile struct fec_enet_private *fep = (struct fec_enet_private *)dev->priv; + uint s = fep->phy_status; + int link_change_mask; + + s &= ~(PHY_STAT_SPMASK); + + if (mii_reg & 0x0002) { + if (mii_reg & 0x0004) + s |= PHY_STAT_10FDX; + else + s |= PHY_STAT_10HDX; + } + else { + if (mii_reg & 0x0004) + s |= PHY_STAT_100FDX; + else + s |= PHY_STAT_100HDX; + } + + fep->phy_status = s; + + link_change_mask = PHY_STAT_LINK | PHY_STAT_10FDX | PHY_STAT_10HDX | PHY_STAT_100FDX | PHY_STAT_100HDX; + if(fep->old_status != (link_change_mask & s)) + { + fep->old_status = (link_change_mask & s); + mii_queue_relink(mii_reg, dev, 0); + } +} + +static phy_info_t phy_info_dp83846a = { + 0x020005c2, + "DP83846A", + + (const phy_cmd_t []) { /* config */ + { mk_mii_write(MII_REG_ANAR, 0x01E1), NULL }, /* Auto-Negotiation Register Control set to */ + /* auto-negotiate 10/100 Mbps, Half/Full duplex */ + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* startup */ + { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* Enable and Restart Auto-Negotiation */ + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_DP83846A_PHYSTS), mii_parse_dp83846a_physts }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* ack_int */ + { mk_mii_read(MII_REG_SR), mii_parse_sr }, + { mk_mii_read(MII_REG_CR), mii_parse_cr }, + { mk_mii_read(MII_DP83846A_PHYSTS),
mii_parse_dp83846a_physts }, + { mk_mii_end, } + }, + (const phy_cmd_t []) { /* shutdown - disable interrupts */ + { mk_mii_end, } + } +}; + +#endif /* CONFIG_FEC_DP83846A */ + + +static phy_info_t *phy_info[] = { + +#ifdef CONFIG_FEC_LXT970 + &phy_info_lxt970, +#endif /* CONFIG_FEC_LXT970 */ + +#ifdef CONFIG_FEC_LXT971 + &phy_info_lxt971, +#endif /* CONFIG_FEC_LXT971 */ + +#ifdef CONFIG_FEC_QS6612 + &phy_info_qs6612, +#endif /* CONFIG_FEC_QS6612 */ + +#ifdef CONFIG_FEC_AM79C874 + &phy_info_amd79c874, +#endif /* CONFIG_FEC_AM79C874 */ + +#ifdef CONFIG_FEC_DP83843 + &phy_info_dp83843, +#endif /* CONFIG_FEC_DP83843 */ + +#ifdef CONFIG_FEC_DP83846A + &phy_info_dp83846a, +#endif /* CONFIG_FEC_DP83846A */ + + NULL +}; + +static void mii_display_status(struct net_device *dev) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + if (!fep->link && !fep->old_link) { + /* Link is still down - don't print anything */ + return; + } + + printk("%s: status: ", dev->name); + + if (!fep->link) { + printk("link down"); + } else { + printk("link up"); + + switch(s & PHY_STAT_SPMASK) { + case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break; + case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break; + case PHY_STAT_10FDX: printk(", 10 Mbps Full Duplex"); break; + case PHY_STAT_10HDX: printk(", 10 Mbps Half Duplex"); break; + default: + printk(", Unknown speed/duplex"); + } + + if (s & PHY_STAT_ANC) + printk(", auto-negotiation complete"); + } + + if (s & PHY_STAT_FAULT) + printk(", remote fault"); + + printk(".\n"); +} + +static void mii_display_config(struct net_device *dev) +{ + volatile struct fec_enet_private *fep = dev->priv; + uint s = fep->phy_status; + + printk("%s: config: auto-negotiation ", dev->name); + + if (s & PHY_CONF_ANE) + printk("on"); + else + printk("off"); + + if (s & PHY_CONF_100FDX) + printk(", 100FDX"); + if (s & PHY_CONF_100HDX) + printk(", 100HDX"); + if (s & PHY_CONF_10FDX) + printk(", 10FDX"); + if (s & PHY_CONF_10HDX) + printk(", 10HDX"); + if (!(s & PHY_CONF_SPMASK)) + printk(", No speed/duplex selected?"); + + if (s & PHY_CONF_LOOP) + printk(", loopback enabled"); + + printk(".\n"); + + fep->sequence_done = 1; +} + +static void mii_relink(struct net_device *dev) +{ + struct fec_enet_private *fep = dev->priv; + int duplex; + + fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; + mii_display_status(dev); + fep->old_link = fep->link; + + if (fep->link) { + duplex = 0; + if (fep->phy_status + & (PHY_STAT_100FDX | PHY_STAT_10FDX)) + duplex = 1; + fec_restart(dev, duplex); + + if (netif_queue_stopped(dev)) { + netif_wake_queue(dev); + } + } else { + netif_stop_queue(dev); + fec_stop(dev); + } +} + +static void mii_queue_relink(uint mii_reg, struct net_device *dev, uint data) +{ + struct fec_enet_private *fep = dev->priv; + + fep->phy_task.routine = (void *)mii_relink; + fep->phy_task.data = dev; + schedule_task(&fep->phy_task); +} + +static void mii_queue_config(uint mii_reg, struct net_device *dev, uint data) +{ + struct fec_enet_private *fep = dev->priv; + + fep->phy_task.routine = (void *)mii_display_config; + fep->phy_task.data = dev; + schedule_task(&fep->phy_task); +} + + + +phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_REG_CR), mii_queue_relink }, + { mk_mii_end, } }; +phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config }, + { mk_mii_end, } }; + + + +/* Read remainder of PHY ID. 
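mii_queue_relink() and mii_queue_config() above push the real work out of MII-callback context into the 2.4-era keventd task queue, since mii_relink() restarts the controller and pokes the queueing layer. On later kernels the identical deferral would be written with a work item; a hypothetical equivalent (phy_work and the dev back-pointer are illustrative names, not fields of this driver):

static void mii_relink_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, phy_work);

	mii_relink(fep->dev);	/* back-pointer stored at probe time */
}

/* phy_work is INIT_WORK()'d once at setup; the queue helper then reduces
 * to a single call: */
static void mii_queue_relink_sketch(uint mii_reg, struct net_device *dev,
				    uint data)
{
	struct fec_enet_private *fep = dev->priv;

	schedule_work(&fep->phy_work);
}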
+*/ +static void +mii_discover_phy3(uint mii_reg, struct net_device *dev, uint data) +{ + struct fec_enet_private *fep; + int i; + + fep = dev->priv; + fep->phy_id |= (mii_reg & 0xffff); + + for(i = 0; phy_info[i]; i++) + if(phy_info[i]->id == (fep->phy_id >> 4)) + break; + + if(!phy_info[i]) + panic("%s: PHY id 0x%08x is not supported!\n", + dev->name, fep->phy_id); + + fep->phy = phy_info[i]; + fep->phy_id_done = 1; + + printk("%s: Phy @ 0x%x, type %s (0x%08x)\n", + dev->name, fep->phy_addr, fep->phy->name, fep->phy_id); +} + +/* Scan all of the MII PHY addresses looking for someone to respond + * with a valid ID. This usually happens quickly. + */ +static void +mii_discover_phy(uint mii_reg, struct net_device *dev, uint data) +{ + struct fec_enet_private *fep; + uint phytype; + + fep = dev->priv; + + if ((phytype = (mii_reg & 0xffff)) != 0xffff) { + + /* Got first part of ID, now get remainder. + */ + fep->phy_id = phytype << 16; + mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3, 0); + } else { + fep->phy_addr++; + if (fep->phy_addr < 32) { + mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), + mii_discover_phy, 0); + } else { + printk("fec: No PHY device found.\n"); + } + } +} +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +/* This interrupt occurs when the PHY detects a link change. +*/ +static void +#ifdef CONFIG_RPXCLASSIC +mii_link_interrupt(void *dev_id) +#else +mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs) +#endif +{ + struct net_device *dev = dev_id; + struct fec_enet_private *fep = dev->priv; + volatile immap_t *immap = (immap_t *)IMAP_ADDR; + volatile fec_t *fecp = &(immap->im_cpm.cp_fec); + unsigned int ecntrl = fecp->fec_ecntrl; + + /* + * Acknowledge the interrupt if possible. If we have not + * found the PHY yet we can't process or acknowledge the + * interrupt now. Instead we ignore this interrupt for now, + * which we can do since it is edge triggered. It will be + * acknowledged later by fec_enet_open(). + */ + if (fep->phy) { + /* + * We need the FEC enabled to access the MII + */ + if ((ecntrl & FEC_ECNTRL_ETHER_EN) == 0) { + fecp->fec_ecntrl |= FEC_ECNTRL_ETHER_EN; + } + + mii_do_cmd(dev, fep->phy->ack_int); + mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ + + if ((ecntrl & FEC_ECNTRL_ETHER_EN) == 0) { + fecp->fec_ecntrl = ecntrl; /* restore old settings */ + } + } + +} +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + +static int +fec_enet_open(struct rtnet_device *rtdev) +{ + struct fec_enet_private *fep = rtdev->priv; + + /* I should reset the ring buffers here, but I don't yet know + * a simple way to do that. + */ + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + fep->sequence_done = 0; + fep->link = 0; + + if (fep->phy) { + mii_do_cmd(dev, fep->phy->config); + mii_do_cmd(dev, phy_cmd_config); /* display configuration */ + while(!fep->sequence_done) + schedule(); + + mii_do_cmd(dev, fep->phy->startup); + +#if defined(CONFIG_XENO_DRIVERS_NET_USE_MDIO) && defined(CONFIG_FEC_DP83846A) + if(fep->phy == &phy_info_dp83846a) + { + /* Initializing timers + */ + init_timer( &fep->phy_timer_list ); + + /* Starting timer for periodic link status check + * After 100 milli-seconds, mdio_timer_callback function is called. 
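A worked example of the two-stage ID assembly performed by mii_discover_phy() and mii_discover_phy3() above, using illustrative register values:

/* If MII_REG_PHYIR1 reads 0x0013 and MII_REG_PHYIR2 reads 0x78e2, then
 *     phy_id = (0x0013 << 16) | 0x78e2 = 0x001378e2
 *     phy_id >> 4                      = 0x0001378e
 * which matches phy_info_lxt971.id; the low nibble dropped by the shift
 * is the silicon revision. */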
+ */ + fep->phy_timer_list.expires = jiffies + (100 * HZ / 1000); + fep->phy_timer_list.data = (unsigned long)dev; + fep->phy_timer_list.function = mdio_timer_callback; + add_timer( &fep->phy_timer_list ); + } + +#if defined(CONFIG_IP_PNP) + rtdm_printk("%s: Waiting for the link to be up...\n", rtdev->name); + + while(fep->link == 0 || ((((volatile fec_t*)rtdev->base_addr)->fec_ecntrl & FEC_ECNTRL_ETHER_EN) == 0)) + { + schedule(); + } +#endif /* CONFIG_IP_PNP */ + +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO && CONFIG_FEC_DP83846A */ + + netif_start_queue(dev); + return 0; /* Success */ + } + return -ENODEV; /* No PHY we understand */ +#else /* !CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + fep->link = 1; + rtnetif_start_queue(rtdev); + + return 0; /* Success */ +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + +} + +static int +fec_enet_close(struct rtnet_device *rtdev) +{ + /* Don't know what to do yet. + */ + rtnetif_stop_queue(rtdev); + + fec_stop(rtdev); + + return 0; +} + +static struct net_device_stats *fec_enet_get_stats(struct rtnet_device *rtdev) +{ + struct fec_enet_private *fep = (struct fec_enet_private *)rtdev->priv; + + return &fep->stats; +} + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + +#if defined(CONFIG_FEC_DP83846A) +/* Execute the ack_int command set and schedules next timer call back. */ +static void mdio_timer_callback(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct fec_enet_private *fep = (struct fec_enet_private *)(dev->priv); + mii_do_cmd(dev, fep->phy->ack_int); + + if(fep->link == 0) + { + fep->phy_timer_list.expires = jiffies + (100 * HZ / 1000); /* Sleep for 100ms */ + } + else + { + fep->phy_timer_list.expires = jiffies + (1 * HZ); /* Sleep for 1 sec. */ + } + add_timer( &fep->phy_timer_list ); +} +#endif /* CONFIG_FEC_DP83846A */ + +static void mdio_callback(uint regval, struct net_device *dev, uint data) +{ + mdio_read_data_t* mrd = (mdio_read_data_t *)data; + mrd->regval = 0xFFFF & regval; + wake_up_process(mrd->sleeping_task); +} + +static int mdio_read(struct net_device *dev, int phy_id, int location) +{ + uint retval; + mdio_read_data_t* mrd = (mdio_read_data_t *)kmalloc(sizeof(*mrd), GFP_KERNEL); + + mrd->sleeping_task = current; + set_current_state(TASK_INTERRUPTIBLE); + mii_queue(dev, mk_mii_read(location), mdio_callback, (unsigned int) mrd); + schedule(); + + retval = mrd->regval; + + kfree(mrd); + + return retval; +} + +void mdio_write(struct net_device *dev, int phy_id, int location, int value) +{ + mii_queue(dev, mk_mii_write(location, value), NULL, 0); +} + +static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct fec_enet_private *cep = (struct fec_enet_private *)dev->priv; + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data; + + int phy = cep->phy_addr & 0x1f; + int retval; + + if (data == NULL) + { + retval = -EINVAL; + } + else + { + switch(cmd) + { + case SIOCETHTOOL: + return netdev_ethtool_ioctl(dev, (void*)rq->ifr_data); + break; + + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */ + data->phy_id = phy; + + case SIOCGMIIREG: /* Read MII PHY register. */ + case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */ + data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f); + retval = 0; + break; + + case SIOCSMIIREG: /* Write MII PHY register. 
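mdio_read() above parks the calling task with set_current_state()/schedule() until mdio_callback() wakes it; note that it neither checks the kmalloc() return value nor guards against schedule() returning early on a signal before the MII interrupt fires. A sketch of the same round-trip using a completion, which sidesteps both hazards (mdio_read_ctx and the *_sketch names are illustrative, not part of the driver):

struct mdio_read_ctx {
	struct completion done;
	u16 regval;
};

static void mdio_callback_sketch(uint regval, struct net_device *dev, uint data)
{
	struct mdio_read_ctx *ctx = (struct mdio_read_ctx *)data;

	ctx->regval = regval & 0xffff;
	complete(&ctx->done);
}

static int mdio_read_sketch(struct net_device *dev, int location)
{
	struct mdio_read_ctx ctx;

	init_completion(&ctx.done);
	/* Same pointer-in-uint convention as the driver's own callback data. */
	mii_queue(dev, mk_mii_read(location), mdio_callback_sketch, (uint)&ctx);
	wait_for_completion(&ctx.done);	/* sleeps until the MII IRQ fires */
	return ctx.regval;
}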
*/ + case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */ + if (!capable(CAP_NET_ADMIN)) + { + retval = -EPERM; + } + else + { + mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); + retval = 0; + } + break; + + default: + retval = -EOPNOTSUPP; + break; + } + } + return retval; +} + + +static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr) +{ + u32 ethcmd; + + /* dev_ioctl() in ../../net/core/dev.c has already checked + capable(CAP_NET_ADMIN), so don't bother with that here. */ + + if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd))) + return -EFAULT; + + switch (ethcmd) { + case ETHTOOL_GDRVINFO: + { + struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO }; + strcpy (info.driver, dev->name); + strcpy (info.version, "0.3"); + strcpy (info.bus_info, ""); + if (copy_to_user (useraddr, &info, sizeof (info))) + return -EFAULT; + return 0; + } + default: + break; + } + + return -EOPNOTSUPP; +} + +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + +#ifdef ORIGINAL_VERSION + +/* Returns the CRC needed when filling in the hash table for + * multicast group filtering + * pAddr must point to a MAC address (6 bytes) + */ +static u32 fec_mulicast_calc_crc(char *pAddr) +{ + u8 byte; + int byte_count; + int bit_count; + u32 crc = 0xffffffff; + u8 msb; + + for (byte_count=0; byte_count<6; byte_count++) { + byte = pAddr[byte_count]; + for (bit_count=0; bit_count<8; bit_count++) { + msb = crc >> 31; + crc <<= 1; + if (msb ^ (byte & 0x1)) { + crc ^= FEC_CRC_POLY; + } + byte >>= 1; + } + } + return (crc); +} + +/* Set or clear the multicast filter for this adaptor. + * Skeleton taken from sunlance driver. + * The CPM Ethernet implementation allows Multicast as well as individual + * MAC address filtering. Some of the drivers check to make sure it is + * a group multicast address, and discard those that are not. I guess I + * will do the same for now, but just remove the test if you want + * individual filtering as well (do the upper net layers want or support + * this kind of feature?). + */ + +static void set_multicast_list(struct net_device *dev) +{ + struct fec_enet_private *fep; + volatile fec_t *ep; + + fep = (struct fec_enet_private *)dev->priv; + ep = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec); + + if (dev->flags&IFF_PROMISC) { + + /* Log any net taps. */ + printk("%s: Promiscuous mode enabled.\n", dev->name); + ep->fec_r_cntrl |= FEC_RCNTRL_PROM; + } else { + + ep->fec_r_cntrl &= ~FEC_RCNTRL_PROM; + + if (dev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's.
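netdev_ethtool_ioctl() above answers ETHTOOL_GDRVINFO requests arriving via SIOCETHTOOL. For reference, the matching userspace call follows the standard ethtool ioctl pattern (a hedged sketch of generic usage, not code from this patch):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* sock is any open AF_INET datagram socket; returns 0 on success. */
static int query_drvinfo(int sock, const char *ifname,
			 struct ethtool_drvinfo *info)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	memset(info, 0, sizeof(*info));
	info->cmd = ETHTOOL_GDRVINFO;	/* the kernel reads this u32 first */
	ifr.ifr_data = (char *)info;
	return ioctl(sock, SIOCETHTOOL, &ifr);
}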
+ */ + ep->fec_hash_table_high = 0xffffffff; + ep->fec_hash_table_low = 0xffffffff; + } else { + struct dev_mc_list *pmc = dev->mc_list; + + /* Clear Hash-Table + */ + ep->fec_hash_table_high = 0; + ep->fec_hash_table_low = 0; + + /* Now populate the hash table + */ +#ifdef DEBUG_MULTICAST + if (pmc) { + printk ("%s: Recalculating hash-table:\n", + dev->name); + printk (" MAC Address high low\n"); + } +#endif + + while (pmc) { + u32 crc; + int temp; + u32 csrVal; + int hash_index; + + crc = fec_mulicast_calc_crc(pmc->dmi_addr); + temp = (crc & 0x3f) >> 1; + hash_index = ((temp & 0x01) << 4) | + ((temp & 0x02) << 2) | + ((temp & 0x04)) | + ((temp & 0x08) >> 2) | + ((temp & 0x10) >> 4); + csrVal = (1 << hash_index); + if (crc & 1) { + ep->fec_hash_table_high |= csrVal; + } + else { + ep->fec_hash_table_low |= csrVal; + } +#ifdef DEBUG_MULTICAST + printk (" %02x:%02x:%02x:%02x:%02x:%02x %08x %08x\n", + (int)pmc->dmi_addr[0], + (int)pmc->dmi_addr[1], + (int)pmc->dmi_addr[2], + (int)pmc->dmi_addr[3], + (int)pmc->dmi_addr[4], + (int)pmc->dmi_addr[5], + ep->fec_hash_table_high, + ep->fec_hash_table_low + ); +#endif + pmc = pmc->next; + } + } + } +} +#endif /* ORIGINAL_VERSION */ + +/* Initialize the FEC Ethernet on 860T. + */ +int __init fec_enet_init(void) +{ + struct rtnet_device *rtdev = NULL; + struct fec_enet_private *fep; + int i, j, k; + unsigned char *eap, *iap, *ba; + unsigned long mem_addr; + volatile cbd_t *bdp; + cbd_t *cbd_base; + volatile immap_t *immap; + volatile fec_t *fecp; + bd_t *bd; + + immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */ + + bd = (bd_t *)__res; + + if (!rx_pool_size) + rx_pool_size = RX_RING_SIZE * 2; + + rtdev = rtdev_root = rt_alloc_etherdev(sizeof(struct fec_enet_private), + rx_pool_size + TX_RING_SIZE); + if (rtdev == NULL) { + printk(KERN_ERR "enet: Could not allocate ethernet device.\n"); + return -1; + } + rtdev_alloc_name(rtdev, "rteth%d"); + rt_rtdev_connect(rtdev, &RTDEV_manager); + rtdev->vers = RTDEV_VERS_2_0; + + fep = (struct fec_enet_private *)rtdev->priv; + fecp = &(immap->im_cpm.cp_fec); + + /* Whack a reset. We should wait for this. + */ + fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET; + for (i = 0; + (fecp->fec_ecntrl & FEC_ECNTRL_RESET) && (i < FEC_RESET_DELAY); + ++i) { + udelay(1); + } + if (i == FEC_RESET_DELAY) { + printk ("FEC Reset timeout!\n"); + } + + /* Set the Ethernet address. If using multiple Enets on the 8xx, + * this needs some work to get unique addresses. + */ + eap = (unsigned char *)my_enet_addr; + iap = bd->bi_enetaddr; + +#if defined(CONFIG_SCC_ENET) && !defined(ORIGINAL_VERSION) + /* + * If a board has Ethernet configured both on a SCC and the + * FEC, it needs (at least) 2 MAC addresses (we know that Sun + * disagrees, but anyway). For the FEC port, we create + * another address by setting one of the address bits above + * something that would have (up to now) been allocated. + */ + { + unsigned char tmpaddr[6]; + for (i=0; i<6; i++) + tmpaddr[i] = *iap++; + tmpaddr[3] |= 0x80; + iap = tmpaddr; + } +#endif + + for (i=0; i<6; i++) { + rtdev->dev_addr[i] = *eap++ = *iap++; + } + + /* Allocate memory for buffer descriptors. + */ + if (((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t)) > PAGE_SIZE) { + printk("FEC init error. Need more space.\n"); + printk("FEC initialization failed.\n"); + return 1; + } + cbd_base = (cbd_t *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, (void *)&mem_addr); + + /* Set receive and transmit descriptor base. 
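The OR-of-shifts that builds hash_index above is a 5-bit reversal of temp (CRC bits 5:1): bit 0 swaps with bit 4 and bit 1 with bit 3. Factored out, with a few checked values:

/* Equivalent helper for the hash_index expression above. */
static unsigned int reverse5(unsigned int temp)
{
	return ((temp & 0x01) << 4) | ((temp & 0x02) << 2) | (temp & 0x04) |
	       ((temp & 0x08) >> 2) | ((temp & 0x10) >> 4);
}
/* reverse5(0x01) == 0x10, reverse5(0x02) == 0x08, reverse5(0x1f) == 0x1f */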
+ */ + fep->rx_bd_base = cbd_base; + fep->tx_bd_base = cbd_base + RX_RING_SIZE; + + fep->skb_cur = fep->skb_dirty = 0; + + /* Initialize the receive buffer descriptors. + */ + bdp = fep->rx_bd_base; + k = 0; + for (i=0; i<FEC_ENET_RX_PAGES; i++) { + + /* Allocate a page. + */ + ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, (void *)&mem_addr); + + /* Initialize the BD for every fragment in the page. + */ + for (j=0; j<FEC_ENET_RX_FRPPG; j++) { + bdp->cbd_sc = BD_ENET_RX_EMPTY; + bdp->cbd_bufaddr = mem_addr; + fep->rx_vaddr[k++] = ba; + mem_addr += FEC_ENET_RX_FRSIZE; + ba += FEC_ENET_RX_FRSIZE; + bdp++; + } + } + + rtdm_lock_init(&fep->lock); + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + /* Install our interrupt handler. + */ + rt_stack_connect(rtdev, &STACK_manager); + if ((i = rtdm_irq_request(&fep->irq_handle, FEC_INTERRUPT, + fec_enet_interrupt, 0, "rt_mpc8xx_fec", rtdev))) { + printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq); + rtdev_free(rtdev); + return i; + } + + rtdev->base_addr = (unsigned long)fecp; + +#ifdef CONFIG_RPXCLASSIC +/* If MDIO is disabled the PHY should not be allowed to + * generate interrupts telling us to read the PHY. + */ +# ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + /* Make Port C, bit 15 an input that causes interrupts. + */ + immap->im_ioport.iop_pcpar &= ~0x0001; + immap->im_ioport.iop_pcdir &= ~0x0001; + immap->im_ioport.iop_pcso &= ~0x0001; + immap->im_ioport.iop_pcint |= 0x0001; + cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, dev); +# endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + /* Make LEDS reflect Link status. + */ + *((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE; +#endif /* CONFIG_RPXCLASSIC */ + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO +# ifndef PHY_INTERRUPT +# error Want to use MII, but PHY_INTERRUPT not defined! +# endif + ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel |= + (0x80000000 >> PHY_INTERRUPT); + + if (request_8xxirq(PHY_INTERRUPT, mii_link_interrupt, 0, "mii", dev) != 0) + panic("Could not allocate MII IRQ!"); +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + rtdev->base_addr = (unsigned long)fecp; + + /* The FEC Ethernet specific entries in the device structure. */ + rtdev->open = fec_enet_open; + rtdev->hard_start_xmit = fec_enet_start_xmit; + rtdev->stop = fec_enet_close; + rtdev->hard_header = &rt_eth_header; + rtdev->get_stats = fec_enet_get_stats; + + if ((i = rt_register_rtnetdev(rtdev))) { + rtdm_irq_disable(&fep->irq_handle); + rtdm_irq_free(&fep->irq_handle); + rtdev_free(rtdev); + return i; + } + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + dev->do_ioctl = fec_enet_ioctl; + + for (i=0; i<NMII-1; i++) + mii_cmds[i].mii_next = &mii_cmds[i+1]; + mii_free = mii_cmds; +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + +#ifndef CONFIG_ICU862 + /* Configure all of port D for MII. + */ + immap->im_ioport.iop_pdpar = 0x1fff; + +#else /* CONFIG_ICU862 */ + /* Configure port A for MII. + */ + + /* Has Utopia been configured? */ + if (immap->im_ioport.iop_pdpar & (0x8000 >> 1)) { + /* + * YES - Use MUXED mode for UTOPIA bus. + * This frees Port A for use by MII (see 862UM table 41-6). + */ + immap->im_ioport.utmode &= ~0x80; + } else { + /* + * NO - set SPLIT mode for UTOPIA bus. + * + * This doesn't really effect UTOPIA (which isn't + * enabled anyway) but just tells the 862 + * to use port A for MII (see 862UM table 41-6). 
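Because the last descriptor in each ring carries BD_SC_WRAP, ring walkers never compute an index modulo the ring size; they follow the wrap bit back to the base. A hypothetical helper showing the advance step this enables (the driver's RX/TX paths open-code it):

static volatile cbd_t *next_rx_bd(struct fec_enet_private *fep,
				  volatile cbd_t *bdp)
{
	if (bdp->cbd_sc & BD_SC_WRAP)	/* last BD: wrap to ring start */
		return fep->rx_bd_base;
	return bdp + 1;
}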
+ */ + immap->im_ioport.utmode |= 0x80; + } + +# ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + /* Now configure MII_MDC pin */ + immap->im_ioport.iop_pdpar |= (0x8000 >> 8); +# endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ +#endif /* CONFIG_ICU862 */ + + /* Bits moved from Rev. D onward. + */ + if ((mfspr(IMMR) & 0xffff) < 0x0501) + immap->im_ioport.iop_pddir = 0x1c58; /* Pre rev. D */ + else + immap->im_ioport.iop_pddir = 0x1fff; /* Rev. D and later */ + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + /* Set MII speed to 2.5 MHz + */ + fecp->fec_mii_speed = fep->phy_speed = + ((((bd->bi_intfreq + 4999999) / 2500000) / 2 ) & 0x3F ) << 1; +#else + fecp->fec_mii_speed = 0; /* turn off MDIO */ +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + +#ifndef ORIGINAL_VERSION + printk("%s: FEC ENET Version 0.3, irq %d, addr %02x:%02x:%02x:%02x:%02x:%02x\n", + rtdev->name, FEC_INTERRUPT, + rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2], + rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]); +#else + printk ("%s: FEC ENET Version 0.3, FEC irq %d" +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + ", with MDIO" +#endif +#ifdef PHY_INTERRUPT + ", MII irq %d" +#endif + ", addr ", + dev->name, FEC_INTERRUPT +#ifdef PHY_INTERRUPT + , PHY_INTERRUPT +#endif + ); + for (i=0; i<6; i++) + printk("%02x%c", rtdev->dev_addr[i], (i==5) ? '\n' : ':'); +#endif /* ORIGINAL_VERSION */ + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO /* start in full duplex mode, and negotiate speed */ + fec_restart (dev, 1); +#else /* always use half duplex mode only */ + fec_restart (rtdev, 0); +#endif + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + /* Queue up command to detect the PHY and initialize the + * remainder of the interface. + */ + fep->phy_id_done = 0; + fep->phy_addr = 0; + mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy, 0); + + fep->old_status = 0; +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + return 0; +} + +/* This function is called to start or restart the FEC during a link + * change. This only happens when switching between half and full + * duplex. + */ +static void +fec_restart(struct rtnet_device *rtdev, int duplex) +{ + struct fec_enet_private *fep; + int i; + volatile cbd_t *bdp; + volatile immap_t *immap; + volatile fec_t *fecp; + + immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */ + + fecp = &(immap->im_cpm.cp_fec); + + fep = rtdev->priv; + + /* Whack a reset. We should wait for this. + */ + fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET; + for (i = 0; + (fecp->fec_ecntrl & FEC_ECNTRL_RESET) && (i < FEC_RESET_DELAY); + ++i) { + udelay(1); + } + if (i == FEC_RESET_DELAY) { + printk ("FEC Reset timeout!\n"); + } + + /* Set station address. + */ + fecp->fec_addr_low = (my_enet_addr[0] << 16) | my_enet_addr[1]; + fecp->fec_addr_high = my_enet_addr[2]; + + /* Reset all multicast. + */ + fecp->fec_hash_table_high = 0; + fecp->fec_hash_table_low = 0; + + /* Set maximum receive buffer size. + */ + fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; + fecp->fec_r_hash = PKT_MAXBUF_SIZE; + + /* Set receive and transmit descriptor base. + */ + fecp->fec_r_des_start = iopa((uint)(fep->rx_bd_base)); + fecp->fec_x_des_start = iopa((uint)(fep->tx_bd_base)); + + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; + fep->cur_rx = fep->rx_bd_base; + + /* Reset SKB transmit buffers. 
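Worked example of the fec_mii_speed expression above, assuming the MPC8xx MII_SPEED semantics MDC = sysclk / (2 * field):

/*
 * For bd->bi_intfreq = 50000000 (a 50 MHz core clock):
 *   (50000000 + 4999999) / 2500000 = 21
 *   21 / 2                         = 10
 *   (10 & 0x3f) << 1               = 0x14  (register value)
 * MDC = 50 MHz / (2 * 10) = 2.5 MHz, matching the "2.5 MHz" comment.
 */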
+ */ + fep->skb_cur = fep->skb_dirty = 0; + for (i=0; i<=TX_RING_MOD_MASK; i++) { + if (fep->tx_skbuff[i] != NULL) { + dev_kfree_rtskb(fep->tx_skbuff[i]); + fep->tx_skbuff[i] = NULL; + } + } + + /* Initialize the receive buffer descriptors. + */ + bdp = fep->rx_bd_base; + for (i=0; i<RX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. + */ + bdp->cbd_sc = BD_ENET_RX_EMPTY; + bdp++; + } + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + /* ...and the same for transmmit. + */ + bdp = fep->tx_bd_base; + for (i=0; i<TX_RING_SIZE; i++) { + + /* Initialize the BD for every fragment in the page. + */ + bdp->cbd_sc = 0; + bdp->cbd_bufaddr = 0; + bdp++; + } + + /* Set the last buffer to wrap. + */ + bdp--; + bdp->cbd_sc |= BD_SC_WRAP; + + /* Enable MII mode. + */ + if (duplex) { + fecp->fec_r_cntrl = FEC_RCNTRL_MII_MODE; /* MII enable */ + fecp->fec_x_cntrl = FEC_TCNTRL_FDEN; /* FD enable */ + } + else { + fecp->fec_r_cntrl = FEC_RCNTRL_MII_MODE | FEC_RCNTRL_DRT; + fecp->fec_x_cntrl = 0; + } + + fep->full_duplex = duplex; + + /* Enable big endian and don't care about SDMA FC. + */ + fecp->fec_fun_code = 0x78000000; + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + /* Set MII speed. + */ + fecp->fec_mii_speed = fep->phy_speed; +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + /* Clear any outstanding interrupt. + */ + fecp->fec_ievent = 0xffc0; + + fecp->fec_ivec = (FEC_INTERRUPT/2) << 29; + + /* Enable interrupts we wish to service. + */ + fecp->fec_imask = ( FEC_ENET_TXF | FEC_ENET_TXB | + FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII ); + + /* And last, enable the transmit and receive processing. + */ + fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN; + fecp->fec_r_des_active = 0x01000000; + + /* The tx ring is no longer full. */ + if(fep->tx_full) + { + fep->tx_full = 0; + rtnetif_wake_queue(rtdev); + } +} + +static void +fec_stop(struct rtnet_device *rtdev) +{ + volatile immap_t *immap; + volatile fec_t *fecp; + int i; + struct fec_enet_private *fep; + + immap = (immap_t *)IMAP_ADDR; /* pointer to internal registers */ + + fecp = &(immap->im_cpm.cp_fec); + + if ((fecp->fec_ecntrl & FEC_ECNTRL_ETHER_EN) == 0) + return; /* already down */ + + fep = rtdev->priv; + + + fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */ + + for (i = 0; + ((fecp->fec_ievent & 0x10000000) == 0) && (i < FEC_RESET_DELAY); + ++i) { + udelay(1); + } + if (i == FEC_RESET_DELAY) { + printk ("FEC timeout on graceful transmit stop\n"); + } + + /* Clear outstanding MII command interrupts. + */ + fecp->fec_ievent = FEC_ENET_MII; + + /* Enable MII command finished interrupt + */ + fecp->fec_ivec = (FEC_INTERRUPT/2) << 29; + fecp->fec_imask = FEC_ENET_MII; + +#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO + /* Set MII speed. 
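fec_stop()'s graceful-stop wait above uses the same bounded busy-poll idiom as the reset waits in fec_enet_init() and fec_restart(). Factored out for illustration (a hypothetical helper; the register is assumed to be a uint, in line with the driver's own casts):

static int fec_poll_us(volatile uint *reg, uint mask, uint want, int max_us)
{
	int i;

	/* Poll until (*reg & mask) == want, for at most max_us microseconds. */
	for (i = 0; ((*reg & mask) != want) && i < max_us; i++)
		udelay(1);
	return (i == max_us) ? -1 : 0;	/* -1 on timeout */
}

/* e.g. the graceful-stop wait above becomes:
 * fec_poll_us(&fecp->fec_ievent, 0x10000000, 0x10000000, FEC_RESET_DELAY) */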
+ */ + fecp->fec_mii_speed = fep->phy_speed; +#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */ + + /* Disable FEC + */ + fecp->fec_ecntrl &= ~(FEC_ECNTRL_ETHER_EN); +} + +static void __exit fec_enet_cleanup(void) +{ + struct rtnet_device *rtdev = rtdev_root; + struct fec_enet_private *fep; + + if (rtdev) { + fep = rtdev->priv; + rtdm_irq_disable(&fep->irq_handle); + rtdm_irq_free(&fep->irq_handle); + + consistent_free(fep->rx_bd_base); + + rt_stack_disconnect(rtdev); + rt_unregister_rtnetdev(rtdev); + rt_rtdev_disconnect(rtdev); + + printk("%s: unloaded\n", rtdev->name); + rtdev_free(rtdev); + rtdev_root = NULL; + } +} + +module_init(fec_enet_init); +module_exit(fec_enet_cleanup); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c new file mode 100644 index 0000000..82d1c33 --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c @@ -0,0 +1,2095 @@ +/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */ +/* + Written/copyright 1999-2001 by Donald Becker. + Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) + Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com) + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. License under other terms may be + available. Contact the original author for details. + + The original author may be reached as becker@scyld.com, or at + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Support information and updates available at + http://www.scyld.com/network/netsemi.html + + + Linux kernel modifications: + + Version 1.0.1: + - Spinlock fixes + - Bug fixes and better intr performance (Tjeerd) + Version 1.0.2: + - Now reads correct MAC address from eeprom + Version 1.0.3: + - Eliminate redundant priv->tx_full flag + - Call netif_start_queue from dev->tx_timeout + - wmb() in start_tx() to flush data + - Update Tx locking + - Clean up PCI enable (davej) + Version 1.0.4: + - Merge Donald Becker's natsemi.c version 1.07 + Version 1.0.5: + - { fill me in } + Version 1.0.6: + * ethtool support (jgarzik) + * Proper initialization of the card (which sometimes + fails to occur and leaves the card in a non-functional + state). (uzi) + + * Some documented register settings to optimize some + of the 100Mbit autodetection circuitry in rev C cards. (uzi) + + * Polling of the PHY intr for stuff like link state + change and auto-negotiation to finally work properly. (uzi) + + * One-liner removal of a duplicate declaration of + netdev_error(). (uzi) + + Version 1.0.7: (Manfred Spraul) + * pci dma + * SMP locking update + * full reset added into tx_timeout + * correct multicast hash generation (both big and little endian) + [copied from a natsemi driver version + from Myrio Corporation, Greg Smith] + * suspend/resume + + version 1.0.8 (Tim Hockin <thockin@sun.com>) + * ETHTOOL_* support + * Wake on lan support (Erik Gilling) + * MXDMA fixes for serverworks + * EEPROM reload + + version 1.0.9 (Manfred Spraul) + * Main change: fix lack of synchronization of + netif_close/netif_suspend against a last interrupt + or packet. + * do not enable superfluous interrupts (e.g.
the + drivers relies on TxDone - TxIntr not needed) + * wait that the hardware has really stopped in close + and suspend. + * workaround for the (at least) gcc-2.95.1 compiler + problem. Also simplifies the code a bit. + * disable_irq() in tx_timeout - needed to protect + against rx interrupts. + * stop the nic before switching into silent rx mode + for wol (required according to docu). + + version 1.0.10: + * use long for ee_addr (various) + * print pointers properly (DaveM) + * include asm/irq.h (?) + + version 1.0.11: + * check and reset if PHY errors appear (Adrian Sun) + * WoL cleanup (Tim Hockin) + * Magic number cleanup (Tim Hockin) + * Don't reload EEPROM on every reset (Tim Hockin) + * Save and restore EEPROM state across reset (Tim Hockin) + * MDIO Cleanup (Tim Hockin) + * Reformat register offsets/bits (jgarzik) + + version 1.0.12: + * ETHTOOL_* further support (Tim Hockin) + + version 1.0.13: + * ETHTOOL_[G]EEPROM support (Tim Hockin) + + version 1.0.13: + * crc cleanup (Matt Domsch <Matt_Domsch@dell.com>) + + version 1.0.14: + * Cleanup some messages and autoneg in ethtool (Tim Hockin) + + version 1.0.15: + * Get rid of cable_magic flag + * use new (National provided) solution for cable magic issue + + version 1.0.16: + * call netdev_rx() for RxErrors (Manfred Spraul) + * formatting and cleanups + * change options and full_duplex arrays to be zero + initialized + * enable only the WoL and PHY interrupts in wol mode + + version 1.0.17: + * only do cable_magic on 83815 and early 83816 (Tim Hockin) + * create a function for rx refill (Manfred Spraul) + * combine drain_ring and init_ring (Manfred Spraul) + * oom handling (Manfred Spraul) + * hands_off instead of playing with netif_device_{de,a}ttach + (Manfred Spraul) + * be sure to write the MAC back to the chip (Manfred Spraul) + * lengthen EEPROM timeout, and always warn about timeouts + (Manfred Spraul) + * comments update (Manfred) + * do the right thing on a phy-reset (Manfred and Tim) + + TODO: + * big endian support with CFG:BEM instead of cpu_to_le32 + * support for an external PHY + * NAPI + + Ported to RTNET: December 2003, Erik Buit <e.buit@student.utwente.nl> +*/ + +#if !defined(__OPTIMIZE__) +#warning You must compile this file with the correct options! +#warning See the last lines of the source file. +#error You must compile this driver with "-O". +#endif + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/ethtool.h> +#include <linux/delay.h> +#include <linux/rtnetlink.h> +#include <linux/mii.h> +#include <linux/uaccess.h> +#include <asm/processor.h> /* Processor type for cache alignment. */ +#include <asm/bitops.h> +#include <asm/io.h> +#include <asm/irq.h> + +/*** RTnet ***/ +#include <rtnet_port.h> + +#define MAX_UNITS 8 /* More are supported, limit only on options */ +#define DEFAULT_RX_POOL_SIZE 16 + +static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 }; +module_param_array(cards, int, NULL, 0444); +MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)"); +/*** RTnet ***/ + +#define DRV_NAME "natsemi-rt" +#define DRV_VERSION "1.07+LK1.0.17-RTnet-0.2" +#define DRV_RELDATE "Dec 16, 2003" + +/* Updated to recommendations in pci-skeleton v2.03. 
*/ + +/* The user-configurable values. + These may be modified when a driver module is loaded.*/ + +#define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \ + NETIF_MSG_LINK | \ + NETIF_MSG_WOL | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) +static int local_debug = -1; + +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 20; +static int mtu; + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1518 effectively disables this feature. */ +/*** RTnet *** +static int rx_copybreak; + *** RTnet ***/ + +/* Used to pass the media type, etc. + Both 'options[]' and 'full_duplex[]' should exist for driver + interoperability. + The media type is usually passed in 'options[]'. +*/ +static int options[MAX_UNITS]; +static int full_duplex[MAX_UNITS]; + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for compile efficiency. + The compiler will convert <unsigned>'%'<2^N> into a bit mask. + Making the Tx ring too large decreases the effectiveness of channel + bonding and packet priority. + There are no ill effects from too-large receive rings. */ +#define TX_RING_SIZE 16 +#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */ +#define RX_RING_SIZE 8 /*** RTnet ***/ + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) + +#define NATSEMI_HW_TIMEOUT 400 +#define NATSEMI_TIMER_FREQ 3*HZ +#define NATSEMI_PG0_NREGS 64 +#define NATSEMI_RFDR_NREGS 8 +#define NATSEMI_PG1_NREGS 4 +#define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \ + NATSEMI_PG1_NREGS) +#define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */ +#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32)) +#define NATSEMI_EEPROM_SIZE 24 /* 12 16-bit values */ + +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ + +/* These identify the driver base version and may not be removed. */ +static char version[] = + KERN_INFO DRV_NAME " dp8381x driver, version " + DRV_VERSION ", " DRV_RELDATE "\n" + KERN_INFO " originally by Donald Becker <becker@scyld.com>\n" + KERN_INFO " http://www.scyld.com/network/natsemi.html\n" + KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n" + KERN_INFO " RTnet port by Erik Buit\n"; + +MODULE_AUTHOR("Erik Buit"); +MODULE_DESCRIPTION("RTnet National Semiconductor DP8381x series PCI Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(max_interrupt_work, int, 0444); +module_param(mtu, int, 0444); +module_param_named(debug, local_debug, int, 0444); +/*** RTnet *** +MODULE_PARM(rx_copybreak, "i"); + *** RTnet ***/ +module_param_array(options, int, NULL, 0444); +module_param_array(full_duplex, int, NULL, 0444); +MODULE_PARM_DESC(max_interrupt_work, + "DP8381x maximum events handled per interrupt"); +MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); +MODULE_PARM_DESC(debug, "DP8381x default debug level"); +/*** RTnet *** +MODULE_PARM_DESC(rx_copybreak, + "DP8381x copy breakpoint for copy-only-tiny-frames"); + *** RTnet ***/ +MODULE_PARM_DESC(options, "DP8381x: Bits 0-3: media type, bit 17: full duplex"); +MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)"); + +/* + Theory of Operation + +I. Board Compatibility + +This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC. +It also works with other chips in in the DP83810 series. + +II. Board-specific settings + +This driver requires the PCI interrupt line to be valid. 
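The power-of-two remark above is a strength-reduction point: with unsigned indices the compiler can turn the ring-wrap modulo into a single AND, for example:

/* With TX_RING_SIZE = 16, both lines compile to the same mask operation
 * for an unsigned cur_tx: */
next = (np->cur_tx + 1) % TX_RING_SIZE;
next = (np->cur_tx + 1) & (TX_RING_SIZE - 1);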
+It honors the EEPROM-set values. + +III. Driver operation + +IIIa. Ring buffers + +This driver uses two statically allocated fixed-size descriptor lists +formed into rings by a branch from the final descriptor to the beginning of +the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. +The NatSemi design uses a 'next descriptor' pointer that the driver forms +into a list. + +IIIb/c. Transmit/Receive Structure + +This driver uses a zero-copy receive and transmit scheme. +The driver allocates full frame size skbuffs for the Rx ring buffers at +open() time and passes the skb->data field to the chip as receive data +buffers. When an incoming frame is less than RX_COPYBREAK bytes long, +a fresh skbuff is allocated and the frame is copied to the new skbuff. +When the incoming frame is larger, the skbuff is passed directly up the +protocol stack. Buffers consumed this way are replaced by newly allocated +skbuffs in a later phase of receives. + +The RX_COPYBREAK value is chosen to trade-off the memory wasted by +using a full-sized skbuff for small frames vs. the copying costs of larger +frames. New boards are typically used in generously configured machines +and the underfilled buffers have negligible impact compared to the benefit of +a single allocation size, so the default value of zero results in never +copying packets. When copying is done, the cost is usually mitigated by using +a combined copy/checksum routine. Copying also preloads the cache, which is +most useful with small frames. + +A subtle aspect of the operation is that unaligned buffers are not permitted +by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't +longword aligned for further processing. On copies frames are put into the +skbuff at an offset of "+2", 16-byte aligning the IP header. + +IIId. Synchronization + +Most operations are synchronized on the np->lock irq spinlock, except the +performance critical codepaths: + +The rx process only runs in the interrupt handler. Access from outside +the interrupt handler is only permitted after disable_irq(). + +The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap +is set, then access is permitted under spin_lock_irq(&np->lock). + +Thus configuration functions that want to access everything must call + disable_irq(dev->irq); + spin_lock_bh(dev->xmit_lock); + spin_lock_irq(&np->lock); + +IV. Notes + +NatSemi PCI network controllers are very uncommon. + +IVb. References + +http://www.scyld.com/expert/100mbps.html +http://www.scyld.com/expert/NWay.html +Datasheet is available from: +http://www.national.com/pf/DP/DP83815.html + +IVc. Errata + +None characterised. +*/ + + + +enum pcistuff { + PCI_USES_IO = 0x01, + PCI_USES_MEM = 0x02, + PCI_USES_MASTER = 0x04, + PCI_ADDR0 = 0x08, + PCI_ADDR1 = 0x10, +}; + +/* MMIO operations required */ +#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1) + + +/* array of board data directly indexed by pci_tbl[x].driver_data */ +static struct { + const char *name; + unsigned long flags; +} natsemi_pci_info[] = { + { "NatSemi DP8381[56]", PCI_IOTYPE }, +}; + +static struct pci_device_id natsemi_pci_tbl[] = { + { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_83815, PCI_ANY_ID, PCI_ANY_ID, }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl); + +/* Offsets to the device registers. + Unlike software-only systems, device drivers interact with complex hardware. + It's not useful to define symbolic names for every register bit in the + device. 
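Spelled out as code, the full-exclusion recipe the synchronization notes above describe would be (hypothetical helper, reflecting the Linux driver this port derives from; the RTnet port itself replaces np->lock with an rtdm_lock_t):

static void natsemi_exclude_all(struct net_device *dev,
				struct netdev_private *np)
{
	disable_irq(dev->irq);		/* quiesce the rx/interrupt path */
	spin_lock_bh(&dev->xmit_lock);	/* quiesce the tx path */
	spin_lock_irq(&np->lock);	/* own the remaining device state */
}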
+*/ +enum register_offsets { + ChipCmd = 0x00, + ChipConfig = 0x04, + EECtrl = 0x08, + PCIBusCfg = 0x0C, + IntrStatus = 0x10, + IntrMask = 0x14, + IntrEnable = 0x18, + IntrHoldoff = 0x16, /* DP83816 only */ + TxRingPtr = 0x20, + TxConfig = 0x24, + RxRingPtr = 0x30, + RxConfig = 0x34, + ClkRun = 0x3C, + WOLCmd = 0x40, + PauseCmd = 0x44, + RxFilterAddr = 0x48, + RxFilterData = 0x4C, + BootRomAddr = 0x50, + BootRomData = 0x54, + SiliconRev = 0x58, + StatsCtrl = 0x5C, + StatsData = 0x60, + RxPktErrs = 0x60, + RxMissed = 0x68, + RxCRCErrs = 0x64, + BasicControl = 0x80, + BasicStatus = 0x84, + AnegAdv = 0x90, + AnegPeer = 0x94, + PhyStatus = 0xC0, + MIntrCtrl = 0xC4, + MIntrStatus = 0xC8, + PhyCtrl = 0xE4, + + /* These are from the spec, around page 78... on a separate table. + * The meaning of these registers depend on the value of PGSEL. */ + PGSEL = 0xCC, + PMDCSR = 0xE4, + TSTDAT = 0xFC, + DSPCFG = 0xF4, + SDCFG = 0xF8 +}; +/* the values for the 'magic' registers above (PGSEL=1) */ +#define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */ +#define TSTDAT_VAL 0x0 +#define DSPCFG_VAL 0x5040 +#define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */ +#define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */ +#define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */ + +/* misc PCI space registers */ +enum pci_register_offsets { + PCIPM = 0x44, +}; + +enum ChipCmd_bits { + ChipReset = 0x100, + RxReset = 0x20, + TxReset = 0x10, + RxOff = 0x08, + RxOn = 0x04, + TxOff = 0x02, + TxOn = 0x01, +}; + +enum ChipConfig_bits { + CfgPhyDis = 0x200, + CfgPhyRst = 0x400, + CfgExtPhy = 0x1000, + CfgAnegEnable = 0x2000, + CfgAneg100 = 0x4000, + CfgAnegFull = 0x8000, + CfgAnegDone = 0x8000000, + CfgFullDuplex = 0x20000000, + CfgSpeed100 = 0x40000000, + CfgLink = 0x80000000, +}; + +enum EECtrl_bits { + EE_ShiftClk = 0x04, + EE_DataIn = 0x01, + EE_ChipSelect = 0x08, + EE_DataOut = 0x02, +}; + +enum PCIBusCfg_bits { + EepromReload = 0x4, +}; + +/* Bits in the interrupt status/mask registers. 
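Since the registers listed after PGSEL alias according to the selected page, programming the 'magic' values has to be bracketed by page selects. A sketch of that sequence using the constants above (assumed from the PGSEL note; the driver's actual fix-up code is outside this hunk):

writew(1, (void *)(ioaddr + PGSEL));	/* expose the magic page */
writew(PMDCSR_VAL, (void *)(ioaddr + PMDCSR));
writew(TSTDAT_VAL, (void *)(ioaddr + TSTDAT));
writew(DSPCFG_VAL, (void *)(ioaddr + DSPCFG));
writew(SDCFG_VAL, (void *)(ioaddr + SDCFG));
writew(0, (void *)(ioaddr + PGSEL));	/* back to the normal page */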
*/ +enum IntrStatus_bits { + IntrRxDone = 0x0001, + IntrRxIntr = 0x0002, + IntrRxErr = 0x0004, + IntrRxEarly = 0x0008, + IntrRxIdle = 0x0010, + IntrRxOverrun = 0x0020, + IntrTxDone = 0x0040, + IntrTxIntr = 0x0080, + IntrTxErr = 0x0100, + IntrTxIdle = 0x0200, + IntrTxUnderrun = 0x0400, + StatsMax = 0x0800, + SWInt = 0x1000, + WOLPkt = 0x2000, + LinkChange = 0x4000, + IntrHighBits = 0x8000, + RxStatusFIFOOver = 0x10000, + IntrPCIErr = 0xf00000, + RxResetDone = 0x1000000, + TxResetDone = 0x2000000, + IntrAbnormalSummary = 0xCD20, +}; + +/* + * Default Interrupts: + * Rx OK, Rx Packet Error, Rx Overrun, + * Tx OK, Tx Packet Error, Tx Underrun, + * MIB Service, Phy Interrupt, High Bits, + * Rx Status FIFO overrun, + * Received Target Abort, Received Master Abort, + * Signalled System Error, Received Parity Error + */ +#define DEFAULT_INTR 0x00f1cd65 + +enum TxConfig_bits { + TxDrthMask = 0x3f, + TxFlthMask = 0x3f00, + TxMxdmaMask = 0x700000, + TxMxdma_512 = 0x0, + TxMxdma_4 = 0x100000, + TxMxdma_8 = 0x200000, + TxMxdma_16 = 0x300000, + TxMxdma_32 = 0x400000, + TxMxdma_64 = 0x500000, + TxMxdma_128 = 0x600000, + TxMxdma_256 = 0x700000, + TxCollRetry = 0x800000, + TxAutoPad = 0x10000000, + TxMacLoop = 0x20000000, + TxHeartIgn = 0x40000000, + TxCarrierIgn = 0x80000000 +}; + +enum RxConfig_bits { + RxDrthMask = 0x3e, + RxMxdmaMask = 0x700000, + RxMxdma_512 = 0x0, + RxMxdma_4 = 0x100000, + RxMxdma_8 = 0x200000, + RxMxdma_16 = 0x300000, + RxMxdma_32 = 0x400000, + RxMxdma_64 = 0x500000, + RxMxdma_128 = 0x600000, + RxMxdma_256 = 0x700000, + RxAcceptLong = 0x8000000, + RxAcceptTx = 0x10000000, + RxAcceptRunt = 0x40000000, + RxAcceptErr = 0x80000000 +}; + +enum ClkRun_bits { + PMEEnable = 0x100, + PMEStatus = 0x8000, +}; + +enum WolCmd_bits { + WakePhy = 0x1, + WakeUnicast = 0x2, + WakeMulticast = 0x4, + WakeBroadcast = 0x8, + WakeArp = 0x10, + WakePMatch0 = 0x20, + WakePMatch1 = 0x40, + WakePMatch2 = 0x80, + WakePMatch3 = 0x100, + WakeMagic = 0x200, + WakeMagicSecure = 0x400, + SecureHack = 0x100000, + WokePhy = 0x400000, + WokeUnicast = 0x800000, + WokeMulticast = 0x1000000, + WokeBroadcast = 0x2000000, + WokeArp = 0x4000000, + WokePMatch0 = 0x8000000, + WokePMatch1 = 0x10000000, + WokePMatch2 = 0x20000000, + WokePMatch3 = 0x40000000, + WokeMagic = 0x80000000, + WakeOptsSummary = 0x7ff +}; + +enum RxFilterAddr_bits { + RFCRAddressMask = 0x3ff, + AcceptMulticast = 0x00200000, + AcceptMyPhys = 0x08000000, + AcceptAllPhys = 0x10000000, + AcceptAllMulticast = 0x20000000, + AcceptBroadcast = 0x40000000, + RxFilterEnable = 0x80000000 +}; + +enum StatsCtrl_bits { + StatsWarn = 0x1, + StatsFreeze = 0x2, + StatsClear = 0x4, + StatsStrobe = 0x8, +}; + +enum MIntrCtrl_bits { + MICRIntEn = 0x2, +}; + +enum PhyCtrl_bits { + PhyAddrMask = 0xf, +}; + +/* values we might find in the silicon revision register */ +#define SRR_DP83815_C 0x0302 +#define SRR_DP83815_D 0x0403 +#define SRR_DP83816_A4 0x0504 +#define SRR_DP83816_A5 0x0505 + +/* The Rx and Tx buffer descriptors. */ +/* Note that using only 32 bit fields simplifies conversion to big-endian + architectures. 
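DEFAULT_INTR and IntrAbnormalSummary above are plain OR-sums over IntrStatus_bits; decomposed for reference:

/*
 * 0x00f1cd65 (DEFAULT_INTR) =
 *   IntrPCIErr | RxStatusFIFOOver | IntrHighBits | LinkChange | StatsMax
 *   | IntrTxUnderrun | IntrTxErr | IntrTxDone | IntrRxOverrun | IntrRxErr
 *   | IntrRxDone
 * 0xCD20 (IntrAbnormalSummary) =
 *   IntrHighBits | LinkChange | StatsMax | IntrTxUnderrun | IntrTxErr
 *   | IntrRxOverrun
 */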
*/ +struct netdev_desc { + u32 next_desc; + s32 cmd_status; + u32 addr; + u32 software_use; +}; + +/* Bits in network_desc.status */ +enum desc_status_bits { + DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000, + DescNoCRC=0x10000000, DescPktOK=0x08000000, + DescSizeMask=0xfff, + + DescTxAbort=0x04000000, DescTxFIFO=0x02000000, + DescTxCarrier=0x01000000, DescTxDefer=0x00800000, + DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000, + DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000, + + DescRxAbort=0x04000000, DescRxOver=0x02000000, + DescRxDest=0x01800000, DescRxLong=0x00400000, + DescRxRunt=0x00200000, DescRxInvalid=0x00100000, + DescRxCRC=0x00080000, DescRxAlign=0x00040000, + DescRxLoop=0x00020000, DesRxColl=0x00010000, +}; + +struct netdev_private { + /* Descriptor rings first for alignment */ + dma_addr_t ring_dma; + struct netdev_desc *rx_ring; + struct netdev_desc *tx_ring; + /* The addresses of receive-in-place skbuffs */ + struct rtskb *rx_skbuff[RX_RING_SIZE]; /*** RTnet ***/ + dma_addr_t rx_dma[RX_RING_SIZE]; + /* address of a sent-in-place packet/buffer, for later free() */ + struct rtskb *tx_skbuff[TX_RING_SIZE]; /*** RTnet ***/ + dma_addr_t tx_dma[TX_RING_SIZE]; + struct net_device_stats stats; + /* Media monitoring timer */ + struct timer_list timer; + /* Frequently used values: keep some adjacent for cache effect */ + struct pci_dev *pci_dev; + struct netdev_desc *rx_head_desc; + /* Producer/consumer ring indices */ + unsigned int cur_rx, dirty_rx; + unsigned int cur_tx, dirty_tx; + /* Based on MTU+slack. */ + unsigned int rx_buf_sz; + int oom; + /* Do not touch the nic registers */ + int hands_off; + /* These values are keep track of the transceiver/media in use */ + unsigned int full_duplex; + /* Rx filter */ + u32 cur_rx_mode; + u32 rx_filter[16]; + /* FIFO and PCI burst thresholds */ + u32 tx_config, rx_config; + /* original contents of ClkRun register */ + u32 SavedClkRun; + /* silicon revision */ + u32 srr; + /* expected DSPCFG value */ + u16 dspcfg; + /* MII transceiver section */ + u16 advertising; + unsigned int iosize; + rtdm_lock_t lock; + u32 msg_enable; + + rtdm_irq_t irq_handle; +}; + +static int eeprom_read(long ioaddr, int location); +static int mdio_read(struct rtnet_device *dev, int phy_id, int reg); +/*static void mdio_write(struct rtnet_device *dev, int phy_id, int reg, u16 data);*/ +static void natsemi_reset(struct rtnet_device *dev); +static void natsemi_reload_eeprom(struct rtnet_device *dev); +static void natsemi_stop_rxtx(struct rtnet_device *dev); +static int netdev_open(struct rtnet_device *dev); +static void do_cable_magic(struct rtnet_device *dev); +static void undo_cable_magic(struct rtnet_device *dev); +static void check_link(struct rtnet_device *dev); +/*static void netdev_timer(unsigned long data);*/ +static void dump_ring(struct rtnet_device *dev); +/*static void tx_timeout(struct rtnet_device *dev);*/ +static int alloc_ring(struct rtnet_device *dev); +static void refill_rx(struct rtnet_device *dev); +static void init_ring(struct rtnet_device *dev); +static void drain_tx(struct rtnet_device *dev); +static void drain_ring(struct rtnet_device *dev); +static void free_ring(struct rtnet_device *dev); +/*static void reinit_ring(struct rtnet_device *dev);*/ +static void init_registers(struct rtnet_device *dev); +static int start_tx(struct rtskb *skb, struct rtnet_device *dev); +static int intr_handler(rtdm_irq_t *irq_handle); +static void netdev_error(struct rtnet_device *dev, int intr_status); +static void netdev_rx(struct 
rtnet_device *dev, nanosecs_abs_t *time_stamp); +static void netdev_tx_done(struct rtnet_device *dev); +static void __set_rx_mode(struct rtnet_device *dev); +/*static void set_rx_mode(struct rtnet_device *dev);*/ +static void __get_stats(struct rtnet_device *rtdev); +static struct net_device_stats *get_stats(struct rtnet_device *dev); +/*static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int netdev_set_wol(struct rtnet_device *dev, u32 newval); +static int netdev_get_wol(struct rtnet_device *dev, u32 *supported, u32 *cur); +static int netdev_set_sopass(struct rtnet_device *dev, u8 *newval); +static int netdev_get_sopass(struct rtnet_device *dev, u8 *data); +static int netdev_get_ecmd(struct rtnet_device *dev, struct ethtool_cmd *ecmd); +static int netdev_set_ecmd(struct rtnet_device *dev, struct ethtool_cmd *ecmd); +static void enable_wol_mode(struct rtnet_device *dev, int enable_intr);*/ +static int netdev_close(struct rtnet_device *dev); +/*static int netdev_get_regs(struct rtnet_device *dev, u8 *buf); +static int netdev_get_eeprom(struct rtnet_device *dev, u8 *buf);*/ + + +static int natsemi_probe1 (struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct rtnet_device *dev; /*** RTnet ***/ + struct netdev_private *np; + int i, option, irq, chip_idx = ent->driver_data; + static int find_cnt = -1; + unsigned long ioaddr, iosize; + const int pcibar = 1; /* PCI base address register */ + int prev_eedata; + u32 tmp; + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + rtdm_printk(version); +#endif + + i = pci_enable_device(pdev); + if (i) return i; + + /* natsemi has a non-standard PM control register + * in PCI config space. Some boards apparently need + * to be brought to D0 in this manner. + */ + pci_read_config_dword(pdev, PCIPM, &tmp); + if (tmp & PCI_PM_CTRL_STATE_MASK) { + /* D0 state, disable PME assertion */ + u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK; + pci_write_config_dword(pdev, PCIPM, newtmp); + } + + find_cnt++; + ioaddr = pci_resource_start(pdev, pcibar); + iosize = pci_resource_len(pdev, pcibar); + irq = pdev->irq; + +/*** RTnet ***/ + if (cards[find_cnt] == 0) + goto err_out; +/*** RTnet ***/ + + if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER) + pci_set_master(pdev); + +/*** RTnet ***/ + dev = rt_alloc_etherdev(sizeof(struct netdev_private), + RX_RING_SIZE * 2 + TX_RING_SIZE); + if (dev == NULL) { + rtdm_printk(KERN_ERR "init_ethernet failed for card #%d\n", find_cnt); + goto err_out; + } + rtdev_alloc_name(dev, "rteth%d"); + rt_rtdev_connect(dev, &RTDEV_manager); + dev->vers = RTDEV_VERS_2_0; + dev->sysbind = &pdev->dev; +/*** RTnet ***/ + + i = pci_request_regions(pdev, dev->name); + if (i) { +/*** RTnet ***/ + rt_rtdev_disconnect(dev); + rtdev_free(dev); +/*** RTnet ***/ + return i; + } + + { + void *mmio = ioremap (ioaddr, iosize); + if (!mmio) { + pci_release_regions(pdev); +/*** RTnet ***/ + rt_rtdev_disconnect(dev); + rtdev_free(dev); +/*** RTnet ***/ + return -ENOMEM; + } + ioaddr = (unsigned long) mmio; + } + + /* Work around the dropped serial bit. 
*/ + prev_eedata = eeprom_read(ioaddr, 6); + for (i = 0; i < 3; i++) { + int eedata = eeprom_read(ioaddr, i + 7); + dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15); + dev->dev_addr[i*2+1] = eedata >> 7; + prev_eedata = eedata; + } + + dev->base_addr = ioaddr; + dev->irq = irq; + + np = dev->priv; + + np->pci_dev = pdev; + pci_set_drvdata(pdev, dev); + np->iosize = iosize; + rtdm_lock_init(&np->lock); + np->msg_enable = (local_debug >= 0) ? (1<<local_debug)-1 : NATSEMI_DEF_MSG; + np->hands_off = 0; + + /* Reset the chip to erase previous misconfiguration. */ + natsemi_reload_eeprom(dev); + natsemi_reset(dev); + + option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; + if (dev->mem_start) + option = dev->mem_start; + + /* The lower four bits are the media type. */ + if (option) { + if (option & 0x200) + np->full_duplex = 1; + if (option & 15) + rtdm_printk(KERN_INFO + "%s: ignoring user supplied media type %d\n", + dev->name, option & 15); + } + if (find_cnt < MAX_UNITS && full_duplex[find_cnt]) + np->full_duplex = 1; + + /* The chip-specific entries in the device structure. */ + dev->open = &netdev_open; + dev->hard_start_xmit = &start_tx; + dev->stop = &netdev_close; + dev->get_stats = &get_stats; +/*** RTnet *** + dev->set_multicast_list = &set_rx_mode; + dev->do_ioctl = &netdev_ioctl; + dev->tx_timeout = &tx_timeout; + dev->watchdog_timeo = TX_TIMEOUT; + *** RTnet ***/ + + if (mtu) + dev->mtu = mtu; + +/*** RTnet ***/ + i = rt_register_rtnetdev(dev); + if (i) { + goto err_out_unmap; + } +/*** RTnet ***/ + + rtnetif_carrier_off(dev); + + if (netif_msg_drv(np)) { + rtdm_printk(KERN_INFO "%s: %s at %#08lx, ", + dev->name, natsemi_pci_info[chip_idx].name, ioaddr); + for (i = 0; i < ETH_ALEN-1; i++) + rtdm_printk("%02x:", dev->dev_addr[i]); + rtdm_printk("%02x, IRQ %d.\n", dev->dev_addr[i], irq); + } + + np->advertising = mdio_read(dev, 1, MII_ADVERTISE); + if ((readl((void *)(ioaddr + ChipConfig)) & 0xe000) != 0xe000 + && netif_msg_probe(np)) { + u32 chip_config = readl((void *)(ioaddr + ChipConfig)); + rtdm_printk(KERN_INFO "%s: Transceiver default autonegotiation %s " + "10%s %s duplex.\n", + dev->name, + chip_config & CfgAnegEnable ? + "enabled, advertise" : "disabled, force", + chip_config & CfgAneg100 ? "0" : "", + chip_config & CfgAnegFull ? "full" : "half"); + } + if (netif_msg_probe(np)) + rtdm_printk(KERN_INFO + "%s: Transceiver status %#04x advertising %#04x.\n", + dev->name, mdio_read(dev, 1, MII_BMSR), + np->advertising); + + /* save the silicon revision for later querying */ + np->srr = readl((void *)(ioaddr + SiliconRev)); + if (netif_msg_hw(np)) + rtdm_printk(KERN_INFO "%s: silicon revision %#04x.\n", + dev->name, np->srr); + + + return 0; + +err_out_unmap: +#ifdef USE_MEM + iounmap((void *)ioaddr); +err_out_free_res: +#endif + pci_release_regions(pdev); +/*err_out_free_netdev:*/ +/*** RTnet ***/ + rt_rtdev_disconnect(dev); + rtdev_free(dev); +/*** RTnet ***/ +err_out: + return -ENODEV; + +} + + +/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. + The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */ + +/* Delay between EEPROM clock transitions. + No extra delay is needed with 33MHz PCI, but future 66MHz access may need + a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that + made udelay() unreliable. + The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is + deprecated. 
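 In this driver eeprom_delay() is simply a dummy readl() of the EEPROM + control register: the read cannot complete before the preceding posted + PCI write has reached the chip, which should give enough spacing between + clock edges.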
+*/ +#define eeprom_delay(ee_addr) readl((void *)(ee_addr)) + +#define EE_Write0 (EE_ChipSelect) +#define EE_Write1 (EE_ChipSelect | EE_DataIn) + +/* The EEPROM commands include the always-set leading bit. */ +enum EEPROM_Cmds { + EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6), +}; + +static int eeprom_read(long addr, int location) +{ + int i; + int retval = 0; + long ee_addr = addr + EECtrl; + int read_cmd = location | EE_ReadCmd; + writel(EE_Write0, (void *)ee_addr); + + /* Shift the read command bits out. */ + for (i = 10; i >= 0; i--) { + short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0; + writel(dataval, (void *)ee_addr); + eeprom_delay(ee_addr); + writel(dataval | EE_ShiftClk, (void *)ee_addr); + eeprom_delay(ee_addr); + } + writel(EE_ChipSelect, (void *)ee_addr); + eeprom_delay(ee_addr); + + for (i = 0; i < 16; i++) { + writel(EE_ChipSelect | EE_ShiftClk, (void *)ee_addr); + eeprom_delay(ee_addr); + retval |= (readl((void *)ee_addr) & EE_DataOut) ? 1 << i : 0; + writel(EE_ChipSelect, (void *)ee_addr); + eeprom_delay(ee_addr); + } + + /* Terminate the EEPROM access. */ + writel(EE_Write0, (void *)ee_addr); + writel(0, (void *)ee_addr); + return retval; +} + +/* MII transceiver control section. + * The 83815 series has an internal transceiver, and we present the + * management registers as if they were MII connected. */ + +static int mdio_read(struct rtnet_device *dev, int phy_id, int reg) +{ + if (phy_id == 1 && reg < 32) + return readl((void *)(dev->base_addr+BasicControl+(reg<<2)))&0xffff; + else + return 0xffff; +} +/*** RTnet +static void mdio_write(struct rtnet_device *dev, int phy_id, int reg, u16 data) +{ + struct netdev_private *np = dev->priv; + if (phy_id == 1 && reg < 32) { + writew(data, dev->base_addr+BasicControl+(reg<<2)); + switch (reg) { + case MII_ADVERTISE: np->advertising = data; break; + } + } +} +RTnet ***/ +/* CFG bits [13:16] [18:23] */ +#define CFG_RESET_SAVE 0xfde000 +/* WCSR bits [0:4] [9:10] */ +#define WCSR_RESET_SAVE 0x61f +/* RFCR bits [20] [22] [27:31] */ +#define RFCR_RESET_SAVE 0xf8500000 + +static void natsemi_reset(struct rtnet_device *dev) +{ + int i; + u32 cfg; + u32 wcsr; + u32 rfcr; + u16 pmatch[3]; + u16 sopass[3]; + struct netdev_private *np = dev->priv; + + /* + * Resetting the chip causes some registers to be lost. + * Natsemi suggests NOT reloading the EEPROM while live, so instead + * we save the state that would have been loaded from EEPROM + * on a normal power-up (see the spec EEPROM map). This assumes + * whoever calls this will follow up with init_registers() eventually. 
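 + * In short: selected bits of CFG, WCSR and RFCR (see the *_RESET_SAVE + * masks above) plus the PMATCH and SOPAS words of the Rx filter RAM are + * read out first, the chip is reset, and the saved values are then + * merged back over the post-reset defaults.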
+ */ + + /* CFG */ + cfg = readl((void *)(dev->base_addr + ChipConfig)) & CFG_RESET_SAVE; + /* WCSR */ + wcsr = readl((void *)(dev->base_addr + WOLCmd)) & WCSR_RESET_SAVE; + /* RFCR */ + rfcr = readl((void *)(dev->base_addr + RxFilterAddr)) & RFCR_RESET_SAVE; + /* PMATCH */ + for (i = 0; i < 3; i++) { + writel(i*2, (void *)(dev->base_addr + RxFilterAddr)); + pmatch[i] = readw((void *)(dev->base_addr + RxFilterData)); + } + /* SOPAS */ + for (i = 0; i < 3; i++) { + writel(0xa+(i*2), (void *)(dev->base_addr + RxFilterAddr)); + sopass[i] = readw((void *)(dev->base_addr + RxFilterData)); + } + + /* now whack the chip */ + writel(ChipReset, (void *)(dev->base_addr + ChipCmd)); + for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { + if (!(readl((void *)(dev->base_addr + ChipCmd)) & ChipReset)) + break; + udelay(5); + } + if (i==NATSEMI_HW_TIMEOUT) { + rtdm_printk(KERN_WARNING "%s: reset did not complete in %d usec.\n", + dev->name, i*5); + } else if (netif_msg_hw(np)) { + rtdm_printk(KERN_DEBUG "%s: reset completed in %d usec.\n", + dev->name, i*5); + } + + /* restore CFG */ + cfg |= readl((void *)(dev->base_addr + ChipConfig)) & ~CFG_RESET_SAVE; + writel(cfg, (void *)(dev->base_addr + ChipConfig)); + /* restore WCSR */ + wcsr |= readl((void *)(dev->base_addr + WOLCmd)) & ~WCSR_RESET_SAVE; + writel(wcsr, (void *)(dev->base_addr + WOLCmd)); + /* read RFCR */ + rfcr |= readl((void *)(dev->base_addr + RxFilterAddr)) & ~RFCR_RESET_SAVE; + /* restore PMATCH */ + for (i = 0; i < 3; i++) { + writel(i*2, (void *)(dev->base_addr + RxFilterAddr)); + writew(pmatch[i], (void *)(dev->base_addr + RxFilterData)); + } + for (i = 0; i < 3; i++) { + writel(0xa+(i*2), (void *)(dev->base_addr + RxFilterAddr)); + writew(sopass[i], (void *)(dev->base_addr + RxFilterData)); + } + /* restore RFCR */ + writel(rfcr, (void *)(dev->base_addr + RxFilterAddr)); +} + +static void natsemi_reload_eeprom(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + int i; + + writel(EepromReload, (void *)(dev->base_addr + PCIBusCfg)); + for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { + udelay(50); + if (!(readl((void *)(dev->base_addr + PCIBusCfg)) & EepromReload)) + break; + } + if (i==NATSEMI_HW_TIMEOUT) { + rtdm_printk(KERN_WARNING "%s: EEPROM did not reload in %d usec.\n", + dev->name, i*50); + } else if (netif_msg_hw(np)) { + rtdm_printk(KERN_DEBUG "%s: EEPROM reloaded in %d usec.\n", + dev->name, i*50); + } +} + +static void natsemi_stop_rxtx(struct rtnet_device *dev) +{ + long ioaddr = dev->base_addr; + struct netdev_private *np = dev->priv; + int i; + + writel(RxOff | TxOff, (void *)(ioaddr + ChipCmd)); + for(i=0;i< NATSEMI_HW_TIMEOUT;i++) { + if ((readl((void *)(ioaddr + ChipCmd)) & (TxOn|RxOn)) == 0) + break; + udelay(5); + } + if (i==NATSEMI_HW_TIMEOUT) { + rtdm_printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n", + dev->name, i*5); + } else if (netif_msg_hw(np)) { + rtdm_printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n", + dev->name, i*5); + } +} + +static int netdev_open(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + long ioaddr = dev->base_addr; + int i; + + /* Reset the chip, just in case. 
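 + * The rest of the open sequence below: connect the RTnet stack, request + * the shared RTDM interrupt, allocate and initialize the descriptor rings, + * program the chip registers, load the MAC address into the Rx filter and + * only then start the queue.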
*/ + natsemi_reset(dev); + +/*** RTnet ***/ + rt_stack_connect(dev, &STACK_manager); + i = rtdm_irq_request(&np->irq_handle, dev->irq, intr_handler, + RTDM_IRQTYPE_SHARED, "rt_natsemi", dev); +/*** RTnet ***/ +/* i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);*/ + if (i) { + return i; + } + + if (netif_msg_ifup(np)) + rtdm_printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", + dev->name, dev->irq); + i = alloc_ring(dev); + if (i < 0) { + rtdm_irq_free(&np->irq_handle); + return i; + } + init_ring(dev); + init_registers(dev); + /* now set the MAC address according to dev->dev_addr */ + for (i = 0; i < 3; i++) { + u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i]; + + writel(i*2, (void *)(ioaddr + RxFilterAddr)); + writew(mac, (void *)(ioaddr + RxFilterData)); + } + writel(np->cur_rx_mode, (void *)(ioaddr + RxFilterAddr)); + + rtnetif_start_queue(dev); /*** RTnet ***/ + + if (netif_msg_ifup(np)) + rtdm_printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n", + dev->name, (int)readl((void *)(ioaddr + ChipCmd))); + +/*** RTnet ***/ + /* Set the timer to check for link beat. */ +/*** RTnet ***/ + + return 0; +} + +static void do_cable_magic(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + + if (np->srr >= SRR_DP83816_A5) + return; + + /* + * 100 MBit links with short cables can trip an issue with the chip. + * The problem manifests as lots of CRC errors and/or flickering + * activity LED while idle. This process is based on instructions + * from engineers at National. + */ + if (readl((void *)(dev->base_addr + ChipConfig)) & CfgSpeed100) { + u16 data; + + writew(1, (void *)(dev->base_addr + PGSEL)); + /* + * coefficient visibility should already be enabled via + * DSPCFG | 0x1000 + */ + data = readw((void *)(dev->base_addr + TSTDAT)) & 0xff; + /* + * the value must be negative, and within certain values + * (these values all come from National) + */ + if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) { + struct netdev_private *np = dev->priv; + + /* the bug has been triggered - fix the coefficient */ + writew(TSTDAT_FIXED, (void *)(dev->base_addr + TSTDAT)); + /* lock the value */ + data = readw((void *)(dev->base_addr + DSPCFG)); + np->dspcfg = data | DSPCFG_LOCK; + writew(np->dspcfg, (void *)(dev->base_addr + DSPCFG)); + } + writew(0, (void *)(dev->base_addr + PGSEL)); + } +} + +static void undo_cable_magic(struct rtnet_device *dev) +{ + u16 data; + struct netdev_private *np = dev->priv; + + if (np->srr >= SRR_DP83816_A5) + return; + + writew(1, (void *)(dev->base_addr + PGSEL)); + /* make sure the lock bit is clear */ + data = readw((void *)(dev->base_addr + DSPCFG)); + np->dspcfg = data & ~DSPCFG_LOCK; + writew(np->dspcfg, (void *)(dev->base_addr + DSPCFG)); + writew(0, (void *)(dev->base_addr + PGSEL)); +} + +static void check_link(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + long ioaddr = dev->base_addr; + int duplex; + int chipcfg = readl((void *)(ioaddr + ChipConfig)); + + if (!(chipcfg & CfgLink)) { + if (rtnetif_carrier_ok(dev)) { + if (netif_msg_link(np)) + rtdm_printk(KERN_NOTICE "%s: link down.\n", + dev->name); + rtnetif_carrier_off(dev); + undo_cable_magic(dev); + } + return; + } + if (!rtnetif_carrier_ok(dev)) { + if (netif_msg_link(np)) + rtdm_printk(KERN_NOTICE "%s: link up.\n", dev->name); + rtnetif_carrier_on(dev); + do_cable_magic(dev); + } + + duplex = np->full_duplex || (chipcfg & CfgFullDuplex ? 
1 : 0); + + /* if duplex is set then bit 28 must be set, too */ + if (duplex ^ !!(np->rx_config & RxAcceptTx)) { + if (netif_msg_link(np)) + rtdm_printk(KERN_INFO + "%s: Setting %s-duplex based on negotiated " + "link capability.\n", dev->name, + duplex ? "full" : "half"); + if (duplex) { + np->rx_config |= RxAcceptTx; + np->tx_config |= TxCarrierIgn | TxHeartIgn; + } else { + np->rx_config &= ~RxAcceptTx; + np->tx_config &= ~(TxCarrierIgn | TxHeartIgn); + } + writel(np->tx_config, (void *)(ioaddr + TxConfig)); + writel(np->rx_config, (void *)(ioaddr + RxConfig)); + } +} + +static void init_registers(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + long ioaddr = dev->base_addr; + int i; + + for (i=0;i<NATSEMI_HW_TIMEOUT;i++) { + if (readl((void *)(dev->base_addr + ChipConfig)) & CfgAnegDone) + break; + udelay(10); + } + if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) { + rtdm_printk(KERN_INFO + "%s: autonegotiation did not complete in %d usec.\n", + dev->name, i*10); + } + + /* On page 78 of the spec, they recommend some settings for "optimum + performance" to be done in sequence. These settings optimize some + of the 100Mbit autodetection circuitry. They say we only want to + do this for rev C of the chip, but engineers at NSC (Bradley + Kennedy) recommends always setting them. If you don't, you get + errors on some autonegotiations that make the device unusable. + */ + writew(1, (void *)(ioaddr + PGSEL)); + writew(PMDCSR_VAL, (void *)(ioaddr + PMDCSR)); + writew(TSTDAT_VAL, (void *)(ioaddr + TSTDAT)); + writew(DSPCFG_VAL, (void *)(ioaddr + DSPCFG)); + writew(SDCFG_VAL, (void *)(ioaddr + SDCFG)); + writew(0, (void *)(ioaddr + PGSEL)); + np->dspcfg = DSPCFG_VAL; + + /* Enable PHY Specific event based interrupts. Link state change + and Auto-Negotiation Completion are among the affected. + Read the intr status to clear it (needed for wake events). + */ + readw((void *)(ioaddr + MIntrStatus)); + writew(MICRIntEn, (void *)(ioaddr + MIntrCtrl)); + + /* clear any interrupts that are pending, such as wake events */ + readl((void *)(ioaddr + IntrStatus)); + + writel(np->ring_dma, (void *)(ioaddr + RxRingPtr)); + writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc), + (void *)(ioaddr + TxRingPtr)); + + /* Initialize other registers. + * Configure the PCI bus bursts and FIFO thresholds. + * Configure for standard, in-spec Ethernet. + * Start with half-duplex. check_link will update + * to the correct settings. + */ + + /* DRTH: 2: start tx if 64 bytes are in the fifo + * FLTH: 0x10: refill with next packet if 512 bytes are free + * MXDMA: 0: up to 256 byte bursts. + * MXDMA must be <= FLTH + * ECRETRY=1 + * ATP=1 + */ + np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | (0x1002); + writel(np->tx_config, (void *)(ioaddr + TxConfig)); + + /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo + * MXDMA 0: up to 256 byte bursts + */ + np->rx_config = RxMxdma_256 | 0x20; + writel(np->rx_config, (void *)(ioaddr + RxConfig)); + + /* Disable PME: + * The PME bit is initialized from the EEPROM contents. + * PCI cards probably have PME disabled, but motherboard + * implementations may have PME set to enable WakeOnLan. + * With PME set the chip will scan incoming packets but + * nothing will be written to memory. 
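 + * The pre-reset value is kept in np->SavedClkRun so that netdev_close() + * can either restore it unchanged or re-enable PME for wake-on-lan via + * enable_wol_mode().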
*/ + np->SavedClkRun = readl((void *)(ioaddr + ClkRun)); + writel(np->SavedClkRun & ~PMEEnable, (void *)(ioaddr + ClkRun)); + if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) { + rtdm_printk(KERN_NOTICE "%s: Wake-up event %#08x\n", + dev->name, readl((void *)(ioaddr + WOLCmd))); + } + + check_link(dev); + __set_rx_mode(dev); + + /* Enable interrupts by setting the interrupt mask. */ + writel(DEFAULT_INTR, (void *)(ioaddr + IntrMask)); + writel(1, (void *)(ioaddr + IntrEnable)); + + writel(RxOn | TxOn, (void *)(ioaddr + ChipCmd)); + writel(StatsClear, (void *)(ioaddr + StatsCtrl)); /* Clear Stats */ +} + +/* + * netdev_timer: + * Purpose: + * 1) check for link changes. Usually they are handled by the MII interrupt + * but it doesn't hurt to check twice. + * 2) check for sudden death of the NIC: + * It seems that a reference set for this chip went out with incorrect info, + * and there exist boards that aren't quite right. An unexpected voltage + * drop can cause the PHY to get itself in a weird state (basically reset). + * NOTE: this only seems to affect revC chips. + * 3) check of death of the RX path due to OOM + */ +/*** RTnet ***/ +/*** RTnet ***/ + +static void dump_ring(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + + if (netif_msg_pktdata(np)) { + int i; + rtdm_printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring); + for (i = 0; i < TX_RING_SIZE; i++) { + rtdm_printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n", + i, np->tx_ring[i].next_desc, + np->tx_ring[i].cmd_status, + np->tx_ring[i].addr); + } + rtdm_printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); + for (i = 0; i < RX_RING_SIZE; i++) { + rtdm_printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n", + i, np->rx_ring[i].next_desc, + np->rx_ring[i].cmd_status, + np->rx_ring[i].addr); + } + } +} + +/*** RTnet ***/ +/*** RTnet ***/ + +static int alloc_ring(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev, + sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE), + &np->ring_dma, GFP_ATOMIC); + if (!np->rx_ring) + return -ENOMEM; + np->tx_ring = &np->rx_ring[RX_RING_SIZE]; + return 0; +} + +static void refill_rx(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + + /* Refill the Rx ring buffers. */ + for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { + struct rtskb *skb; + int entry = np->dirty_rx % RX_RING_SIZE; + if (np->rx_skbuff[entry] == NULL) { + skb = rtnetdev_alloc_rtskb(dev, np->rx_buf_sz); + np->rx_skbuff[entry] = skb; + if (skb == NULL) + break; /* Better luck next round. */ + np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev, + skb->data, np->rx_buf_sz, DMA_FROM_DEVICE); + np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]); + } + np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz); + } + if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) { + if (netif_msg_rx_err(np)) + rtdm_printk(KERN_WARNING "%s: going OOM.\n", dev->name); + np->oom = 1; + } +} + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. 
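 + * Both rings live in one coherent DMA block, RX_RING_SIZE Rx descriptors + * followed by TX_RING_SIZE Tx descriptors (see alloc_ring). cur_rx/cur_tx + * and dirty_rx/dirty_tx are free-running counters, so a slot index is + * always (counter % RING_SIZE) and (cur - dirty) is the number of + * descriptors currently in flight.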
*/ +static void init_ring(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + int i; + + /* 1) TX ring */ + np->dirty_tx = np->cur_tx = 0; + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_skbuff[i] = NULL; + np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma + +sizeof(struct netdev_desc) + *((i+1)%TX_RING_SIZE+RX_RING_SIZE)); + np->tx_ring[i].cmd_status = 0; + } + + /* 2) RX ring */ + np->dirty_rx = 0; + np->cur_rx = RX_RING_SIZE; + np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + np->oom = 0; + np->rx_head_desc = &np->rx_ring[0]; + + /* Please be careful before changing this loop - at least gcc-2.95.1 + * miscompiles it otherwise. + */ + /* Initialize all Rx descriptors. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma + +sizeof(struct netdev_desc) + *((i+1)%RX_RING_SIZE)); + np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn); + np->rx_skbuff[i] = NULL; + } + refill_rx(dev); + dump_ring(dev); +} + +static void drain_tx(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + int i; + + for (i = 0; i < TX_RING_SIZE; i++) { + if (np->tx_skbuff[i]) { + dma_unmap_single(&np->pci_dev->dev, + np->tx_dma[i], np->tx_skbuff[i]->len, + DMA_TO_DEVICE); + dev_kfree_rtskb(np->tx_skbuff[i]); + np->stats.tx_dropped++; + } + np->tx_skbuff[i] = NULL; + } +} + +static void drain_ring(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + int i; + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].cmd_status = 0; + np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */ + if (np->rx_skbuff[i]) { + dma_unmap_single(&np->pci_dev->dev, + np->rx_dma[i], np->rx_skbuff[i]->len, + DMA_FROM_DEVICE); + dev_kfree_rtskb(np->rx_skbuff[i]); + } + np->rx_skbuff[i] = NULL; + } + drain_tx(dev); +} + +static void free_ring(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + dma_free_coherent(&np->pci_dev->dev, + sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE), + np->rx_ring, np->ring_dma); +} + +static int start_tx(struct rtskb *skb, struct rtnet_device *dev) /*** RTnet ***/ +{ + struct netdev_private *np = dev->priv; + unsigned entry; +/*** RTnet ***/ + rtdm_lockctx_t context; +/*** RTnet ***/ + + /* Note: Ordering is important here, set the field with the + "ownership" bit last, and only then increment cur_tx. */ + + /* Calculate the next Tx descriptor entry. */ + entry = np->cur_tx % TX_RING_SIZE; + + np->tx_skbuff[entry] = skb; + np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, + skb->data,skb->len, DMA_TO_DEVICE); + + np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); + +/* spin_lock_irq(&np->lock);*/ +/*** RTnet ***/ + rtdm_lock_get_irqsave(&np->lock, context); +/*** RTnet ***/ + + if (!np->hands_off) { + /* get and patch time stamp just before the transmission */ + if (skb->xmit_stamp) + *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + + *skb->xmit_stamp); + np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); + /* StrongARM: Explicitly cache flush np->tx_ring and + * skb->data,skb->len. */ + wmb(); + np->cur_tx++; + if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) { + netdev_tx_done(dev); + if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) + rtnetif_stop_queue(dev); + } + /* Wake the potentially-idle transmit channel. 
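 + * Writing TxOn when the transmitter is already active should be + * harmless; per the DP8381x documentation it only kicks descriptor + * fetch when the Tx engine is idle.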
*/ + writel(TxOn, (void *)(dev->base_addr + ChipCmd)); + } else { + dev_kfree_rtskb(skb); /*** RTnet ***/ + np->stats.tx_dropped++; + } + +/* spin_unlock_irq(&np->lock);*/ +/*** RTnet ***/ + rtdm_lock_put_irqrestore(&np->lock, context); +/*** RTnet ***/ + +/* dev->trans_start = jiffies;*/ + + if (netif_msg_tx_queued(np)) { + rtdm_printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", + dev->name, np->cur_tx, entry); + } + return 0; +} + +static void netdev_tx_done(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + + for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { + int entry = np->dirty_tx % TX_RING_SIZE; + if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn)) + break; + if (netif_msg_tx_done(np)) + rtdm_printk(KERN_DEBUG + "%s: tx frame #%d finished, status %#08x.\n", + dev->name, np->dirty_tx, + le32_to_cpu(np->tx_ring[entry].cmd_status)); + if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) { + np->stats.tx_packets++; + np->stats.tx_bytes += np->tx_skbuff[entry]->len; + } else { /* Various Tx errors */ + int tx_status = + le32_to_cpu(np->tx_ring[entry].cmd_status); + if (tx_status & (DescTxAbort|DescTxExcColl)) + np->stats.tx_aborted_errors++; + if (tx_status & DescTxFIFO) + np->stats.tx_fifo_errors++; + if (tx_status & DescTxCarrier) + np->stats.tx_carrier_errors++; + if (tx_status & DescTxOOWCol) + np->stats.tx_window_errors++; + np->stats.tx_errors++; + } + dma_unmap_single(&np->pci_dev->dev,np->tx_dma[entry], + np->tx_skbuff[entry]->len, + DMA_TO_DEVICE); + /* Free the original skb. */ + dev_kfree_rtskb(np->tx_skbuff[entry]); /*** RTnet ***/ +/* dev_kfree_skb_irq(np->tx_skbuff[entry]);*/ + np->tx_skbuff[entry] = NULL; + } + if (rtnetif_queue_stopped(dev) + && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { + /* The ring is no longer full, wake queue. */ + rtnetif_wake_queue(dev); + } +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static int intr_handler(rtdm_irq_t *irq_handle) +{ + nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/ + struct rtnet_device *dev = + rtdm_irq_get_arg(irq_handle, struct rtnet_device); /*** RTnet ***/ + struct netdev_private *np = dev->priv; + unsigned int old_packet_cnt = np->stats.rx_packets; /*** RTnet ***/ + long ioaddr = dev->base_addr; + int boguscnt = max_interrupt_work; + int ret = RTDM_IRQ_NONE; + + if (np->hands_off) + return ret; + do { + /* Reading automatically acknowledges all int sources. */ + u32 intr_status = readl((void *)(ioaddr + IntrStatus)); + + if (netif_msg_intr(np)) + rtdm_printk(KERN_DEBUG + "%s: Interrupt, status %#08x, mask %#08x.\n", + dev->name, intr_status, + readl((void *)(ioaddr + IntrMask))); + + if (intr_status == 0) + break; + + ret = RTDM_IRQ_HANDLED; + + if (intr_status & + (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | + IntrRxErr | IntrRxOverrun)) { + netdev_rx(dev, &time_stamp); + } + + if (intr_status & + (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) { + rtdm_lock_get(&np->lock); + netdev_tx_done(dev); + rtdm_lock_put(&np->lock); + } + + /* Abnormal error summary/uncommon events handlers. 
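 + * Dispatch order inside the handler loop: Rx work first, then Tx + * completions under the lock, then these abnormal-summary events; + * IntrStatus is re-read until it returns 0 or max_interrupt_work + * iterations have been used up.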
*/ + if (intr_status & IntrAbnormalSummary) + netdev_error(dev, intr_status); + + if (--boguscnt < 0) { + if (netif_msg_intr(np)) + rtdm_printk(KERN_WARNING + "%s: Too much work at interrupt, " + "status=%#08x.\n", + dev->name, intr_status); + break; + } + } while (1); + + if (netif_msg_intr(np)) + rtdm_printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name); + +/*** RTnet ***/ + if (old_packet_cnt != np->stats.rx_packets) + rt_mark_stack_mgr(dev); + return ret; +} + +/* This routine is logically part of the interrupt handler, but separated + for clarity and better register allocation. */ +static void netdev_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp) +{ + struct netdev_private *np = dev->priv; + int entry = np->cur_rx % RX_RING_SIZE; + int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx; + s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); + + /* If the driver owns the next entry it's a new packet. Send it up. */ + while (desc_status < 0) { /* e.g. & DescOwn */ + if (netif_msg_rx_status(np)) + rtdm_printk(KERN_DEBUG + " netdev_rx() entry %d status was %#08x.\n", + entry, desc_status); + if (--boguscnt < 0) + break; + if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ + if (desc_status & DescMore) { + if (netif_msg_rx_err(np)) + rtdm_printk(KERN_WARNING + "%s: Oversized(?) Ethernet " + "frame spanned multiple " + "buffers, entry %#08x " + "status %#08x.\n", dev->name, + np->cur_rx, desc_status); + np->stats.rx_length_errors++; + } else { + /* There was an error. */ + np->stats.rx_errors++; + if (desc_status & (DescRxAbort|DescRxOver)) + np->stats.rx_over_errors++; + if (desc_status & (DescRxLong|DescRxRunt)) + np->stats.rx_length_errors++; + if (desc_status & (DescRxInvalid|DescRxAlign)) + np->stats.rx_frame_errors++; + if (desc_status & DescRxCRC) + np->stats.rx_crc_errors++; + } + } else { + struct rtskb *skb; + /* Omit CRC size. */ + int pkt_len = (desc_status & DescSizeMask) - 4; + /* Check if the packet is long enough to accept + * without copying to a minimally-sized skbuff. */ +/*** RTnet ***/ + { + skb = np->rx_skbuff[entry]; + dma_unmap_single(&np->pci_dev->dev, + np->rx_dma[entry], + np->rx_skbuff[entry]->len, + DMA_FROM_DEVICE); + rtskb_put(skb, pkt_len); + np->rx_skbuff[entry] = NULL; + } +/*** RTnet ***/ + skb->protocol = rt_eth_type_trans(skb, dev); + skb->time_stamp = *time_stamp; + rtnetif_rx(skb); + /*dev->last_rx = jiffies;*/ +/*** RTnet ***/ + np->stats.rx_packets++; + np->stats.rx_bytes += pkt_len; + } + entry = (++np->cur_rx) % RX_RING_SIZE; + np->rx_head_desc = &np->rx_ring[entry]; + desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); + } + refill_rx(dev); + + /* Restart Rx engine if stopped. 
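 + * If refill_rx() ran out of rtskbs, np->oom is set and RxOn is not + * rewritten. The stock driver recovered through its watchdog timer (note + * the commented-out mod_timer() below); this port has no such timer and + * np->oom is only cleared in init_ring(), so recovery from OOM is + * effectively deferred until the interface is reopened.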
*/ + if (np->oom) + ; +/* mod_timer(&np->timer, jiffies + 1);*/ + else + writel(RxOn, (void *)(dev->base_addr + ChipCmd)); +} + +static void netdev_error(struct rtnet_device *dev, int intr_status) +{ + struct netdev_private *np = dev->priv; + long ioaddr = dev->base_addr; + + rtdm_lock_get(&np->lock); + if (intr_status & LinkChange) { + u16 adv = mdio_read(dev, 1, MII_ADVERTISE); + u16 lpa = mdio_read(dev, 1, MII_LPA); + if (mdio_read(dev, 1, MII_BMCR) & BMCR_ANENABLE + && netif_msg_link(np)) { + rtdm_printk(KERN_INFO + "%s: Autonegotiation advertising" + " %#04x partner %#04x.\n", dev->name, + adv, lpa); + } + + /* read MII int status to clear the flag */ + readw((void *)(ioaddr + MIntrStatus)); + check_link(dev); + } + if (intr_status & StatsMax) { + __get_stats(dev); + } + if (intr_status & IntrTxUnderrun) { + if ((np->tx_config & TxDrthMask) < 62) + np->tx_config += 2; + if (netif_msg_tx_err(np)) + rtdm_printk(KERN_NOTICE + "%s: increased Tx threshold, txcfg %#08x.\n", + dev->name, np->tx_config); + writel(np->tx_config, (void *)(ioaddr + TxConfig)); + } + if (intr_status & WOLPkt && netif_msg_wol(np)) { + int wol_status = readl((void *)(ioaddr + WOLCmd)); + rtdm_printk(KERN_NOTICE "%s: Link wake-up event %#08x\n", + dev->name, wol_status); + } + if (intr_status & RxStatusFIFOOver) { + if (netif_msg_rx_err(np) && netif_msg_intr(np)) { + rtdm_printk(KERN_NOTICE "%s: Rx status FIFO overrun\n", + dev->name); + } + np->stats.rx_fifo_errors++; + } + /* Hmmmmm, it's not clear how to recover from PCI faults. */ + if (intr_status & IntrPCIErr) { + rtdm_printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name, + intr_status & IntrPCIErr); + np->stats.tx_fifo_errors++; + np->stats.rx_fifo_errors++; + } + rtdm_lock_put(&np->lock); +} + +static void __get_stats(struct rtnet_device *dev) +{ + long ioaddr = dev->base_addr; + struct netdev_private *np = dev->priv; + + /* The chip only needs to report frames silently dropped. */ + np->stats.rx_crc_errors += readl((void *)(ioaddr + RxCRCErrs)); + np->stats.rx_missed_errors += readl((void *)(ioaddr + RxMissed)); +} + +static struct net_device_stats *get_stats(struct rtnet_device *rtdev) +{ + struct netdev_private *np = rtdev->priv; + rtdm_lockctx_t context; + + /* The chip only needs to report frames silently dropped. */ + rtdm_lock_get_irqsave(&np->lock, context); + if (rtnetif_running(rtdev) && !np->hands_off) + __get_stats(rtdev); + rtdm_lock_put_irqrestore(&np->lock, context); + + return &np->stats; +} + +#define HASH_TABLE 0x200 +static void __set_rx_mode(struct rtnet_device *dev) +{ + long ioaddr = dev->base_addr; + struct netdev_private *np = dev->priv; + u8 mc_filter[64]; /* Multicast hash filter */ + u32 rx_mode; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + /* Unconditionally log net taps. 
*/ + rtdm_printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", + dev->name); + rx_mode = RxFilterEnable | AcceptBroadcast + | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys; + } else if (dev->flags & IFF_ALLMULTI) { + rx_mode = RxFilterEnable | AcceptBroadcast + | AcceptAllMulticast | AcceptMyPhys; + } else { + int i; + + memset(mc_filter, 0, sizeof(mc_filter)); + rx_mode = RxFilterEnable | AcceptBroadcast + | AcceptMulticast | AcceptMyPhys; + for (i = 0; i < 64; i += 2) { + writew(HASH_TABLE + i, (void *)(ioaddr + RxFilterAddr)); + writew((mc_filter[i+1]<<8) + mc_filter[i], + (void *)(ioaddr + RxFilterData)); + } + } + writel(rx_mode, (void *)(ioaddr + RxFilterAddr)); + np->cur_rx_mode = rx_mode; +} +/*** RTnet +static void set_rx_mode(struct rtnet_device *dev) +{ + struct netdev_private *np = dev->priv; + spin_lock_irq(&np->lock); + if (!np->hands_off) + __set_rx_mode(dev); + spin_unlock_irq(&np->lock); +} +RTnet ***/ +/*** RTnet ***/ +/*** RTnet ***/ + +static void enable_wol_mode(struct rtnet_device *dev, int enable_intr) +{ + long ioaddr = dev->base_addr; + struct netdev_private *np = dev->priv; + + if (netif_msg_wol(np)) + rtdm_printk(KERN_INFO "%s: remaining active for wake-on-lan\n", + dev->name); + + /* For WOL we must restart the rx process in silent mode. + * Write NULL to the RxRingPtr. Only possible if + * rx process is stopped + */ + writel(0, (void *)(ioaddr + RxRingPtr)); + + /* read WoL status to clear */ + readl((void *)(ioaddr + WOLCmd)); + + /* PME on, clear status */ + writel(np->SavedClkRun | PMEEnable | PMEStatus, (void *)(ioaddr + ClkRun)); + + /* and restart the rx process */ + writel(RxOn, (void *)(ioaddr + ChipCmd)); + + if (enable_intr) { + /* enable the WOL interrupt. + * Could be used to send a netlink message. + */ + writel(WOLPkt | LinkChange, (void *)(ioaddr + IntrMask)); + writel(1, (void *)(ioaddr + IntrEnable)); + } +} + +static int netdev_close(struct rtnet_device *dev) +{ + int i; + long ioaddr = dev->base_addr; + struct netdev_private *np = dev->priv; + + if (netif_msg_ifdown(np)) + rtdm_printk(KERN_DEBUG + "%s: Shutting down ethercard, status was %#04x.\n", + dev->name, (int)readl((void *)(ioaddr + ChipCmd))); + if (netif_msg_pktdata(np)) + rtdm_printk(KERN_DEBUG + "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", + dev->name, np->cur_tx, np->dirty_tx, + np->cur_rx, np->dirty_rx); + + /* + * FIXME: what if someone tries to close a device + * that is suspended? + * Should we reenable the nic to switch to + * the final WOL settings? + */ +/*** RTnet *** + del_timer_sync(&np->timer); + *** RTnet ***/ +/* disable_irq(dev->irq);*/ + rtdm_irq_disable(&np->irq_handle); + rtdm_lock_get(&np->lock); + /* Disable interrupts, and flush posted writes */ + writel(0, (void *)(ioaddr + IntrEnable)); + readl((void *)(ioaddr + IntrEnable)); + np->hands_off = 1; + rtdm_lock_put(&np->lock); + +/*** RTnet ***/ + if ( (i=rtdm_irq_free(&np->irq_handle))<0 ) + return i; + + rt_stack_disconnect(dev); +/*** RTnet ***/ + +/* enable_irq(dev->irq);*/ + +/* free_irq(dev->irq, dev);*/ + + /* Interrupt disabled, interrupt handler released, + * queue stopped, timer deleted, rtnl_lock held + * All async codepaths that access the driver are disabled. + */ + rtdm_lock_get(&np->lock); + np->hands_off = 0; + readl((void *)(ioaddr + IntrMask)); + readw((void *)(ioaddr + MIntrStatus)); + + /* Freeze Stats */ + writel(StatsFreeze, (void *)(ioaddr + StatsCtrl)); + + /* Stop the chip's Tx and Rx processes. 
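 + * Teardown order in netdev_close() matters: the IRQ is disabled and + * np->hands_off is set first so no asynchronous path touches the + * hardware, then Rx/Tx are stopped and the stats harvested, the carrier + * is cleared, the rings are drained and freed, and finally either WOL + * mode or the saved ClkRun value is restored.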
*/ + natsemi_stop_rxtx(dev); + + __get_stats(dev); + rtdm_lock_put(&np->lock); + + /* clear the carrier last - an interrupt could reenable it otherwise */ + rtnetif_carrier_off(dev); + rtnetif_stop_queue(dev); + + dump_ring(dev); + drain_ring(dev); + free_ring(dev); + + { + u32 wol = readl((void *)(ioaddr + WOLCmd)) & WakeOptsSummary; + if (wol) { + /* restart the NIC in WOL mode. + * The nic must be stopped for this. + */ + enable_wol_mode(dev, 0); + } else { + /* Restore PME enable bit unmolested */ + writel(np->SavedClkRun, (void *)(ioaddr + ClkRun)); + } + } + + return 0; +} + + +static void natsemi_remove1 (struct pci_dev *pdev) +{ + + /*** RTnet ***/ + struct rtnet_device *dev = pci_get_drvdata(pdev); + + rt_unregister_rtnetdev(dev); + rt_rtdev_disconnect(dev); +/*** RTnet ***/ + + pci_release_regions (pdev); + iounmap ((char *) dev->base_addr); + rtdev_free(dev); /*** RTnet ***/ + pci_set_drvdata(pdev, NULL); +} + +#ifdef CONFIG_PM + +/* + * The ns83815 chip doesn't have explicit RxStop bits. + * Kicking the Rx or Tx process for a new packet reenables the Rx process + * of the nic, thus this function must be very careful: + * + * suspend/resume synchronization: + * entry points: + * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler, + * start_tx, tx_timeout + * + * No function accesses the hardware without checking np->hands_off. + * the check occurs under spin_lock_irq(&np->lock); + * exceptions: + * * netdev_ioctl: noncritical access. + * * netdev_open: cannot happen due to the device_detach + * * netdev_close: doesn't hurt. + * * netdev_timer: timer stopped by natsemi_suspend. + * * intr_handler: doesn't acquire the spinlock. suspend calls + * disable_irq() to enforce synchronization. + * + * Interrupts must be disabled, otherwise hands_off can cause irq storms. + */ + +#endif /* CONFIG_PM */ + +static struct pci_driver natsemi_driver = { + .name = DRV_NAME, + .id_table = natsemi_pci_tbl, + .probe = natsemi_probe1, + .remove = natsemi_remove1, +/*#ifdef CONFIG_PM*/ +}; + +static int __init natsemi_init_mod (void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + rtdm_printk(version); +#endif + + return pci_register_driver (&natsemi_driver); +} + +static void __exit natsemi_exit_mod (void) +{ + pci_unregister_driver (&natsemi_driver); +} + +module_init(natsemi_init_mod); +module_exit(natsemi_exit_mod); diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c new file mode 100644 index 0000000..eebb66e --- /dev/null +++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c @@ -0,0 +1,1657 @@ +/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ +/* + * Copyright 1996-1999 Thomas Bogendoerfer + * + * Derived from the lance driver written 1993,1994,1995 by Donald Becker. + * + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This driver is for PCnet32 and PCnetPCI based ethercards + */ +/************************************************************************** + * 23 Oct, 2000. + * Fixed a few bugs, related to running the controller in 32bit mode. + * + * Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 
+ * + * Ported to RTnet: September 2003, Jan Kiszka <Jan.Kiszka@web.de> + *************************************************************************/ + +#define DRV_NAME "pcnet32-rt" +#define DRV_VERSION "1.27a-RTnet-0.2" +#define DRV_RELDATE "2003-09-24" +#define PFX DRV_NAME ": " + +static const char *version = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Jan.Kiszka@web.de\n"; + +#include <linux/module.h> + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/crc32.h> +#include <linux/uaccess.h> +#include <asm/bitops.h> +#include <asm/io.h> +#include <asm/dma.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> + +/*** RTnet ***/ +#include <rtnet_port.h> + +#define MAX_UNITS 8 /* More are supported, limit only on options */ +#define DEFAULT_RX_POOL_SIZE 16 + +static int cards[MAX_UNITS] = { [0 ...(MAX_UNITS - 1)] = 1 }; +module_param_array(cards, int, NULL, 0444); +MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)"); +/*** RTnet ***/ + +/* + * PCI device identifiers for "new style" Linux PCI Device Drivers + */ +static struct pci_device_id pcnet32_pci_tbl[] = { + { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, + PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, + 0, 0 }, + { + 0, + } +}; + +MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl); + +static int cards_found = -1; +static int pcnet32_have_pci; + +/* + * VLB I/O addresses + */ +static unsigned int pcnet32_portlist[] = { 0x300, 0x320, 0x340, 0x360, 0 }; + +static int pcnet32_debug = 1; +static int tx_start = + 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ +static int pcnet32vlb; /* check for VLB cards ? 
*/ + +static struct rtnet_device *pcnet32_dev; /*** RTnet ***/ + +static int max_interrupt_work = 80; +/*** RTnet *** +static int rx_copybreak = 200; + *** RTnet ***/ + +#define PCNET32_PORT_AUI 0x00 +#define PCNET32_PORT_10BT 0x01 +#define PCNET32_PORT_GPSI 0x02 +#define PCNET32_PORT_MII 0x03 + +#define PCNET32_PORT_PORTSEL 0x03 +#define PCNET32_PORT_ASEL 0x04 +#define PCNET32_PORT_100 0x40 +#define PCNET32_PORT_FD 0x80 + +#define PCNET32_DMA_MASK 0xffffffff + +/* + * table to translate option values from tulip + * to internal options + */ +static unsigned char options_mapping[] = { + PCNET32_PORT_ASEL, /* 0 Auto-select */ + PCNET32_PORT_AUI, /* 1 BNC/AUI */ + PCNET32_PORT_AUI, /* 2 AUI/BNC */ + PCNET32_PORT_ASEL, /* 3 not supported */ + PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ + PCNET32_PORT_ASEL, /* 5 not supported */ + PCNET32_PORT_ASEL, /* 6 not supported */ + PCNET32_PORT_ASEL, /* 7 not supported */ + PCNET32_PORT_ASEL, /* 8 not supported */ + PCNET32_PORT_MII, /* 9 MII 10baseT */ + PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ + PCNET32_PORT_MII, /* 11 MII (autosel) */ + PCNET32_PORT_10BT, /* 12 10BaseT */ + PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ + PCNET32_PORT_MII | PCNET32_PORT_100 | + PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */ + PCNET32_PORT_ASEL /* 15 not supported */ +}; + +static int options[MAX_UNITS]; +static int full_duplex[MAX_UNITS]; + +/* + * Theory of Operation + * + * This driver uses the same software structure as the normal lance + * driver. So look for a verbose description in lance.c. The differences + * to the normal lance driver is the use of the 32bit mode of PCnet32 + * and PCnetPCI chips. Because these chips are 32bit chips, there is no + * 16MB limitation and we don't need bounce buffers. 
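 + * 32bit mode here means software style 2, selected via BCR20 in + * pcnet32_probe1(): descriptor base addresses become full 32-bit bus + * addresses, so buffers may sit anywhere the PCNET32_DMA_MASK below can + * reach.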
+ */ + +/* + * History: + * v0.01: Initial version + * only tested on Alpha Noname Board + * v0.02: changed IRQ handling for new interrupt scheme (dev_id) + * tested on a ASUS SP3G + * v0.10: fixed an odd problem with the 79C974 in a Compaq Deskpro XL + * looks like the 974 doesn't like stopping and restarting in a + * short period of time; now we do a reinit of the lance; the + * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr> + * and hangs the machine (thanks to Klaus Liedl for debugging) + * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32, + * made it standalone (no need for lance.c) + * v0.13: added additional PCI detecting for special PCI devices (Compaq) + * v0.14: stripped down additional PCI probe (thanks to David C Niemi + * and sveneric@xs4all.nl for testing this on their Compaq boxes) + * v0.15: added 79C965 (VLB) probe + * added interrupt sharing for PCI chips + * v0.16: fixed set_multicast_list on Alpha machines + * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c + * v0.19: changed setting of autoselect bit + * v0.20: removed additional Compaq PCI probe; there is now a working one + * in arch/i386/bios32.c + * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu + * v0.22: added printing of status to ring dump + * v0.23: changed enet_statistics to net_devive_stats + * v0.90: added multicast filter + * added module support + * changed irq probe to new style + * added PCnetFast chip id + * added fix for receive stalls with Intel saturn chipsets + * added in-place rx skbs like in the tulip driver + * minor cleanups + * v0.91: added PCnetFast+ chip id + * back port to 2.0.x + * v1.00: added some stuff from Donald Becker's 2.0.34 version + * added support for byte counters in net_dev_stats + * v1.01: do ring dumps, only when debugging the driver + * increased the transmit timeout + * v1.02: fixed memory leak in pcnet32_init_ring() + * v1.10: workaround for stopped transmitter + * added port selection for modules + * detect special T1/E1 WAN card and setup port selection + * v1.11: fixed wrong checking of Tx errors + * v1.20: added check of return value kmalloc (cpeterso@cs.washington.edu) + * added save original kmalloc addr for freeing (mcr@solidum.com) + * added support for PCnetHome chip (joe@MIT.EDU) + * rewritten PCI card detection + * added dwio mode to get driver working on some PPC machines + * v1.21: added mii selection and mii ioctl + * v1.22: changed pci scanning code to make PPC people happy + * fixed switching to 32bit mode in pcnet32_open() (thanks + * to Michael Richard <mcr@solidum.com> for noticing this one) + * added sub vendor/device id matching (thanks again to + * Michael Richard <mcr@solidum.com>) + * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>) + * v1.23 fixed small bug, when manual selecting MII speed/duplex + * v1.24 Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO + * underflows. Added tx_start_pt module parameter. Increased + * TX_RING_SIZE from 16 to 32. Added #ifdef'd code to use DXSUFLO + * for FAST[+] chipsets. <kaf@fc.hp.com> + * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com> + * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com> + * v1.26 Converted to pci_alloc_consistent, Jamey Hicks / George France + * <jamey@crl.dec.com> + * - Fixed a few bugs, related to running the controller in 32bit mode. + * 23 Oct, 2000. Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. 
All rights reserved. + * v1.26p Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker + * v1.27 improved CSR/PROM address detection, lots of cleanups, + * new pcnet32vlb module option, HP-PARISC support, + * added module parameter descriptions, + * initial ethtool support - Helge Deller <deller@gmx.de> + * v1.27a Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp> + * use alloc_etherdev and register_netdev + * fix pci probe not increment cards_found + * FD auto negotiate error workaround for xSeries250 + * clean up and using new mii module + */ + +/* + * Set the number of Tx and Rx buffers, using Log_2(# buffers). + * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. + * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). + */ +#ifndef PCNET32_LOG_TX_BUFFERS +#define PCNET32_LOG_TX_BUFFERS 4 +#define PCNET32_LOG_RX_BUFFERS 3 /*** RTnet ***/ +#endif + +#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) +#define TX_RING_LEN_BITS ((PCNET32_LOG_TX_BUFFERS) << 12) + +#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) +#define RX_RING_LEN_BITS ((PCNET32_LOG_RX_BUFFERS) << 4) + +#define PKT_BUF_SZ 1544 + +/* Offsets from base I/O address. */ +#define PCNET32_WIO_RDP 0x10 +#define PCNET32_WIO_RAP 0x12 +#define PCNET32_WIO_RESET 0x14 +#define PCNET32_WIO_BDP 0x16 + +#define PCNET32_DWIO_RDP 0x10 +#define PCNET32_DWIO_RAP 0x14 +#define PCNET32_DWIO_RESET 0x18 +#define PCNET32_DWIO_BDP 0x1C + +#define PCNET32_TOTAL_SIZE 0x20 + +/* The PCNET32 Rx and Tx ring descriptors. */ +struct pcnet32_rx_head { + u32 base; + s16 buf_length; + s16 status; + u32 msg_length; + u32 reserved; +}; + +struct pcnet32_tx_head { + u32 base; + s16 length; + s16 status; + u32 misc; + u32 reserved; +}; + +/* The PCNET32 32-Bit initialization block, described in databook. */ +struct pcnet32_init_block { + u16 mode; + u16 tlen_rlen; + u8 phys_addr[6]; + u16 reserved; + u32 filter[2]; + /* Receive and transmit ring base, along with extra bits. */ + u32 rx_ring; + u32 tx_ring; +}; + +/* PCnet32 access functions */ +struct pcnet32_access { + u16 (*read_csr)(unsigned long, int); + void (*write_csr)(unsigned long, int, u16); + u16 (*read_bcr)(unsigned long, int); + void (*write_bcr)(unsigned long, int, u16); + u16 (*read_rap)(unsigned long); + void (*write_rap)(unsigned long, u16); + void (*reset)(unsigned long); +}; + +/* + * The first three fields of pcnet32_private are read by the ethernet device, + * so the structure should be allocated by pci_alloc_consistent(). + */ +struct pcnet32_private { + /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ + struct pcnet32_rx_head rx_ring[RX_RING_SIZE]; + struct pcnet32_tx_head tx_ring[TX_RING_SIZE]; + struct pcnet32_init_block init_block; + dma_addr_t dma_addr; /* DMA address of beginning of this object, + returned by pci_alloc_consistent */ + struct pci_dev + *pci_dev; /* Pointer to the associated pci device structure */ + const char *name; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + /*** RTnet ***/ + struct rtskb *tx_skbuff[TX_RING_SIZE]; + struct rtskb *rx_skbuff[RX_RING_SIZE]; + /*** RTnet ***/ + dma_addr_t tx_dma_addr[TX_RING_SIZE]; + dma_addr_t rx_dma_addr[RX_RING_SIZE]; + struct pcnet32_access a; + rtdm_lock_t lock; /* Guard lock */ + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. 
*/ + struct net_device_stats stats; + char tx_full; + int options; + int shared_irq : 1, /* shared irq possible */ + ltint : 1, /* enable TxDone-intr inhibitor */ + dxsuflo : 1, /* disable transmit stop on uflo */ + mii : 1; /* mii port available */ + struct rtnet_device *next; /*** RTnet ***/ + struct mii_if_info mii_if; + rtdm_irq_t irq_handle; +}; + +static void pcnet32_probe_vlbus(void); +static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); +static int pcnet32_probe1(unsigned long, unsigned int, int, struct pci_dev *); +/*** RTnet ***/ +static int pcnet32_open(struct rtnet_device *); +static int pcnet32_init_ring(struct rtnet_device *); +static int pcnet32_start_xmit(struct rtskb *, struct rtnet_device *); +static int pcnet32_rx(struct rtnet_device *, nanosecs_abs_t *time_stamp); +//static void pcnet32_tx_timeout (struct net_device *dev); +static int pcnet32_interrupt(rtdm_irq_t *irq_handle); +static int pcnet32_close(struct rtnet_device *); +static struct net_device_stats *pcnet32_get_stats(struct rtnet_device *); +//static void pcnet32_set_multicast_list(struct net_device *); +//static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); +//static int mdio_read(struct net_device *dev, int phy_id, int reg_num); +//static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val); +/*** RTnet ***/ + +enum pci_flags_bit { + PCI_USES_IO = 1, + PCI_USES_MEM = 2, + PCI_USES_MASTER = 4, + PCI_ADDR0 = 0x10 << 0, + PCI_ADDR1 = 0x10 << 1, + PCI_ADDR2 = 0x10 << 2, + PCI_ADDR3 = 0x10 << 3, +}; + +static u16 pcnet32_wio_read_csr(unsigned long addr, int index) +{ + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_RDP); +} + +static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val) +{ + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_RDP); +} + +static u16 pcnet32_wio_read_bcr(unsigned long addr, int index) +{ + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_BDP); +} + +static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val) +{ + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_BDP); +} + +static u16 pcnet32_wio_read_rap(unsigned long addr) +{ + return inw(addr + PCNET32_WIO_RAP); +} + +static void pcnet32_wio_write_rap(unsigned long addr, u16 val) +{ + outw(val, addr + PCNET32_WIO_RAP); +} + +static void pcnet32_wio_reset(unsigned long addr) +{ + inw(addr + PCNET32_WIO_RESET); +} + +static int pcnet32_wio_check(unsigned long addr) +{ + outw(88, addr + PCNET32_WIO_RAP); + return (inw(addr + PCNET32_WIO_RAP) == 88); +} + +static struct pcnet32_access pcnet32_wio = { + read_csr: pcnet32_wio_read_csr, + write_csr: pcnet32_wio_write_csr, + read_bcr: pcnet32_wio_read_bcr, + write_bcr: pcnet32_wio_write_bcr, + read_rap: pcnet32_wio_read_rap, + write_rap: pcnet32_wio_write_rap, + reset: pcnet32_wio_reset +}; + +static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) +{ + outl(index, addr + PCNET32_DWIO_RAP); + return (inl(addr + PCNET32_DWIO_RDP) & 0xffff); +} + +static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) +{ + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_RDP); +} + +static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) +{ + outl(index, addr + PCNET32_DWIO_RAP); + return (inl(addr + PCNET32_DWIO_BDP) & 0xffff); +} + +static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) +{ + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_BDP); 
+} + +static u16 pcnet32_dwio_read_rap(unsigned long addr) +{ + return (inl(addr + PCNET32_DWIO_RAP) & 0xffff); +} + +static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) +{ + outl(val, addr + PCNET32_DWIO_RAP); +} + +static void pcnet32_dwio_reset(unsigned long addr) +{ + inl(addr + PCNET32_DWIO_RESET); +} + +static int pcnet32_dwio_check(unsigned long addr) +{ + outl(88, addr + PCNET32_DWIO_RAP); + return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88); +} + +static struct pcnet32_access pcnet32_dwio = { + read_csr: pcnet32_dwio_read_csr, + write_csr: pcnet32_dwio_write_csr, + read_bcr: pcnet32_dwio_read_bcr, + write_bcr: pcnet32_dwio_write_bcr, + read_rap: pcnet32_dwio_read_rap, + write_rap: pcnet32_dwio_write_rap, + reset: pcnet32_dwio_reset +}; + +/* only probes for non-PCI devices, the rest are handled by + * pci_register_driver via pcnet32_probe_pci */ + +static void pcnet32_probe_vlbus(void) +{ + unsigned int *port, ioaddr; + + /* search for PCnet32 VLB cards at known addresses */ + for (port = pcnet32_portlist; (ioaddr = *port); port++) { + if (request_region(ioaddr, PCNET32_TOTAL_SIZE, + "pcnet32_probe_vlbus")) { + /* check if there is really a pcnet chip on that ioaddr */ + if ((inb(ioaddr + 14) == 0x57) && + (inb(ioaddr + 15) == 0x57)) { + pcnet32_probe1(ioaddr, 0, 0, NULL); + } else { + release_region(ioaddr, PCNET32_TOTAL_SIZE); + } + } + } +} + +static int pcnet32_probe_pci(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned long ioaddr; + int err; + + err = pci_enable_device(pdev); + if (err < 0) { + printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err); + return err; + } + pci_set_master(pdev); + + ioaddr = pci_resource_start(pdev, 0); + if (!ioaddr) { + printk(KERN_ERR PFX "card has no PCI IO resources, aborting\n"); + return -ENODEV; + } + + err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK); + if (err) { + printk(KERN_ERR PFX + "architecture does not support 32bit PCI busmaster DMA\n"); + return err; + } + + return pcnet32_probe1(ioaddr, pdev->irq, 1, pdev); +} + +/* pcnet32_probe1 + * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. + * pdev will be NULL when called from pcnet32_probe_vlbus. 
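 + * + * Rough flow: reset the chip, pick word or dword register access + * (pcnet32_wio vs. pcnet32_dwio), identify the chip from CSR88/89, read + * the MAC address from CSR12-14 and cross-check it against the address + * PROM, allocate the shared pcnet32_private block coherently, fill in + * the init block and switch the chip to 32bit mode via BCR20.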
+ */ +static int pcnet32_probe1(unsigned long ioaddr, unsigned int irq_line, + int shared, struct pci_dev *pdev) +{ + struct pcnet32_private *lp; + dma_addr_t lp_dma_addr; + int i, media; + int fdx, mii, fset, dxsuflo, ltint; + int chip_version; + char *chipname; + struct rtnet_device *dev; /*** RTnet ***/ + struct pcnet32_access *a = NULL; + u8 promaddr[6]; + + // *** RTnet *** + cards_found++; + if (cards[cards_found] == 0) + return -ENODEV; + // *** RTnet *** + + /* reset the chip */ + pcnet32_wio_reset(ioaddr); + + /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ + if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { + a = &pcnet32_wio; + } else { + pcnet32_dwio_reset(ioaddr); + if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && + pcnet32_dwio_check(ioaddr)) { + a = &pcnet32_dwio; + } else + return -ENODEV; + } + + chip_version = + a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16); + if (pcnet32_debug > 2) + printk(KERN_INFO " PCnet chip version is %#x.\n", + chip_version); + if ((chip_version & 0xfff) != 0x003) + return -ENODEV; + + /* initialize variables */ + fdx = mii = fset = dxsuflo = ltint = 0; + chip_version = (chip_version >> 12) & 0xffff; + + switch (chip_version) { + case 0x2420: + chipname = "PCnet/PCI 79C970"; /* PCI */ + break; + case 0x2430: + if (shared) + chipname = + "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ + else + chipname = "PCnet/32 79C965"; /* 486/VL bus */ + break; + case 0x2621: + chipname = "PCnet/PCI II 79C970A"; /* PCI */ + fdx = 1; + break; + case 0x2623: + chipname = "PCnet/FAST 79C971"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + ltint = 1; + break; + case 0x2624: + chipname = "PCnet/FAST+ 79C972"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + break; + case 0x2625: + chipname = "PCnet/FAST III 79C973"; /* PCI */ + fdx = 1; + mii = 1; + break; + case 0x2626: + chipname = "PCnet/Home 79C978"; /* PCI */ + fdx = 1; + /* + * This is based on specs published at www.amd.com. This section + * assumes that a card with a 79C978 wants to go into 1Mb HomePNA + * mode. The 79C978 can also go into standard ethernet, and there + * probably should be some sort of module option to select the + * mode by which the card should operate + */ + /* switch to home wiring mode */ + media = a->read_bcr(ioaddr, 49); + if (pcnet32_debug > 2) + printk(KERN_DEBUG PFX "media reset to %#x.\n", media); + a->write_bcr(ioaddr, 49, media); + break; + case 0x2627: + chipname = "PCnet/FAST III 79C975"; /* PCI */ + fdx = 1; + mii = 1; + break; + default: + printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n", + chip_version); + return -ENODEV; + } + + /* + * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit + * starting until the packet is loaded. Strike one for reliability, lose + * one for latency - although on PCI this isn't a big loss. Older chips + * have FIFOs smaller than a packet, so you can't do this. 
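 + * Concretely, the fset path below sets BCR18 bit 11 (NOUFLO) with a + * read-modify-write of BCR18 (| 0x0800), raises the transmit start + * point in CSR80 to its maximum, and then forces dxsuflo and ltint on.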
+ */ + + if (fset) { + a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0800)); + a->write_csr(ioaddr, 80, + (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); + dxsuflo = 1; + ltint = 1; + } + + /*** RTnet ***/ + dev = rt_alloc_etherdev(0, RX_RING_SIZE * 2 + TX_RING_SIZE); + if (dev == NULL) + return -ENOMEM; + rtdev_alloc_name(dev, "rteth%d"); + rt_rtdev_connect(dev, &RTDEV_manager); + dev->vers = RTDEV_VERS_2_0; + dev->sysbind = &pdev->dev; + /*** RTnet ***/ + + printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); + + /* In most chips, after a chip reset, the ethernet address is read from the + * station address PROM at the base address and programmed into the + * "Physical Address Registers" CSR12-14. + * As a precautionary measure, we read the PROM values and complain if + * they disagree with the CSRs. Either way, we use the CSR values, and + * double check that they are valid. + */ + for (i = 0; i < 3; i++) { + unsigned int val; + val = a->read_csr(ioaddr, i + 12) & 0x0ffff; + /* There may be endianness issues here. */ + dev->dev_addr[2 * i] = val & 0x0ff; + dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff; + } + + /* read PROM address and compare with CSR address */ + for (i = 0; i < 6; i++) + promaddr[i] = inb(ioaddr + i); + + if (memcmp(promaddr, dev->dev_addr, 6) || + !is_valid_ether_addr(dev->dev_addr)) { +#ifndef __powerpc__ + if (is_valid_ether_addr(promaddr)) { +#else + if (!is_valid_ether_addr(dev->dev_addr) && + is_valid_ether_addr(promaddr)) { +#endif + printk(" warning: CSR address invalid,\n"); + printk(KERN_INFO " using instead PROM address of"); + memcpy(dev->dev_addr, promaddr, 6); + } + } + + /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ + if (!is_valid_ether_addr(dev->dev_addr)) + memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); + + for (i = 0; i < 6; i++) + printk(" %2.2x", dev->dev_addr[i]); + + if (((chip_version + 1) & 0xfffe) == + 0x2624) { /* Version 0x2623 or 0x2624 */ + i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ + printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i); + switch (i >> 10) { + case 0: + printk(" 20 bytes,"); + break; + case 1: + printk(" 64 bytes,"); + break; + case 2: + printk(" 128 bytes,"); + break; + case 3: + printk("~220 bytes,"); + break; + } + i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ + printk(" BCR18(%x):", i & 0xffff); + if (i & (1 << 5)) + printk("BurstWrEn "); + if (i & (1 << 6)) + printk("BurstRdEn "); + if (i & (1 << 7)) + printk("DWordIO "); + if (i & (1 << 11)) + printk("NoUFlow "); + i = a->read_bcr(ioaddr, 25); + printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 26); + printk(" SRAM_BND=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 27); + if (i & (1 << 14)) + printk("LowLatRx"); + } + + dev->base_addr = ioaddr; + if (request_region(ioaddr, PCNET32_TOTAL_SIZE, chipname) == NULL) + return -EBUSY; + + /* dma_alloc_coherent returns page-aligned memory, so we do not have to check the alignment */ + if ((lp = dma_alloc_coherent(&pdev->dev, sizeof(*lp), &lp_dma_addr, + GFP_ATOMIC)) == + NULL) { + release_region(ioaddr, PCNET32_TOTAL_SIZE); + return -ENOMEM; + } + + memset(lp, 0, sizeof(*lp)); + lp->dma_addr = lp_dma_addr; + lp->pci_dev = pdev; + + rtdm_lock_init(&lp->lock); + + dev->priv = lp; + lp->name = chipname; + lp->shared_irq = shared; + lp->mii_if.full_duplex = fdx; + lp->dxsuflo = dxsuflo; + lp->ltint = ltint; + lp->mii = mii; + if ((cards_found >= MAX_UNITS) || + (options[cards_found] > (int)sizeof(options_mapping))) + lp->options = 
PCNET32_PORT_ASEL; + else + lp->options = options_mapping[options[cards_found]]; + /*** RTnet *** + lp->mii_if.dev = dev; + lp->mii_if.mdio_read = mdio_read; + lp->mii_if.mdio_write = mdio_write; + *** RTnet ***/ + + if (fdx && !(lp->options & PCNET32_PORT_ASEL) && + ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) + lp->options |= PCNET32_PORT_FD; + + if (!a) { + printk(KERN_ERR PFX "No access methods\n"); + dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp), lp, + lp->dma_addr); + release_region(ioaddr, PCNET32_TOTAL_SIZE); + return -ENODEV; + } + lp->a = *a; + + /* detect special T1/E1 WAN card by checking for MAC address */ + if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && + dev->dev_addr[2] == 0x75) + lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; + + lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ + lp->init_block.tlen_rlen = + le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS); + for (i = 0; i < 6; i++) + lp->init_block.phys_addr[i] = dev->dev_addr[i]; + lp->init_block.filter[0] = 0x00000000; + lp->init_block.filter[1] = 0x00000000; + lp->init_block.rx_ring = (u32)le32_to_cpu( + lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)); + lp->init_block.tx_ring = (u32)le32_to_cpu( + lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)); + + /* switch pcnet32 to 32bit mode */ + a->write_bcr(ioaddr, 20, 2); + + a->write_csr( + ioaddr, 1, + (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) & + 0xffff); + a->write_csr( + ioaddr, 2, + (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) >> + 16); + + if (irq_line) { + dev->irq = irq_line; + } + + if (dev->irq >= 2) + printk(" assigned IRQ %d.\n", dev->irq); + else { + unsigned long irq_mask = probe_irq_on(); + + /* + * To auto-IRQ we enable the initialization-done and DMA error + * interrupts. For ISA boards we get a DMA error, but VLB and PCI + * boards will work. + */ + /* Trigger an initialization just for the interrupt. */ + a->write_csr(ioaddr, 0, 0x41); + mdelay(1); + + dev->irq = probe_irq_off(irq_mask); + if (dev->irq) + printk(", probed IRQ %d.\n", dev->irq); + else { + printk(", failed to detect IRQ line.\n"); + dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp), lp, + lp->dma_addr); + release_region(ioaddr, PCNET32_TOTAL_SIZE); + return -ENODEV; + } + } + + /* The PCNET32-specific entries in the device structure. */ + dev->open = &pcnet32_open; + dev->hard_start_xmit = &pcnet32_start_xmit; + dev->stop = &pcnet32_close; + dev->get_stats = &pcnet32_get_stats; + /*** RTnet *** + dev->set_multicast_list = &pcnet32_set_multicast_list; + dev->do_ioctl = &pcnet32_ioctl; + dev->tx_timeout = pcnet32_tx_timeout; + dev->watchdog_timeo = (5*HZ); + *** RTnet ***/ + + lp->next = pcnet32_dev; + pcnet32_dev = dev; + + /* Fill in the generic fields of the device structure. 
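+	   (Annotation, not from the original sources: for RTnet this is
+	   rt_register_rtnetdev() below; its failure path unwinds in reverse
+	   order -- dma_free_coherent(), release_region(), rtdev_free() --
+	   exactly as the error branch that follows does.)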
*/ + /*** RTnet ***/ + if ((i = rt_register_rtnetdev(dev))) { + dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp), lp, + lp->dma_addr); + release_region(ioaddr, PCNET32_TOTAL_SIZE); + rtdev_free(dev); + return i; + } + /*** RTnet ***/ + + printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name); + return 0; +} + +static int pcnet32_open(struct rtnet_device *dev) /*** RTnet ***/ +{ + struct pcnet32_private *lp = dev->priv; + unsigned long ioaddr = dev->base_addr; + u16 val; + int i; + + /*** RTnet ***/ + if (dev->irq == 0) + return -EAGAIN; + + rt_stack_connect(dev, &STACK_manager); + + i = rtdm_irq_request(&lp->irq_handle, dev->irq, pcnet32_interrupt, + RTDM_IRQTYPE_SHARED, "rt_pcnet32", dev); + if (i) + return i; + /*** RTnet ***/ + + /* Check for a valid station address */ + if (!is_valid_ether_addr(dev->dev_addr)) + return -EINVAL; + + /* Reset the PCNET32 */ + lp->a.reset(ioaddr); + + /* switch pcnet32 to 32bit mode */ + lp->a.write_bcr(ioaddr, 20, 2); + + if (pcnet32_debug > 1) + printk(KERN_DEBUG + "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", + dev->name, dev->irq, + (u32)(lp->dma_addr + + offsetof(struct pcnet32_private, tx_ring)), + (u32)(lp->dma_addr + + offsetof(struct pcnet32_private, rx_ring)), + (u32)(lp->dma_addr + + offsetof(struct pcnet32_private, init_block))); + + /* set/reset autoselect bit */ + val = lp->a.read_bcr(ioaddr, 2) & ~2; + if (lp->options & PCNET32_PORT_ASEL) + val |= 2; + lp->a.write_bcr(ioaddr, 2, val); + + /* handle full duplex setting */ + if (lp->mii_if.full_duplex) { + val = lp->a.read_bcr(ioaddr, 9) & ~3; + if (lp->options & PCNET32_PORT_FD) { + val |= 1; + if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) + val |= 2; + } else if (lp->options & PCNET32_PORT_ASEL) { + /* workaround of xSeries250, turn on for 79C975 only */ + i = ((lp->a.read_csr(ioaddr, 88) | + (lp->a.read_csr(ioaddr, 89) << 16)) >> + 12) & + 0xffff; + if (i == 0x2627) + val |= 3; + } + lp->a.write_bcr(ioaddr, 9, val); + } + + /* set/reset GPSI bit in test register */ + val = lp->a.read_csr(ioaddr, 124) & ~0x10; + if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) + val |= 0x10; + lp->a.write_csr(ioaddr, 124, val); + + if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { + val = lp->a.read_bcr(ioaddr, 32) & + ~0x38; /* disable Auto Negotiation, set 10Mpbs, HD */ + if (lp->options & PCNET32_PORT_FD) + val |= 0x10; + if (lp->options & PCNET32_PORT_100) + val |= 0x08; + lp->a.write_bcr(ioaddr, 32, val); + } else { + if (lp->options & + PCNET32_PORT_ASEL) { /* enable auto negotiate, setup, disable fd */ + val = lp->a.read_bcr(ioaddr, 32) & ~0x98; + val |= 0x20; + lp->a.write_bcr(ioaddr, 32, val); + } + } + +#ifdef DO_DXSUFLO + if (lp->dxsuflo) { /* Disable transmit stop on underflow */ + val = lp->a.read_csr(ioaddr, 3); + val |= 0x40; + lp->a.write_csr(ioaddr, 3, val); + } +#endif + + if (lp->ltint) { /* Enable TxDone-intr inhibitor */ + val = lp->a.read_csr(ioaddr, 5); + val |= (1 << 14); + lp->a.write_csr(ioaddr, 5, val); + } + + lp->init_block.mode = + le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); + lp->init_block.filter[0] = 0x00000000; + lp->init_block.filter[1] = 0x00000000; + if (pcnet32_init_ring(dev)) + return -ENOMEM; + + /* Re-initialize the PCNET32, and start it when done. 
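+	 * The init block's bus address is handed to the chip split across two
+	 * 16-bit registers: CSR1 takes the low half, CSR2 the high half.
+	 * Worked example (annotation added, value invented): a DMA address of
+	 * 0x12345678 gives CSR1 = 0x5678 and CSR2 = 0x1234, matching the
+	 * "& 0xffff" and ">> 16" in the two write_csr() calls below.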
*/ + lp->a.write_csr( + ioaddr, 1, + (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) & + 0xffff); + lp->a.write_csr( + ioaddr, 2, + (lp->dma_addr + offsetof(struct pcnet32_private, init_block)) >> + 16); + + lp->a.write_csr(ioaddr, 4, 0x0915); + lp->a.write_csr(ioaddr, 0, 0x0001); + + rtnetif_start_queue(dev); /*** RTnet ***/ + + i = 0; + while (i++ < 100) + if (lp->a.read_csr(ioaddr, 0) & 0x0100) + break; + /* + * We used to clear the InitDone bit, 0x0100, here but Mark Stockton + * reports that doing so triggers a bug in the '974. + */ + lp->a.write_csr(ioaddr, 0, 0x0042); + + if (pcnet32_debug > 2) + printk(KERN_DEBUG + "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", + dev->name, i, + (u32)(lp->dma_addr + + offsetof(struct pcnet32_private, init_block)), + lp->a.read_csr(ioaddr, 0)); + + return 0; /* Always succeed */ +} + +/* + * The LANCE has been halted for one reason or another (busmaster memory + * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, + * etc.). Modern LANCE variants always reload their ring-buffer + * configuration when restarted, so we must reinitialize our ring + * context before restarting. As part of this reinitialization, + * find all packets still on the Tx ring and pretend that they had been + * sent (in effect, drop the packets on the floor) - the higher-level + * protocols will time out and retransmit. It'd be better to shuffle + * these skbs to a temp list and then actually re-Tx them after + * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com + */ + +/*** RTnet *** +static void +pcnet32_purge_tx_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = dev->priv; + int i; + + for (i = 0; i < TX_RING_SIZE; i++) { + if (lp->tx_skbuff[i]) { + pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); + dev_kfree_skb(lp->tx_skbuff[i]); + lp->tx_skbuff[i] = NULL; + lp->tx_dma_addr[i] = 0; + } + } +} + *** RTnet ***/ + +/* Initialize the PCNET32 Rx and Tx rings. */ +static int pcnet32_init_ring(struct rtnet_device *dev) /*** RTnet ***/ +{ + struct pcnet32_private *lp = dev->priv; + int i; + + lp->tx_full = 0; + lp->cur_rx = lp->cur_tx = 0; + lp->dirty_rx = lp->dirty_tx = 0; + + for (i = 0; i < RX_RING_SIZE; i++) { + struct rtskb *rx_skbuff = lp->rx_skbuff[i]; /*** RTnet ***/ + if (rx_skbuff == NULL) { + if (!(rx_skbuff = lp->rx_skbuff[i] = + rtnetdev_alloc_rtskb( + dev, + PKT_BUF_SZ))) { /*** RTnet ***/ + /* there is not much, we can do at this point */ + printk(KERN_ERR + "%s: pcnet32_init_ring rtnetdev_alloc_rtskb failed.\n", + dev->name); + return -1; + } + rtskb_reserve(rx_skbuff, 2); /*** RTnet ***/ + } + lp->rx_dma_addr[i] = + dma_map_single(&lp->pci_dev->dev, rx_skbuff->tail, + rx_skbuff->len, DMA_FROM_DEVICE); + lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]); + lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ); + lp->rx_ring[i].status = le16_to_cpu(0x8000); + } + /* The Tx buffer address is filled in as needed, but we do need to clear + the upper ownership bit. 
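+	   (Annotation added:) bit 15 of the descriptor status word is the
+	   OWN bit: the Rx loop above writes status = 0x8000 to hand each
+	   buffer to the chip, while the loop below zeroes tx_ring[i].status
+	   so every Tx slot stays host-owned until pcnet32_start_xmit()
+	   claims it.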
*/ + for (i = 0; i < TX_RING_SIZE; i++) { + lp->tx_ring[i].base = 0; + lp->tx_ring[i].status = 0; + lp->tx_dma_addr[i] = 0; + } + + lp->init_block.tlen_rlen = + le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS); + for (i = 0; i < 6; i++) + lp->init_block.phys_addr[i] = dev->dev_addr[i]; + lp->init_block.rx_ring = (u32)le32_to_cpu( + lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)); + lp->init_block.tx_ring = (u32)le32_to_cpu( + lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)); + return 0; +} + +/*** RTnet ***/ +/*** RTnet ***/ + +static int pcnet32_start_xmit(struct rtskb *skb, + struct rtnet_device *dev) /*** RTnet ***/ +{ + struct pcnet32_private *lp = dev->priv; + unsigned long ioaddr = dev->base_addr; + u16 status; + int entry; + rtdm_lockctx_t context; + + if (pcnet32_debug > 3) { + rtdm_printk(KERN_DEBUG + "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", + dev->name, lp->a.read_csr(ioaddr, 0)); + } + + /*** RTnet ***/ + rtdm_lock_get_irqsave(&lp->lock, context); + /*** RTnet ***/ + + /* Default status -- will not enable Successful-TxDone + * interrupt when that option is available to us. + */ + status = 0x8300; + if ((lp->ltint) && ((lp->cur_tx - lp->dirty_tx == TX_RING_SIZE / 2) || + (lp->cur_tx - lp->dirty_tx >= TX_RING_SIZE - 2))) { + /* Enable Successful-TxDone interrupt if we have + * 1/2 of, or nearly all of, our ring buffer Tx'd + * but not yet cleaned up. Thus, most of the time, + * we will not enable Successful-TxDone interrupts. + */ + status = 0x9300; + } + + /* Fill in a Tx ring entry */ + + /* Mask to ring buffer boundary. */ + entry = lp->cur_tx & TX_RING_MOD_MASK; + + /* Caution: the write order is important here, set the base address + with the "ownership" bits last. */ + + lp->tx_ring[entry].length = le16_to_cpu(-skb->len); + + lp->tx_ring[entry].misc = 0x00000000; + + lp->tx_skbuff[entry] = skb; + lp->tx_dma_addr[entry] = dma_map_single(&lp->pci_dev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]); + + /*** RTnet ***/ + /* get and patch time stamp just before the transmission */ + if (skb->xmit_stamp) + *skb->xmit_stamp = + cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp); + /*** RTnet ***/ + + wmb(); + lp->tx_ring[entry].status = le16_to_cpu(status); + + lp->cur_tx++; + lp->stats.tx_bytes += skb->len; + + /* Trigger an immediate send poll. */ + lp->a.write_csr(ioaddr, 0, 0x0048); + + //dev->trans_start = jiffies; /*** RTnet ***/ + + if (lp->tx_ring[(entry + 1) & TX_RING_MOD_MASK].base == 0) + rtnetif_start_queue(dev); /*** RTnet ***/ + else { + lp->tx_full = 1; + rtnetif_stop_queue(dev); /*** RTnet ***/ + } + /*** RTnet ***/ + rtdm_lock_put_irqrestore(&lp->lock, context); + /*** RTnet ***/ + return 0; +} + +/* The PCNET32 interrupt handler. 
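+ *
+ * CSR0 bits consumed below (annotation added, matching the handler code):
+ *	0x8000 ERR summary, 0x4000 BABL, 0x1000 MISS, 0x0800 MERR,
+ *	0x0400 RINT (Rx), 0x0200 TINT (Tx done); the loop keys off
+ *	csr0 & 0x8600 and acks sources via write_csr(0, csr0 & ~0x004f).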
*/ +static int pcnet32_interrupt(rtdm_irq_t *irq_handle) /*** RTnet ***/ +{ + nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/ + struct rtnet_device *dev = rtdm_irq_get_arg( + irq_handle, struct rtnet_device); /*** RTnet ***/ + struct pcnet32_private *lp; + unsigned long ioaddr; + u16 csr0, rap; + int boguscnt = max_interrupt_work; + int must_restart; + unsigned int old_packet_cnt; /*** RTnet ***/ + int ret = RTDM_IRQ_NONE; + + /*** RTnet *** + if (!dev) { + rtdm_printk (KERN_DEBUG "%s(): irq %d for unknown device\n", + __FUNCTION__, irq); + return; + } + *** RTnet ***/ + + ioaddr = dev->base_addr; + lp = dev->priv; + old_packet_cnt = lp->stats.rx_packets; /*** RTnet ***/ + + rtdm_lock_get(&lp->lock); /*** RTnet ***/ + + rap = lp->a.read_rap(ioaddr); + while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8600 && --boguscnt >= 0) { + /* Acknowledge all of the current interrupt sources ASAP. */ + lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f); + + ret = RTDM_IRQ_HANDLED; + + must_restart = 0; + + if (pcnet32_debug > 5) + rtdm_printk( + KERN_DEBUG + "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", + dev->name, csr0, lp->a.read_csr(ioaddr, 0)); + + if (csr0 & 0x0400) /* Rx interrupt */ + pcnet32_rx(dev, &time_stamp); + + if (csr0 & 0x0200) { /* Tx-done interrupt */ + unsigned int dirty_tx = lp->dirty_tx; + + while (dirty_tx < lp->cur_tx) { + int entry = dirty_tx & TX_RING_MOD_MASK; + int status = (short)le16_to_cpu( + lp->tx_ring[entry].status); + + if (status < 0) + break; /* It still hasn't been Txed */ + + lp->tx_ring[entry].base = 0; + + if (status & 0x4000) { + /* There was an major error, log it. */ + int err_status = le32_to_cpu( + lp->tx_ring[entry].misc); + lp->stats.tx_errors++; + if (err_status & 0x04000000) + lp->stats.tx_aborted_errors++; + if (err_status & 0x08000000) + lp->stats.tx_carrier_errors++; + if (err_status & 0x10000000) + lp->stats.tx_window_errors++; +#ifndef DO_DXSUFLO + if (err_status & 0x40000000) { + lp->stats.tx_fifo_errors++; + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + rtdm_printk( + KERN_ERR + "%s: Tx FIFO error! CSR0=%4.4x\n", + dev->name, csr0); + must_restart = 1; + } +#else + if (err_status & 0x40000000) { + lp->stats.tx_fifo_errors++; + if (!lp->dxsuflo) { /* If controller doesn't recover ... */ + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + rtdm_printk( + KERN_ERR + "%s: Tx FIFO error! CSR0=%4.4x\n", + dev->name, + csr0); + must_restart = 1; + } + } +#endif + } else { + if (status & 0x1800) + lp->stats.collisions++; + lp->stats.tx_packets++; + } + + /* We must free the original skb */ + if (lp->tx_skbuff[entry]) { + dma_unmap_single( + &lp->pci_dev->dev, + lp->tx_dma_addr[entry], + lp->tx_skbuff[entry]->len, + DMA_TO_DEVICE); + dev_kfree_rtskb( + lp->tx_skbuff[entry]); /*** RTnet ***/ + lp->tx_skbuff[entry] = 0; + lp->tx_dma_addr[entry] = 0; + } + dirty_tx++; + } + + if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { + rtdm_printk( + KERN_ERR + "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", + dev->name, dirty_tx, lp->cur_tx, + lp->tx_full); + dirty_tx += TX_RING_SIZE; + } + + if (lp->tx_full && + rtnetif_queue_stopped(dev) && /*** RTnet ***/ + dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) { + /* The ring is no longer full, clear tbusy. */ + lp->tx_full = 0; + rtnetif_wake_queue(dev); /*** RTnet ***/ + } + lp->dirty_tx = dirty_tx; + } + + /* Log misc errors. */ + if (csr0 & 0x4000) + lp->stats.tx_errors++; /* Tx babble. 
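+					   (CSR0 bit 14, BABL: the transmitter
+					   ran past the maximum frame length --
+					   annotation added, bit name per the
+					   AMD LANCE register layout.)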
*/ + if (csr0 & 0x1000) { + /* + * this happens when our receive ring is full. This shouldn't + * be a problem as we will see normal rx interrupts for the frames + * in the receive ring. But there are some PCI chipsets (I can reproduce + * this on SP3G with Intel saturn chipset) which have sometimes problems + * and will fill up the receive ring with error descriptors. In this + * situation we don't get a rx interrupt, but a missed frame interrupt sooner + * or later. So we try to clean up our receive ring here. + */ + pcnet32_rx(dev, &time_stamp); + lp->stats.rx_errors++; /* Missed a Rx frame. */ + } + if (csr0 & 0x0800) { + rtdm_printk( + KERN_ERR + "%s: Bus master arbitration failure, status %4.4x.\n", + dev->name, csr0); + /* unlike for the lance, there is no restart needed */ + } + + /*** RTnet ***/ + /*** RTnet ***/ + } + + /* Clear any other interrupt, and set interrupt enable. */ + lp->a.write_csr(ioaddr, 0, 0x7940); + lp->a.write_rap(ioaddr, rap); + + if (pcnet32_debug > 4) + rtdm_printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", + dev->name, lp->a.read_csr(ioaddr, 0)); + + /*** RTnet ***/ + rtdm_lock_put(&lp->lock); + + if (old_packet_cnt != lp->stats.rx_packets) + rt_mark_stack_mgr(dev); + + return ret; + /*** RTnet ***/ +} + +static int pcnet32_rx(struct rtnet_device *dev, + nanosecs_abs_t *time_stamp) /*** RTnet ***/ +{ + struct pcnet32_private *lp = dev->priv; + int entry = lp->cur_rx & RX_RING_MOD_MASK; + + /* If we own the next entry, it's a new packet. Send it up. */ + while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { + int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; + + if (status != 0x03) { /* There was an error. */ + /* + * There is a tricky error noted by John Murphy, + * <murf@perftech.com> to Russ Nelson: Even with full-sized + * buffers it's possible for a jabber packet to use two + * buffers, with only the last correctly noting the error. + */ + if (status & + 0x01) /* Only count a general error at the */ + lp->stats.rx_errors++; /* end of a packet.*/ + if (status & 0x20) + lp->stats.rx_frame_errors++; + if (status & 0x10) + lp->stats.rx_over_errors++; + if (status & 0x08) + lp->stats.rx_crc_errors++; + if (status & 0x04) + lp->stats.rx_fifo_errors++; + lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); + } else { + /* Malloc up new buffer, compatible with net-2e. 
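+			   RTnet note (added): the stock driver's rx_copybreak
+			   copy path is compiled out here; a fresh rtskb is
+			   always swapped in and the full DMA buffer is
+			   forwarded, as the code below shows.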
*/ + short pkt_len = + (le32_to_cpu(lp->rx_ring[entry].msg_length) & + 0xfff) - + 4; + struct rtskb *skb; /*** RTnet ***/ + + if (pkt_len < 60) { + rtdm_printk(KERN_ERR "%s: Runt packet!\n", + dev->name); + lp->stats.rx_errors++; + } else { + /*** RTnet ***/ + /*int rx_in_place = 0;*/ + + /*if (pkt_len > rx_copybreak)*/ { + struct rtskb *newskb; + + if ((newskb = rtnetdev_alloc_rtskb( + dev, PKT_BUF_SZ))) { + rtskb_reserve(newskb, 2); + skb = lp->rx_skbuff[entry]; + dma_unmap_single( + &lp->pci_dev->dev, + lp->rx_dma_addr[entry], + skb->len, + DMA_FROM_DEVICE); + rtskb_put(skb, pkt_len); + lp->rx_skbuff[entry] = newskb; + lp->rx_dma_addr + [entry] = dma_map_single( + &lp->pci_dev->dev, + newskb->tail, + newskb->len, + DMA_FROM_DEVICE); + lp->rx_ring[entry] + .base = le32_to_cpu( + lp->rx_dma_addr[entry]); + /*rx_in_place = 1;*/ + } else + skb = NULL; + } /*else { + skb = dev_alloc_skb(pkt_len+2); + }*/ + /*** RTnet ***/ + + if (skb == NULL) { + int i; + rtdm_printk( + KERN_ERR + "%s: Memory squeeze, deferring packet.\n", + dev->name); + for (i = 0; i < RX_RING_SIZE; i++) + if ((short)le16_to_cpu( + lp->rx_ring[(entry + + i) & + RX_RING_MOD_MASK] + .status) < + 0) + break; + + if (i > RX_RING_SIZE - 2) { + lp->stats.rx_dropped++; + lp->rx_ring[entry].status |= + le16_to_cpu(0x8000); + lp->cur_rx++; + } + break; + } + /*** RTnet ***/ + lp->stats.rx_bytes += skb->len; + skb->protocol = rt_eth_type_trans(skb, dev); + skb->time_stamp = *time_stamp; + rtnetif_rx(skb); + ///dev->last_rx = jiffies; + /*** RTnet ***/ + lp->stats.rx_packets++; + } + } + /* + * The docs say that the buffer length isn't touched, but Andrew Boyd + * of QNX reports that some revs of the 79C965 clear it. + */ + lp->rx_ring[entry].buf_length = le16_to_cpu(-PKT_BUF_SZ); + lp->rx_ring[entry].status |= le16_to_cpu(0x8000); + entry = (++lp->cur_rx) & RX_RING_MOD_MASK; + } + + return 0; +} + +static int pcnet32_close(struct rtnet_device *dev) /*** RTnet ***/ +{ + unsigned long ioaddr = dev->base_addr; + struct pcnet32_private *lp = dev->priv; + int i; + + rtnetif_stop_queue(dev); /*** RTnet ***/ + + lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); + + if (pcnet32_debug > 1) + printk(KERN_DEBUG + "%s: Shutting down ethercard, status was %2.2x.\n", + dev->name, lp->a.read_csr(ioaddr, 0)); + + /* We stop the PCNET32 here -- it occasionally polls memory if we don't. 
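+	 * (Annotation added:) the write below sets the STOP bit of CSR0
+	 * (0x0004), halting the Rx/Tx DMA engines before the registers are
+	 * reconfigured.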
*/ + lp->a.write_csr(ioaddr, 0, 0x0004); + + /* + * Switch back to 16bit mode to avoid problems with dumb + * DOS packet driver after a warm reboot + */ + lp->a.write_bcr(ioaddr, 20, 4); + + /*** RTnet ***/ + if ((i = rtdm_irq_free(&lp->irq_handle)) < 0) + return i; + + rt_stack_disconnect(dev); + /*** RTnet ***/ + + /* free all allocated skbuffs */ + for (i = 0; i < RX_RING_SIZE; i++) { + lp->rx_ring[i].status = 0; + if (lp->rx_skbuff[i]) { + dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[i], + lp->rx_skbuff[i]->len, + DMA_FROM_DEVICE); + dev_kfree_rtskb(lp->rx_skbuff[i]); /*** RTnet ***/ + } + lp->rx_skbuff[i] = NULL; + lp->rx_dma_addr[i] = 0; + } + + for (i = 0; i < TX_RING_SIZE; i++) { + if (lp->tx_skbuff[i]) { + dma_unmap_single(&lp->pci_dev->dev, lp->tx_dma_addr[i], + lp->tx_skbuff[i]->len, + DMA_TO_DEVICE); + dev_kfree_rtskb(lp->tx_skbuff[i]); /*** RTnet ***/ + } + lp->tx_skbuff[i] = NULL; + lp->tx_dma_addr[i] = 0; + } + + return 0; +} + +/*** RTnet ***/ +static struct net_device_stats *pcnet32_get_stats(struct rtnet_device *rtdev) +{ + struct pcnet32_private *lp = rtdev->priv; + unsigned long ioaddr = rtdev->base_addr; + rtdm_lockctx_t context; + u16 saved_addr; + + rtdm_lock_get_irqsave(&lp->lock, context); + saved_addr = lp->a.read_rap(ioaddr); + lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); + lp->a.write_rap(ioaddr, saved_addr); + rtdm_lock_put_irqrestore(&lp->lock, context); + + return &lp->stats; +} + +/*** RTnet ***/ + +static struct pci_driver pcnet32_driver = { + name: DRV_NAME, + probe: pcnet32_probe_pci, + id_table: pcnet32_pci_tbl, +}; + +/* An additional parameter that may be passed in... */ +static int local_debug = -1; +static int tx_start_pt = -1; + +module_param_named(debug, local_debug, int, 0444); +MODULE_PARM_DESC(debug, DRV_NAME " debug level (0-6)"); +module_param(max_interrupt_work, int, 0444); +MODULE_PARM_DESC(max_interrupt_work, + DRV_NAME " maximum events handled per interrupt"); +/*** RTnet *** +MODULE_PARM(rx_copybreak, "i"); +MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames"); + *** RTnet ***/ +module_param(tx_start_pt, int, 0444); +MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); +module_param(pcnet32vlb, int, 0444); +MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)"); +module_param_array(options, int, NULL, 0444); +MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)"); +module_param_array(full_duplex, int, NULL, 0444); +MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); + +MODULE_AUTHOR("Jan Kiszka"); +MODULE_DESCRIPTION("RTnet Driver for PCnet32 and PCnetPCI based ethercards"); +MODULE_LICENSE("GPL"); + +static int __init pcnet32_init_module(void) +{ + printk(KERN_INFO "%s", version); + + if (local_debug > 0) + pcnet32_debug = local_debug; + + if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) + tx_start = tx_start_pt; + + /* find the PCI devices */ + if (!pci_register_driver(&pcnet32_driver)) + pcnet32_have_pci = 1; + + /* should we find any remaining VLbus devices ? */ + if (pcnet32vlb) + pcnet32_probe_vlbus(); + + if (cards_found) + printk(KERN_INFO PFX "%d cards_found.\n", cards_found); + + return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; +} + +static void __exit pcnet32_cleanup_module(void) +{ + struct rtnet_device *next_dev; /*** RTnet ***/ + + /* No need to check MOD_IN_USE, as sys_delete_module() checks. 
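+	   (Annotation added:) teardown below mirrors probe in reverse:
+	   rt_unregister_rtnetdev() and rt_rtdev_disconnect() first, then
+	   release_region(), dma_free_coherent(), and finally rtdev_free().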
 */
+	while (pcnet32_dev) {
+		struct pcnet32_private *lp = pcnet32_dev->priv;
+		next_dev = lp->next;
+		/*** RTnet ***/
+		rt_unregister_rtnetdev(pcnet32_dev);
+		rt_rtdev_disconnect(pcnet32_dev);
+		/*** RTnet ***/
+		release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+		dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp), lp,
+				  lp->dma_addr);
+		/*** RTnet ***/
+		rtdev_free(pcnet32_dev);
+		/*** RTnet ***/
+		pcnet32_dev = next_dev;
+	}
+
+	if (pcnet32_have_pci)
+		pci_unregister_driver(&pcnet32_driver);
+}
+
+module_init(pcnet32_init_module);
+module_exit(pcnet32_cleanup_module);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c
new file mode 100644
index 0000000..9b2ac74
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c
@@ -0,0 +1,2027 @@
+/*
+=========================================================================
+ r8169.c: A RealTek RTL8169s/8110s Gigabit Ethernet driver for Linux kernel 2.4.x.
+ --------------------------------------------------------------------
+
+ History:
+ Feb 4 2002	- created initially by ShuChen <shuchen@realtek.com.tw>.
+ May 20 2002	- Add link status force-mode and TBI mode support.
+=========================================================================
+
+RTL8169_VERSION "1.1" <2002/10/4>
+
+	The bit4:0 of MII register 4 is called "selector field", and has to be
+	00001b to indicate support of IEEE std 802.3 during NWay process of
+	exchanging Link Code Word (FLP).
+
+RTL8169_VERSION "1.2" <2003/6/17>
+	Update driver module name.
+	Modify ISR.
+	Add chip mcfg.
+
+RTL8169_VERSION "1.3" <2003/6/20>
+	Add chip pcfg.
+	Add priv->phy_timer_t, rtl8169_phy_timer_t_handler()
+	Add rtl8169_hw_PHY_config()
+	Add rtl8169_hw_PHY_reset()
+
+RTL8169_VERSION "1.4" <2003/7/14>
+	Add tx_bytes, rx_bytes.
+
+RTL8169_VERSION "1.5" <2003/7/18>
+	Set 0x0000 to PHY at offset 0x0b.
+	Modify chip mcfg, pcfg
+	Force media for multiple cards.
+
+RTL8169_VERSION "1.6" <2003/8/25>
+	Modify receive data buffer.
+
+RTL8169_VERSION "1.7" <2003/9/18>
+	Add Jumbo Frame support.
+
+RTL8169_VERSION "1.8" <2003/10/21>
+	Performance and CPU Utilization Enhancement.
+
+RTL8169_VERSION "1.9" <2003/12/29>
+	Enable Tx/Rx flow control.
+
+RTL8169_VERSION "2.0" <2004/03/26>
+	Beta version.
+	Support for linux 2.6.x
+
+RTL8169_VERSION "2.1" <2004/07/05>
+	Modify parameters.
+
+RTL8169_VERSION "2.2" <2004/08/09>
+	Add pci_dma_sync_single.
+	Add pci_alloc_consistent()/pci_free_consistent().
+	Revise parameters.
+	Recognize our interrupt for linux 2.6.x.
+*/
+
+/*
+ * Ported to RTnet by Klaus Keppler <klaus.keppler@gmx.de>
+ * All RTnet porting stuff may be used and distributed according to the
+ * terms of the GNU General Public License (GPL).
+ *
+ * Version 2.2-04 <2005/08/22>
+ * Initial release of this driver, based on RTL8169 driver v2.2
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+#include <linux/timer.h>
+#include <linux/init.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#include <linux/pci-aspm.h>
+#endif
+
+#include <rtnet_port.h> /*** RTnet ***/
+
+#define RTL8169_VERSION "2.2-04"
+#define MODULENAME "rt_r8169"
+#define RTL8169_DRIVER_NAME MODULENAME " RTnet Gigabit Ethernet driver " RTL8169_VERSION
+#define PFX MODULENAME ": "
+
+//#define RTL8169_DEBUG
+#undef RTL8169_JUMBO_FRAME_SUPPORT	/*** RTnet: do not enable!
***/ +#undef RTL8169_HW_FLOW_CONTROL_SUPPORT + + +#undef RTL8169_IOCTL_SUPPORT /*** RTnet: do not enable! ***/ +#undef RTL8169_DYNAMIC_CONTROL +#undef RTL8169_USE_IO + + +#ifdef RTL8169_DEBUG + #define assert(expr) \ + if(!(expr)) { printk( "Assertion failed! %s,%s,%s,line=%d\n", #expr,__FILE__,__FUNCTION__,__LINE__); } + /*** RTnet / <kk>: rt_assert must be used instead of assert() within interrupt context! ***/ + #define rt_assert(expr) \ + if(!(expr)) { rtdm_printk( "Assertion failed! %s,%s,%s,line=%d\n", #expr,__FILE__,__FUNCTION__,__LINE__); } + /*** RTnet / <kk>: RT_DBG_PRINT must be used instead of DBG_PRINT() within interrupt context! ***/ + #define DBG_PRINT( fmt, args...) printk("r8169: " fmt, ## args); + #define RT_DBG_PRINT( fmt, args...) rtdm_printk("r8169: " fmt, ## args); +#else + #define assert(expr) do {} while (0) + #define rt_assert(expr) do {} while (0) + #define DBG_PRINT( fmt, args...) ; + #define RT_DBG_PRINT( fmt, args...) ; +#endif // end of #ifdef RTL8169_DEBUG + +/* media options */ +#define MAX_UNITS 8 +static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; + +/*** RTnet ***/ +static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 }; +module_param_array(cards, int, NULL, 0444); +MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)"); +/*** /RTnet ***/ + +/* <kk> Enable debugging output */ +#define DEBUG_RX_SYNC 1 +#define DEBUG_RX_OTHER 2 +#define DEBUG_TX_SYNC 4 +#define DEBUG_TX_OTHER 8 +#define DEBUG_RUN 16 +static int local_debug = -1; +static int r8169_debug = -1; +module_param_named(debug, local_debug, int, 0444); +MODULE_PARM_DESC(debug, MODULENAME " debug level (bit mask, see docs!)"); + + +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 20; + +/* MAC address length*/ +#define MAC_ADDR_LEN 6 + +#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. 
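+				   (Worked value, annotation added: with the
+				   defaults here, rtl8169_rx_config below comes
+				   out to (7 << 13) | (7 << 8) | 0x0E = 0xE70E.)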
*/ +#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */ +#define ETTh 0x3F /* 0x3F means NO threshold */ + +#define ETH_HDR_LEN 14 +#define DEFAULT_MTU 1500 +#define DEFAULT_RX_BUF_LEN 1536 + + +#ifdef RTL8169_JUMBO_FRAME_SUPPORT +#define MAX_JUMBO_FRAME_MTU ( 10000 ) +#define MAX_RX_SKBDATA_SIZE ( MAX_JUMBO_FRAME_MTU + ETH_HDR_LEN ) +#else +#define MAX_RX_SKBDATA_SIZE 1600 +#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT + + +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +//#define NUM_TX_DESC 64 /* Number of Tx descriptor registers*/ +//#define NUM_RX_DESC 64 /* Number of Rx descriptor registers*/ + +#define TX_RING_SIZE 16 /*** RTnet ***/ +#define NUM_TX_DESC TX_RING_SIZE /* Number of Tx descriptor registers*/ /*** RTnet ***/ +#define RX_RING_SIZE 8 /*** RTnet ***/ +#define NUM_RX_DESC RX_RING_SIZE /* Number of Rx descriptor registers*/ /*** RTnet ***/ + +#define RTL_MIN_IO_SIZE 0x80 +#define TX_TIMEOUT (6*HZ) +//#define RTL8169_TIMER_EXPIRE_TIME 100 //100 /*** RTnet ***/ + + +#ifdef RTL8169_USE_IO +#define RTL_W8(reg, val8) outb ((val8), ioaddr + (reg)) +#define RTL_W16(reg, val16) outw ((val16), ioaddr + (reg)) +#define RTL_W32(reg, val32) outl ((val32), ioaddr + (reg)) +#define RTL_R8(reg) inb (ioaddr + (reg)) +#define RTL_R16(reg) inw (ioaddr + (reg)) +#define RTL_R32(reg) ((unsigned long) inl (ioaddr + (reg))) +#else +/* write/read MMIO register */ +#define RTL_W8(reg, val8) writeb ((val8), (void *)ioaddr + (reg)) +#define RTL_W16(reg, val16) writew ((val16), (void *)ioaddr + (reg)) +#define RTL_W32(reg, val32) writel ((val32), (void *)ioaddr + (reg)) +#define RTL_R8(reg) readb ((void *)ioaddr + (reg)) +#define RTL_R16(reg) readw ((void *)ioaddr + (reg)) +#define RTL_R32(reg) ((unsigned long) readl ((void *)ioaddr + (reg))) +#endif + +#define MCFG_METHOD_1 0x01 +#define MCFG_METHOD_2 0x02 +#define MCFG_METHOD_3 0x03 +#define MCFG_METHOD_4 0x04 + +#define PCFG_METHOD_1 0x01 //PHY Reg 0x03 bit0-3 == 0x0000 +#define PCFG_METHOD_2 0x02 //PHY Reg 0x03 bit0-3 == 0x0001 +#define PCFG_METHOD_3 0x03 //PHY Reg 0x03 bit0-3 == 0x0002 + + +#ifdef RTL8169_DYNAMIC_CONTROL +#include "r8169_callback.h" +#endif //end #ifdef RTL8169_DYNAMIC_CONTROL + + +const static struct { + const char *name; + u8 mcfg; /* depend on RTL8169 docs */ + u32 RxConfigMask; /* should clear the bits supported by this chip */ +} rtl_chip_info[] = { + { "RTL8169", MCFG_METHOD_1, 0xff7e1880 }, + { "RTL8169s/8110s", MCFG_METHOD_2, 0xff7e1880 }, + { "RTL8169s/8110s", MCFG_METHOD_3, 0xff7e1880 }, +}; + + +static struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, 2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, 1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, 1 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, 1 }, /* <kk> D-Link DGE-528T */ + {0,}, +}; + + +MODULE_DEVICE_TABLE (pci, rtl8169_pci_tbl); + + +enum RTL8169_registers { + MAC0 = 0x0, + MAR0 = 0x8, + TxDescStartAddr = 0x20, + TxHDescStartAddr= 0x28, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + RxConfig = 0x44, + RxMissed = 0x4C, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + MultiIntr = 0x5C, + PHYAR = 0x60, + TBICSR = 0x64, + TBI_ANAR = 0x68, + TBI_LPAR = 0x6A, + PHYstatus = 0x6C, + RxMaxSize = 0xDA, + CPlusCmd = 0xE0, + RxDescStartAddr = 0xE4, 
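+	/* (Annotation added:) these offsets are used through the
+	 * RTL_Rxx/RTL_Wxx accessors above, e.g. RTL_W8(ChipCmd, CmdReset)
+	 * to soft-reset the chip, as done in rtl8169_init_board(). */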
+ ETThReg = 0xEC, + FuncEvent = 0xF0, + FuncEventMask = 0xF4, + FuncPresetState = 0xF8, + FuncForceEvent = 0xFC, +}; + +enum RTL8169_register_content { + /*InterruptStatusBits*/ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x80, + RxFIFOOver = 0x40, + LinkChg = 0x20, + RxOverflow = 0x10, + TxErr = 0x08, + TxOK = 0x04, + RxErr = 0x02, + RxOK = 0x01, + + /*RxStatusDesc*/ + RxRES = 0x00200000, + RxCRC = 0x00080000, + RxRUNT= 0x00100000, + RxRWT = 0x00400000, + + /*ChipCmdBits*/ + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /*Cfg9346Bits*/ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xC0, + + /*rx_mode_bits*/ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, + + /*RxConfigBits*/ + RxCfgFIFOShift = 13, + RxCfgDMAShift = 8, + + /*TxConfigBits*/ + TxInterFrameGapShift = 24, + TxDMAShift = 8, + + /* Config2 register */ + MSIEnable = (1 << 5), + + /*rtl8169_PHYstatus*/ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /*GIGABIT_PHY_registers*/ + PHY_CTRL_REG = 0, + PHY_STAT_REG = 1, + PHY_AUTO_NEGO_REG = 4, + PHY_1000_CTRL_REG = 9, + + /*GIGABIT_PHY_REG_BIT*/ + PHY_Restart_Auto_Nego = 0x0200, + PHY_Enable_Auto_Nego = 0x1000, + + //PHY_STAT_REG = 1; + PHY_Auto_Neco_Comp = 0x0020, + + //PHY_AUTO_NEGO_REG = 4; + PHY_Cap_10_Half = 0x0020, + PHY_Cap_10_Full = 0x0040, + PHY_Cap_100_Half = 0x0080, + PHY_Cap_100_Full = 0x0100, + + //PHY_1000_CTRL_REG = 9; + PHY_Cap_1000_Full = 0x0200, + PHY_Cap_1000_Half = 0x0100, + + PHY_Cap_PAUSE = 0x0400, + PHY_Cap_ASYM_PAUSE = 0x0800, + + PHY_Cap_Null = 0x0, + + /*_MediaType*/ + _10_Half = 0x01, + _10_Full = 0x02, + _100_Half = 0x04, + _100_Full = 0x08, + _1000_Full = 0x10, + + /*_TBICSRBit*/ + TBILinkOK = 0x02000000, +}; + + + +enum _DescStatusBit { + OWNbit = 0x80000000, + EORbit = 0x40000000, + FSbit = 0x20000000, + LSbit = 0x10000000, +}; + + +struct TxDesc { + u32 status; + u32 vlan_tag; + u32 buf_addr; + u32 buf_Haddr; +}; + +struct RxDesc { + u32 status; + u32 vlan_tag; + u32 buf_addr; + u32 buf_Haddr; +}; + + +typedef struct timer_list rt_timer_t; + +enum rtl8169_features { + RTL_FEATURE_WOL = (1 << 0), + RTL_FEATURE_MSI = (1 << 1), + RTL_FEATURE_GMII = (1 << 2), +}; + + +struct rtl8169_private { + unsigned long ioaddr; /* memory map physical address*/ + struct pci_dev *pci_dev; /* Index of PCI device */ + struct net_device_stats stats; /* statistics of net device */ + rtdm_lock_t lock; /* spin lock flag */ /*** RTnet ***/ + int chipset; + int mcfg; + int pcfg; +/* rt_timer_t r8169_timer; */ /*** RTnet ***/ +/* unsigned long expire_time; */ /*** RTnet ***/ + + unsigned long phy_link_down_cnt; + unsigned long cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + unsigned long cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
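+					   [annotation added: i.e. the next
+					   packet to transmit; cur_tx advances
+					   in start_xmit(), dirty_tx on Tx
+					   completion, and the ring slot is
+					   counter % NUM_TX_DESC]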
*/ + unsigned long dirty_tx; + struct TxDesc *TxDescArray; /* Index of 256-alignment Tx Descriptor buffer */ + struct RxDesc *RxDescArray; /* Index of 256-alignment Rx Descriptor buffer */ + struct rtskb *Tx_skbuff[NUM_TX_DESC];/* Index of Transmit data buffer */ /*** RTnet ***/ + struct rtskb *Rx_skbuff[NUM_RX_DESC];/* Receive data buffer */ /*** RTnet ***/ + unsigned char drvinit_fail; + + dma_addr_t txdesc_array_dma_addr[NUM_TX_DESC]; + dma_addr_t rxdesc_array_dma_addr[NUM_RX_DESC]; + dma_addr_t rx_skbuff_dma_addr[NUM_RX_DESC]; + + void *txdesc_space; + dma_addr_t txdesc_phy_dma_addr; + int sizeof_txdesc_space; + + void *rxdesc_space; + dma_addr_t rxdesc_phy_dma_addr; + int sizeof_rxdesc_space; + + int curr_mtu_size; + int tx_pkt_len; + int rx_pkt_len; + + int hw_rx_pkt_len; + + int rx_buf_size; /*** RTnet / <kk> ***/ + +#ifdef RTL8169_DYNAMIC_CONTROL + struct r8169_cb_t rt; +#endif //end #ifdef RTL8169_DYNAMIC_CONTROL + + unsigned char linkstatus; + rtdm_irq_t irq_handle; /*** RTnet ***/ + + unsigned features; +}; + + +MODULE_AUTHOR ("Realtek, modified for RTnet by Klaus.Keppler@gmx.de"); +MODULE_DESCRIPTION ("RealTek RTL-8169 Gigabit Ethernet driver"); +module_param_array(media, int, NULL, 0444); +MODULE_LICENSE("GPL"); + + +static int rtl8169_open (struct rtnet_device *rtdev); +static int rtl8169_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev); + +static int rtl8169_interrupt(rtdm_irq_t *irq_handle); + +static void rtl8169_init_ring (struct rtnet_device *rtdev); +static void rtl8169_hw_start (struct rtnet_device *rtdev); +static int rtl8169_close (struct rtnet_device *rtdev); +static void rtl8169_set_rx_mode (struct rtnet_device *rtdev); +/* static void rtl8169_tx_timeout (struct net_device *dev); */ /*** RTnet ***/ +static struct net_device_stats *rtl8169_get_stats(struct rtnet_device *netdev); + +#ifdef RTL8169_JUMBO_FRAME_SUPPORT +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); +#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT + +static void rtl8169_hw_PHY_config (struct rtnet_device *rtdev); +/* static void rtl8169_hw_PHY_reset(struct net_device *dev); */ /*** RTnet ***/ +static const u16 rtl8169_intr_mask = LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK | SYSErr; /*** <kk> added SYSErr ***/ +static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift) | 0x0000000E; + +/*** <kk> these functions are backported from Linux-2.6.12's r8169.c driver ***/ +static void rtl8169_irq_mask_and_ack(unsigned long ioaddr); +/* static void rtl8169_asic_down(unsigned long ioaddr); */ /*** RTnet ***/ +static void rtl8169_pcierr_interrupt(struct rtnet_device *rtdev); + +#define RTL8169_WRITE_GMII_REG_BIT( ioaddr, reg, bitnum, bitval )\ +{ \ + int val; \ + if( bitval == 1 ){ val = ( RTL8169_READ_GMII_REG( ioaddr, reg ) | (bitval<<bitnum) ) & 0xffff ; } \ + else{ val = ( RTL8169_READ_GMII_REG( ioaddr, reg ) & (~(0x0001<<bitnum)) ) & 0xffff ; } \ + RTL8169_WRITE_GMII_REG( ioaddr, reg, val ); \ +} + + + +#ifdef RTL8169_DEBUG +unsigned alloc_rxskb_cnt = 0; +#define RTL8169_ALLOC_RXSKB(bufsize) dev_alloc_skb(bufsize); alloc_rxskb_cnt ++ ; +#define RTL8169_FREE_RXSKB(skb) kfree_skb(skb); alloc_rxskb_cnt -- ; +#define RTL8169_NETIF_RX(skb) netif_rx(skb); alloc_rxskb_cnt -- ; +#else +#define RTL8169_ALLOC_RXSKB(bufsize) dev_alloc_skb(bufsize); +#define RTL8169_FREE_RXSKB(skb) kfree_skb(skb); +#define RTL8169_NETIF_RX(skb) netif_rx(skb); +#endif //end #ifdef RTL8169_DEBUG + + 
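+
+/*
+ * Context rule illustrated (sketch added for clarity; example_irq is
+ * hypothetical and not part of the original sources): assert() and
+ * DBG_PRINT() end up in printk() and are only safe outside real-time
+ * interrupt context, whereas an RTDM interrupt handler must use
+ * rt_assert()/RT_DBG_PRINT(), which route through rtdm_printk():
+ *
+ *	static int example_irq(rtdm_irq_t *irq_handle)
+ *	{
+ *		rt_assert(irq_handle != NULL);
+ *		RT_DBG_PRINT("interrupt seen\n");
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ */
+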
+//================================================================= +// PHYAR +// bit Symbol +// 31 Flag +// 30-21 reserved +// 20-16 5-bit GMII/MII register address +// 15-0 16-bit GMII/MII register data +//================================================================= +void RTL8169_WRITE_GMII_REG( unsigned long ioaddr, int RegAddr, int value ) +{ + int i; + + RTL_W32 ( PHYAR, 0x80000000 | (RegAddr&0xFF)<<16 | value); + udelay(1000); + + for( i = 2000; i > 0 ; i -- ){ + // Check if the RTL8169 has completed writing to the specified MII register + if( ! (RTL_R32(PHYAR)&0x80000000) ){ + break; + } + else{ + udelay(100); + }// end of if( ! (RTL_R32(PHYAR)&0x80000000) ) + }// end of for() loop +} +//================================================================= +int RTL8169_READ_GMII_REG( unsigned long ioaddr, int RegAddr ) +{ + int i, value = -1; + + RTL_W32 ( PHYAR, 0x0 | (RegAddr&0xFF)<<16 ); + udelay(1000); + + for( i = 2000; i > 0 ; i -- ){ + // Check if the RTL8169 has completed retrieving data from the specified MII register + if( RTL_R32(PHYAR) & 0x80000000 ){ + value = (int)( RTL_R32(PHYAR)&0xFFFF ); + break; + } + else{ + udelay(100); + }// end of if( RTL_R32(PHYAR) & 0x80000000 ) + }// end of for() loop + return value; +} + + +#ifdef RTL8169_IOCTL_SUPPORT +#include "r8169_ioctl.c" +#endif //end #ifdef RTL8169_IOCTL_SUPPORT + + +#ifdef RTL8169_DYNAMIC_CONTROL +#include "r8169_callback.c" +#endif + + + +//====================================================================================================== +//====================================================================================================== +static int rtl8169_init_board ( struct pci_dev *pdev, struct rtnet_device **dev_out, unsigned long *ioaddr_out, int region) +{ + unsigned long ioaddr = 0; + struct rtnet_device *rtdev; + struct rtl8169_private *priv; + int rc, i; + unsigned long mmio_start, mmio_end, mmio_flags, mmio_len; + + + assert (pdev != NULL); + assert (ioaddr_out != NULL); + + *ioaddr_out = 0; + *dev_out = NULL; + + /*** RTnet ***/ + rtdev = rt_alloc_etherdev(sizeof(struct rtl8169_private), + RX_RING_SIZE * 2 + TX_RING_SIZE); + if (rtdev == NULL) { + printk (KERN_ERR PFX "unable to alloc new ethernet\n"); + return -ENOMEM; + } + rtdev_alloc_name(rtdev, "rteth%d"); + rt_rtdev_connect(rtdev, &RTDEV_manager); + rtdev->vers = RTDEV_VERS_2_0; + rtdev->sysbind = &pdev->dev; + /*** /RTnet ***/ + + priv = rtdev->priv; + + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + // enable device (incl. 
PCI PM wakeup and hotplug setup) + rc = pci_enable_device (pdev); + if (rc) + goto err_out; + + if (pci_set_mwi(pdev) < 0) + printk("R8169: Mem-Wr-Inval unavailable\n"); + + mmio_start = pci_resource_start (pdev, region); + mmio_end = pci_resource_end (pdev, region); + mmio_flags = pci_resource_flags (pdev, region); + mmio_len = pci_resource_len (pdev, region); + + // make sure PCI base addr 1 is MMIO + if (!(mmio_flags & IORESOURCE_MEM)) { + printk (KERN_ERR PFX "region #%d not an MMIO resource, aborting\n", region); + rc = -ENODEV; + goto err_out; + } + + // check for weird/broken PCI region reporting + if ( mmio_len < RTL_MIN_IO_SIZE ) { + printk (KERN_ERR PFX "Invalid PCI region size(s), aborting\n"); + rc = -ENODEV; + goto err_out; + } + + + rc = pci_request_regions (pdev, rtdev->name); + if (rc) + goto err_out; + + // enable PCI bus-mastering + pci_set_master (pdev); + +#ifdef RTL8169_USE_IO + ioaddr = pci_resource_start(pdev, 0); +#else + // ioremap MMIO region + ioaddr = (unsigned long)ioremap (mmio_start, mmio_len); + if (ioaddr == 0) { + printk (KERN_ERR PFX "cannot remap MMIO, aborting\n"); + rc = -EIO; + goto err_out_free_res; + } +#endif + + // Soft reset the chip. + RTL_W8 ( ChipCmd, CmdReset); + + // Check that the chip has finished the reset. + for (i = 1000; i > 0; i--){ + if ( (RTL_R8(ChipCmd) & CmdReset) == 0){ + break; + } + else{ + udelay (10); + } + } + + { + u8 cfg2 = RTL_R8(Config2) & ~MSIEnable; + if (region) { + if (pci_enable_msi(pdev)) + printk("R8169: no MSI, Back to INTx.\n"); + else { + cfg2 |= MSIEnable; + priv->features |= RTL_FEATURE_MSI; + } + } + RTL_W8(Config2, cfg2); + } + + // identify config method + { + unsigned long val32 = (RTL_R32(TxConfig)&0x7c800000); + + if( val32 == (0x1<<28) ){ + priv->mcfg = MCFG_METHOD_4; + } + else if( val32 == (0x1<<26) ){ + priv->mcfg = MCFG_METHOD_3; + } + else if( val32 == (0x1<<23) ){ + priv->mcfg = MCFG_METHOD_2; + } + else if( val32 == 0x00000000 ){ + priv->mcfg = MCFG_METHOD_1; + } + else{ + priv->mcfg = MCFG_METHOD_1; + } + } + + { + unsigned char val8 = (unsigned char)(RTL8169_READ_GMII_REG(ioaddr,3)&0x000f); + if( val8 == 0x00 ){ + priv->pcfg = PCFG_METHOD_1; + } + else if( val8 == 0x01 ){ + priv->pcfg = PCFG_METHOD_2; + } + else if( val8 == 0x02 ){ + priv->pcfg = PCFG_METHOD_3; + } + else{ + priv->pcfg = PCFG_METHOD_3; + } + } + + + for (i = ARRAY_SIZE (rtl_chip_info) - 1; i >= 0; i--){ + if (priv->mcfg == rtl_chip_info[i].mcfg) { + priv->chipset = i; + goto match; + } + } + + //if unknown chip, assume array element #0, original RTL-8169 in this case + printk (KERN_DEBUG PFX "PCI device %s: unknown chip version, assuming RTL-8169\n", pci_name(pdev)); + priv->chipset = 0; + +match: + *ioaddr_out = ioaddr; + *dev_out = rtdev; + return 0; + +#ifndef RTL8169_USE_IO +err_out_free_res: +#endif + pci_release_regions (pdev); /*** <kk> moved outside of #ifdev ***/ + +err_out: + /*** RTnet ***/ + rt_rtdev_disconnect(rtdev); + rtdev_free(rtdev); + /*** /RTnet ***/ + return rc; +} + + + + + + + +//====================================================================================================== +static int rtl8169_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct rtnet_device *rtdev = NULL; /*** RTnet ***/ + struct rtl8169_private *priv = NULL; + unsigned long ioaddr = 0; + static int board_idx = -1; + int region = ent->driver_data; + int i; + int option = -1, Cap10_100 = 0, Cap1000 = 0; + + + assert (pdev != NULL); + assert (ent != NULL); + + board_idx++; + + /*** RTnet ***/ + if (board_idx 
>= MAX_UNITS) { + return -ENODEV; + } + if (cards[board_idx] == 0) + return -ENODEV; + /*** RTnet ***/ + + i = rtl8169_init_board (pdev, &rtdev, &ioaddr, region); + if (i < 0) { + return i; + } + + priv = rtdev->priv; + + assert (ioaddr != 0); + assert (rtdev != NULL); + assert (priv != NULL); + + // Get MAC address // + for (i = 0; i < MAC_ADDR_LEN ; i++){ + rtdev->dev_addr[i] = RTL_R8( MAC0 + i ); + } + + rtdev->open = rtl8169_open; + rtdev->hard_start_xmit = rtl8169_start_xmit; + rtdev->get_stats = rtl8169_get_stats; + rtdev->stop = rtl8169_close; + /* dev->tx_timeout = rtl8169_tx_timeout; */ /*** RTnet ***/ + /* dev->set_multicast_list = rtl8169_set_rx_mode; */ /*** RTnet ***/ + /* dev->watchdog_timeo = TX_TIMEOUT; */ /*** RTnet ***/ + rtdev->irq = pdev->irq; + rtdev->base_addr = (unsigned long) ioaddr; + +#ifdef RTL8169_JUMBO_FRAME_SUPPORT + rtdev->change_mtu = rtl8169_change_mtu; +#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT + +#ifdef RTL8169_IOCTL_SUPPORT + rtdev->do_ioctl = rtl8169_ioctl; +#endif //end #ifdef RTL8169_IOCTL_SUPPORT + +#ifdef RTL8169_DYNAMIC_CONTROL + priv->rt.dev = rtdev; +#endif //end #ifdef RTL8169_DYNAMIC_CONTROL + + priv = rtdev->priv; // private data // + priv->pci_dev = pdev; + priv->ioaddr = ioaddr; + +//#ifdef RTL8169_JUMBO_FRAME_SUPPORT + priv->curr_mtu_size = rtdev->mtu; + priv->tx_pkt_len = rtdev->mtu + ETH_HDR_LEN; + priv->rx_pkt_len = rtdev->mtu + ETH_HDR_LEN; + priv->hw_rx_pkt_len = priv->rx_pkt_len + 8; +//#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT + + DBG_PRINT("-------------------------- \n"); + DBG_PRINT("dev->mtu = %d \n", rtdev->mtu); + DBG_PRINT("priv->curr_mtu_size = %d \n", priv->curr_mtu_size); + DBG_PRINT("priv->tx_pkt_len = %d \n", priv->tx_pkt_len); + DBG_PRINT("priv->rx_pkt_len = %d \n", priv->rx_pkt_len); + DBG_PRINT("priv->hw_rx_pkt_len = %d \n", priv->hw_rx_pkt_len); + DBG_PRINT("-------------------------- \n"); + + rtdm_lock_init(&priv->lock); /*** RTnet ***/ + + /*** RTnet ***/ + if (rt_register_rtnetdev(rtdev) < 0) { + /* clean up... */ + pci_release_regions (pdev); + rt_rtdev_disconnect(rtdev); + rtdev_free(rtdev); + return -ENODEV; + } + /*** /RTnet ***/ + + pci_set_drvdata(pdev, rtdev); // pdev->driver_data = data; + + printk (KERN_DEBUG "%s: Identified chip type is '%s'.\n", rtdev->name, rtl_chip_info[priv->chipset].name); + printk (KERN_INFO "%s: %s at 0x%lx, " + "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " + "IRQ %d\n", + rtdev->name, + RTL8169_DRIVER_NAME, + rtdev->base_addr, + rtdev->dev_addr[0], rtdev->dev_addr[1], + rtdev->dev_addr[2], rtdev->dev_addr[3], + rtdev->dev_addr[4], rtdev->dev_addr[5], + rtdev->irq); + + // Config PHY + rtl8169_hw_PHY_config(rtdev); + + DBG_PRINT("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8( 0x82, 0x01 ); + + if( priv->mcfg < MCFG_METHOD_3 ){ + DBG_PRINT("Set PCI Latency=0x40\n"); + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40); + } + + if( priv->mcfg == MCFG_METHOD_2 ){ + DBG_PRINT("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8( 0x82, 0x01 ); + DBG_PRINT("Set PHY Reg 0x0bh = 0x00h\n"); + RTL8169_WRITE_GMII_REG( ioaddr, 0x0b, 0x0000 ); //w 0x0b 15 0 0 + } + + // if TBI is not endbled + if( !(RTL_R8(PHYstatus) & TBI_Enable) ){ + int val = RTL8169_READ_GMII_REG( ioaddr, PHY_AUTO_NEGO_REG ); + +#ifdef RTL8169_HW_FLOW_CONTROL_SUPPORT + val |= PHY_Cap_PAUSE | PHY_Cap_ASYM_PAUSE ; +#endif //end #define RTL8169_HW_FLOW_CONTROL_SUPPORT + + option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx]; + // Force RTL8169 in 10/100/1000 Full/Half mode. 
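+		// (Annotation added:) option is media[board_idx]; its values
+		// are the _MediaType flags above, e.g. 0x10 (_1000_Full)
+		// forces gigabit full duplex, while option <= 0 keeps
+		// autonegotiation enabled.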
+ if( option > 0 ){ + printk(KERN_INFO "%s: Force-mode Enabled. \n", rtdev->